├── .gitignore ├── LICENSE ├── README.md ├── assets ├── certificate.PNG └── certificate.pdf ├── week1 ├── introduction.pdf ├── linear-agebra.pdf └── linear-regression-one-variable.pdf ├── week10 ├── assets │ ├── question-1.PNG │ ├── question-2.PNG │ ├── question-3.PNG │ ├── question-4.PNG │ └── question-5.PNG └── large-scale-ml-quiz.md ├── week11 ├── application-photo-ocr-quiz.md └── assets │ ├── question-1.PNG │ ├── question-2.PNG │ ├── question-3.PNG │ ├── question-4.PNG │ └── question-5.PNG ├── week2 ├── ex1.pdf ├── ex1 │ ├── computeCost.m │ ├── computeCostMulti.m │ ├── ex1.mlx │ ├── ex1_companion.mlx │ ├── ex1data1.txt │ ├── ex1data2.txt │ ├── featureNormalize.m │ ├── gradientDescent.m │ ├── gradientDescentMulti.m │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── noramldist.mat │ ├── normalEqn.m │ ├── plotData.m │ ├── submit.m │ ├── token.mat │ └── warmUpExercise.m ├── linear-regression-multiple-variables.pdf ├── linear-regression.m └── octave-matlab-tutoria.pdf ├── week3 ├── assets │ ├── logistic-regresion-quiz-1-b.PNG │ ├── logistic-regresion-quiz-1.PNG │ ├── logistic-regresion-quiz-2-b.PNG │ ├── logistic-regresion-quiz-2-c.PNG │ ├── logistic-regresion-quiz-2.PNG │ ├── logistic-regresion-quiz-3-3.PNG │ ├── logistic-regresion-quiz-3-b.PNG │ ├── logistic-regresion-quiz-3-w.PNG │ ├── logistic-regresion-quiz-4-2.PNG │ ├── logistic-regresion-quiz-4.PNG │ ├── logistic-regresion-quiz-5-w.PNG │ ├── question-3.PNG │ ├── question-4.PNG │ ├── question-5.PNG │ ├── regularization-1.PNG │ ├── regularization-2.PNG │ ├── regularization-3.PNG │ ├── regularization-4.PNG │ └── regularization-5.PNG ├── ex2.pdf ├── ex2 │ ├── costFunction.m │ ├── costFunctionReg.m │ ├── ex2.mlx │ ├── ex2_companion.mlx │ ├── ex2data1.txt │ ├── ex2data2.txt │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── mapFeature.m │ ├── plotData.m │ ├── plotDecisionBoundary.m │ ├── predict.m │ ├── sigmoid.m │ ├── submit.m │ └── token.mat ├── logistic-regression-quiz.md ├── logistic_regression.m ├── regularization-quiz.md ├── regularized-logistic-regression.m └── test.m ├── week4 ├── assets │ ├── neural-networks-1.PNG │ ├── neural-networks-2.PNG │ ├── neural-networks-3.PNG │ ├── neural-networks-4-1a.PNG │ ├── neural-networks-4-1b.PNG │ └── neural-networks-5.PNG ├── machine-learning-ex3 │ ├── ex3.pdf │ └── ex3 │ │ ├── displayData.m │ │ ├── ex3.m │ │ ├── ex3_nn.m │ │ ├── ex3data1.mat │ │ ├── ex3weights.mat │ │ ├── fmincg.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── lrCostFunction.m │ │ ├── oneVsAll.m │ │ ├── predict.m │ │ ├── predictOneVsAll.m │ │ ├── sigmoid.m │ │ ├── submit.m │ │ └── token.mat ├── neural-networks-quiz.md └── neural-networks.m ├── 
week5 ├── assets │ ├── quiz-1.PNG │ ├── quiz-2.PNG │ ├── quiz-3.PNG │ ├── quiz-4.PNG │ └── quiz-5.PNG ├── ex4.pdf ├── ex4 │ ├── checkNNGradients.m │ ├── computeNumericalGradient.m │ ├── debugInitializeWeights.m │ ├── displayData.m │ ├── ex4.m │ ├── ex4data1.mat │ ├── ex4weights.mat │ ├── fmincg.m │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── nnCostFunction.m │ ├── predict.m │ ├── randInitializeWeights.m │ ├── sigmoid.m │ ├── sigmoidGradient.m │ ├── submit.m │ └── token.mat ├── neural-networks-quiz.md └── week5.m ├── week6 ├── advice-for-applying-machine-learning.md ├── assets │ ├── machine-learning-system-design │ │ ├── question-1.PNG │ │ ├── question-2.PNG │ │ ├── question-3-2.PNG │ │ ├── question-3.PNG │ │ ├── question-4-2.PNG │ │ ├── question-5-2.PNG │ │ └── question-5.PNG │ ├── question-1-2.PNG │ ├── question-2-2.PNG │ ├── question-2-incorrect.PNG │ ├── question-3-2.PNG │ ├── question-3.PNG │ ├── question-4-2.PNG │ ├── question-4.PNG │ ├── question-5-2.PNG │ ├── question-5.PNG │ └── quiz-1.PNG ├── ex5.pdf ├── ex5 │ ├── ex5.m │ ├── ex5data1.mat │ ├── featureNormalize.m │ ├── fmincg.m │ ├── learningCurve.m │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── linearRegCostFunction.m │ ├── plotFit.m │ ├── polyFeatures.m │ ├── submit.m │ ├── test.m │ ├── token.mat │ ├── trainLinearReg.m │ └── validationCurve.m ├── machine-learning-system-design-quiz.md └── week6.m ├── week7 ├── assets │ ├── question-1.PNG │ ├── question-2-ans.PNG │ ├── question-2.PNG │ ├── question-3.PNG │ ├── question-4.PNG │ └── question-5.PNG ├── ex6.pdf ├── ex6 │ ├── dataset3Params.m │ ├── emailFeatures.m │ ├── emailSample1.txt │ ├── emailSample2.txt │ ├── ex6.m │ ├── ex6_spam.m │ ├── ex6data1.mat │ ├── ex6data2.mat │ ├── ex6data3.mat │ ├── gaussianKernel.m │ ├── getVocabList.m │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── linearKernel.m │ ├── plotData.m │ ├── porterStemmer.m │ ├── processEmail.m │ ├── readFile.m │ ├── spamSample1.txt │ ├── spamSample2.txt │ ├── spamTest.mat │ ├── spamTrain.mat │ ├── submit.m │ ├── svmPredict.m │ ├── svmTrain.m │ ├── token.mat │ ├── visualizeBoundary.m │ ├── visualizeBoundaryLinear.m │ └── vocab.txt ├── support-vector-machines-quiz.md └── week7.m ├── week8 ├── assets │ ├── pca │ │ ├── question-1-ans-1.PNG │ │ ├── question-1-ans-2.PNG │ │ ├── question-1.PNG │ │ ├── question-2.PNG │ │ ├── question-3-3.PNG │ │ ├── question-4-2.PNG │ │ ├── question-4.PNG │ │ └── question-5.PNG │ └── unsupervised-learning │ │ ├── question-1.PNG │ │ ├── question-2.PNG │ │ ├── question-3.PNG │ │ ├── question-4.PNG │ │ └── question-5.PNG ├── ex7.pdf ├── ex7 │ ├── bird_small.mat │ ├── bird_small.png │ ├── computeCentroids.m │ ├── 
displayData.m │ ├── drawLine.m │ ├── ex7.m │ ├── ex7_pca.m │ ├── ex7data1.mat │ ├── ex7data2.mat │ ├── ex7faces.mat │ ├── featureNormalize.m │ ├── findClosestCentroids.m │ ├── kMeansInitCentroids.m │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ ├── pca.m │ ├── plotDataPoints.m │ ├── plotProgresskMeans.m │ ├── projectData.m │ ├── recoverData.m │ ├── runkMeans.m │ ├── submit.m │ └── token.mat ├── pca.m ├── principal-component-analysis.md ├── unsupervised-learning-quiz.md └── week8.m └── week9 ├── anomaly-detection-quiz.md ├── assets ├── anomaly-detection │ ├── question-1.PNG │ ├── question-2.PNG │ ├── question-3.PNG │ ├── question-4.PNG │ └── question-5.PNG └── reccomender-systems │ ├── question-1.PNG │ ├── question-2-3.PNG │ ├── question-3-2.PNG │ ├── question-3.PNG │ ├── question-4-2.PNG │ ├── question-4.PNG │ └── question-5.PNG ├── collaborative-filtering.m ├── combined-collaborative-filtering.m ├── content-based-reccomendation.m ├── desnity-estimation.m ├── ex8.pdf ├── ex8 ├── checkCostFunction.m ├── cofiCostFunc.m ├── computeNumericalGradient.m ├── estimateGaussian.m ├── ex8.m ├── ex8_cofi.m ├── ex8_movieParams.mat ├── ex8_movies.mat ├── ex8data1.mat ├── ex8data2.mat ├── fmincg.m ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m ├── loadMovieList.m ├── movie_ids.txt ├── multivariateGaussian.m ├── normalizeRatings.m ├── selectThreshold.m ├── submit.m ├── token.mat └── visualizeFit.m ├── reccomender-systems-quiz.md └── test.m /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | out 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2020 anishLearnsToCode 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 4 | documentation files (the "Software"), to deal in the Software without restriction, including without limitation the 5 | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit 6 | persons to whom the Software is furnished to do so, subject to the following conditions: 7 | 8 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the 9 | Software. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 12 | WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 13 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 14 | OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
15 | -------------------------------------------------------------------------------- /assets/certificate.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/assets/certificate.PNG -------------------------------------------------------------------------------- /assets/certificate.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/assets/certificate.pdf -------------------------------------------------------------------------------- /week1/introduction.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week1/introduction.pdf -------------------------------------------------------------------------------- /week1/linear-agebra.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week1/linear-agebra.pdf -------------------------------------------------------------------------------- /week1/linear-regression-one-variable.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week1/linear-regression-one-variable.pdf -------------------------------------------------------------------------------- /week10/assets/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week10/assets/question-1.PNG -------------------------------------------------------------------------------- /week10/assets/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week10/assets/question-2.PNG -------------------------------------------------------------------------------- /week10/assets/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week10/assets/question-3.PNG -------------------------------------------------------------------------------- /week10/assets/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week10/assets/question-4.PNG -------------------------------------------------------------------------------- /week10/assets/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week10/assets/question-5.PNG -------------------------------------------------------------------------------- /week10/large-scale-ml-quiz.md: -------------------------------------------------------------------------------- 1 | # Large Scale Machine Learning Quiz 2 | 3 | ![Question 1](assets/question-1.PNG) 4 | ![Question 
2](assets/question-2.PNG) 5 | ![Question 3](assets/question-3.PNG) 6 | ![Question 4](assets/question-4.PNG) 7 | ![Question 5](assets/question-5.PNG) 8 | -------------------------------------------------------------------------------- /week11/application-photo-ocr-quiz.md: -------------------------------------------------------------------------------- 1 | # Application: Photo OCR Quiz 2 | 3 | ![Question 1](assets/question-1.PNG) 4 | ![Question 2](assets/question-2.PNG) 5 | ![Question 3](assets/question-3.PNG) 6 | ![Question 4](assets/question-4.PNG) 7 | ![Question 5](assets/question-5.PNG) 8 | -------------------------------------------------------------------------------- /week11/assets/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week11/assets/question-1.PNG -------------------------------------------------------------------------------- /week11/assets/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week11/assets/question-2.PNG -------------------------------------------------------------------------------- /week11/assets/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week11/assets/question-3.PNG -------------------------------------------------------------------------------- /week11/assets/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week11/assets/question-4.PNG -------------------------------------------------------------------------------- /week11/assets/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week11/assets/question-5.PNG -------------------------------------------------------------------------------- /week2/ex1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/ex1.pdf -------------------------------------------------------------------------------- /week2/ex1/computeCost.m: -------------------------------------------------------------------------------- 1 | function J = computeCost(X, y, theta) 2 | %COMPUTECOST Compute cost for linear regression 3 | % J = COMPUTECOST(X, y, theta) computes the cost of using theta as the 4 | % parameter for linear regression to fit the data points in X and y 5 | 6 | trainingExamles = size(X)(1); 7 | predictions = X * theta; 8 | squareErrors = (predictions - y) .^ 2; 9 | J = 0.5 / trainingExamles * sum(squareErrors); 10 | end 11 | -------------------------------------------------------------------------------- /week2/ex1/computeCostMulti.m: -------------------------------------------------------------------------------- 1 | function J = computeCostMulti(X, y, theta) 2 | %COMPUTECOSTMULTI Compute cost for linear regression with multiple variables 3 | % J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the 4 | % parameter 
for linear regression to fit the data points in X and y 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | 9 | % You need to return the following variables correctly 10 | J = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Compute the cost of a particular choice of theta 14 | % You should set J to the cost. 15 | 16 | 17 | 18 | 19 | 20 | % ========================================================================= 21 | 22 | end 23 | -------------------------------------------------------------------------------- /week2/ex1/ex1.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/ex1/ex1.mlx -------------------------------------------------------------------------------- /week2/ex1/ex1_companion.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/ex1/ex1_companion.mlx -------------------------------------------------------------------------------- /week2/ex1/ex1data1.txt: -------------------------------------------------------------------------------- 1 | 6.1101,17.592 2 | 5.5277,9.1302 3 | 8.5186,13.662 4 | 7.0032,11.854 5 | 5.8598,6.8233 6 | 8.3829,11.886 7 | 7.4764,4.3483 8 | 8.5781,12 9 | 6.4862,6.5987 10 | 5.0546,3.8166 11 | 5.7107,3.2522 12 | 14.164,15.505 13 | 5.734,3.1551 14 | 8.4084,7.2258 15 | 5.6407,0.71618 16 | 5.3794,3.5129 17 | 6.3654,5.3048 18 | 5.1301,0.56077 19 | 6.4296,3.6518 20 | 7.0708,5.3893 21 | 6.1891,3.1386 22 | 20.27,21.767 23 | 5.4901,4.263 24 | 6.3261,5.1875 25 | 5.5649,3.0825 26 | 18.945,22.638 27 | 12.828,13.501 28 | 10.957,7.0467 29 | 13.176,14.692 30 | 22.203,24.147 31 | 5.2524,-1.22 32 | 6.5894,5.9966 33 | 9.2482,12.134 34 | 5.8918,1.8495 35 | 8.2111,6.5426 36 | 7.9334,4.5623 37 | 8.0959,4.1164 38 | 5.6063,3.3928 39 | 12.836,10.117 40 | 6.3534,5.4974 41 | 5.4069,0.55657 42 | 6.8825,3.9115 43 | 11.708,5.3854 44 | 5.7737,2.4406 45 | 7.8247,6.7318 46 | 7.0931,1.0463 47 | 5.0702,5.1337 48 | 5.8014,1.844 49 | 11.7,8.0043 50 | 5.5416,1.0179 51 | 7.5402,6.7504 52 | 5.3077,1.8396 53 | 7.4239,4.2885 54 | 7.6031,4.9981 55 | 6.3328,1.4233 56 | 6.3589,-1.4211 57 | 6.2742,2.4756 58 | 5.6397,4.6042 59 | 9.3102,3.9624 60 | 9.4536,5.4141 61 | 8.8254,5.1694 62 | 5.1793,-0.74279 63 | 21.279,17.929 64 | 14.908,12.054 65 | 18.959,17.054 66 | 7.2182,4.8852 67 | 8.2951,5.7442 68 | 10.236,7.7754 69 | 5.4994,1.0173 70 | 20.341,20.992 71 | 10.136,6.6799 72 | 7.3345,4.0259 73 | 6.0062,1.2784 74 | 7.2259,3.3411 75 | 5.0269,-2.6807 76 | 6.5479,0.29678 77 | 7.5386,3.8845 78 | 5.0365,5.7014 79 | 10.274,6.7526 80 | 5.1077,2.0576 81 | 5.7292,0.47953 82 | 5.1884,0.20421 83 | 6.3557,0.67861 84 | 9.7687,7.5435 85 | 6.5159,5.3436 86 | 8.5172,4.2415 87 | 9.1802,6.7981 88 | 6.002,0.92695 89 | 5.5204,0.152 90 | 5.0594,2.8214 91 | 5.7077,1.8451 92 | 7.6366,4.2959 93 | 5.8707,7.2029 94 | 5.3054,1.9869 95 | 8.2934,0.14454 96 | 13.394,9.0551 97 | 5.4369,0.61705 98 | -------------------------------------------------------------------------------- /week2/ex1/ex1data2.txt: -------------------------------------------------------------------------------- 1 | 2104,3,399900 2 | 1600,3,329900 3 | 2400,3,369000 4 | 1416,2,232000 5 | 3000,4,539900 6 | 1985,4,299900 7 | 1534,3,314900 8 | 1427,3,198999 9 | 1380,3,212000 10 | 1494,3,242500 11 | 
1940,4,239999 12 | 2000,3,347000 13 | 1890,3,329999 14 | 4478,5,699900 15 | 1268,3,259900 16 | 2300,4,449900 17 | 1320,2,299900 18 | 1236,3,199900 19 | 2609,4,499998 20 | 3031,4,599000 21 | 1767,3,252900 22 | 1888,2,255000 23 | 1604,3,242900 24 | 1962,4,259900 25 | 3890,3,573900 26 | 1100,3,249900 27 | 1458,3,464500 28 | 2526,3,469000 29 | 2200,3,475000 30 | 2637,3,299900 31 | 1839,2,349900 32 | 1000,1,169900 33 | 2040,4,314900 34 | 3137,3,579900 35 | 1811,4,285900 36 | 1437,3,249900 37 | 1239,3,229900 38 | 2132,4,345000 39 | 4215,4,549000 40 | 2162,4,287000 41 | 1664,2,368500 42 | 2238,3,329900 43 | 2567,4,314000 44 | 1200,3,299000 45 | 852,2,179900 46 | 1852,4,299900 47 | 1203,3,239500 48 | -------------------------------------------------------------------------------- /week2/ex1/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | % You need to set these values correctly 9 | X_norm = X; 10 | mu = zeros(1, size(X, 2)); 11 | sigma = zeros(1, size(X, 2)); 12 | 13 | % ====================== YOUR CODE HERE ====================== 14 | % Instructions: First, for each feature dimension, compute the mean 15 | % of the feature and subtract it from the dataset, 16 | % storing the mean value in mu. Next, compute the 17 | % standard deviation of each feature and divide 18 | % each feature by it's standard deviation, storing 19 | % the standard deviation in sigma. 20 | % 21 | % Note that X is a matrix where each column is a 22 | % feature and each row is an example. You need 23 | % to perform the normalization separately for 24 | % each feature. 25 | % 26 | % Hint: You might find the 'mean' and 'std' functions useful. 
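% The normalization body below is left blank in this copy of the exercise. A
% minimal vectorized sketch of what the instructions above describe (assuming
% each row of X is one training example) would be:
%   mu = mean(X);
%   sigma = std(X);
%   X_norm = (X - repmat(mu, size(X, 1), 1)) ./ repmat(sigma, size(X, 1), 1);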
27 | % 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | % ============================================================ 38 | 39 | end 40 | -------------------------------------------------------------------------------- /week2/ex1/gradientDescent.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENT Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | trainingExamples = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | features = length(theta) - 1; 10 | 11 | for iter = 1:num_iters 12 | theta = theta - alpha * (1/trainingExamples) * (((X*theta) - y)' * X)'; % Vectorized 13 | J_history(iter) = computeCost(X, y, theta); 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /week2/ex1/gradientDescentMulti.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENTMULTI Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENTMULTI(x, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | trainingExamples = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | features = length(theta) - 1; 10 | 11 | for iter = 1:num_iters 12 | theta = theta - alpha * (1/trainingExamples) * (((X*theta) - y)' * X)'; % Vectorized 13 | J_history(iter) = computeCost(X, y, theta); 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415.
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/ex1/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week2/ex1/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
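% Example of how these jsonlab helpers fit together (the option names below are
% purely illustrative and are not options defined anywhere in this library):
%   opt = varargin2struct('compact', 1, 'maxdepth', 8);  % 'param',value pairs -> one struct
%   jsonopt('compact', 0, opt)   % returns 1, because the key exists in opt
%   jsonopt('missing', 42, opt)  % returns 42, the supplied default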
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | 4GFeo5nVvzLk3gmO 14 | 15 | 16 | -------------------------------------------------------------------------------- /week2/ex1/warmUpExercise.m: -------------------------------------------------------------------------------- 1 | function A = warmUpExercise() 2 | %WARMUPEXERCISE Example function in octave 3 | % A = WARMUPEXERCISE() is an example function that returns the 5x5 identity matrix 4 | 5 | A = eye(5); 6 | % ============= YOUR CODE HERE ============== 7 | % Instructions: Return the 5x5 identity matrix 8 | % In octave, we return values by defining which variables 9 | % represent the return values (at the top of the file) 10 | % and then set them accordingly. 11 | % =========================================== 12 | end 13 | -------------------------------------------------------------------------------- /week2/linear-regression-multiple-variables.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/linear-regression-multiple-variables.pdf -------------------------------------------------------------------------------- /week2/linear-regression.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | 4 | function theta = normalizedLinearRegression(theta, X, y) 5 | theta = inv(X' * X) * X' * y; 6 | endfunction 7 | 8 | function theta = normalizedLinearRegressionWithRegularization(theta, X, y) 9 | regularizationParameter = 100; 10 | features = size(X)(2) - 1; 11 | regularizationMatrix = regularizationParameter * eye(features + 1); 12 | regularizationMatrix(1, 1) = 0; 13 | theta = inv(X' * X + regularizationMatrix) * X' * y; 14 | endfunction 15 | 16 | hypothesis = [0 ; 0]; 17 | data = [1 1 ; 1 2 ; 1 3]; 18 | result = [1 ; 2 ; 3]; 19 | optimizedHypothesis = normalizedLinearRegression(hypothesis, data, result); 20 | disp(round(optimizedHypothesis)); 21 | 22 | optimizedHypothesis = normalizedLinearRegressionWithRegularization(hypothesis, data, result); 23 | disp(optimizedHypothesis); 24 | -------------------------------------------------------------------------------- /week2/octave-matlab-tutoria.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week2/octave-matlab-tutoria.pdf -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-1-b.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-1-b.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-1.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-1.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-2-b.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-2-b.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-2-c.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-2-c.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-2.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-3-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-3-3.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-3-b.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-3-b.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-3-w.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-3-w.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-4-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-4-2.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-4.PNG -------------------------------------------------------------------------------- /week3/assets/logistic-regresion-quiz-5-w.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/logistic-regresion-quiz-5-w.PNG -------------------------------------------------------------------------------- 
/week3/assets/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/question-3.PNG -------------------------------------------------------------------------------- /week3/assets/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/question-4.PNG -------------------------------------------------------------------------------- /week3/assets/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/question-5.PNG -------------------------------------------------------------------------------- /week3/assets/regularization-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/regularization-1.PNG -------------------------------------------------------------------------------- /week3/assets/regularization-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/regularization-2.PNG -------------------------------------------------------------------------------- /week3/assets/regularization-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/regularization-3.PNG -------------------------------------------------------------------------------- /week3/assets/regularization-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/regularization-4.PNG -------------------------------------------------------------------------------- /week3/assets/regularization-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/assets/regularization-5.PNG -------------------------------------------------------------------------------- /week3/ex2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/ex2.pdf -------------------------------------------------------------------------------- /week3/ex2/costFunction.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = costFunction(theta, X, y) 2 | %COSTFUNCTION Compute cost and gradient for logistic regression 3 | % J = COSTFUNCTION(theta, X, y) computes the cost of using theta as the 4 | % parameter for logistic regression and the gradient of the cost 5 | % w.r.t. to the parameters. 
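% A quick sanity check for the cost and gradient computed below, using
% hypothetical data (any m and y in {0,1} work): with theta = 0 the hypothesis
% is 0.5 for every example, so J must equal log(2) ≈ 0.6931 and the gradient
% must equal (1 / m) * X' * (0.5 - y).
%   m = 20; X = [ones(m, 1) randn(m, 2)]; y = double(rand(m, 1) > 0.5);
%   [J0, grad0] = costFunction(zeros(3, 1), X, y);   % J0 == log(2)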
6 | 7 | function J = logisticRegressionCost(theta, X, y) 8 | estimatedResults = sigmoid(X * theta); 9 | trainingSamples = length(y); 10 | J = -(1 / trainingSamples) * ( 11 | y' * log(estimatedResults) 12 | + (1 - y)' * log(1 - estimatedResults) 13 | ); 14 | endfunction 15 | 16 | function gradient = gradientVector(theta, X, y) 17 | trainingExamples = length(y); 18 | gradient = (1 / trainingExamples) * (X' * (sigmoid(X * theta) - y)); 19 | endfunction 20 | 21 | J = logisticRegressionCost(theta, X, y); 22 | grad = gradientVector(theta, X, y); 23 | end 24 | -------------------------------------------------------------------------------- /week3/ex2/costFunctionReg.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = costFunctionReg(theta, X, y, lambda) 2 | %COSTFUNCTIONREG Compute cost and gradient for logistic regression with regularization 3 | % J = COSTFUNCTIONREG(theta, X, y, lambda) computes the cost of using 4 | % theta as the parameter for regularized logistic regression and the 5 | % gradient of the cost w.r.t. to the parameters. 6 | 7 | function J = logisticRegressionRegularizedCost(theta, X, y) 8 | estimatedResults = sigmoid(X * theta); 9 | trainingExamples = length(y); 10 | 11 | J = (- 1 / trainingExamples) * ( 12 | y' * log(estimatedResults) 13 | + (1 - y)' * log(1 - estimatedResults) 14 | ) + (lambda / (2 * trainingExamples)) * ( 15 | sum(theta .^ 2) - theta(1) ^ 2 16 | ); 17 | endfunction 18 | 19 | function gradient = gradientVector(theta, X, y) 20 | trainingExamples = length(y); 21 | gradient = (1 / trainingExamples) * (X' * (sigmoid(X * theta) - y)); 22 | endfunction 23 | 24 | function gradient = regularizedGradientVector(theta, X, y) 25 | trainingExamples = length(y); 26 | gradient = gradientVector(theta, X, y); 27 | modifiedHypothesis = (lambda / trainingExamples) * theta; 28 | modifiedHypothesis(1) = 0; 29 | gradient += modifiedHypothesis; 30 | endfunction 31 | 32 | J = logisticRegressionRegularizedCost(theta, X, y); 33 | grad = regularizedGradientVector(theta, X, y); 34 | end 35 | -------------------------------------------------------------------------------- /week3/ex2/ex2.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/ex2/ex2.mlx -------------------------------------------------------------------------------- /week3/ex2/ex2_companion.mlx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/ex2/ex2_companion.mlx -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week3/ex2/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week3/ex2/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i3 matrix, where the first column is all-ones 10 | 11 | % Plot Data 12 | plotData(X(:,2:3), y); 13 | hold on 14 | 15 | if size(X, 2) <= 3 16 | % Only need 2 points to define a line, so choose two endpoints 17 | plot_x = [min(X(:,2))-2, max(X(:,2))+2]; 18 | 19 | % Calculate the decision boundary line 20 | plot_y = (-1./theta(3)).*(theta(2).*plot_x + theta(1)); 21 | 22 | % Plot, and adjust axes for better viewing 23 | plot(plot_x, plot_y) 24 | 25 | % Legend, specific for the exercise 26 | legend('Admitted', 'Not admitted', 'Decision Boundary') 27 | axis([30, 100, 30, 100]) 28 | else 29 | % Here is the grid range 30 | u = linspace(-1, 1.5, 50); 31 | v = linspace(-1, 1.5, 50); 32 | 33 | z = zeros(length(u), length(v)); 34 | % Evaluate z = theta*x over the grid 35 | for i = 1:length(u) 36 | for j = 1:length(v) 37 | z(i,j) = mapFeature(u(i), v(j))*theta; 38 | end 39 | end 40 | z = z'; % important to transpose z before calling contour 41 | 42 | % Plot z = 0 43 | % Notice you need to specify the range [0, 0] 44 | contour(u, v, z, [0, 0], 'LineWidth', 2) 45 | end 46 | hold off 47 | 48 | end 49 | -------------------------------------------------------------------------------- /week3/ex2/predict.m: -------------------------------------------------------------------------------- 1 | function p = predict(theta, X) 2 | %PREDICT Predict whether the label is 0 or 1 using learned logistic 3 | %regression parameters theta 4 | % p = PREDICT(theta, X) computes the predictions for X using a 5 | % threshold at 0.5 (i.e., if sigmoid(theta'*x) >= 0.5, predict 1) 6 | 7 | probability = sigmoid(X * theta); 8 | p = probability >= 0.5; 9 | end 10 | -------------------------------------------------------------------------------- /week3/ex2/sigmoid.m: -------------------------------------------------------------------------------- 1 | function g = sigmoid(z) 2 | g = 1 ./ (1 + exp(-z)); 3 | end 4 | -------------------------------------------------------------------------------- /week3/ex2/submit.m: -------------------------------------------------------------------------------- 1 | function submit() 2 | addpath('./lib'); 3 | 4 | conf.assignmentSlug = 'logistic-regression'; 5 | conf.itemName = 'Logistic Regression'; 6 | conf.partArrays = { ... 7 | { ... 8 | '1', ... 9 | { 'sigmoid.m' }, ... 10 | 'Sigmoid Function', ... 11 | }, ... 12 | { ... 13 | '2', ... 14 | { 'costFunction.m' }, ... 15 | 'Logistic Regression Cost', ... 16 | }, ... 17 | { ... 18 | '3', ... 19 | { 'costFunction.m' }, ... 20 | 'Logistic Regression Gradient', ... 21 | }, ... 22 | { ... 23 | '4', ... 24 | { 'predict.m' }, ... 25 | 'Predict', ... 26 | }, ... 27 | { ... 28 | '5', ... 29 | { 'costFunctionReg.m' }, ... 30 | 'Regularized Logistic Regression Cost', ... 31 | }, ... 32 | { ... 33 | '6', ... 34 | { 'costFunctionReg.m' }, ... 35 | 'Regularized Logistic Regression Gradient', ... 36 | }, ... 
37 | }; 38 | conf.output = @output; 39 | 40 | submitWithConfiguration(conf); 41 | end 42 | 43 | function out = output(partId, auxstring) 44 | % Random Test Cases 45 | X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))']; 46 | y = sin(X(:,1) + X(:,2)) > 0; 47 | if partId == '1' 48 | out = sprintf('%0.5f ', sigmoid(X)); 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y)); 51 | elseif partId == '3' 52 | [cost, grad] = costFunction([0.25 0.5 -0.5]', X, y); 53 | out = sprintf('%0.5f ', grad); 54 | elseif partId == '4' 55 | out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X)); 56 | elseif partId == '5' 57 | out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1)); 58 | elseif partId == '6' 59 | [cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1); 60 | out = sprintf('%0.5f ', grad); 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /week3/ex2/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 5.2.0, Sun Jun 14 16:10:20 2020 GMT 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | PC6C0bIwqHp3RUdu 14 | 15 | 16 | -------------------------------------------------------------------------------- /week3/logistic-regression-quiz.md: -------------------------------------------------------------------------------- 1 | ## Logistic Regression Quiz 2 | 3 | ![Question 1](assets/logistic-regresion-quiz-1.PNG) 4 | ![Question 2](assets/logistic-regresion-quiz-2-b.PNG) 5 | ![Question 3](assets/question-3.PNG) 6 | ![Question 4](assets/logistic-regresion-quiz-4.PNG) 7 | ![Question 4](assets/logistic-regresion-quiz-4-2.PNG) 8 | ![Question 4](assets/question-4.PNG) 9 | ![Question 5](assets/question-5.PNG) 10 | -------------------------------------------------------------------------------- /week3/logistic_regression.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | 4 | function [value, gradient] = costFunction(theta) 5 | value = (theta(1) - 5)^2 + (theta(2) - 10)^2; 6 | gradient = zeros(2, 1); 7 | gradient(1) = 2 * (theta(1) - 5); 8 | gradient(2) = 2 * (theta(2) - 10); 9 | endfunction 10 | 11 | 12 | options = optimset('GradObj', 'on', 'MaxIter', 100); 13 | initialTheta = zeros(2, 1); 14 | [theta, functionVal, exitFlag] = fminunc(@costFunction, initialTheta, options); 15 | disp(theta); 16 | disp(functionVal); 17 | disp(exitFlag); 18 | -------------------------------------------------------------------------------- /week3/regularization-quiz.md: -------------------------------------------------------------------------------- 1 | # Regularization Quiz 2 | 3 | ![Question 1](assets/regularization-1.PNG) 4 | ![Question 2](assets/regularization-2.PNG) 5 | ![Question 3](assets/regularization-3.PNG) 6 | ![Question 4](assets/regularization-4.PNG) 7 | ![Question 5](assets/regularization-5.PNG) 8 | -------------------------------------------------------------------------------- /week3/test.m: -------------------------------------------------------------------------------- 1 | clear; 2 | clc; 3 | 4 | function value = sigmoidFunction(matrix) 5 | value = (1 ./ (1 + exp(-matrix))); 6 | endfunction 7 | 8 | function J = cost(theta, X, y) 9 | trainingExamples = length(y); 10 | J = (1 / (2 * trainingExamples)) * sum((sigmoidFunction(X * theta) - y) .^ 
2); 11 | endfunction 12 | 13 | function J = regularizedCost(theta, X, y) 14 | trainingExamples = length(y); 15 | regularizationParameter = 100; 16 | 17 | J = (1 / (2 * trainingExamples)) * ( 18 | sum((sigmoidFunction(X * theta) - y) .^ 2) 19 | + regularizationParameter * (sum(theta .^ 2) - theta(1) ^ 2) 20 | ); 21 | endfunction 22 | 23 | function gradient = gradientVector(theta, X, y) 24 | trainingExamples = length(y); 25 | gradient = (1 / trainingExamples) * (X' * (sigmoidFunction(X * theta) - y)); 26 | endfunction 27 | 28 | function gradient = regularizedGradientVector(theta, X, y) 29 | trainingExamples = length(y); 30 | regularizationParameter = 100; 31 | gradient = (1 / trainingExamples) * ( 32 | X' * (sigmoidFunction(X * theta) - y) 33 | + regularizationParameter * theta 34 | ); 35 | endfunction 36 | 37 | function [value, gradient] = optimizationFunction(theta) 38 | data = [1 1 ; 1 2 ; 1 3]; 39 | result = [1 ; 2 ; 3]; 40 | value = cost(theta, data, result); 41 | gradient = gradientVector(theta, data, result); 42 | endfunction 43 | 44 | function [theta, costMemory, minCost] = gradientDescent(theta, X, y, iterations, learningRate) 45 | costMemory = [cost(theta, X, y)]; 46 | for i = 1:iterations 47 | theta = theta - learningRate * gradientVector(theta, X, y); 48 | costMemory = [costMemory cost(theta, X, y)]; 49 | end 50 | minCost = cost(theta, X, y); 51 | endfunction 52 | 53 | data = [1 1 ; 1 2 ; 1 3]; 54 | result = [1 ; 2 ; 3]; 55 | hypothesis = [10 ; 0]; 56 | 57 | [theta, costMemory, minCost] = gradientDescent(hypothesis, data, result, 100, 0.03); 58 | disp(theta); 59 | disp(minCost); 60 | plot(costMemory); 61 | -------------------------------------------------------------------------------- /week4/assets/neural-networks-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-1.PNG -------------------------------------------------------------------------------- /week4/assets/neural-networks-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-2.PNG -------------------------------------------------------------------------------- /week4/assets/neural-networks-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-3.PNG -------------------------------------------------------------------------------- /week4/assets/neural-networks-4-1a.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-4-1a.PNG -------------------------------------------------------------------------------- /week4/assets/neural-networks-4-1b.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-4-1b.PNG -------------------------------------------------------------------------------- /week4/assets/neural-networks-5.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/assets/neural-networks-5.PNG -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/machine-learning-ex3/ex3.pdf -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/ex3data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/machine-learning-ex3/ex3/ex3data1.mat -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/ex3weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/machine-learning-ex3/ex3/ex3weights.mat -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/machine-learning-ex3/ex3/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 0; 37 | Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ... 38 | 1 1 ; 1 2 ; 2 1 ; 2 2 ; ... 39 | -1 1 ; -1 2 ; -2 1 ; -2 2 ; ... 40 | 1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ]; 41 | ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]'; 42 | t1 = sin(reshape(1:2:24, 4, 3)); 43 | t2 = cos(reshape(1:2:40, 4, 5)); 44 | 45 | if partId == '1' 46 | [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1); 47 | out = sprintf('%0.5f ', J); 48 | out = [out sprintf('%0.5f ', grad)]; 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1)); 51 | elseif partId == '3' 52 | out = sprintf('%0.5f ', predictOneVsAll(t1, Xm)); 53 | elseif partId == '4' 54 | out = sprintf('%0.5f ', predict(t1, t2, Xm)); 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /week4/machine-learning-ex3/ex3/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 5.2.0, Mon Jun 15 04:01:39 2020 GMT 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | RV8uyaZ6iH1Bc0On 14 | 15 | 16 | -------------------------------------------------------------------------------- /week4/neural-networks-quiz.md: -------------------------------------------------------------------------------- 1 | # Neural Networks: Representation Quiz 2 | 3 | ![Question 1](assets/neural-networks-1.PNG) 4 | ![Question 2](assets/neural-networks-2.PNG) 5 | ![Question 3](assets/neural-networks-3.PNG) 6 | ![Question 4](assets/neural-networks-4-1a.PNG) 7 | ![Question 4](assets/neural-networks-4-1b.PNG) 8 | ![Question 5](assets/neural-networks-5.PNG) 9 | -------------------------------------------------------------------------------- /week4/neural-networks.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week4/neural-networks.m -------------------------------------------------------------------------------- /week5/assets/quiz-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/assets/quiz-1.PNG -------------------------------------------------------------------------------- /week5/assets/quiz-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/assets/quiz-2.PNG -------------------------------------------------------------------------------- /week5/assets/quiz-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/assets/quiz-3.PNG -------------------------------------------------------------------------------- /week5/assets/quiz-4.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/assets/quiz-4.PNG -------------------------------------------------------------------------------- /week5/assets/quiz-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/assets/quiz-5.PNG -------------------------------------------------------------------------------- /week5/ex4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/ex4.pdf -------------------------------------------------------------------------------- /week5/ex4/checkNNGradients.m: -------------------------------------------------------------------------------- 1 | function checkNNGradients(lambda) 2 | %CHECKNNGRADIENTS Creates a small neural network to check the 3 | %backpropagation gradients 4 | % CHECKNNGRADIENTS(lambda) Creates a small neural network to check the 5 | % backpropagation gradients, it will output the analytical gradients 6 | % produced by your backprop code and the numerical gradients (computed 7 | % using computeNumericalGradient). These two gradient computations should 8 | % result in very similar values. 9 | % 10 | 11 | if ~exist('lambda', 'var') || isempty(lambda) 12 | lambda = 0; 13 | end 14 | 15 | input_layer_size = 3; 16 | hidden_layer_size = 5; 17 | num_labels = 3; 18 | m = 5; 19 | 20 | % We generate some 'random' test data 21 | Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size); 22 | Theta2 = debugInitializeWeights(num_labels, hidden_layer_size); 23 | % Reusing debugInitializeWeights to generate X 24 | X = debugInitializeWeights(m, input_layer_size - 1); 25 | y = 1 + mod(1:m, num_labels)'; 26 | 27 | % Unroll parameters 28 | nn_params = [Theta1(:) ; Theta2(:)]; 29 | 30 | % Short hand for cost function 31 | costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ... 32 | num_labels, X, y, lambda); 33 | 34 | [cost, grad] = costFunc(nn_params); 35 | numgrad = computeNumericalGradient(costFunc, nn_params); 36 | 37 | % Visually examine the two gradient computations. The two columns 38 | % you get should be very similar. 39 | disp([numgrad grad]); 40 | fprintf(['The above two columns you get should be very similar.\n' ... 41 | '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']); 42 | 43 | % Evaluate the norm of the difference between two solutions. 44 | % If you have a correct implementation, and assuming you used EPSILON = 0.0001 45 | % in computeNumericalGradient.m, then diff below should be less than 1e-9 46 | diff = norm(numgrad-grad)/norm(numgrad+grad); 47 | 48 | fprintf(['If your backpropagation implementation is correct, then \n' ... 49 | 'the relative difference will be small (less than 1e-9). \n' ... 50 | '\nRelative Difference: %g\n'], diff); 51 | 52 | end 53 | -------------------------------------------------------------------------------- /week5/ex4/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient. 
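%
% A minimal usage sketch (illustrative only, not part of the exercise code), assuming
% a simple quadratic cost J(theta) = sum(theta .^ 2), whose exact gradient 2 * theta
% is known in advance:
%
%   J = @(t) sum(t .^ 2);                               % hypothetical cost handle
%   numgrad = computeNumericalGradient(J, [1; -2; 3]);  % approximately [2; -4; 6]
%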
4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient.It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. (i.e., numgrad(i) should 12 | % be the (approximately) the partial derivative of J with respect 13 | % to theta(i).) 14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /week5/ex4/debugInitializeWeights.m: -------------------------------------------------------------------------------- 1 | function W = debugInitializeWeights(fan_out, fan_in) 2 | %DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in 3 | %incoming connections and fan_out outgoing connections using a fixed 4 | %strategy, this will help you later in debugging 5 | % W = DEBUGINITIALIZEWEIGHTS(fan_in, fan_out) initializes the weights 6 | % of a layer with fan_in incoming connections and fan_out outgoing 7 | % connections using a fix set of values 8 | % 9 | % Note that W should be set to a matrix of size(1 + fan_in, fan_out) as 10 | % the first row of W handles the "bias" terms 11 | % 12 | 13 | % Set W to zeros 14 | W = zeros(fan_out, 1 + fan_in); 15 | 16 | % Initialize W using "sin", this ensures that W is always of the same 17 | % values and will be useful for debugging 18 | W = reshape(sin(1:numel(W)), size(W)) / 10; 19 | 20 | % ========================================================================= 21 | 22 | end 23 | -------------------------------------------------------------------------------- /week5/ex4/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 
28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /week5/ex4/ex4data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/ex4/ex4data1.mat -------------------------------------------------------------------------------- /week5/ex4/ex4weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/ex4/ex4weights.mat -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 
2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week5/ex4/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week5/ex4/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
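%
% A minimal usage sketch (illustrative only; the option names below are made up):
%
%   opt = varargin2struct('maxiter', 100, 'tol', 1e-6);
%   val = jsonopt('maxiter', 50, opt);   % yields 100; would fall back to 50 if the field were absent
%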
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | ALvkQxthWRgBov04 14 | 15 | 16 | -------------------------------------------------------------------------------- /week5/neural-networks-quiz.md: -------------------------------------------------------------------------------- 1 | # Neural Networks: Learning 2 | 3 | ![Question 1](assets/quiz-1.PNG) 4 | ![Question 2](assets/quiz-2.PNG) 5 | ![Question 3](assets/quiz-3.PNG) 6 | ![Question 4](assets/quiz-4.PNG) 7 | ![Question 5](assets/quiz-5.PNG) 8 | -------------------------------------------------------------------------------- /week5/week5.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | 4 | % Gradient approximator (gradient checker) 5 | function gradient = approximateGradient(theta, cost) 6 | n = length(theta); 7 | gradient = zeros(n, 1); 8 | EPSILON = 1e-4; 9 | for i = 1:n 10 | thetaPlus = theta; 11 | thetaPlus(i) += EPSILON; 12 | thetaMinus = theta; 13 | thetaMinus(i) -= EPSILON; 14 | gradient(i) = (cost(thetaPlus) - cost(thetaMinus)) / (2 * EPSILON); 15 | endfor 16 | endfunction 17 | 18 | function J = costFunction(theta) 19 | J = 100 * rand(1, 1); 20 | endfunction 21 | 22 | hypothesis = [0 ; 1 ; 2]; 23 | disp(approximateGradient(hypothesis, @costFunction)); 24 | -------------------------------------------------------------------------------- /week6/advice-for-applying-machine-learning.md: -------------------------------------------------------------------------------- 1 | # Advice for Applying Machine Learning Quiz 2 | 3 | ![Question 1](assets/quiz-1.PNG) 4 | ![Question 1](assets/question-1-2.PNG) 5 | ![Question 2](assets/question-2-2.PNG) 6 | ![Question 3](assets/question-3.PNG) 7 | ![Question 3](assets/question-3-2.PNG) 8 | ![Question 4](assets/question-4.PNG) 9 | ![Question 4](assets/question-4-2.PNG) 10 | ![Question 5](assets/question-5.PNG) 11 | ![Question 5](assets/question-5-2.PNG) 12 | -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-1.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-2.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-3-2.PNG: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-3-2.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-3.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-4-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-4-2.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-5-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-5-2.PNG -------------------------------------------------------------------------------- /week6/assets/machine-learning-system-design/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/machine-learning-system-design/question-5.PNG -------------------------------------------------------------------------------- /week6/assets/question-1-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-1-2.PNG -------------------------------------------------------------------------------- /week6/assets/question-2-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-2-2.PNG -------------------------------------------------------------------------------- /week6/assets/question-2-incorrect.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-2-incorrect.PNG -------------------------------------------------------------------------------- /week6/assets/question-3-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-3-2.PNG -------------------------------------------------------------------------------- /week6/assets/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-3.PNG -------------------------------------------------------------------------------- /week6/assets/question-4-2.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-4-2.PNG -------------------------------------------------------------------------------- /week6/assets/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-4.PNG -------------------------------------------------------------------------------- /week6/assets/question-5-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-5-2.PNG -------------------------------------------------------------------------------- /week6/assets/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/question-5.PNG -------------------------------------------------------------------------------- /week6/assets/quiz-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/assets/quiz-1.PNG -------------------------------------------------------------------------------- /week6/ex5.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/ex5.pdf -------------------------------------------------------------------------------- /week6/ex5/ex5data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/ex5/ex5data1.mat -------------------------------------------------------------------------------- /week6/ex5/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | mu = mean(X); 9 | X_norm = bsxfun(@minus, X, mu); 10 | 11 | sigma = std(X_norm); 12 | X_norm = bsxfun(@rdivide, X_norm, sigma); 13 | end 14 | -------------------------------------------------------------------------------- /week6/ex5/learningCurve.m: -------------------------------------------------------------------------------- 1 | function [error_train, error_val] = ... 2 | learningCurve(X, y, Xval, yval, lambda) 3 | %LEARNINGCURVE Generates the train and cross validation set errors needed 4 | %to plot a learning curve 5 | % [error_train, error_val] = ... 6 | % LEARNINGCURVE(X, y, Xval, yval, lambda) returns the train and 7 | % cross validation set errors for a learning curve. In particular, 8 | % it returns two vectors of the same length - error_train and 9 | % error_val. 
Then, error_train(i) contains the training error for 10 | % i examples (and similarly for error_val(i)). 11 | % 12 | % In this function, you will compute the train and test errors for 13 | % dataset sizes from 1 up to m. In practice, when working with larger 14 | % datasets, you might want to do this in larger intervals. 15 | % 16 | 17 | % Number of training examples 18 | m = size(X, 1); 19 | 20 | error_train = zeros(m, 1); 21 | error_val = zeros(m, 1); 22 | 23 | for i = 1:m 24 | [X_train, Y_train] = rangeSet(X, y, i); 25 | theta = trainLinearReg(X_train, Y_train, lambda); 26 | error_train(i) = linearRegCostFunction(X_train, Y_train, theta, 0); 27 | error_val(i) = linearRegCostFunction(Xval, yval, theta, 0); 28 | endfor 29 | 30 | function [X_set, Y_set] = rangeSet(X, y, index) 31 | X_set = X(1:index, :); 32 | Y_set = y(1:index); 33 | endfunction 34 | end 35 | -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 
12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week6/ex5/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
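%
% A minimal usage sketch (illustrative only):
%
%   s = mergestruct(struct('a', 1), struct('b', 2));   % s.a == 1, s.b == 2
%   % if both inputs define the same field, the value from s2 overwrites s1's
%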
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week6/ex5/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | BKjNEXITB9fFjrRP 14 | 15 | 16 | -------------------------------------------------------------------------------- /week6/ex5/trainLinearReg.m: -------------------------------------------------------------------------------- 1 | function [theta] = trainLinearReg(X, y, lambda) 2 | %TRAINLINEARREG Trains linear regression given a dataset (X, y) and a 3 | %regularization parameter lambda 4 | % [theta] = TRAINLINEARREG (X, y, lambda) trains linear regression using 5 | % the dataset (X, y) and regularization parameter lambda. Returns the 6 | % trained parameters theta. 7 | % 8 | 9 | % Initialize Theta 10 | initial_theta = zeros(size(X, 2), 1); 11 | 12 | % Create "short hand" for the cost function to be minimized 13 | costFunction = @(t) linearRegCostFunction(X, y, t, lambda); 14 | 15 | % Now, costFunction is a function that takes in only one argument 16 | options = optimset('MaxIter', 200, 'GradObj', 'on'); 17 | 18 | % Minimize using fmincg 19 | theta = fmincg(costFunction, initial_theta, options); 20 | 21 | end 22 | -------------------------------------------------------------------------------- /week6/ex5/validationCurve.m: -------------------------------------------------------------------------------- 1 | function [lambda_vec, error_train, error_val] = ... 2 | validationCurve(X, y, Xval, yval) 3 | %VALIDATIONCURVE Generate the train and validation errors needed to 4 | %plot a validation curve that we can use to select lambda 5 | % [lambda_vec, error_train, error_val] = ... 
6 | % VALIDATIONCURVE(X, y, Xval, yval) returns the train 7 | % and validation errors (in error_train, error_val) 8 | % for different values of lambda. You are given the training set (X, 9 | % y) and validation set (Xval, yval). 10 | % 11 | 12 | % Selected values of lambda (you should not change this) 13 | lambda_vec = [0 0.001 0.003 0.01 0.03 0.1 0.3 1 3 10]'; 14 | 15 | % You need to return these variables correctly. 16 | error_train = zeros(length(lambda_vec), 1); 17 | error_val = zeros(length(lambda_vec), 1); 18 | 19 | for i = 1:length(lambda_vec) 20 | lambda = lambda_vec(i); 21 | theta = trainLinearReg(X, y, lambda); 22 | error_train(i) = linearRegCostFunction(X, y, theta, 0); 23 | error_val(i) = linearRegCostFunction(Xval, yval, theta, 0); 24 | endfor 25 | end 26 | -------------------------------------------------------------------------------- /week6/machine-learning-system-design-quiz.md: -------------------------------------------------------------------------------- 1 | # Machine Learning System Design Quiz 2 | 3 | ![Question 1](assets/machine-learning-system-design/question-1.PNG) 4 | ![Question 2](assets/machine-learning-system-design/question-2.PNG) 5 | ![Question 3](assets/machine-learning-system-design/question-3.PNG) 6 | ![Question 3](assets/machine-learning-system-design/question-3-2.PNG) 7 | ![Question 4](assets/machine-learning-system-design/question-4-2.PNG) 8 | ![Question 5](assets/machine-learning-system-design/question-5.PNG) 9 | ![Question 5](assets/machine-learning-system-design/question-5-2.PNG) 10 | -------------------------------------------------------------------------------- /week7/assets/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-1.PNG -------------------------------------------------------------------------------- /week7/assets/question-2-ans.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-2-ans.PNG -------------------------------------------------------------------------------- /week7/assets/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-2.PNG -------------------------------------------------------------------------------- /week7/assets/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-3.PNG -------------------------------------------------------------------------------- /week7/assets/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-4.PNG -------------------------------------------------------------------------------- /week7/assets/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/assets/question-5.PNG 
-------------------------------------------------------------------------------- /week7/ex6.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/ex6.pdf -------------------------------------------------------------------------------- /week7/ex6/dataset3Params.m: -------------------------------------------------------------------------------- 1 | function [C, sigma] = dataset3Params(X, y, Xval, yval) 2 | %DATASET3PARAMS returns your choice of C and sigma for Part 3 of the exercise 3 | %where you select the optimal (C, sigma) learning parameters to use for SVM 4 | %with RBF kernel 5 | % [C, sigma] = DATASET3PARAMS(X, y, Xval, yval) returns your choice of C and 6 | % sigma. You should complete this function to return the optimal C and 7 | % sigma based on a cross-validation set. 8 | % 9 | 10 | C = 1; 11 | sigma = 0.1; 12 | 13 | % Code to compute the parameters with minimum cost 14 | % [C, sigma] = computeMinCostParameters() 15 | 16 | function [C, sig] = computeMinCostParameters() 17 | results = eye(64,3); 18 | errorRow = 0; 19 | 20 | for C_test = [0.01 0.03 0.1 0.3 1, 3, 10 30] 21 | for sigma_test = [0.01 0.03 0.1 0.3 1, 3, 10 30] 22 | errorRow = errorRow + 1; 23 | model = svmTrain(X, y, C_test, @(x1, x2) gaussianKernel(x1, x2, sigma_test)); 24 | predictions = svmPredict(model, Xval); 25 | prediction_error = mean(double(predictions ~= yval)); 26 | results(errorRow,:) = [C_test, sigma_test, prediction_error]; 27 | end 28 | end 29 | 30 | sorted_results = sortrows(results, 3); % sort matrix by column #3, the error, ascending 31 | 32 | C = sorted_results(1,1); 33 | sig = sorted_results(1,2); 34 | disp('C'); disp(C); 35 | disp('sigma'); disp(sig); 36 | endfunction 37 | end 38 | -------------------------------------------------------------------------------- /week7/ex6/emailFeatures.m: -------------------------------------------------------------------------------- 1 | function x = emailFeatures(word_indices) 2 | %EMAILFEATURES takes in a word_indices vector and produces a feature vector 3 | %from the word indices 4 | % x = EMAILFEATURES(word_indices) takes in a word_indices vector and 5 | % produces a feature vector from the word indices. 6 | 7 | % Total number of words in the dictionary 8 | n = 1899; 9 | 10 | % You need to return the following variables correctly. 11 | x = zeros(n, 1); 12 | 13 | for i = 1:length(word_indices) 14 | x(word_indices(i)) = 1; 15 | endfor 16 | end 17 | -------------------------------------------------------------------------------- /week7/ex6/emailSample1.txt: -------------------------------------------------------------------------------- 1 | > Anyone knows how much it costs to host a web portal ? 2 | > 3 | Well, it depends on how many visitors you're expecting. 4 | This can be anywhere from less than 10 bucks a month to a couple of $100. 5 | You should checkout http://www.rackspace.com/ or perhaps Amazon EC2 6 | if youre running something big.. 7 | 8 | To unsubscribe yourself from this mailing list, send an email to: 9 | groupname-unsubscribe@egroups.com 10 | 11 | -------------------------------------------------------------------------------- /week7/ex6/emailSample2.txt: -------------------------------------------------------------------------------- 1 | Folks, 2 | 3 | my first time posting - have a bit of Unix experience, but am new to Linux. 4 | 5 | 6 | Just got a new PC at home - Dell box with Windows XP. 
Added a second hard disk 7 | for Linux. Partitioned the disk and have installed Suse 7.2 from CD, which went 8 | fine except it didn't pick up my monitor. 9 | 10 | I have a Dell branded E151FPp 15" LCD flat panel monitor and a nVidia GeForce4 11 | Ti4200 video card, both of which are probably too new to feature in Suse's default 12 | set. I downloaded a driver from the nVidia website and installed it using RPM. 13 | Then I ran Sax2 (as was recommended in some postings I found on the net), but 14 | it still doesn't feature my video card in the available list. What next? 15 | 16 | Another problem. I have a Dell branded keyboard and if I hit Caps-Lock twice, 17 | the whole machine crashes (in Linux, not Windows) - even the on/off switch is 18 | inactive, leaving me to reach for the power cable instead. 19 | 20 | If anyone can help me in any way with these probs., I'd be really grateful - 21 | I've searched the 'net but have run out of ideas. 22 | 23 | Or should I be going for a different version of Linux such as RedHat? Opinions 24 | welcome. 25 | 26 | Thanks a lot, 27 | Peter 28 | 29 | -- 30 | Irish Linux Users' Group: ilug@linux.ie 31 | http://www.linux.ie/mailman/listinfo/ilug for (un)subscription information. 32 | List maintainer: listmaster@linux.ie 33 | 34 | 35 | -------------------------------------------------------------------------------- /week7/ex6/ex6data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/ex6/ex6data1.mat -------------------------------------------------------------------------------- /week7/ex6/ex6data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/ex6/ex6data2.mat -------------------------------------------------------------------------------- /week7/ex6/ex6data3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/ex6/ex6data3.mat -------------------------------------------------------------------------------- /week7/ex6/gaussianKernel.m: -------------------------------------------------------------------------------- 1 | function sim = gaussianKernel(x1, x2, sigma) 2 | %RBFKERNEL returns a radial basis function kernel between x1 and x2 3 | % sim = gaussianKernel(x1, x2) returns a gaussian kernel between x1 and x2 4 | % and returns the value in sim 5 | 6 | % Ensure that x1 and x2 are column vectors 7 | x1 = x1(:); x2 = x2(:); 8 | 9 | difference = x1 - x2; 10 | euclideanDistanceSquared = sum(difference .^ 2); 11 | variance = sigma ^ 2; 12 | sim = exp(- euclideanDistanceSquared / (2 * variance)); 13 | end 14 | -------------------------------------------------------------------------------- /week7/ex6/getVocabList.m: -------------------------------------------------------------------------------- 1 | function vocabList = getVocabList() 2 | %GETVOCABLIST reads the fixed vocabulary list in vocab.txt and returns a 3 | %cell array of the words 4 | % vocabList = GETVOCABLIST() reads the fixed vocabulary list in vocab.txt 5 | % and returns a cell array of the words in vocabList. 
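% Each entry in vocab.txt is an integer index followed by the word itself;
% the fscanf calls below read and discard the index, then store the word,
% so vocabList{i} holds the i-th word of the 1899-word dictionary.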
6 | 7 | 8 | %% Read the fixed vocabulary list 9 | fid = fopen('vocab.txt'); 10 | 11 | % Store all dictionary words in cell array vocab{} 12 | n = 1899; % Total number of words in the dictionary 13 | 14 | % For ease of implementation, we use a struct to map the strings => integers 15 | % In practice, you'll want to use some form of hashmap 16 | vocabList = cell(n, 1); 17 | for i = 1:n 18 | % Word Index (can ignore since it will be = i) 19 | fscanf(fid, '%d', 1); 20 | % Actual Word 21 | vocabList{i} = fscanf(fid, '%s', 1); 22 | end 23 | fclose(fid); 24 | 25 | end 26 | -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week7/ex6/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week7/ex6/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i= 0) = 1; 51 | pred(p < 0) = 0; 52 | 53 | end 54 | 55 | -------------------------------------------------------------------------------- /week7/ex6/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 5.2.0, Fri Jun 19 22:59:46 2020 GMT 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | os5jCmiNxUcu0sgx 14 | 15 | 16 | -------------------------------------------------------------------------------- /week7/ex6/visualizeBoundary.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundary(X, y, model, varargin) 2 | %VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM 3 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a non-linear decision 4 | % boundary learned by the SVM and overlays the data on it 5 | 6 | % Plot the training data on top of the boundary 7 | plotData(X, y) 8 | 9 | % Make classification predictions over a grid of values 10 | x1plot = linspace(min(X(:,1)), max(X(:,1)), 100)'; 11 | x2plot = linspace(min(X(:,2)), max(X(:,2)), 100)'; 12 | [X1, X2] = meshgrid(x1plot, x2plot); 13 | vals = zeros(size(X1)); 14 | for i = 1:size(X1, 2) 15 | this_X = [X1(:, i), X2(:, i)]; 16 | vals(:, i) = svmPredict(model, this_X); 17 | end 18 | 19 | % Plot the SVM boundary 20 | hold on 21 | contour(X1, X2, vals, [0.5 0.5], 'b'); 22 | hold off; 23 | 24 | end 25 | -------------------------------------------------------------------------------- /week7/ex6/visualizeBoundaryLinear.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundaryLinear(X, y, model) 2 | %VISUALIZEBOUNDARYLINEAR plots a 
linear decision boundary learned by the 3 | %SVM 4 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary 5 | % learned by the SVM and overlays the data on it 6 | 7 | w = model.w; 8 | b = model.b; 9 | xp = linspace(min(X(:,1)), max(X(:,1)), 100); 10 | yp = - (w(1)*xp + b)/w(2); 11 | plotData(X, y); 12 | hold on; 13 | plot(xp, yp, '-b'); 14 | hold off 15 | 16 | end 17 | -------------------------------------------------------------------------------- /week7/support-vector-machines-quiz.md: -------------------------------------------------------------------------------- 1 | # Support Vector Machines 2 | 3 | ![Question 1](assets/question-1.PNG) 4 | ![Question 2](assets/question-2.PNG) 5 | ![Question 2](assets/question-2-ans.PNG) 6 | ![Question 3](assets/question-3.PNG) 7 | ![Question 4](assets/question-4.PNG) 8 | ![Question 5](assets/question-5.PNG) 9 | -------------------------------------------------------------------------------- /week7/week7.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | 4 | function g = cost1(z) 5 | mask = z < 1; 6 | g = z - 1; 7 | g .*= mask; 8 | g = abs(g); 9 | endfunction 10 | 11 | function g = cost0(z) 12 | mask = z > -1; 13 | g = z + 1; 14 | g .*= mask; 15 | g = abs(g); 16 | endfunction 17 | 18 | function J = svmCost(theta, X, y, C) 19 | probabilities = X * theta; 20 | J = y .* cost1(probabilities) + (1 - y) .* cost0(probabilities); 21 | J *= C; 22 | endfunction 23 | 24 | function J = regularizedWeights(theta) 25 | J = sum(theta .^ 2) - theta(1) ^ 2; 26 | endfunction 27 | 28 | function J = svmRegularizedCost(theta, X, y, C) 29 | J = svmCost(theta, X, y, C) + (1 / 2) * regularizedWeights(theta); 30 | endfunction 31 | 32 | % Gaussian Kernel Function 33 | function f1 = similarity(X, l1) 34 | trainingDataSize = size(X, 1); 35 | similarityVector = repelem(l1, trainingDataSize, 1); 36 | difference = X - similarityVector; 37 | euclideanDistanceSquared = sum(difference .^ 2, 2); 38 | variance = 1; 39 | f1 = exp(- euclideanDistanceSquared ./ (2 * variance)); 40 | endfunction 41 | 42 | function k = kernel(X, l) 43 | trainingSampleSize = size(X, 1); 44 | anchorPoints = size(l, 1); 45 | k = zeros(trainingSampleSize, anchorPoints); 46 | for i = 1:anchorPoints 47 | anchorVector = l(i, :); 48 | k(:, i) = similarity(X, anchorVector); 49 | endfor 50 | endfunction 51 | 52 | function p = predictKernel(theta, X) 53 | probabilities = X * theta; 54 | p = probabilities >= 0; 55 | endfunction 56 | 57 | mat = [1 2 3 0 -1 -2 -3]; 58 | % disp(cost0(mat)); 59 | % disp(cost1(mat)); 60 | 61 | X = [1 1 ; 1 2 ; 1 3]; 62 | l1 = [10 10]; 63 | l = [10 10 ; 5 6 ; -10 -8 ; 0 0 ; 100 100]; 64 | % disp(similarity(X, l1)); 65 | disp(kernel(X, X)); 66 | disp(predictKernel([1 ; 2], [1 1 ; 1 2 ; 1 3 ; 1 -10])); 67 | 68 | 69 | % disp(var([1, 2, 3, 4, 10])); 70 | % disp(var(X, [], 2)); 71 | 72 | mat = [ 1 2 3 ; 10 -89 0]; 73 | [min, index] = min(mat(:, 3)); 74 | disp(min); 75 | disp(index); 76 | -------------------------------------------------------------------------------- /week8/assets/pca/question-1-ans-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-1-ans-1.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-1-ans-2.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-1-ans-2.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-1.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-2.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-3-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-3-3.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-4-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-4-2.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-4.PNG -------------------------------------------------------------------------------- /week8/assets/pca/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/pca/question-5.PNG -------------------------------------------------------------------------------- /week8/assets/unsupervised-learning/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/unsupervised-learning/question-1.PNG -------------------------------------------------------------------------------- /week8/assets/unsupervised-learning/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/unsupervised-learning/question-2.PNG -------------------------------------------------------------------------------- /week8/assets/unsupervised-learning/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/unsupervised-learning/question-3.PNG -------------------------------------------------------------------------------- /week8/assets/unsupervised-learning/question-4.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/unsupervised-learning/question-4.PNG -------------------------------------------------------------------------------- /week8/assets/unsupervised-learning/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/assets/unsupervised-learning/question-5.PNG -------------------------------------------------------------------------------- /week8/ex7.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7.pdf -------------------------------------------------------------------------------- /week8/ex7/bird_small.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/bird_small.mat -------------------------------------------------------------------------------- /week8/ex7/bird_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/bird_small.png -------------------------------------------------------------------------------- /week8/ex7/computeCentroids.m: -------------------------------------------------------------------------------- 1 | function centroids = computeCentroids(X, idx, K) 2 | %COMPUTECENTROIDS returns the new centroids by computing the means of the 3 | %data points assigned to each centroid. 4 | % centroids = COMPUTECENTROIDS(X, idx, K) returns the new centroids by 5 | % computing the means of the data points assigned to each centroid. It is 6 | % given a dataset X where each row is a single data point, a vector 7 | % idx of centroid assignments (i.e. each entry in range [1..K]) for each 8 | % example, and K, the number of centroids. You should return a matrix 9 | % centroids, where each row of centroids is the mean of the data points 10 | % assigned to it. 11 | 12 | [m n] = size(X); 13 | centroids = zeros(K, n); 14 | frequency = zeros(K, 1); 15 | for i = 1:m 16 | frequency(idx(i))++; 17 | centroids(idx(i), :) += X(i, :); 18 | endfor 19 | mask = centroids == 0; 20 | frequency = maskZeroAsOne(frequency); 21 | centroids = centroids ./ frequency; 22 | 23 | function mat = maskZeroAsOne(mat) 24 | mask = mat == 0; 25 | mat += mask; 26 | endfunction 27 | end 28 | -------------------------------------------------------------------------------- /week8/ex7/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 
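% Each row of X is reshaped into an example_height-by-example_width patch,
% scaled by its own maximum absolute value, and tiled into a roughly square
% display_rows-by-display_cols grid with one pixel of padding between patches.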
6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /week8/ex7/drawLine.m: -------------------------------------------------------------------------------- 1 | function drawLine(p1, p2, varargin) 2 | %DRAWLINE Draws a line from point p1 to point p2 3 | % DRAWLINE(p1, p2) Draws a line from point p1 to point p2 and holds the 4 | % current figure 5 | 6 | plot([p1(1) p2(1)], [p1(2) p2(2)], varargin{:}); 7 | 8 | end -------------------------------------------------------------------------------- /week8/ex7/ex7data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/ex7data1.mat -------------------------------------------------------------------------------- /week8/ex7/ex7data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/ex7data2.mat -------------------------------------------------------------------------------- /week8/ex7/ex7faces.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/ex7faces.mat -------------------------------------------------------------------------------- /week8/ex7/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 
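% Worked example: for X = [1 2; 3 4], mu = [2 3] and sigma = [1.4142 1.4142]
% (std defaults to the sample normalization N-1), so
% X_norm = [-0.7071 -0.7071; 0.7071 0.7071].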
7 | 8 | mu = mean(X); 9 | X_norm = bsxfun(@minus, X, mu); 10 | 11 | sigma = std(X_norm); 12 | X_norm = bsxfun(@rdivide, X_norm, sigma); 13 | 14 | 15 | % ============================================================ 16 | 17 | end 18 | -------------------------------------------------------------------------------- /week8/ex7/findClosestCentroids.m: -------------------------------------------------------------------------------- 1 | function idx = findClosestCentroids(X, centroids) 2 | %FINDCLOSESTCENTROIDS computes the centroid memberships for every example 3 | % idx = FINDCLOSESTCENTROIDS (X, centroids) returns the closest centroids 4 | % in idx for a dataset X where each row is a single example. idx = m x 1 5 | % vector of centroid assignments (i.e. each entry in range [1..K]) 6 | 7 | K = size(centroids, 1); 8 | pointGroup = distanceFromClusterPoints(X, K, centroids); 9 | [~, idx] = min(pointGroup, [], 2); 10 | 11 | function mat = distanceFromClusterPoints(X, K, clusters) 12 | m = size(X, 1); 13 | mat = zeros(m, K); 14 | for i = 1:K 15 | mat(:, i) = euclideanDistanceSquare(X, clusters(i, :)); 16 | endfor 17 | endfunction 18 | 19 | function d = euclideanDistanceSquare(X, cluster) 20 | trainingDataSize = size(X, 1); 21 | similarityMatrix = repelem(cluster, trainingDataSize, 1); 22 | difference = X - similarityMatrix; 23 | d = sum(difference .^ 2, 2); 24 | endfunction 25 | end 26 | -------------------------------------------------------------------------------- /week8/ex7/kMeansInitCentroids.m: -------------------------------------------------------------------------------- 1 | function centroids = kMeansInitCentroids(X, K) 2 | %KMEANSINITCENTROIDS This function initializes K centroids that are to be 3 | %used in K-Means on the dataset X 4 | % centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be 5 | % used with the K-Means on the dataset X 6 | 7 | m = size(X, 1); 8 | % randomly generate K clusters and positions 9 | randomizedDataSet = randperm(m); 10 | centroids = X(randomizedDataSet(1:K), :); 11 | end 12 | -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 
35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week8/ex7/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /week8/ex7/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 21 6 | anishviewer@gmail.com 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | SieOIGAtUz1SGMW4 14 | 15 | 16 | -------------------------------------------------------------------------------- /week8/pca.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | close all; 4 | 5 | % Principal Component Analysis (PCA) Algorithm 6 | function x = meanNormalizedVectors(x) 7 | mu = mean(x); 8 | maxValues = max(x); 9 | x -= mu; 10 | % x ./= maxValues; 11 | endfunction 12 | 13 | % @return n,k 14 | function [u, S] = reducedDimensionSpace(X, k) 15 | m = size(X, 1); 16 | covMatrix = (1 / m) * X' * X; 17 | [eigenVectors, S, ~] = svd(covMatrix); % eigenVectors: n,n n=#features(X) 18 | u = eigenVectors(:, 1:k); 19 | endfunction 20 | 21 | function z = dimensionallyReducedData(X, k) 22 | % X = meanNormalizedVectors(X); 23 | reductionMatrix = reducedDimensionSpace(X, k); 24 | z = X * reductionMatrix; 25 | endfunction 26 | 27 | % @return x;m,n 28 | % @param z: m,k 29 | function x = dimensionallyReconstructedData(z, reducedDimMask) 30 | x = z * reducedDimMask'; 31 | endfunction 32 | 33 | % @return v:scalar 34 | % @param original:m,n 35 | % @param reconstructed:m,n 36 | function v = pcaVariance(original, reconstructed) 37 | difference = original - reconstructed; 38 | euclidianDistanceSquaredCost = sum(sum(difference .^ 2)); 39 | distanceFromOriginCost = sum(sum(original .^ 2)); 40 | v = euclidianDistanceSquaredCost / distanceFromOriginCost ; 41 | endfunction 42 | 43 | % @return v: scalar (variance) 44 | % @param S:n,n diagonal matrix 45 | % @param k:scalar #reducedDimensions 46 | function v = variancePcaUsingCovMat(S, k) 47 | reducedDimensionSum = sum(sum(S(1:k, :))); 48 | completeDimensionSum = sum(sum(S)); 49 | v = 1 - (reducedDimensionSum / completeDimensionSum); 50 | endfunction 51 | 52 | function k = minDimensionalityReduction(x, threshold) 53 | n = size(x, 2); 54 | [~, S] = reducedDimensionSpace(x, 1); 55 | for i = 1:n 56 | variance = variancePcaUsingCovMat(S, i); 57 | if variance < threshold 58 | k = i; 59 | break; 60 | endif 61 | endfor 62 | endfunction 63 | 64 | x = [1 2 3 ; 4 5 6 ; 10 10 0 ; 100 -90 34]; 65 | x2 = [1 2 ; 4 5]; 66 | % disp(meanNormalizedVectors(x)); 67 | % disp('original data'); disp(x); 68 | % disp('dimensionally reduced data'); disp(dimensionallyReducedData(x, 3)); 69 | 70 | u = reducedDimensionSpace(x, 3); 71 | z = dimensionallyReducedData(x, 3); 72 | r = dimensionallyReconstructedData(z, u); 73 | % disp('reconstruction of dimensionally reduced data'); disp(r); 74 | 75 | % variance in PCA algo 76 | disp('min dimension data can be reduced to'); disp(minDimensionalityReduction(x, 0.1)); 77 | -------------------------------------------------------------------------------- /week8/principal-component-analysis.md: -------------------------------------------------------------------------------- 1 | # Principal Component Analysis Quiz 2 | 3 | ![Question 1](assets/pca/question-1.PNG) 4 | ![Question 1](assets/pca/question-1-ans-1.PNG) 5 
| ![Question 1](assets/pca/question-1-ans-2.PNG) 6 | ![Question 2](assets/pca/question-2.PNG) 7 | ![Question 3](assets/pca/question-3-3.PNG) 8 | ![Question 4](assets/pca/question-4.PNG) 9 | ![Question 4](assets/pca/question-4-2.PNG) 10 | ![Question 5](assets/pca/question-5.PNG) 11 | -------------------------------------------------------------------------------- /week8/unsupervised-learning-quiz.md: -------------------------------------------------------------------------------- 1 | # Unsupervised Learning Quiz 2 | 3 | ![Question 1](assets/unsupervised-learning/question-1.PNG) 4 | ![Question 2](assets/unsupervised-learning/question-2.PNG) 5 | ![Question 3](assets/unsupervised-learning/question-3.PNG) 6 | ![Question 4](assets/unsupervised-learning/question-4.PNG) 7 | ![Question 5](assets/unsupervised-learning/question-5.PNG) 8 | -------------------------------------------------------------------------------- /week9/anomaly-detection-quiz.md: -------------------------------------------------------------------------------- 1 | # Anomaly Detection Quiz 2 | 3 | ![Question 1](assets/anomaly-detection/question-1.PNG) 4 | ![Question 2](assets/anomaly-detection/question-2.PNG) 5 | ![Question 3](assets/anomaly-detection/question-3.PNG) 6 | ![Question 4](assets/anomaly-detection/question-4.PNG) 7 | ![Question 5](assets/anomaly-detection/question-5.PNG) 8 | gradient -------------------------------------------------------------------------------- /week9/assets/anomaly-detection/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/anomaly-detection/question-1.PNG -------------------------------------------------------------------------------- /week9/assets/anomaly-detection/question-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/anomaly-detection/question-2.PNG -------------------------------------------------------------------------------- /week9/assets/anomaly-detection/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/anomaly-detection/question-3.PNG -------------------------------------------------------------------------------- /week9/assets/anomaly-detection/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/anomaly-detection/question-4.PNG -------------------------------------------------------------------------------- /week9/assets/anomaly-detection/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/anomaly-detection/question-5.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-1.PNG 
-------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-2-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-2-3.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-3-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-3-2.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-3.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-4-2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-4-2.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-4.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-4.PNG -------------------------------------------------------------------------------- /week9/assets/reccomender-systems/question-5.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/assets/reccomender-systems/question-5.PNG -------------------------------------------------------------------------------- /week9/collaborative-filtering.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | close all; 4 | 5 | % Collaborative filtering algorithm 6 | 7 | % @return J:scalar cost 8 | % @param X:(n_m),(n) feature vector per movie 9 | % @param y:(n_m),(n_u) rating per user for a given movie 10 | % @param thetas:(n_u),(n + 1) theta vector per user 11 | % @param r:(n_m),(n_u) boolean matrix whether user has rated a movie or not (1/0) 12 | % @param userId:scalar user id 13 | % n_m: number of movies n_u: number of users 14 | function J = linearRegressionCost(X, y, thetas, r, userId) 15 | moviesSeen = find(r(:, userId) == 1); 16 | X = X(moviesSeen, :); 17 | [m, n] = size(X); 18 | X = [ones(m, 1) X]; 19 | y = y(moviesSeen, userId); 20 | theta = thetas(userId, :)'; 21 | J = 0.5 * sum((X * theta - y) .^ 2); 22 | endfunction 23 | 24 | function J = linearRegressionRegularizedCost(X, y, thetas, r, userId) 25 | J = linearRegressionCost(X, y, thetas, r, userId); 26 | theta = thetas(userId, :); 27 | J += (regularizationFactor() / 2) * (sum(sum(X .^ 2)) - sum(X(1) .^ 2)); 28 | endfunction 29 | 30 | function lambda = regularizationFactor() 31 | lambda = 5; 32 | endfunction 33 | 34 | function J = 
collaborativeFilteringCost(X, y, thetas, r) 35 | J = 0; 36 | [numberOfMovies, numberOfUsers] = size(y); 37 | for i = 1:numberOfUsers 38 | J += linearRegressionRegularizedCost(X, y, thetas, r, i); 39 | endfor 40 | endfunction 41 | 42 | X = [2 ; 3 ; 4 ; 0]; 43 | y = [4 5 0 ; 1 1 1 ; 2 2 4 ; 0 2 0]; 44 | theta1 = [1 2]; 45 | theta2 = [3 4]; 46 | theta3 = [0 -2]; 47 | thetas = [theta1 ; theta2 ; theta3]; 48 | r = [1 1 0 ; 1 1 1 ; 1 1 1 ; 0 1 0]; 49 | 50 | disp(collaborativeFilteringCost(X, y, thetas, r)); 51 | -------------------------------------------------------------------------------- /week9/desnity-estimation.m: -------------------------------------------------------------------------------- 1 | clc; 2 | clear; 3 | close all; 4 | 5 | function g = gaussian(x, mu, variance) 6 | g = ((x - mu) .^ 2) ./ (2 * variance); 7 | g = exp(-g); 8 | % factor = 1 ./ sqrt(2 * pi * variance); 9 | % g .*= factor; 10 | endfunction 11 | 12 | function g = gaussianMat(x) 13 | mu = mean(x); 14 | variance = var(x); 15 | g = gaussian(x, mu, variance); 16 | endfunction 17 | 18 | function p = gaussianProbabilities(x) 19 | g = gaussianMat(x); 20 | p = prod(g, 2); 21 | endfunction 22 | 23 | function p = probabilityGaussianModel(x, mu, variance) 24 | g = gaussian(x, mu, variance); 25 | p = prod(g, 2); 26 | endfunction 27 | 28 | function y = isAnomaly(x, mu, variance, threshold) 29 | p = probabilityGaussianModel(x, mu, variance); 30 | y = p < threshold; 31 | endfunction 32 | 33 | function [mu, variance] = gaussianModel(x) 34 | mu = mean(x); 35 | variance = var(x); 36 | endfunction 37 | 38 | function [precision, recall, f1] = evaluationMetrics(computed, actual) 39 | truePositives = sum(computed + actual == 2); 40 | totalPositives = sum(computed); 41 | actualPositives = sum(actual); 42 | precision = truePositives / totalPositives; 43 | recall = truePositives / actualPositives; 44 | f1 = (2 * precision * recall) / (precision + recall); 45 | endfunction 46 | 47 | function [mu, covMatrix] = multivariateGaussianModel(x) 48 | mu = mean(x); 49 | m = size(x, 1); 50 | covMatrix = (1 / m) * x' * x; 51 | endfunction 52 | 53 | function p = probabilityMultivariateGaussian(x, mu, covMatrix) 54 | n = size(x, 2); 55 | g = -(1 / 2) * (x - mu) * pinv(covMatrix) * (x - mu)'; 56 | g = exp(-g); 57 | detCovMatrix = det(covMatrix); 58 | factor = (detCovMatrix ^(1 / 2)) * ((2 * pi) ^ (n / 2)); 59 | p = diag(g ./ factor); 60 | endfunction 61 | 62 | x = [1 2 3 4 5 ; 3 4 5 6 7 ; 0 0 0 3 10]; 63 | xCrossValidation = [0 0 -90 67 3 ; 3 5 6 0 23]; 64 | [mu variance] = gaussianModel(x); 65 | disp(probabilityGaussianModel(x, mu, variance)); 66 | [mu, covMatrix] = multivariateGaussianModel(x); 67 | disp(probabilityMultivariateGaussian(x, mu, covMatrix)); 68 | -------------------------------------------------------------------------------- /week9/ex8.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8.pdf -------------------------------------------------------------------------------- /week9/ex8/checkCostFunction.m: -------------------------------------------------------------------------------- 1 | function checkCostFunction(lambda) 2 | %CHECKCOSTFUNCTION Creates a collaborative filering problem 3 | %to check your cost function and gradients 4 | % CHECKCOSTFUNCTION(lambda) Creates a collaborative filering problem 5 | % to check your cost function and gradients, it will output the 6 | % analytical gradients 
produced by your code and the numerical gradients 7 | % (computed using computeNumericalGradient). These two gradient 8 | % computations should result in very similar values. 9 | 10 | % Set lambda 11 | if ~exist('lambda', 'var') || isempty(lambda) 12 | lambda = 0; 13 | end 14 | 15 | %% Create small problem 16 | X_t = rand(4, 3); 17 | Theta_t = rand(5, 3); 18 | 19 | % Zap out most entries 20 | Y = X_t * Theta_t'; 21 | Y(rand(size(Y)) > 0.5) = 0; 22 | R = zeros(size(Y)); 23 | R(Y ~= 0) = 1; 24 | 25 | %% Run Gradient Checking 26 | X = randn(size(X_t)); 27 | Theta = randn(size(Theta_t)); 28 | num_users = size(Y, 2); 29 | num_movies = size(Y, 1); 30 | num_features = size(Theta_t, 2); 31 | 32 | numgrad = computeNumericalGradient( ... 33 | @(t) cofiCostFunc(t, Y, R, num_users, num_movies, ... 34 | num_features, lambda), [X(:); Theta(:)]); 35 | 36 | [cost, grad] = cofiCostFunc([X(:); Theta(:)], Y, R, num_users, ... 37 | num_movies, num_features, lambda); 38 | 39 | disp([numgrad grad]); 40 | fprintf(['The above two columns you get should be very similar.\n' ... 41 | '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']); 42 | 43 | diff = norm(numgrad-grad)/norm(numgrad+grad); 44 | fprintf(['If your cost function implementation is correct, then \n' ... 45 | 'the relative difference will be small (less than 1e-9). \n' ... 46 | '\nRelative Difference: %g\n'], diff); 47 | 48 | end -------------------------------------------------------------------------------- /week9/ex8/cofiCostFunc.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ... 2 | num_features, lambda) 3 | %COFICOSTFUNC Collaborative filtering cost function 4 | % [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ... 5 | % num_features, lambda) returns the cost and gradient for the 6 | % collaborative filtering problem. 7 | % 8 | 9 | % Unfold the U and W matrices from params 10 | X = reshape(params(1:num_movies*num_features), num_movies, num_features); 11 | Theta = reshape(params(num_movies*num_features+1:end), ... 12 | num_users, num_features); 13 | 14 | J = 1/2 * sum(sum((R.* ((X*Theta') - Y)).^2)); 15 | X_grad = (R .* (X*Theta' - Y)) * Theta; 16 | Theta_grad = (R .* (X*Theta' - Y))' * X; 17 | 18 | % With regularization 19 | J = J + lambda/2 * (sum(sum(Theta.^2)) + sum(sum(X.^2))); 20 | X_grad = X_grad + lambda * X; 21 | Theta_grad = Theta_grad + lambda * Theta; 22 | 23 | grad = [X_grad(:); Theta_grad(:)]; 24 | end 25 | -------------------------------------------------------------------------------- /week9/ex8/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient. 4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient.It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. (i.e., numgrad(i) should 12 | % be the (approximately) the partial derivative of J with respect 13 | % to theta(i).) 
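% Concretely, the central-difference estimate computed below is
%   numgrad(i) = (J(theta + e*e_i) - J(theta - e*e_i)) / (2*e),
% where e_i is the i-th standard basis vector and e = 1e-4.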
14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /week9/ex8/estimateGaussian.m: -------------------------------------------------------------------------------- 1 | function [mu sigma2] = estimateGaussian(X) 2 | %ESTIMATEGAUSSIAN This function estimates the parameters of a 3 | %Gaussian distribution using the data in X 4 | % [mu sigma2] = estimateGaussian(X), 5 | % The input X is the dataset with each n-dimensional data point in one row 6 | % The output is an n-dimensional vector mu, the mean of the data set 7 | % and the variances sigma^2, an n x 1 vector 8 | % 9 | 10 | [m, n] = size(X); 11 | mu = mean(X); 12 | sigma2 = ((m - 1) / m) * var(X); 13 | end 14 | -------------------------------------------------------------------------------- /week9/ex8/ex8_movieParams.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8/ex8_movieParams.mat -------------------------------------------------------------------------------- /week9/ex8/ex8_movies.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8/ex8_movies.mat -------------------------------------------------------------------------------- /week9/ex8/ex8data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8/ex8data1.mat -------------------------------------------------------------------------------- /week9/ex8/ex8data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8/ex8data2.mat -------------------------------------------------------------------------------- /week9/ex8/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /week9/ex8/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | 
--------------------------------------------------------------------------------
/week9/ex8/lib/jsonlab/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/anishLearnsToCode/ml-stanford/a6cfe134382957287db97127ea0f1a5e5181cf51/week9/ex8/lib/jsonlab/README.txt
--------------------------------------------------------------------------------
/week9/ex8/lib/jsonlab/jsonopt.m:
--------------------------------------------------------------------------------
1 | function val=jsonopt(key,default,varargin)
2 | %
3 | % val=jsonopt(key,default,optstruct)
4 | %
5 | % setting options based on a struct. The struct can be produced
6 | % by varargin2struct from a list of 'param','value' pairs
7 | %
8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)
9 | %
10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $
11 | %
12 | % input:
13 | %      key: a string with which one looks up a value from a struct
14 | %      default: if the key does not exist, return default
15 | %      optstruct: a struct where each sub-field is a key
16 | %
17 | % output:
18 | %      val: if key exists, val=optstruct.key; otherwise val=default
19 | %
20 | % license:
21 | %     BSD, see LICENSE_BSD.txt files for details
22 | %
23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
24 | %
25 | 
26 | val=default;
27 | if(nargin<=2) return; end
28 | opt=varargin{1};
29 | if(isstruct(opt) && isfield(opt,key))
30 |     val=getfield(opt,key);
31 | end
32 | 
--------------------------------------------------------------------------------
/week9/ex8/lib/jsonlab/mergestruct.m:
--------------------------------------------------------------------------------
1 | function s=mergestruct(s1,s2)
2 | %
3 | % s=mergestruct(s1,s2)
4 | %
5 | % merge two struct objects into one
6 | %
7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)
8 | % date: 2012/12/22
9 | %
10 | % input:
11 | %      s1,s2: a struct object, s1 and s2 can not be arrays
12 | %
13 | % output:
14 | %      s: the merged struct object. fields in s1 and s2 will be combined in s.
15 | %
16 | % license:
17 | %     BSD, see LICENSE_BSD.txt files for details
18 | %
19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
20 | %
21 | 
22 | if(~isstruct(s1) || ~isstruct(s2))
23 |     error('input parameters contain non-struct');
24 | end
25 | if(length(s1)>1 || length(s2)>1)
26 |     error('can not merge struct arrays');
27 | end
28 | fn=fieldnames(s2);
29 | s=s1;
30 | for i=1:length(fn)
31 |     s=setfield(s,fn{i},getfield(s2,fn{i}));
32 | end
33 | 
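A short usage sketch (not a file in this repository) of how these two option helpers compose; the option names maxiter, display and tol are made up for illustration:

defaults = struct('maxiter', 100, 'display', 'off');
user     = struct('maxiter', 400);
opt = mergestruct(defaults, user);        % fields of 'user' overwrite matching fields of 'defaults'
m   = jsonopt('maxiter', 50,     opt);    % m == 400   (key present in opt)
d   = jsonopt('display', 'iter', opt);    % d == 'off' (key present in opt)
t   = jsonopt('tol',     1e-6,   opt);    % t == 1e-6  (key absent, default returned)

varargin2struct (the next file) builds the same kind of option struct directly from a list of 'param',value pairs.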
--------------------------------------------------------------------------------
/week9/ex8/lib/jsonlab/varargin2struct.m:
--------------------------------------------------------------------------------
1 | function opt=varargin2struct(varargin)
2 | %
3 | % opt=varargin2struct('param1',value1,'param2',value2,...)
4 | % or
5 | % opt=varargin2struct(...,optstruct,...)
6 | %
7 | % convert a series of input parameters into a structure
8 | %
9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu)
10 | % date: 2012/12/22
11 | %
12 | % input:
13 | %      'param', value: the input parameters should be pairs of a string and a value
14 | %      optstruct: if a parameter is a struct, the fields will be merged to the output struct
15 | %
16 | % output:
17 | %      opt: a struct where opt.param1=value1, opt.param2=value2 ...
18 | %
19 | % license:
20 | %     BSD, see LICENSE_BSD.txt files for details
21 | %
22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab)
23 | %
24 | 
25 | len=length(varargin);
26 | opt=struct;
27 | if(len==0) return; end
28 | i=1;
29 | while(i<=len)
30 |     if(isstruct(varargin{i}))
31 |         opt=mergestruct(opt,varargin{i});
32 |     elseif(ischar(varargin{i}) && i<len)
--------------------------------------------------------------------------------
/week9/ex8/selectThreshold.m:
--------------------------------------------------------------------------------
23 |         if F1 > bestF1
24 |             bestF1 = F1;
25 |             bestEpsilon = epsilon;
26 |         end
27 |     end
28 | endfunction
29 | 
30 | function [precision, recall, f1] = evaluationMetrics(computed, actual)
31 | truePositives = sum(computed + actual == 2);   % both predicted and actual are 1
32 | totalPositives = sum(computed);
33 | actualPositives = sum(actual);
34 | precision = truePositives / totalPositives;
35 | recall = truePositives / actualPositives;
36 | f1 = (2 * precision * recall) / (precision + recall);
37 | endfunction
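For reference, a hand-checkable instance of the precision/recall/F1 arithmetic implemented in evaluationMetrics above; the arithmetic is reproduced inline because evaluationMetrics is a local function of selectThreshold.m and is not callable from outside that file:

% Hand-worked example with small, countable vectors
predicted = [1 1 0 0 1]';                    % 1 = flagged as an anomaly
actual    = [1 0 0 1 1]';                    % ground-truth labels
tp        = sum(predicted + actual == 2);    % 2 true positives
precision = tp / sum(predicted);             % 2/3
recall    = tp / sum(actual);                % 2/3
f1        = 2 * precision * recall / (precision + recall);   % 2/3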
--------------------------------------------------------------------------------
/week9/ex8/submit.m:
--------------------------------------------------------------------------------
1 | function submit()
2 |   addpath('./lib');
3 | 
4 |   conf.assignmentSlug = 'anomaly-detection-and-recommender-systems';
5 |   conf.itemName = 'Anomaly Detection and Recommender Systems';
6 |   conf.partArrays = { ...
7 |     { ...
8 |       '1', ...
9 |       { 'estimateGaussian.m' }, ...
10 |       'Estimate Gaussian Parameters', ...
11 |     }, ...
12 |     { ...
13 |       '2', ...
14 |       { 'selectThreshold.m' }, ...
15 |       'Select Threshold', ...
16 |     }, ...
17 |     { ...
18 |       '3', ...
19 |       { 'cofiCostFunc.m' }, ...
20 |       'Collaborative Filtering Cost', ...
21 |     }, ...
22 |     { ...
23 |       '4', ...
24 |       { 'cofiCostFunc.m' }, ...
25 |       'Collaborative Filtering Gradient', ...
26 |     }, ...
27 |     { ...
28 |       '5', ...
29 |       { 'cofiCostFunc.m' }, ...
30 |       'Regularized Cost', ...
31 |     }, ...
32 |     { ...
33 |       '6', ...
34 |       { 'cofiCostFunc.m' }, ...
35 |       'Regularized Gradient', ...
36 |     }, ...
37 |   };
38 |   conf.output = @output;
39 | 
40 |   submitWithConfiguration(conf);
41 | end
42 | 
43 | function out = output(partId, auxstring)
44 |   % Random Test Cases
45 |   n_u = 3; n_m = 4; n = 5;
46 |   X = reshape(sin(1:n_m*n), n_m, n);
47 |   Theta = reshape(cos(1:n_u*n), n_u, n);
48 |   Y = reshape(sin(1:2:2*n_m*n_u), n_m, n_u);
49 |   R = Y > 0.5;
50 |   pval = [abs(Y(:)) ; 0.001; 1];
51 |   Y = (Y .* double(R));  % set 'Y' values to 0 for movies not reviewed
52 |   yval = [R(:) ; 1; 0];
53 |   params = [X(:); Theta(:)];
54 |   if partId == '1'
55 |     [mu sigma2] = estimateGaussian(X);
56 |     out = sprintf('%0.5f ', [mu(:); sigma2(:)]);
57 |   elseif partId == '2'
58 |     [bestEpsilon bestF1] = selectThreshold(yval, pval);
59 |     out = sprintf('%0.5f ', [bestEpsilon(:); bestF1(:)]);
60 |   elseif partId == '3'
61 |     [J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
62 |                        n, 0);
63 |     out = sprintf('%0.5f ', J(:));
64 |   elseif partId == '4'
65 |     [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
66 |                        n, 0);
67 |     out = sprintf('%0.5f ', grad(:));
68 |   elseif partId == '5'
69 |     [J] = cofiCostFunc(params, Y, R, n_u, n_m, ...
70 |                        n, 1.5);
71 |     out = sprintf('%0.5f ', J(:));
72 |   elseif partId == '6'
73 |     [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ...
74 |                        n, 1.5);
75 |     out = sprintf('%0.5f ', grad(:));
76 |   end
77 | end
78 | 
--------------------------------------------------------------------------------
/week9/ex8/token.mat:
--------------------------------------------------------------------------------
1 | # Created by Octave 5.2.0, Sun Jun 21 21:14:24 2020 GMT
2 | # name: email
3 | # type: sq_string
4 | # elements: 1
5 | # length: 21
6 | anishviewer@gmail.com
7 | 
8 | 
9 | # name: token
10 | # type: sq_string
11 | # elements: 1
12 | # length: 16
13 | 8A2IJoPx2DCGoZqI
14 | 
15 | 
--------------------------------------------------------------------------------
/week9/ex8/visualizeFit.m:
--------------------------------------------------------------------------------
1 | function visualizeFit(X, mu, sigma2)
2 | %VISUALIZEFIT Visualize the dataset and its estimated distribution.
3 | %   VISUALIZEFIT(X, mu, sigma2) This visualization shows you the
4 | %   probability density function of the Gaussian distribution. Each example
5 | %   has a location (x1, x2) that depends on its feature values.
6 | %
7 | 
8 | [X1,X2] = meshgrid(0:.5:35);
9 | Z = multivariateGaussian([X1(:) X2(:)],mu,sigma2);
10 | Z = reshape(Z,size(X1));
11 | 
12 | plot(X(:, 1), X(:, 2),'bx');
13 | hold on;
14 | % Do not plot if there are infinities
15 | if (sum(isinf(Z)) == 0)
16 |     contour(X1, X2, Z, 10.^(-20:3:0)');
17 | end
18 | hold off;
19 | 
20 | end
--------------------------------------------------------------------------------
/week9/reccomender-systems-quiz.md:
--------------------------------------------------------------------------------
1 | # Recommender Systems
2 | 
3 | ![Question 1](assets/reccomender-systems/question-1.PNG)
4 | ![Question 2](assets/reccomender-systems/question-2-3.PNG)
5 | ![Question 3](assets/reccomender-systems/question-3.PNG)
6 | ![Question 3](assets/reccomender-systems/question-3-2.PNG)
7 | ![Question 4](assets/reccomender-systems/question-4.PNG)
8 | ![Question 4](assets/reccomender-systems/question-4-2.PNG)
9 | ![Question 5](assets/reccomender-systems/question-5.PNG)
10 | 
--------------------------------------------------------------------------------
/week9/test.m:
--------------------------------------------------------------------------------
1 | clc;
2 | clear;
3 | 
4 | A = rand(5, 3);
5 | B = rand(3, 5);
6 | C = A * B;
7 | disp('c'); disp(C);
8 | R = rand(5, 5) >= 0.5;
9 | disp('r'); disp(R);
10 | 
11 | disp(sum(sum(C(R == 1))));   % sum of the product, restricted to the masked entries
12 | % C = (A * B) * R;           % wrong: matrix product with the mask instead of masking
13 | 
14 | C = (A * B) .* R;            % elementwise mask; A(R == 1) * B(R == 1) would error, since R is 5x5 but A is 5x3
15 | disp(sum(sum(C)));           % same value as line 11
--------------------------------------------------------------------------------
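The masking experiment in test.m above is the building block for the collaborative-filtering cost graded through cofiCostFunc.m in submit.m. As an illustration only, here is the standard regularized cost from the exercise handout with randomly generated placeholder data; this is not code copied from this repository's cofiCostFunc.m:

X      = rand(5, 3);            % movie features  (num_movies x num_features)
Theta  = rand(4, 3);            % user parameters (num_users  x num_features)
Y      = rand(5, 4);            % observed ratings
R      = rand(5, 4) >= 0.5;     % R(i,j) = 1 if movie i was rated by user j
lambda = 1.5;

E = (X * Theta' - Y) .* R;      % errors counted only where a rating exists
J = sum(sum(E .^ 2)) / 2 ...
    + (lambda / 2) * (sum(sum(Theta .^ 2)) + sum(sum(X .^ 2)));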