├── .gitignore ├── README.md ├── _config.yml ├── machine-learning-ex1 ├── ex1.pdf └── ex1 │ ├── computeCost.m │ ├── computeCostMulti.m │ ├── ex1.m │ ├── ex1_multi.m │ ├── ex1data1.txt │ ├── ex1data2.txt │ ├── featureNormalize.m │ ├── gradientDescent.m │ ├── gradientDescentMulti.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── normalEqn.m │ ├── octave-workspace │ ├── plotData.m │ ├── submit.m │ └── warmUpExercise.m ├── machine-learning-ex2 ├── ex2.pdf └── ex2 │ ├── costFunction.m │ ├── costFunctionReg.m │ ├── ex2.m │ ├── ex2_reg.m │ ├── ex2data1.txt │ ├── ex2data2.txt │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── mapFeature.m │ ├── plotData.m │ ├── plotDecisionBoundary.m │ ├── predict.m │ ├── sigmoid.m │ └── submit.m ├── machine-learning-ex3 ├── ex3.pdf └── ex3 │ ├── displayData.m │ ├── ex3.m │ ├── ex3_nn.m │ ├── ex3data1.mat │ ├── ex3weights.mat │ ├── fmincg.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── lrCostFunction.m │ ├── oneVsAll.m │ ├── predict.m │ ├── predictOneVsAll.m │ ├── sigmoid.m │ └── submit.m ├── machine-learning-ex4 ├── ex4.pdf └── ex4 │ ├── checkNNGradients.m │ ├── computeNumericalGradient.m │ ├── debugInitializeWeights.m │ ├── displayData.m │ ├── ex4.m │ ├── ex4data1.mat │ ├── ex4weights.mat │ ├── fmincg.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── nnCostFunction.m │ ├── predict.m │ ├── randInitializeWeights.m │ ├── sigmoid.m │ ├── sigmoidGradient.m │ └── submit.m ├── machine-learning-ex5 ├── ex5.pdf └── ex5 │ ├── ex5.m │ ├── ex5data1.mat │ ├── featureNormalize.m │ ├── fmincg.m │ ├── learningCurve.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── linearRegCostFunction.m │ ├── plotFit.m │ ├── polyFeatures.m │ ├── submit.m │ ├── trainLinearReg.m │ └── validationCurve.m ├── machine-learning-ex6 ├── ex6.pdf └── ex6 │ ├── dataset3Params.m │ ├── emailFeatures.m │ ├── emailSample1.txt │ ├── emailSample2.txt │ ├── ex6.m │ ├── ex6_spam.m │ ├── ex6data1.mat │ ├── ex6data2.mat │ ├── ex6data3.mat │ ├── gaussianKernel.m │ ├── getVocabList.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── 
savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── linearKernel.m │ ├── plotData.m │ ├── porterStemmer.m │ ├── processEmail.m │ ├── readFile.m │ ├── spamSample1.txt │ ├── spamSample2.txt │ ├── spamTest.mat │ ├── spamTrain.mat │ ├── submit.m │ ├── svmPredict.m │ ├── svmTrain.m │ ├── visualizeBoundary.m │ ├── visualizeBoundaryLinear.m │ └── vocab.txt ├── machine-learning-ex7 ├── ex7.pdf └── ex7 │ ├── bird_small.mat │ ├── bird_small.png │ ├── computeCentroids.m │ ├── displayData.m │ ├── drawLine.m │ ├── ex7.m │ ├── ex7_pca.m │ ├── ex7data1.mat │ ├── ex7data2.mat │ ├── ex7faces.mat │ ├── featureNormalize.m │ ├── findClosestCentroids.m │ ├── kMeansInitCentroids.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── pca.m │ ├── plotDataPoints.m │ ├── plotProgresskMeans.m │ ├── projectData.m │ ├── recoverData.m │ ├── runkMeans.m │ └── submit.m ├── machine-learning-ex8 ├── ex8.pdf └── ex8 │ ├── checkCostFunction.m │ ├── cofiCostFunc.m │ ├── computeNumericalGradient.m │ ├── estimateGaussian.m │ ├── ex8.m │ ├── ex8_cofi.m │ ├── ex8_movieParams.mat │ ├── ex8_movies.mat │ ├── ex8data1.mat │ ├── ex8data2.mat │ ├── fmincg.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── loadMovieList.m │ ├── movie_ids.txt │ ├── multivariateGaussian.m │ ├── normalizeRatings.m │ ├── selectThreshold.m │ ├── submit.m │ └── visualizeFit.m └── notes.docx /.gitignore: -------------------------------------------------------------------------------- 1 | token.mat 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Machine Learning Course 2 | 3 | ## Andrew Ng's Stanford University Machine Learning Course (Coursera) 4 | 5 | Assignments for this course are written in Octave and MATLAB. Beware: they may contain mistakes. 6 | 7 | ### Assignment 1 - Linear Regression 8 | Linear regression with one variable to predict profits for a food truck. Implement gradient descent. 9 | 10 | ### Assignment 2 - Logistic Regression & Regularized Logistic Regression 11 | Logistic regression to predict whether a student will get admitted into university. 12 | Regularized logistic regression to predict whether microchips from a fabrication plant pass quality assurance. 13 | 14 | ### Assignment 3 - Multi-class Classification & Neural Networks Representation 15 | Implement one-vs-all logistic regression and neural networks to recognize handwritten digits. 16 | Logistic regression to recognize handwritten digits using multiple one-vs-all logistic regression models to build a multi-class classifier. 17 | Implement a neural network to recognize handwritten digits. The neural network will be able to represent complex models that form non-linear hypotheses. Implement the feedforward propagation algorithm for prediction. 
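For illustration, here is a minimal vectorized sketch of the feedforward prediction step (assuming the pre-trained weight matrices `Theta1` and `Theta2` and the input matrix `X` are already loaded, as in `ex3_nn.m`; the variable names are only suggestions):

```matlab
m  = size(X, 1);                 % number of examples
a1 = [ones(m, 1) X];             % input layer plus bias unit
a2 = sigmoid(a1 * Theta1');      % hidden layer activations
a2 = [ones(m, 1) a2];            % add bias unit to the hidden layer
a3 = sigmoid(a2 * Theta2');      % output layer: one score per class
[max_val, p] = max(a3, [], 2);   % predicted label = index of the largest score
```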
18 | 19 | ### Assignment 4 - Neural Networks Learning 20 | Implement the backpropagation algorithm for neural networks and apply it to the task of handwritten digit recognition. Implement the cost function and gradient for the neural network with regularization. Implement the sigmoid gradient function and random initialization of the weights. Implement the backpropagation algorithm to learn the parameters for the neural network, and add regularization to the gradient. 21 | 22 | ### Assignment 5 - Regularized Linear Regression and Bias vs Variance 23 | Implement regularized linear regression and use it to study models with different bias-variance properties. Implement regularized linear regression to predict the amount of water flowing out of a dam using the change of water level in a reservoir. Implement polynomial regression to find a better fit to the data. Implement code to generate the learning curves that are useful in debugging learning algorithms. Implement an automated method to select the lambda (regularization) parameter and use a cross-validation set to determine how good each lambda value is. 24 | 25 | ### Assignment 6 - Support Vector Machines (SVM) 26 | Use SVMs to build a spam classifier. Use SVMs with Gaussian kernels on datasets that are not linearly separable. Implement the Gaussian kernel and find the best parameters for it. Implement preprocessing methods and feature extraction. 27 | 28 | ### Assignment 7 - K-means Clustering and Principal Component Analysis 29 | Implement the K-means clustering algorithm and apply it to compress an image. Use principal component analysis to find a low-dimensional representation of face images. Find the closest centroids and compute the centroid means. Compute the covariance matrix of the data and its eigenvectors. Project the data onto the principal components and reconstruct an approximation of the data. 30 | 31 | ### Assignment 8 - Anomaly Detection and Recommender Systems 32 | Implement the anomaly detection algorithm and apply it to detect failing servers on a network. Use a Gaussian model to detect anomalous examples in the dataset. Use collaborative filtering to build a recommender system for movies. Implement the collaborative filtering cost function and its gradient (with and without regularization). Add personal ratings to movies to get personalized movie recommendations. 
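For illustration, here is a minimal vectorized sketch of the regularized collaborative filtering cost and gradients (assuming `Y` holds the ratings, `R` marks which entries are rated, `X` / `Theta` are the movie-feature and user-parameter matrices, and `lambda` is the regularization strength, as in `cofiCostFunc.m`):

```matlab
err = (X * Theta' - Y) .* R;                 % errors, counted only where a rating exists
J = (1 / 2) * sum(sum(err .^ 2)) ...
    + (lambda / 2) * (sum(sum(Theta .^ 2)) + sum(sum(X .^ 2)));
X_grad     = err  * Theta + lambda * X;      % gradient w.r.t. movie features
Theta_grad = err' * X     + lambda * Theta;  % gradient w.r.t. user parameters
```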
33 | 34 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-hacker -------------------------------------------------------------------------------- /machine-learning-ex1/ex1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex1/ex1.pdf -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/computeCost.m: -------------------------------------------------------------------------------- 1 | function J = computeCost(X, y, theta) 2 | %COMPUTECOST Compute cost for linear regression 3 | % J = COMPUTECOST(X, y, theta) computes the cost of using theta as the 4 | % parameter for linear regression to fit the data points in X and y 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | 9 | % You need to return the following variables correctly 10 | J = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Compute the cost of a particular choice of theta 14 | % You should set J to the cost. 15 | 16 | hyp = X*theta; 17 | err = hyp - y; 18 | sumSquareErr = sum(err.^2); 19 | J = 1/(2*m) * sumSquareErr; 20 | 21 | 22 | % ========================================================================= 23 | 24 | end 25 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/computeCostMulti.m: -------------------------------------------------------------------------------- 1 | function J = computeCostMulti(X, y, theta) 2 | %COMPUTECOSTMULTI Compute cost for linear regression with multiple variables 3 | % J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the 4 | % parameter for linear regression to fit the data points in X and y 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | 9 | % You need to return the following variables correctly 10 | J = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Compute the cost of a particular choice of theta 14 | % You should set J to the cost. 
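% A possible vectorized implementation (illustrative sketch, not part of the
% original stub): identical in form to computeCost.m, because the vectorized
% cost does not depend on the number of features.
hyp = X * theta;                     % m x 1 vector of predictions
err = hyp - y;                       % m x 1 vector of errors
J = (1 / (2 * m)) * sum(err .^ 2);   % half the average squared error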
15 | 16 | 17 | 18 | 19 | 20 | % ========================================================================= 21 | 22 | end 23 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/ex1data1.txt: -------------------------------------------------------------------------------- 1 | 6.1101,17.592 2 | 5.5277,9.1302 3 | 8.5186,13.662 4 | 7.0032,11.854 5 | 5.8598,6.8233 6 | 8.3829,11.886 7 | 7.4764,4.3483 8 | 8.5781,12 9 | 6.4862,6.5987 10 | 5.0546,3.8166 11 | 5.7107,3.2522 12 | 14.164,15.505 13 | 5.734,3.1551 14 | 8.4084,7.2258 15 | 5.6407,0.71618 16 | 5.3794,3.5129 17 | 6.3654,5.3048 18 | 5.1301,0.56077 19 | 6.4296,3.6518 20 | 7.0708,5.3893 21 | 6.1891,3.1386 22 | 20.27,21.767 23 | 5.4901,4.263 24 | 6.3261,5.1875 25 | 5.5649,3.0825 26 | 18.945,22.638 27 | 12.828,13.501 28 | 10.957,7.0467 29 | 13.176,14.692 30 | 22.203,24.147 31 | 5.2524,-1.22 32 | 6.5894,5.9966 33 | 9.2482,12.134 34 | 5.8918,1.8495 35 | 8.2111,6.5426 36 | 7.9334,4.5623 37 | 8.0959,4.1164 38 | 5.6063,3.3928 39 | 12.836,10.117 40 | 6.3534,5.4974 41 | 5.4069,0.55657 42 | 6.8825,3.9115 43 | 11.708,5.3854 44 | 5.7737,2.4406 45 | 7.8247,6.7318 46 | 7.0931,1.0463 47 | 5.0702,5.1337 48 | 5.8014,1.844 49 | 11.7,8.0043 50 | 5.5416,1.0179 51 | 7.5402,6.7504 52 | 5.3077,1.8396 53 | 7.4239,4.2885 54 | 7.6031,4.9981 55 | 6.3328,1.4233 56 | 6.3589,-1.4211 57 | 6.2742,2.4756 58 | 5.6397,4.6042 59 | 9.3102,3.9624 60 | 9.4536,5.4141 61 | 8.8254,5.1694 62 | 5.1793,-0.74279 63 | 21.279,17.929 64 | 14.908,12.054 65 | 18.959,17.054 66 | 7.2182,4.8852 67 | 8.2951,5.7442 68 | 10.236,7.7754 69 | 5.4994,1.0173 70 | 20.341,20.992 71 | 10.136,6.6799 72 | 7.3345,4.0259 73 | 6.0062,1.2784 74 | 7.2259,3.3411 75 | 5.0269,-2.6807 76 | 6.5479,0.29678 77 | 7.5386,3.8845 78 | 5.0365,5.7014 79 | 10.274,6.7526 80 | 5.1077,2.0576 81 | 5.7292,0.47953 82 | 5.1884,0.20421 83 | 6.3557,0.67861 84 | 9.7687,7.5435 85 | 6.5159,5.3436 86 | 8.5172,4.2415 87 | 9.1802,6.7981 88 | 6.002,0.92695 89 | 5.5204,0.152 90 | 5.0594,2.8214 91 | 5.7077,1.8451 92 | 7.6366,4.2959 93 | 5.8707,7.2029 94 | 5.3054,1.9869 95 | 8.2934,0.14454 96 | 13.394,9.0551 97 | 5.4369,0.61705 98 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/ex1data2.txt: -------------------------------------------------------------------------------- 1 | 2104,3,399900 2 | 1600,3,329900 3 | 2400,3,369000 4 | 1416,2,232000 5 | 3000,4,539900 6 | 1985,4,299900 7 | 1534,3,314900 8 | 1427,3,198999 9 | 1380,3,212000 10 | 1494,3,242500 11 | 1940,4,239999 12 | 2000,3,347000 13 | 1890,3,329999 14 | 4478,5,699900 15 | 1268,3,259900 16 | 2300,4,449900 17 | 1320,2,299900 18 | 1236,3,199900 19 | 2609,4,499998 20 | 3031,4,599000 21 | 1767,3,252900 22 | 1888,2,255000 23 | 1604,3,242900 24 | 1962,4,259900 25 | 3890,3,573900 26 | 1100,3,249900 27 | 1458,3,464500 28 | 2526,3,469000 29 | 2200,3,475000 30 | 2637,3,299900 31 | 1839,2,349900 32 | 1000,1,169900 33 | 2040,4,314900 34 | 3137,3,579900 35 | 1811,4,285900 36 | 1437,3,249900 37 | 1239,3,229900 38 | 2132,4,345000 39 | 4215,4,549000 40 | 2162,4,287000 41 | 1664,2,368500 42 | 2238,3,329900 43 | 2567,4,314000 44 | 1200,3,299000 45 | 852,2,179900 46 | 1852,4,299900 47 | 1203,3,239500 48 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes 
the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | % You need to set these values correctly 9 | X_norm = X; 10 | mu = zeros(1, size(X, 2)); 11 | sigma = zeros(1, size(X, 2)); 12 | 13 | % ====================== YOUR CODE HERE ====================== 14 | % Instructions: First, for each feature dimension, compute the mean 15 | % of the feature and subtract it from the dataset, 16 | % storing the mean value in mu. Next, compute the 17 | % standard deviation of each feature and divide 18 | % each feature by it's standard deviation, storing 19 | % the standard deviation in sigma. 20 | % 21 | % Note that X is a matrix where each column is a 22 | % feature and each row is an example. You need 23 | % to perform the normalization separately for 24 | % each feature. 25 | % 26 | % Hint: You might find the 'mean' and 'std' functions useful. 27 | % 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | % ============================================================ 38 | 39 | end 40 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/gradientDescent.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENT Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | 10 | for iter = 1:num_iters 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Perform a single gradient step on the parameter vector 14 | % theta. 15 | % 16 | % Hint: While debugging, it can be useful to print out the values 17 | % of the cost function (computeCost) and gradient here. 18 | % 19 | 20 | hyp = X*theta; 21 | err = (hyp - y)'; 22 | theta = theta - ((alpha * (1/m)) * (err * X)'); 23 | 24 | 25 | 26 | 27 | % ============================================================ 28 | 29 | % Save the cost J in every iteration 30 | J_history(iter) = computeCost(X, y, theta); 31 | 32 | end 33 | 34 | end 35 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/gradientDescentMulti.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENTMULTI Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENTMULTI(x, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | 10 | for iter = 1:num_iters 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Perform a single gradient step on the parameter vector 14 | % theta. 15 | % 16 | % Hint: While debugging, it can be useful to print out the values 17 | % of the cost function (computeCostMulti) and gradient here. 
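% A possible vectorized update (illustrative sketch, not part of the original
% stub): the same simultaneous update rule used in gradientDescent.m works for
% any number of features.
hyp = X * theta;                                  % m x 1 vector of predictions
theta = theta - (alpha / m) * (X' * (hyp - y));   % update all theta values at once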
18 | % 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | % ============================================================ 31 | 32 | % Save the cost J in every iteration 33 | J_history(iter) = computeCostMulti(X, y, theta); 34 | 35 | end 36 | 37 | end 38 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and 
release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex1/ex1/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex1/ex1/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, 
including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex2/ex2/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i3 matrix, where the first column is all-ones 10 | 11 | % Plot Data 12 | plotData(X(:,2:3), y); 13 | hold on 14 | 15 | if size(X, 2) <= 3 16 | % Only need 2 points to define a line, so choose two endpoints 17 | plot_x = [min(X(:,2))-2, max(X(:,2))+2]; 18 | 19 | % Calculate the decision boundary line 20 | plot_y = (-1./theta(3)).*(theta(2).*plot_x + theta(1)); 21 | 22 | % Plot, and adjust axes for better viewing 23 | plot(plot_x, plot_y) 24 | 25 | % Legend, specific for the exercise 26 | legend('Admitted', 'Not admitted', 'Decision Boundary') 27 | axis([30, 100, 30, 100]) 28 | else 29 | % Here is the grid range 30 | u = linspace(-1, 1.5, 50); 31 | v = linspace(-1, 1.5, 50); 32 | 33 | z = zeros(length(u), length(v)); 34 | % Evaluate z = theta*x over the grid 35 | for i = 1:length(u) 36 | for j = 1:length(v) 37 | z(i,j) = mapFeature(u(i), v(j))*theta; 38 | end 39 | end 40 | z = z'; % important to transpose z before calling contour 41 | 42 | % Plot z = 0 43 | % Notice you need to specify the range [0, 0] 44 | contour(u, v, z, [0, 0], 'LineWidth', 2) 45 | end 46 | hold off 47 | 48 | end 49 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/predict.m: -------------------------------------------------------------------------------- 1 | function p = predict(theta, X) 2 | %PREDICT Predict whether the label is 0 or 1 using learned logistic 3 | %regression parameters theta 4 | % p = PREDICT(theta, X) computes the predictions for X using a 5 | % threshold at 0.5 (i.e., if sigmoid(theta'*x) >= 0.5, predict 1) 6 | 7 | m = size(X, 1); % Number of training examples 8 | 9 | % You need to return the following variables correctly 10 | p = zeros(m, 1); 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Complete 
the following code to make predictions using 14 | % your learned logistic regression parameters. 15 | % You should set p to a vector of 0's and 1's 16 | %' 17 | 18 | index = find(sigmoid(theta'*X') >= 0.5); 19 | p(index,1) = 1; 20 | 21 | 22 | 23 | % ========================================================================= 24 | 25 | 26 | end 27 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/sigmoid.m: -------------------------------------------------------------------------------- 1 | function g = sigmoid(z) 2 | %SIGMOID Compute sigmoid function 3 | % g = SIGMOID(z) computes the sigmoid of z. 4 | 5 | % You need to return the following variables correctly 6 | g = zeros(size(z)); 7 | 8 | % ====================== YOUR CODE HERE ====================== 9 | % Instructions: Compute the sigmoid of each value of z (z can be a matrix, 10 | % vector or scalar). 11 | 12 | 13 | g = 1 ./ (1 + e.^(-z)); 14 | 15 | 16 | % ============================================================= 17 | 18 | end 19 | -------------------------------------------------------------------------------- /machine-learning-ex2/ex2/submit.m: -------------------------------------------------------------------------------- 1 | function submit() 2 | addpath('./lib'); 3 | 4 | conf.assignmentSlug = 'logistic-regression'; 5 | conf.itemName = 'Logistic Regression'; 6 | conf.partArrays = { ... 7 | { ... 8 | '1', ... 9 | { 'sigmoid.m' }, ... 10 | 'Sigmoid Function', ... 11 | }, ... 12 | { ... 13 | '2', ... 14 | { 'costFunction.m' }, ... 15 | 'Logistic Regression Cost', ... 16 | }, ... 17 | { ... 18 | '3', ... 19 | { 'costFunction.m' }, ... 20 | 'Logistic Regression Gradient', ... 21 | }, ... 22 | { ... 23 | '4', ... 24 | { 'predict.m' }, ... 25 | 'Predict', ... 26 | }, ... 27 | { ... 28 | '5', ... 29 | { 'costFunctionReg.m' }, ... 30 | 'Regularized Logistic Regression Cost', ... 31 | }, ... 32 | { ... 33 | '6', ... 34 | { 'costFunctionReg.m' }, ... 35 | 'Regularized Logistic Regression Gradient', ... 36 | }, ... 
37 | }; 38 | conf.output = @output; 39 | 40 | submitWithConfiguration(conf); 41 | end 42 | 43 | function out = output(partId, auxstring) 44 | % Random Test Cases 45 | X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))']; 46 | y = sin(X(:,1) + X(:,2)) > 0; 47 | if partId == '1' 48 | out = sprintf('%0.5f ', sigmoid(X)); 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y)); 51 | elseif partId == '3' 52 | [cost, grad] = costFunction([0.25 0.5 -0.5]', X, y); 53 | out = sprintf('%0.5f ', grad); 54 | elseif partId == '4' 55 | out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X)); 56 | elseif partId == '5' 57 | out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1)); 58 | elseif partId == '6' 59 | [cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1); 60 | out = sprintf('%0.5f ', grad); 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex3/ex3.pdf -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/ex3.m: -------------------------------------------------------------------------------- 1 | %% Machine Learning Online Class - Exercise 3 | Part 1: One-vs-all 2 | 3 | % Instructions 4 | % ------------ 5 | % 6 | % This file contains code that helps you get started on the 7 | % linear exercise. 
You will need to complete the following functions 8 | % in this exericse: 9 | % 10 | % lrCostFunction.m (logistic regression cost function) 11 | % oneVsAll.m 12 | % predictOneVsAll.m 13 | % predict.m 14 | % 15 | % For this exercise, you will not need to change any code in this file, 16 | % or any other files other than those mentioned above. 17 | % 18 | 19 | %% Initialization 20 | clear ; close all; clc 21 | 22 | %% Setup the parameters you will use for this part of the exercise 23 | input_layer_size = 400; % 20x20 Input Images of Digits 24 | num_labels = 10; % 10 labels, from 1 to 10 25 | % (note that we have mapped "0" to label 10) 26 | 27 | %% =========== Part 1: Loading and Visualizing Data ============= 28 | % We start the exercise by first loading and visualizing the dataset. 29 | % You will be working with a dataset that contains handwritten digits. 30 | % 31 | 32 | % Load Training Data 33 | fprintf('Loading and Visualizing Data ...\n') 34 | 35 | load('ex3data1.mat'); % training data stored in arrays X, y 36 | m = size(X, 1); 37 | 38 | % Randomly select 100 data points to display 39 | rand_indices = randperm(m); 40 | sel = X(rand_indices(1:100), :); 41 | 42 | displayData(sel); 43 | 44 | fprintf('Program paused. Press enter to continue.\n'); 45 | pause; 46 | 47 | %% ============ Part 2a: Vectorize Logistic Regression ============ 48 | % In this part of the exercise, you will reuse your logistic regression 49 | % code from the last exercise. You task here is to make sure that your 50 | % regularized logistic regression implementation is vectorized. After 51 | % that, you will implement one-vs-all classification for the handwritten 52 | % digit dataset. 53 | % 54 | 55 | % Test case for lrCostFunction 56 | fprintf('\nTesting lrCostFunction() with regularization'); 57 | 58 | theta_t = [-2; -1; 1; 2]; 59 | X_t = [ones(5,1) reshape(1:15,5,3)/10]; 60 | y_t = ([1;0;1;0;1] >= 0.5); 61 | lambda_t = 3; 62 | [J grad] = lrCostFunction(theta_t, X_t, y_t, lambda_t); 63 | 64 | fprintf('\nCost: %f\n', J); 65 | fprintf('Expected cost: 2.534819\n'); 66 | fprintf('Gradients:\n'); 67 | fprintf(' %f \n', grad); 68 | fprintf('Expected gradients:\n'); 69 | fprintf(' 0.146561\n -0.548558\n 0.724722\n 1.398003\n'); 70 | 71 | fprintf('Program paused. Press enter to continue.\n'); 72 | pause; 73 | %% ============ Part 2b: One-vs-All Training ============ 74 | fprintf('\nTraining One-vs-All Logistic Regression...\n') 75 | 76 | lambda = 0.1; 77 | [all_theta] = oneVsAll(X, y, num_labels, lambda); 78 | 79 | fprintf('Program paused. Press enter to continue.\n'); 80 | pause; 81 | 82 | 83 | %% ================ Part 3: Predict for One-Vs-All ================ 84 | 85 | pred = predictOneVsAll(all_theta, X); 86 | 87 | fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100); 88 | 89 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/ex3_nn.m: -------------------------------------------------------------------------------- 1 | %% Machine Learning Online Class - Exercise 3 | Part 2: Neural Networks 2 | 3 | % Instructions 4 | % ------------ 5 | % 6 | % This file contains code that helps you get started on the 7 | % linear exercise. 
You will need to complete the following functions 8 | % in this exericse: 9 | % 10 | % lrCostFunction.m (logistic regression cost function) 11 | % oneVsAll.m 12 | % predictOneVsAll.m 13 | % predict.m 14 | % 15 | % For this exercise, you will not need to change any code in this file, 16 | % or any other files other than those mentioned above. 17 | % 18 | 19 | %% Initialization 20 | clear ; close all; clc 21 | 22 | %% Setup the parameters you will use for this exercise 23 | input_layer_size = 400; % 20x20 Input Images of Digits 24 | hidden_layer_size = 25; % 25 hidden units 25 | num_labels = 10; % 10 labels, from 1 to 10 26 | % (note that we have mapped "0" to label 10) 27 | 28 | %% =========== Part 1: Loading and Visualizing Data ============= 29 | % We start the exercise by first loading and visualizing the dataset. 30 | % You will be working with a dataset that contains handwritten digits. 31 | % 32 | 33 | % Load Training Data 34 | fprintf('Loading and Visualizing Data ...\n') 35 | 36 | load('ex3data1.mat'); 37 | m = size(X, 1); 38 | 39 | % Randomly select 100 data points to display 40 | sel = randperm(size(X, 1)); 41 | sel = sel(1:100); 42 | 43 | displayData(X(sel, :)); 44 | 45 | fprintf('Program paused. Press enter to continue.\n'); 46 | pause; 47 | 48 | %% ================ Part 2: Loading Pameters ================ 49 | % In this part of the exercise, we load some pre-initialized 50 | % neural network parameters. 51 | 52 | fprintf('\nLoading Saved Neural Network Parameters ...\n') 53 | 54 | % Load the weights into variables Theta1 and Theta2 55 | load('ex3weights.mat'); 56 | 57 | %% ================= Part 3: Implement Predict ================= 58 | % After training the neural network, we would like to use it to predict 59 | % the labels. You will now implement the "predict" function to use the 60 | % neural network to predict the labels of the training set. This lets 61 | % you compute the training set accuracy. 62 | 63 | pred = predict(Theta1, Theta2, X); 64 | 65 | fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100); 66 | 67 | fprintf('Program paused. Press enter to continue.\n'); 68 | pause; 69 | 70 | % To give you an idea of the network's output, you can also run 71 | % through the examples one at the a time to see what it is predicting. 
72 | 73 | % Randomly permute examples 74 | rp = randperm(m); 75 | 76 | for i = 1:m 77 | % Display 78 | fprintf('\nDisplaying Example Image\n'); 79 | displayData(X(rp(i), :)); 80 | 81 | pred = predict(Theta1, Theta2, X(rp(i),:)); 82 | fprintf('\nNeural Network Prediction: %d (digit %d)\n', pred, mod(pred, 10)); 83 | 84 | % Pause with quit option 85 | s = input('Paused - press enter to continue, q to exit:','s'); 86 | if s == 'q' 87 | break 88 | end 89 | end 90 | 91 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/ex3data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex3/ex3/ex3data1.mat -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/ex3weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex3/ex3/ex3weights.mat -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and 
release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex3/ex3/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex3/ex3/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 0; 37 | Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ... 38 | 1 1 ; 1 2 ; 2 1 ; 2 2 ; ... 39 | -1 1 ; -1 2 ; -2 1 ; -2 2 ; ... 
40 | 1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ]; 41 | ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]'; 42 | t1 = sin(reshape(1:2:24, 4, 3)); 43 | t2 = cos(reshape(1:2:40, 4, 5)); 44 | 45 | if partId == '1' 46 | [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1); 47 | out = sprintf('%0.5f ', J); 48 | out = [out sprintf('%0.5f ', grad)]; 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1)); 51 | elseif partId == '3' 52 | out = sprintf('%0.5f ', predictOneVsAll(t1, Xm)); 53 | elseif partId == '4' 54 | out = sprintf('%0.5f ', predict(t1, t2, Xm)); 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex4/ex4.pdf -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/checkNNGradients.m: -------------------------------------------------------------------------------- 1 | function checkNNGradients(lambda) 2 | %CHECKNNGRADIENTS Creates a small neural network to check the 3 | %backpropagation gradients 4 | % CHECKNNGRADIENTS(lambda) Creates a small neural network to check the 5 | % backpropagation gradients, it will output the analytical gradients 6 | % produced by your backprop code and the numerical gradients (computed 7 | % using computeNumericalGradient). These two gradient computations should 8 | % result in very similar values. 9 | % 10 | 11 | if ~exist('lambda', 'var') || isempty(lambda) 12 | lambda = 0; 13 | end 14 | 15 | input_layer_size = 3; 16 | hidden_layer_size = 5; 17 | num_labels = 3; 18 | m = 5; 19 | 20 | % We generate some 'random' test data 21 | Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size); 22 | Theta2 = debugInitializeWeights(num_labels, hidden_layer_size); 23 | % Reusing debugInitializeWeights to generate X 24 | X = debugInitializeWeights(m, input_layer_size - 1); 25 | y = 1 + mod(1:m, num_labels)'; 26 | 27 | % Unroll parameters 28 | nn_params = [Theta1(:) ; Theta2(:)]; 29 | 30 | % Short hand for cost function 31 | costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ... 32 | num_labels, X, y, lambda); 33 | 34 | [cost, grad] = costFunc(nn_params); 35 | numgrad = computeNumericalGradient(costFunc, nn_params); 36 | 37 | % Visually examine the two gradient computations. The two columns 38 | % you get should be very similar. 39 | disp([numgrad grad]); 40 | fprintf(['The above two columns you get should be very similar.\n' ... 41 | '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']); 42 | 43 | % Evaluate the norm of the difference between two solutions. 44 | % If you have a correct implementation, and assuming you used EPSILON = 0.0001 45 | % in computeNumericalGradient.m, then diff below should be less than 1e-9 46 | diff = norm(numgrad-grad)/norm(numgrad+grad); 47 | 48 | fprintf(['If your backpropagation implementation is correct, then \n' ... 49 | 'the relative difference will be small (less than 1e-9). \n' ... 
50 | '\nRelative Difference: %g\n'], diff); 51 | 52 | end 53 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient. 4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient.It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. (i.e., numgrad(i) should 12 | % be the (approximately) the partial derivative of J with respect 13 | % to theta(i).) 14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/debugInitializeWeights.m: -------------------------------------------------------------------------------- 1 | function W = debugInitializeWeights(fan_out, fan_in) 2 | %DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in 3 | %incoming connections and fan_out outgoing connections using a fixed 4 | %strategy, this will help you later in debugging 5 | % W = DEBUGINITIALIZEWEIGHTS(fan_in, fan_out) initializes the weights 6 | % of a layer with fan_in incoming connections and fan_out outgoing 7 | % connections using a fix set of values 8 | % 9 | % Note that W should be set to a matrix of size(1 + fan_in, fan_out) as 10 | % the first row of W handles the "bias" terms 11 | % 12 | 13 | % Set W to zeros 14 | W = zeros(fan_out, 1 + fan_in); 15 | 16 | % Initialize W using "sin", this ensures that W is always of the same 17 | % values and will be useful for debugging 18 | W = reshape(sin(1:numel(W)), size(W)) / 10; 19 | 20 | % ========================================================================= 21 | 22 | end 23 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 
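%
%   Example (editor's illustrative addition, not part of the original file):
%   assuming each row of X holds one 20x20 grayscale digit, a grid of 100
%   randomly chosen images can be shown with
%       sel = randperm(size(X, 1));
%       displayData(X(sel(1:100), :));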
6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/ex4data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex4/ex4/ex4data1.mat -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/ex4weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex4/ex4/ex4weights.mat -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 
34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a 
bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex4/ex4/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex4/ex4/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
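%
% example (editor's illustrative addition, not part of the original file):
%     opt = varargin2struct('param1', 10, 'param2', 'abc');
%     % opt.param1 == 10 and strcmp(opt.param2, 'abc') both hold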
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, 
including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex5/ex5/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
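%
% example (editor's illustrative addition, not part of the original file):
%     s = mergestruct(struct('a', 1), struct('b', 2));
%     % s.a == 1 and s.b == 2; for duplicate field names, the values from s2 win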
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex5/ex5/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i Anyone knows how much it costs to host a web portal ? 2 | > 3 | Well, it depends on how many visitors you're expecting. 4 | This can be anywhere from less than 10 bucks a month to a couple of $100. 5 | You should checkout http://www.rackspace.com/ or perhaps Amazon EC2 6 | if youre running something big.. 7 | 8 | To unsubscribe yourself from this mailing list, send an email to: 9 | groupname-unsubscribe@egroups.com 10 | 11 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/emailSample2.txt: -------------------------------------------------------------------------------- 1 | Folks, 2 | 3 | my first time posting - have a bit of Unix experience, but am new to Linux. 4 | 5 | 6 | Just got a new PC at home - Dell box with Windows XP. Added a second hard disk 7 | for Linux. Partitioned the disk and have installed Suse 7.2 from CD, which went 8 | fine except it didn't pick up my monitor. 9 | 10 | I have a Dell branded E151FPp 15" LCD flat panel monitor and a nVidia GeForce4 11 | Ti4200 video card, both of which are probably too new to feature in Suse's default 12 | set. I downloaded a driver from the nVidia website and installed it using RPM. 13 | Then I ran Sax2 (as was recommended in some postings I found on the net), but 14 | it still doesn't feature my video card in the available list. What next? 15 | 16 | Another problem. I have a Dell branded keyboard and if I hit Caps-Lock twice, 17 | the whole machine crashes (in Linux, not Windows) - even the on/off switch is 18 | inactive, leaving me to reach for the power cable instead. 19 | 20 | If anyone can help me in any way with these probs., I'd be really grateful - 21 | I've searched the 'net but have run out of ideas. 
22 | 23 | Or should I be going for a different version of Linux such as RedHat? Opinions 24 | welcome. 25 | 26 | Thanks a lot, 27 | Peter 28 | 29 | -- 30 | Irish Linux Users' Group: ilug@linux.ie 31 | http://www.linux.ie/mailman/listinfo/ilug for (un)subscription information. 32 | List maintainer: listmaster@linux.ie 33 | 34 | 35 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/ex6data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex6/ex6/ex6data1.mat -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/ex6data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex6/ex6/ex6data2.mat -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/ex6data3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex6/ex6/ex6data3.mat -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/gaussianKernel.m: -------------------------------------------------------------------------------- 1 | function sim = gaussianKernel(x1, x2, sigma) 2 | %RBFKERNEL returns a radial basis function kernel between x1 and x2 3 | % sim = gaussianKernel(x1, x2) returns a gaussian kernel between x1 and x2 4 | % and returns the value in sim 5 | 6 | % Ensure that x1 and x2 are column vectors 7 | x1 = x1(:); x2 = x2(:); 8 | 9 | % You need to return the following variables correctly. 10 | sim = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Fill in this function to return the similarity between x1 14 | % and x2 computed using a Gaussian kernel with bandwidth 15 | % sigma 16 | % 17 | % 18 | 19 | sim = exp(-sum((x1 - x2).^2)/ (2*sigma^2)); 20 | 21 | 22 | % ============================================================= 23 | 24 | end 25 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/getVocabList.m: -------------------------------------------------------------------------------- 1 | function vocabList = getVocabList() 2 | %GETVOCABLIST reads the fixed vocabulary list in vocab.txt and returns a 3 | %cell array of the words 4 | % vocabList = GETVOCABLIST() reads the fixed vocabulary list in vocab.txt 5 | % and returns a cell array of the words in vocabList. 
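%
%   Example (editor's illustrative addition, not part of the original file):
%   assuming vocab.txt is in the current directory,
%       vocabList = getVocabList();
%       numel(vocabList)    % 1899 entries, one per line of vocab.txt
%       vocabList{1}        % the first stemmed word in the vocabulary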
6 | 7 | 8 | %% Read the fixed vocabulary list 9 | fid = fopen('vocab.txt'); 10 | 11 | % Store all dictionary words in cell array vocab{} 12 | n = 1899; % Total number of words in the dictionary 13 | 14 | % For ease of implementation, we use a struct to map the strings => integers 15 | % In practice, you'll want to use some form of hashmap 16 | vocabList = cell(n, 1); 17 | for i = 1:n 18 | % Word Index (can ignore since it will be = i) 19 | fscanf(fid, '%d', 1); 20 | % Actual Word 21 | vocabList{i} = fscanf(fid, '%s', 1); 22 | end 23 | fclose(fid); 24 | 25 | end 26 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/ChangeLog.txt: -------------------------------------------------------------------------------- 1 | ============================================================================ 2 | 3 | JSONlab - a toolbox to encode/decode JSON/UBJSON files in MATLAB/Octave 4 | 5 | ---------------------------------------------------------------------------- 6 | 7 | JSONlab ChangeLog (key features marked by *): 8 | 9 | == JSONlab 1.0 (codename: Optimus - Final), FangQ == 10 | 11 | 2015/01/02 polish help info for all major functions, update examples, finalize 1.0 12 | 2014/12/19 fix a bug to strictly respect NoRowBracket in savejson 13 | 14 | == JSONlab 1.0.0-RC2 (codename: Optimus - RC2), FangQ == 15 | 16 | 2014/11/22 show progress bar in loadjson ('ShowProgress') 17 | 2014/11/17 add Compact option in savejson to output compact JSON format ('Compact') 18 | 2014/11/17 add FastArrayParser in loadjson to specify fast parser applicable levels 19 | 2014/09/18 start official github mirror: https://github.com/fangq/jsonlab 20 | 21 | == JSONlab 1.0.0-RC1 (codename: Optimus - RC1), FangQ == 22 | 23 | 2014/09/17 fix several compatibility issues when running on octave versions 3.2-3.8 24 | 2014/09/17 support 2D cell and struct arrays in both savejson and saveubjson 25 | 2014/08/04 escape special characters in a JSON string 26 | 2014/02/16 fix a bug when saving ubjson files 27 | 28 | == JSONlab 0.9.9 (codename: Optimus - beta), FangQ == 29 | 30 | 2014/01/22 use binary read and write in saveubjson and loadubjson 31 | 32 | == JSONlab 0.9.8-1 (codename: Optimus - alpha update 1), FangQ == 33 | 34 | 2013/10/07 better round-trip conservation for empty arrays and structs (patch submitted by Yul Kang) 35 | 36 | == JSONlab 0.9.8 (codename: Optimus - alpha), FangQ == 37 | 2013/08/23 *universal Binary JSON (UBJSON) support, including both saveubjson and loadubjson 38 | 39 | == JSONlab 0.9.1 (codename: Rodimus, update 1), FangQ == 40 | 2012/12/18 *handling of various empty and sparse matrices (fixes submitted by Niclas Borlin) 41 | 42 | == JSONlab 0.9.0 (codename: Rodimus), FangQ == 43 | 44 | 2012/06/17 *new format for an invalid leading char, unpacking hex code in savejson 45 | 2012/06/01 support JSONP in savejson 46 | 2012/05/25 fix the empty cell bug (reported by Cyril Davin) 47 | 2012/04/05 savejson can save to a file (suggested by Patrick Rapin) 48 | 49 | == JSONlab 0.8.1 (codename: Sentiel, Update 1), FangQ == 50 | 51 | 2012/02/28 loadjson quotation mark escape bug, see http://bit.ly/yyk1nS 52 | 2012/01/25 patch to handle root-less objects, contributed by Blake Johnson 53 | 54 | == JSONlab 0.8.0 (codename: Sentiel), FangQ == 55 | 56 | 2012/01/13 *speed up loadjson by 20 fold when parsing large data arrays in matlab 57 | 2012/01/11 remove row bracket if an array has 1 element, suggested by Mykel Kochenderfer 58 | 2011/12/22 *accept sequence of 'param',value input in savejson and loadjson 59 | 2011/11/18 fix struct array bug reported by Mykel Kochenderfer 60 | 61 | == JSONlab 0.5.1 (codename: Nexus Update 1), FangQ == 62 | 63 | 2011/10/21 fix a bug in loadjson, previous code does not use any of the acceleration 64 | 2011/10/20 loadjson supports JSON collections - concatenated JSON objects 65 | 66 | == JSONlab 0.5.0 (codename: Nexus), FangQ == 67 | 68 | 2011/10/16 package and 
release jsonlab 0.5.0 69 | 2011/10/15 *add json demo and regression test, support cpx numbers, fix double quote bug 70 | 2011/10/11 *speed up readjson dramatically, interpret _Array* tags, show data in root level 71 | 2011/10/10 create jsonlab project, start jsonlab website, add online documentation 72 | 2011/10/07 *speed up savejson by 25x using sprintf instead of mat2str, add options support 73 | 2011/10/06 *savejson works for structs, cells and arrays 74 | 2011/09/09 derive loadjson from JSON parser from MATLAB Central, draft savejson.m 75 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex6/ex6/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i= 0) = 1; 51 | pred(p < 0) = 0; 52 | 53 | end 54 | 55 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/visualizeBoundary.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundary(X, y, model, varargin) 2 | %VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM 3 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a non-linear decision 4 | % boundary learned by the SVM and overlays the data on it 5 | 6 | % Plot the training data on top of the boundary 7 | plotData(X, y) 8 | 9 | % Make classification predictions over a grid of values 10 | x1plot = linspace(min(X(:,1)), max(X(:,1)), 100)'; 11 | x2plot = linspace(min(X(:,2)), max(X(:,2)), 100)'; 12 | [X1, X2] = meshgrid(x1plot, x2plot); 13 | vals = zeros(size(X1)); 14 | for i = 1:size(X1, 2) 15 | this_X = [X1(:, i), X2(:, i)]; 16 | vals(:, i) = svmPredict(model, this_X); 17 | end 18 | 19 | % Plot the SVM boundary 20 | hold on 21 | contour(X1, X2, vals, [0.5 0.5], 'b'); 22 | hold off; 23 | 24 | end 25 | -------------------------------------------------------------------------------- /machine-learning-ex6/ex6/visualizeBoundaryLinear.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundaryLinear(X, y, model) 2 | %VISUALIZEBOUNDARYLINEAR plots a linear decision boundary learned by the 3 | %SVM 4 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary 5 | % learned by the SVM and overlays the data on it 6 | 7 | w = model.w; 8 | b = model.b; 9 | xp = linspace(min(X(:,1)), max(X(:,1)), 100); 10 | yp = - (w(1)*xp + b)/w(2); 11 | plotData(X, y); 12 | hold on; 13 | plot(xp, yp, '-b'); 14 | hold off 15 | 16 | end 17 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7.pdf -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/bird_small.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/bird_small.mat -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/bird_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/bird_small.png -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/computeCentroids.m: -------------------------------------------------------------------------------- 1 | function centroids = computeCentroids(X, idx, K) 2 | %COMPUTECENTROIDS returns the new centroids by computing the means of the 3 | %data points 
assigned to each centroid. 4 | % centroids = COMPUTECENTROIDS(X, idx, K) returns the new centroids by 5 | % computing the means of the data points assigned to each centroid. It is 6 | % given a dataset X where each row is a single data point, a vector 7 | % idx of centroid assignments (i.e. each entry in range [1..K]) for each 8 | % example, and K, the number of centroids. You should return a matrix 9 | % centroids, where each row of centroids is the mean of the data points 10 | % assigned to it. 11 | % 12 | 13 | % Useful variables 14 | [m n] = size(X); 15 | 16 | % You need to return the following variables correctly. 17 | centroids = zeros(K, n); 18 | 19 | 20 | % ====================== YOUR CODE HERE ====================== 21 | % Instructions: Go over every centroid and compute the mean of all points that 22 | % belong to it. Concretely, the row vector centroids(i, :) 23 | % should contain the mean of the data points assigned to 24 | % centroid i. 25 | % 26 | % Note: You can use a for-loop over the centroids to compute this. 27 | % 28 | 29 | centroids = zeros(K, size(X,2)); 30 | 31 | for i = 1:K 32 | centroids(i, :) = mean(X(find(idx == i), :)); 33 | end 34 | 35 | % My solution was given full marks on submission, but it was not working for one part, 36 | % so I found this code online for testing purposes (kept commented out below): 37 | % https://github.com/everpeace/ml-class-assignments/blob/master/ex7.K-Means_Clustering_and_PCA/mlclass-ex7/computeCentroids.m 38 | 39 | %for k = 1:K 40 | % num_k = 0; 41 | % sum = zeros(n, 1); 42 | % for i = 1:m 43 | % if ( idx(i) == k ) 44 | % sum = sum + X(i, :)'; 45 | % num_k = num_k + 1; 46 | % end 47 | % end 48 | % centroids(k, :) = (sum/num_k)'; 49 | %end 50 | 51 | 52 | % ============================================================= 53 | 54 | 55 | end 56 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/drawLine.m: -------------------------------------------------------------------------------- 1 | function drawLine(p1, p2, varargin) 2 | %DRAWLINE Draws a line from point p1 to point p2 3 | % DRAWLINE(p1, p2) Draws a line from point p1 to point p2 and holds the 4 | % current figure 5 | 6 | plot([p1(1) p2(1)], [p1(2) p2(2)], varargin{:}); 7 | 8 | end -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/ex7data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/ex7data1.mat -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/ex7data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/ex7data2.mat -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/ex7faces.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/ex7faces.mat -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | mu = mean(X); 9 | X_norm = bsxfun(@minus, X, mu); 10 | 11 | sigma = std(X_norm); 12 | X_norm = bsxfun(@rdivide, X_norm, sigma); 13 | 14 | 15 | % ============================================================ 16 | 17 | end 18 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/findClosestCentroids.m: -------------------------------------------------------------------------------- 1 | function idx = findClosestCentroids(X, centroids) 2 | %FINDCLOSESTCENTROIDS computes the centroid memberships for every example 3 | % idx = FINDCLOSESTCENTROIDS (X, centroids) returns the closest centroids 4 | % in idx for a dataset X where each row is a single example. idx = m x 1 5 | % vector of centroid assignments (i.e. each entry in range [1..K]) 6 | % 7 | 8 | % Set K 9 | K = size(centroids, 1); 10 | 11 | % You need to return the following variables correctly. 12 | idx = zeros(size(X,1), 1); 13 | 14 | % ====================== YOUR CODE HERE ====================== 15 | % Instructions: Go over every example, find its closest centroid, and store 16 | % the index inside idx at the appropriate location. 
17 | % Concretely, idx(i) should contain the index of the centroid 18 | % closest to example i. Hence, it should be a value in the 19 | % range 1..K 20 | % 21 | % Note: You can use a for-loop over the examples to compute this. 22 | % 23 | 24 | m = size(X,1); 25 | 26 | for i = 1:m 27 | idx(i)=1; 28 | diff = sqrt(sum((X(i,:) - centroids(1,:)).^2)); 29 | for j = 2:K 30 | newDiff = sqrt(sum((X(i,:) - centroids(j,:)).^2)); 31 | if diff > newDiff 32 | diff = newDiff; 33 | idx(i) = j; 34 | end 35 | end 36 | end 37 | 38 | 39 | 40 | 41 | % ============================================================= 42 | 43 | end 44 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/kMeansInitCentroids.m: -------------------------------------------------------------------------------- 1 | function centroids = kMeansInitCentroids(X, K) 2 | %KMEANSINITCENTROIDS This function initializes K centroids that are to be 3 | %used in K-Means on the dataset X 4 | % centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be 5 | % used with the K-Means on the dataset X 6 | % 7 | 8 | % You should return these values correctly 9 | centroids = zeros(K, size(X, 2)); 10 | 11 | % ====================== YOUR CODE HERE ====================== 12 | % Instructions: You should set centroids to randomly chosen examples from 13 | % the dataset X 14 | % 15 | 16 | 17 | 18 | randidx = randperm(size(X,1)); 19 | 20 | centroids = X(randidx(1:K), :); 21 | 22 | 23 | 24 | 25 | % ============================================================= 26 | 27 | end 28 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415.
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex7/ex7/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex7/ex7/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 0.5) = 0; 22 | R = zeros(size(Y)); 23 | R(Y ~= 0) = 1; 24 | 25 | %% Run Gradient Checking 26 | X = randn(size(X_t)); 27 | Theta = randn(size(Theta_t)); 28 | num_users = size(Y, 2); 29 | num_movies = size(Y, 1); 30 | num_features = size(Theta_t, 2); 31 | 32 | numgrad = computeNumericalGradient( ... 33 | @(t) cofiCostFunc(t, Y, R, num_users, num_movies, ... 
34 | num_features, lambda), [X(:); Theta(:)]); 35 | 36 | [cost, grad] = cofiCostFunc([X(:); Theta(:)], Y, R, num_users, ... 37 | num_movies, num_features, lambda); 38 | 39 | disp([numgrad grad]); 40 | fprintf(['The above two columns you get should be very similar.\n' ... 41 | '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']); 42 | 43 | diff = norm(numgrad-grad)/norm(numgrad+grad); 44 | fprintf(['If your cost function implementation is correct, then \n' ... 45 | 'the relative difference will be small (less than 1e-9). \n' ... 46 | '\nRelative Difference: %g\n'], diff); 47 | 48 | end -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/cofiCostFunc.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = cofiCostFunc(params, Y, R, num_users, num_movies, ... 2 | num_features, lambda) 3 | %COFICOSTFUNC Collaborative filtering cost function 4 | % [J, grad] = COFICOSTFUNC(params, Y, R, num_users, num_movies, ... 5 | % num_features, lambda) returns the cost and gradient for the 6 | % collaborative filtering problem. 7 | % 8 | 9 | % Unfold the U and W matrices from params 10 | X = reshape(params(1:num_movies*num_features), num_movies, num_features); 11 | Theta = reshape(params(num_movies*num_features+1:end), ... 12 | num_users, num_features); 13 | 14 | 15 | % You need to return the following values correctly 16 | J = 0; 17 | X_grad = zeros(size(X)); 18 | Theta_grad = zeros(size(Theta)); 19 | 20 | % ====================== YOUR CODE HERE ====================== 21 | % Instructions: Compute the cost function and gradient for collaborative 22 | % filtering. Concretely, you should first implement the cost 23 | % function (without regularization) and make sure it 24 | % matches our costs. After that, you should implement the 25 | % gradient and use the checkCostFunction routine to check 26 | % that the gradient is correct. Finally, you should implement 27 | % regularization. 28 | % 29 | % Notes: X - num_movies x num_features matrix of movie features 30 | % Theta - num_users x num_features matrix of user features 31 | % Y - num_movies x num_users matrix of user ratings of movies 32 | % R - num_movies x num_users matrix, where R(i, j) = 1 if the 33 | % i-th movie was rated by the j-th user 34 | % 35 | % You should set the following variables correctly: 36 | % 37 | % X_grad - num_movies x num_features matrix, containing the 38 | % partial derivatives w.r.t. each element of X 39 | % Theta_grad - num_users x num_features matrix, containing the 40 | % partial derivatives w.r.t. each element of Theta 41 | % 42 | 43 | cost = X * Theta' - Y; %' 44 | J = (1/2) * sum(sum(R.*(cost.^2 ))) + ((lambda/2)* sum(sum(Theta.^2))) + ((lambda/2) * sum(sum(X.^2))); 45 | X_grad = (cost.*R) *Theta + (lambda .* X); 46 | Theta_grad = (cost .*R)' *X + (lambda .* Theta); 47 | 48 | 49 | 50 | 51 | 52 | % ============================================================= 53 | 54 | grad = [X_grad(:); Theta_grad(:)]; 55 | 56 | end 57 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient.
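%
% Note: a minimal usage sketch, assuming a simple quadratic cost
% J(t) = 0.5*sum(t.^2) whose analytic gradient is t itself; the numerical
% estimate should then come out very close to [1; 2; 3]:
%
%   Jquad   = @(t) 0.5 * sum(t.^2);
%   numgrad = computeNumericalGradient(Jquad, [1; 2; 3]);
%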
4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient. It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. (i.e., numgrad(i) should 12 | % be (approximately) the partial derivative of J with respect 13 | % to theta(i).) 14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/estimateGaussian.m: -------------------------------------------------------------------------------- 1 | function [mu sigma2] = estimateGaussian(X) 2 | %ESTIMATEGAUSSIAN This function estimates the parameters of a 3 | %Gaussian distribution using the data in X 4 | % [mu sigma2] = estimateGaussian(X), 5 | % The input X is the dataset with each n-dimensional data point in one row 6 | % The output is an n-dimensional vector mu, the mean of the data set 7 | % and the variances sigma^2, an n x 1 vector 8 | % 9 | 10 | % Useful variables 11 | [m, n] = size(X); 12 | 13 | % You should return these values correctly 14 | mu = zeros(n, 1); 15 | sigma2 = zeros(n, 1); 16 | 17 | % ====================== YOUR CODE HERE ====================== 18 | % Instructions: Compute the mean of the data and the variances 19 | % In particular, mu(i) should contain the mean of 20 | % the data for the i-th feature and sigma2(i) 21 | % should contain the variance of the i-th feature.
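%
% Note: an equivalent vectorized sketch using only built-ins; var(X, 1)
% applies the same 1/m normalization, so it should agree with the
% solution below:
%
%   mu     = mean(X);
%   sigma2 = var(X, 1);
%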
22 | % 23 | 24 | 25 | mu = (1/m) * sum(X); 26 | sigma2 = (1/m) * sum((X - repmat(mu, m , 1)).^2); 27 | sigma = sqrt(sigma2); 28 | 29 | 30 | 31 | % ============================================================= 32 | 33 | 34 | end 35 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/ex8_movieParams.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex8/ex8/ex8_movieParams.mat -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/ex8_movies.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex8/ex8/ex8_movies.mat -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/ex8data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex8/ex8/ex8data1.mat -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/ex8data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex8/ex8/ex8data2.mat -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/machine-learning-ex8/ex8/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i bestF1 41 | bestF1 = F1; 42 | bestEpsilon = epsilon; 43 | end 44 | end 45 | 46 | end 47 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/submit.m: -------------------------------------------------------------------------------- 1 | function submit() 2 | addpath('./lib'); 3 | 4 | conf.assignmentSlug = 'anomaly-detection-and-recommender-systems'; 5 | conf.itemName = 'Anomaly Detection and Recommender Systems'; 6 | conf.partArrays = { ... 7 | { ... 8 | '1', ... 9 | { 'estimateGaussian.m' }, ... 10 | 'Estimate Gaussian Parameters', ... 11 | }, ... 12 | { ... 13 | '2', ... 14 | { 'selectThreshold.m' }, ... 15 | 'Select Threshold', ... 16 | }, ... 17 | { ... 18 | '3', ... 19 | { 'cofiCostFunc.m' }, ... 20 | 'Collaborative Filtering Cost', ... 21 | }, ... 22 | { ... 23 | '4', ... 24 | { 'cofiCostFunc.m' }, ... 25 | 'Collaborative Filtering Gradient', ... 26 | }, ... 27 | { ... 28 | '5', ... 29 | { 'cofiCostFunc.m' }, ... 30 | 'Regularized Cost', ... 31 | }, ... 32 | { ... 33 | '6', ... 34 | { 'cofiCostFunc.m' }, ... 35 | 'Regularized Gradient', ... 36 | }, ... 37 | }; 38 | conf.output = @output; 39 | 40 | submitWithConfiguration(conf); 41 | end 42 | 43 | function out = output(partId, auxstring) 44 | % Random Test Cases 45 | n_u = 3; n_m = 4; n = 5; 46 | X = reshape(sin(1:n_m*n), n_m, n); 47 | Theta = reshape(cos(1:n_u*n), n_u, n); 48 | Y = reshape(sin(1:2:2*n_m*n_u), n_m, n_u); 49 | R = Y > 0.5; 50 | pval = [abs(Y(:)) ; 0.001; 1]; 51 | Y = (Y .* double(R)); % set 'Y' values to 0 for movies not reviewed 52 | yval = [R(:) ; 1; 0]; 53 | params = [X(:); Theta(:)]; 54 | if partId == '1' 55 | [mu sigma2] = estimateGaussian(X); 56 | out = sprintf('%0.5f ', [mu(:); sigma2(:)]); 57 | elseif partId == '2' 58 | [bestEpsilon bestF1] = selectThreshold(yval, pval); 59 | out = sprintf('%0.5f ', [bestEpsilon(:); bestF1(:)]); 60 | elseif partId == '3' 61 | [J] = cofiCostFunc(params, Y, R, n_u, n_m, ... 62 | n, 0); 63 | out = sprintf('%0.5f ', J(:)); 64 | elseif partId == '4' 65 | [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ... 66 | n, 0); 67 | out = sprintf('%0.5f ', grad(:)); 68 | elseif partId == '5' 69 | [J] = cofiCostFunc(params, Y, R, n_u, n_m, ... 70 | n, 1.5); 71 | out = sprintf('%0.5f ', J(:)); 72 | elseif partId == '6' 73 | [J, grad] = cofiCostFunc(params, Y, R, n_u, n_m, ... 74 | n, 1.5); 75 | out = sprintf('%0.5f ', grad(:)); 76 | end 77 | end 78 | -------------------------------------------------------------------------------- /machine-learning-ex8/ex8/visualizeFit.m: -------------------------------------------------------------------------------- 1 | function visualizeFit(X, mu, sigma2) 2 | %VISUALIZEFIT Visualize the dataset and its estimated distribution. 3 | % VISUALIZEFIT(X, p, mu, sigma2) This visualization shows you the 4 | % probability density function of the Gaussian distribution. Each example 5 | % has a location (x1, x2) that depends on its feature values. 
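%
% Note: a minimal usage sketch, assuming a two-column dataset X is already in
% the workspace (e.g. loaded from ex8data1.mat):
%
%   [mu, sigma2] = estimateGaussian(X);
%   visualizeFit(X, mu, sigma2);
%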
6 | % 7 | 8 | [X1,X2] = meshgrid(0:.5:35); 9 | Z = multivariateGaussian([X1(:) X2(:)],mu,sigma2); 10 | Z = reshape(Z,size(X1)); 11 | 12 | plot(X(:, 1), X(:, 2),'bx'); 13 | hold on; 14 | % Do not plot if there are infinities 15 | if (sum(isinf(Z)) == 0) 16 | contour(X1, X2, Z, 10.^(-20:3:0)'); 17 | end 18 | hold off; 19 | 20 | end -------------------------------------------------------------------------------- /notes.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flsing/machine-learning/27aba097c003b706ddfd999066be0ded885a93ed/notes.docx --------------------------------------------------------------------------------
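An end-to-end usage sketch of the ex8 anomaly-detection functions above (illustrative only; it assumes ex8data1.mat provides the variables X, Xval and yval, as used by the course's ex8.m driver script):

    load('ex8data1.mat');                           % assumed to provide X, Xval, yval
    [mu, sigma2] = estimateGaussian(X);             % per-feature Gaussian parameters
    p    = multivariateGaussian(X, mu, sigma2);     % density of each training example
    pval = multivariateGaussian(Xval, mu, sigma2);  % density of each validation example
    [epsilon, F1] = selectThreshold(yval, pval);    % threshold that maximizes F1 on the validation set
    outliers = find(p < epsilon);                   % training examples flagged as anomalies
    visualizeFit(X, mu, sigma2);                    % contour plot of the fitted density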