├── .gitignore ├── Coursera ├── README.md ├── machine-learning-ex1 │ └── ex1 │ │ ├── computeCost.m │ │ ├── computeCostMulti.m │ │ ├── ex1.m │ │ ├── ex1_multi.m │ │ ├── ex1data1.txt │ │ ├── ex1data2.txt │ │ ├── featureNormalize.m │ │ ├── gradientDescent.m │ │ ├── gradientDescentMulti.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── normalEqn.m │ │ ├── plotData.m │ │ ├── submit.m │ │ ├── token.mat │ │ └── warmUpExercise.m ├── machine-learning-ex2 │ └── ex2 │ │ ├── costFunction.m │ │ ├── costFunctionReg.m │ │ ├── ex2.m │ │ ├── ex2_reg.m │ │ ├── ex2data1.txt │ │ ├── ex2data2.txt │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── mapFeature.m │ │ ├── plotData.m │ │ ├── plotDecisionBoundary.m │ │ ├── predict.m │ │ ├── sigmoid.m │ │ ├── submit.m │ │ └── token.mat ├── machine-learning-ex3 │ └── ex3 │ │ ├── displayData.m │ │ ├── ex3.m │ │ ├── ex3_nn.m │ │ ├── ex3data1.mat │ │ ├── ex3weights.mat │ │ ├── fmincg.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── lrCostFunction.m │ │ ├── oneVsAll.m │ │ ├── predict.m │ │ ├── predictOneVsAll.m │ │ ├── sigmoid.m │ │ ├── submit.m │ │ └── token.mat ├── machine-learning-ex4 │ └── ex4 │ │ ├── checkNNGradients.m │ │ ├── computeNumericalGradient.m │ │ ├── debugInitializeWeights.m │ │ ├── displayData.m │ │ ├── ex4.m │ │ ├── ex4data1.mat │ │ ├── ex4weights.mat │ │ ├── fmincg.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── nnCostFunction.m │ │ ├── predict.m │ │ ├── randInitializeWeights.m │ │ ├── sigmoid.m │ │ ├── sigmoidGradient.m │ │ ├── submit.m │ │ └── token.mat ├── machine-learning-ex5 │ └── ex5 │ │ ├── ex5.m │ │ ├── ex5data1.mat │ │ ├── featureNormalize.m │ │ ├── fmincg.m │ │ ├── learningCurve.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── linearRegCostFunction.m │ │ ├── octave-workspace │ │ ├── plotFit.m │ │ ├── polyFeatures.m │ │ ├── submit.m │ │ ├── token.mat │ │ ├── trainLinearReg.m │ │ └── validationCurve.m ├── machine-learning-ex6 │ └── ex6 │ │ ├── C_1.png │ │ ├── C_10.png │ │ ├── C_100.png │ │ ├── dataset3Params.m │ │ ├── emailFeatures.m │ │ ├── 
emailSample1.txt │ │ ├── emailSample2.txt │ │ ├── ex6.m │ │ ├── ex6_spam.m │ │ ├── ex6data1.mat │ │ ├── ex6data2.mat │ │ ├── ex6data3.mat │ │ ├── gaussianKernel.m │ │ ├── getVocabList.m │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── linearKernel.m │ │ ├── plotData.m │ │ ├── porterStemmer.m │ │ ├── processEmail.m │ │ ├── readFile.m │ │ ├── sigma_0.03.png │ │ ├── sigma_0.1.png │ │ ├── sigma_0.3.png │ │ ├── spamSample1.txt │ │ ├── spamSample2.txt │ │ ├── spamTest.mat │ │ ├── spamTrain.mat │ │ ├── submit.m │ │ ├── svmPredict.m │ │ ├── svmTrain.m │ │ ├── token.mat │ │ ├── visualizeBoundary.m │ │ ├── visualizeBoundaryLinear.m │ │ └── vocab.txt ├── machine-learning-ex7 │ └── ex7 │ │ ├── bird_small.mat │ │ ├── bird_small.png │ │ ├── computeCentroids.m │ │ ├── displayData.m │ │ ├── drawLine.m │ │ ├── ex7.m │ │ ├── ex7_pca.m │ │ ├── ex7data1.mat │ │ ├── ex7data2.mat │ │ ├── ex7faces.mat │ │ ├── face_100.png │ │ ├── face_36.png │ │ ├── featureNormalize.m │ │ ├── findClosestCentroids.m │ │ ├── kMeansInitCentroids.m │ │ ├── kmeans.png │ │ ├── lib │ │ ├── jsonlab │ │ │ ├── AUTHORS.txt │ │ │ ├── ChangeLog.txt │ │ │ ├── LICENSE_BSD.txt │ │ │ ├── README.txt │ │ │ ├── jsonopt.m │ │ │ ├── loadjson.m │ │ │ ├── loadubjson.m │ │ │ ├── mergestruct.m │ │ │ ├── savejson.m │ │ │ ├── saveubjson.m │ │ │ └── varargin2struct.m │ │ ├── makeValidFieldName.m │ │ └── submitWithConfiguration.m │ │ ├── pca.m │ │ ├── pca.png │ │ ├── plotDataPoints.m │ │ ├── plotProgresskMeans.m │ │ ├── projectData.m │ │ ├── recoverData.m │ │ ├── runkMeans.m │ │ ├── submit.m │ │ └── token.mat └── machine-learning-ex8 │ └── ex8 │ ├── checkCostFunction.m │ ├── cofiCostFunc.m │ ├── computeNumericalGradient.m │ ├── estimateGaussian.m │ ├── ex8.m │ ├── ex8_cofi.m │ ├── ex8_movieParams.mat │ ├── ex8_movies.mat │ ├── ex8data1.mat │ ├── ex8data2.mat │ ├── fmincg.m │ ├── lib │ ├── jsonlab │ │ ├── AUTHORS.txt │ │ ├── ChangeLog.txt │ │ ├── LICENSE_BSD.txt │ │ ├── README.txt │ │ ├── jsonopt.m │ │ ├── loadjson.m │ │ ├── loadubjson.m │ │ ├── mergestruct.m │ │ ├── savejson.m │ │ ├── saveubjson.m │ │ └── varargin2struct.m │ ├── makeValidFieldName.m │ └── submitWithConfiguration.m │ ├── loadMovieList.m │ ├── movie_ids.txt │ ├── multivariateGaussian.m │ ├── normalizeRatings.m │ ├── octave-workspace │ ├── selectThreshold.m │ ├── submit.m │ ├── token.mat │ └── visualizeFit.m ├── README.md ├── cs231n ├── Assignment1.html ├── Assignment1_files │ ├── MathJax.js │ ├── analytics.js │ ├── css │ └── main.css ├── Assignment2.html ├── Assignment2_files │ ├── MathJax.js │ ├── analytics.js │ ├── css │ └── main.css ├── Assignment3.html ├── Assignment3_files │ ├── MathJax.js │ ├── analytics.js │ ├── css │ └── main.css ├── assignment1 │ ├── .gitignore │ ├── .ipynb_checkpoints │ │ ├── features-checkpoint.ipynb │ │ ├── knn-checkpoint.ipynb │ │ ├── softmax-checkpoint.ipynb │ │ ├── svm-checkpoint.ipynb │ │ └── two_layer_net-checkpoint.ipynb │ ├── README.md │ ├── collectSubmission.sh │ ├── cs231n │ │ ├── __init__.py │ │ ├── classifiers │ │ │ ├── __init__.py │ │ │ ├── k_nearest_neighbor.py │ │ │ ├── linear_classifier.py │ │ │ ├── linear_svm.py │ │ │ ├── neural_net.py │ │ │ └── softmax.py │ │ ├── data_utils.py │ │ ├── datasets │ │ │ ├── .gitignore │ │ │ └── get_datasets.sh │ │ ├── 
features.py │ │ ├── gradient_check.py │ │ ├── nn.png │ │ └── vis_utils.py │ ├── features.ipynb │ ├── frameworkpython │ ├── knn.ipynb │ ├── requirements.txt │ ├── setup_googlecloud.sh │ ├── softmax.ipynb │ ├── start_ipython_osx.sh │ ├── svm.ipynb │ └── two_layer_net.ipynb ├── assignment2 │ ├── .gitignore │ ├── .ipynb_checkpoints │ │ ├── BatchNormalization-checkpoint.ipynb │ │ ├── ConvolutionalNetworks-checkpoint.ipynb │ │ ├── Dropout-checkpoint.ipynb │ │ ├── FullyConnectedNets-checkpoint.ipynb │ │ └── PyTorch-checkpoint.ipynb │ ├── BatchNormalization.ipynb │ ├── ConvolutionalNetworks.ipynb │ ├── Dropout.ipynb │ ├── FullyConnectedNets.ipynb │ ├── PyTorch.ipynb │ ├── README.md │ ├── TensorFlow.ipynb │ ├── collectSubmission.sh │ ├── cs231n │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── classifiers │ │ │ ├── __init__.py │ │ │ ├── cnn.py │ │ │ └── fc_net.py │ │ ├── data_utils.py │ │ ├── datasets │ │ │ ├── .gitignore │ │ │ └── get_datasets.sh │ │ ├── fast_layers.py │ │ ├── gradient_check.py │ │ ├── im2col.py │ │ ├── im2col_cython.cpython-35m-x86_64-linux-gnu.so │ │ ├── im2col_cython.cpython-36m-x86_64-linux-gnu.so │ │ ├── im2col_cython.pyx │ │ ├── layer_utils.py │ │ ├── layers.py │ │ ├── optim.py │ │ ├── setup.py │ │ ├── solver.py │ │ └── vis_utils.py │ ├── frameworkpython │ ├── kitten.jpg │ ├── puppy.jpg │ ├── requirements.txt │ └── start_ipython_osx.sh ├── assignment3 │ ├── .gitignore │ ├── .ipynb_checkpoints │ │ ├── GANs-PyTorch-checkpoint.ipynb │ │ ├── LSTM_Captioning-checkpoint.ipynb │ │ ├── NetworkVisualization-PyTorch-checkpoint.ipynb │ │ ├── RNN_Captioning-checkpoint.ipynb │ │ └── StyleTransfer-PyTorch-checkpoint.ipynb │ ├── GANs-PyTorch.ipynb │ ├── GANs-TensorFlow.ipynb │ ├── LSTM_Captioning.ipynb │ ├── NetworkVisualization-PyTorch.ipynb │ ├── NetworkVisualization-TensorFlow.ipynb │ ├── RNN_Captioning.ipynb │ ├── StyleTransfer-PyTorch.ipynb │ ├── StyleTransfer-TensorFlow.ipynb │ ├── collectSubmission.sh │ ├── cs231n │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── captioning_solver.py │ │ ├── classifiers │ │ │ ├── __init__.py │ │ │ ├── rnn.py │ │ │ └── squeezenet.py │ │ ├── coco_utils.py │ │ ├── data_utils.py │ │ ├── datasets │ │ │ ├── .gitignore │ │ │ ├── get_assignment3_data.sh │ │ │ └── get_imagenet_val.sh │ │ ├── fast_layers.py │ │ ├── gradient_check.py │ │ ├── im2col.py │ │ ├── im2col_cython.pyx │ │ ├── image_utils.py │ │ ├── layer_utils.py │ │ ├── layers.py │ │ ├── optim.py │ │ ├── rnn_layers.py │ │ └── setup.py │ ├── example_styletransfer.png │ ├── frameworkpython │ ├── gan-checks-tf.npz │ ├── gan_outputs_pytorch.png │ ├── gan_outputs_tf.png │ ├── kitten.jpg │ ├── requirements.txt │ ├── sky.jpg │ ├── start_ipython_osx.sh │ ├── style-transfer-checks-tf.npz │ ├── style-transfer-checks.npz │ ├── styles │ │ ├── composition_vii.jpg │ │ ├── muse.jpg │ │ ├── starry_night.jpg │ │ ├── the_scream.jpg │ │ └── tubingen.jpg │ └── where_are_my_drivers.sh └── notes │ ├── Images │ ├── l10_image_caption.png │ ├── l10_lstm.png │ ├── l10_rnn_layer.png │ ├── l10_rnn_layer2.png │ ├── l10_rnn_layer3.png │ ├── l10_summary.png │ ├── l11_fft.png │ ├── l11_im2col.png │ ├── l11_stack_cnn.png │ ├── l11_transfer_learning.png │ ├── l13_cascades.png │ ├── l13_hypercolumns.png │ ├── l13_multi_scale.png │ ├── l13_refinement.png │ ├── l13_semantic_segmentation_cnn.png │ ├── l13_similar_to_rcnn.png │ ├── l13_soft_attentation_for_caption.png │ ├── l13_soft_vs_hard1.png │ ├── l13_soft_vs_hard2.png │ ├── l13_upsampling.png │ ├── l2_deep_learning_pipline.png │ ├── l2_traditional_pipeline.png │ ├── l3_softmax_function.png 
│ ├── l3_softmax_loss_function.png │ ├── l3_svm_loss.png │ ├── l3_svm_loss_with_regularization.png │ ├── l4_activation_function.png │ ├── l4_backpropagation.png │ ├── l4_nerual.png │ ├── l5_batch_normalization.png │ ├── l5_parameters_initialization.png │ ├── l6_dropout.png │ ├── l7_convolutional_layer.png │ ├── l7_pooling_layer.png │ ├── l7_summary.png │ ├── l8_computer_vision_tasks.png │ ├── l8_localization_as_regression.png │ ├── l8_overfeat_1.png │ ├── l8_overfeat_2.png │ ├── l8_recap.png │ ├── l8_selective_search.png │ ├── l9_deconvolution_approaches.png │ ├── l9_deep_dream.png │ ├── l9_image_gradient.png │ ├── l9_image_reconstructure.png │ ├── l9_occlusion_experiments.png │ ├── l9_optimization_to_image.png │ ├── l9_t_sne.png │ ├── l9_visualize_activations.png │ ├── l9_visualize_deconvolution.png │ ├── l9_visualize_filers.png │ └── l9_visualize_patches.png │ ├── L10_Recurrent_Neural_Networks.md │ ├── L11_CNNs_in_practice.md │ ├── L13_Segmentation_and_Attention.md │ ├── L14_Videos_and_Unspervised_Learning.md │ ├── L1_Introduction.md │ ├── L2_Image_Classification_Pipeline.md │ ├── L3_Loss_Functions_and_Optimization.md │ ├── L4_Backpropagation_and_Neural_Networks.md │ ├── L5_Training_Neural_Networks_part_1.md │ ├── L6_Training_Neural_Networks_part_2.md │ ├── L7_Convoluational_Neural_Networks.md │ ├── L8_Spatial_Localization_and_Detection.md │ └── L9_Understanding_and_Visualizing_CNNs.md ├── notes ├── anomaly_detection.md ├── cs231n.md ├── cv.md ├── diagnostic.md ├── inside_cnn.md ├── kmeans.md ├── neural_networks.md ├── optim.md ├── pca.md ├── pic │ ├── bias1.png │ ├── bias2.png │ ├── cnn.png │ ├── cnn2.png │ ├── kmeans.png │ ├── nn.png │ ├── nn2.png │ ├── pca.png │ ├── select_lambda.png │ ├── svm.png │ ├── var1.png │ └── var2.png ├── reading.md ├── recommender.md ├── regression.md ├── svm.md └── unsupervised.md └── pytorch ├── .gitignore ├── .ipynb_checkpoints ├── reinforcement_q_learning-checkpoint.ipynb └── transfer_learning_tutorial-checkpoint.ipynb ├── advanced_tutorial.ipynb ├── autograd_tutorial.ipynb ├── char_rnn_classification_tutorial.ipynb ├── char_rnn_generation_tutorial.ipynb ├── cifar10_tutorial.ipynb ├── data_loading_tutorial.ipynb ├── data_parallel_tutorial.ipynb ├── neural_networks_tutorial.ipynb ├── neural_style_tutorial.ipynb ├── nn_tutorial.ipynb ├── parallelism_tutorial.ipynb ├── reinforcement_q_learning.ipynb ├── seq2seq_translation_tutorial.ipynb ├── sequence_models_tutorial.ipynb ├── spatial_transformer_tutorial.ipynb ├── super_resolution_with_caffe2.ipynb ├── tensor_tutorial.ipynb ├── transfer_learning_tutorial.ipynb └── word_embeddings_tutorial.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | *.zip 3 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/computeCost.m: -------------------------------------------------------------------------------- 1 | function J = computeCost(X, y, theta) 2 | %COMPUTECOST Compute cost for linear regression 3 | % J = COMPUTECOST(X, y, theta) computes the cost of using theta as the 4 | % parameter for linear regression to fit the data points in X and y 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | 9 | % You need to return the following variables correctly 10 | J = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Compute the cost of a particular choice of theta 14 | % You should set J to the cost. 
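%
% For reference, the quantity computed below is the usual squared-error
% cost for linear regression (a sketch, assuming X already includes the
% leading column of ones):
%
%   J(theta) = 1/(2*m) * sum( (X*theta - y).^2 )
%
% The vectorized solution that follows implements exactly this.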
15 | 16 | 17 | htx = X*theta; 18 | J = sum((htx-y).^2)/2/m; 19 | 20 | 21 | 22 | % ========================================================================= 23 | 24 | end 25 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/computeCostMulti.m: -------------------------------------------------------------------------------- 1 | function J = computeCostMulti(X, y, theta) 2 | %COMPUTECOSTMULTI Compute cost for linear regression with multiple variables 3 | % J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the 4 | % parameter for linear regression to fit the data points in X and y 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | 9 | % You need to return the following variables correctly 10 | J = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Compute the cost of a particular choice of theta 14 | % You should set J to the cost. 15 | 16 | 17 | htx = X*theta; 18 | J = sum((htx-y).^2)/2/m; 19 | 20 | 21 | 22 | % ========================================================================= 23 | 24 | end 25 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/ex1data1.txt: -------------------------------------------------------------------------------- 1 | 6.1101,17.592 2 | 5.5277,9.1302 3 | 8.5186,13.662 4 | 7.0032,11.854 5 | 5.8598,6.8233 6 | 8.3829,11.886 7 | 7.4764,4.3483 8 | 8.5781,12 9 | 6.4862,6.5987 10 | 5.0546,3.8166 11 | 5.7107,3.2522 12 | 14.164,15.505 13 | 5.734,3.1551 14 | 8.4084,7.2258 15 | 5.6407,0.71618 16 | 5.3794,3.5129 17 | 6.3654,5.3048 18 | 5.1301,0.56077 19 | 6.4296,3.6518 20 | 7.0708,5.3893 21 | 6.1891,3.1386 22 | 20.27,21.767 23 | 5.4901,4.263 24 | 6.3261,5.1875 25 | 5.5649,3.0825 26 | 18.945,22.638 27 | 12.828,13.501 28 | 10.957,7.0467 29 | 13.176,14.692 30 | 22.203,24.147 31 | 5.2524,-1.22 32 | 6.5894,5.9966 33 | 9.2482,12.134 34 | 5.8918,1.8495 35 | 8.2111,6.5426 36 | 7.9334,4.5623 37 | 8.0959,4.1164 38 | 5.6063,3.3928 39 | 12.836,10.117 40 | 6.3534,5.4974 41 | 5.4069,0.55657 42 | 6.8825,3.9115 43 | 11.708,5.3854 44 | 5.7737,2.4406 45 | 7.8247,6.7318 46 | 7.0931,1.0463 47 | 5.0702,5.1337 48 | 5.8014,1.844 49 | 11.7,8.0043 50 | 5.5416,1.0179 51 | 7.5402,6.7504 52 | 5.3077,1.8396 53 | 7.4239,4.2885 54 | 7.6031,4.9981 55 | 6.3328,1.4233 56 | 6.3589,-1.4211 57 | 6.2742,2.4756 58 | 5.6397,4.6042 59 | 9.3102,3.9624 60 | 9.4536,5.4141 61 | 8.8254,5.1694 62 | 5.1793,-0.74279 63 | 21.279,17.929 64 | 14.908,12.054 65 | 18.959,17.054 66 | 7.2182,4.8852 67 | 8.2951,5.7442 68 | 10.236,7.7754 69 | 5.4994,1.0173 70 | 20.341,20.992 71 | 10.136,6.6799 72 | 7.3345,4.0259 73 | 6.0062,1.2784 74 | 7.2259,3.3411 75 | 5.0269,-2.6807 76 | 6.5479,0.29678 77 | 7.5386,3.8845 78 | 5.0365,5.7014 79 | 10.274,6.7526 80 | 5.1077,2.0576 81 | 5.7292,0.47953 82 | 5.1884,0.20421 83 | 6.3557,0.67861 84 | 9.7687,7.5435 85 | 6.5159,5.3436 86 | 8.5172,4.2415 87 | 9.1802,6.7981 88 | 6.002,0.92695 89 | 5.5204,0.152 90 | 5.0594,2.8214 91 | 5.7077,1.8451 92 | 7.6366,4.2959 93 | 5.8707,7.2029 94 | 5.3054,1.9869 95 | 8.2934,0.14454 96 | 13.394,9.0551 97 | 5.4369,0.61705 98 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/ex1data2.txt: -------------------------------------------------------------------------------- 1 | 2104,3,399900 2 | 1600,3,329900 3 | 2400,3,369000 4 | 1416,2,232000 5 | 3000,4,539900 6 | 
1985,4,299900 7 | 1534,3,314900 8 | 1427,3,198999 9 | 1380,3,212000 10 | 1494,3,242500 11 | 1940,4,239999 12 | 2000,3,347000 13 | 1890,3,329999 14 | 4478,5,699900 15 | 1268,3,259900 16 | 2300,4,449900 17 | 1320,2,299900 18 | 1236,3,199900 19 | 2609,4,499998 20 | 3031,4,599000 21 | 1767,3,252900 22 | 1888,2,255000 23 | 1604,3,242900 24 | 1962,4,259900 25 | 3890,3,573900 26 | 1100,3,249900 27 | 1458,3,464500 28 | 2526,3,469000 29 | 2200,3,475000 30 | 2637,3,299900 31 | 1839,2,349900 32 | 1000,1,169900 33 | 2040,4,314900 34 | 3137,3,579900 35 | 1811,4,285900 36 | 1437,3,249900 37 | 1239,3,229900 38 | 2132,4,345000 39 | 4215,4,549000 40 | 2162,4,287000 41 | 1664,2,368500 42 | 2238,3,329900 43 | 2567,4,314000 44 | 1200,3,299000 45 | 852,2,179900 46 | 1852,4,299900 47 | 1203,3,239500 48 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | % You need to set these values correctly 9 | X_norm = X; 10 | mu = zeros(1, size(X, 2)); 11 | sigma = zeros(1, size(X, 2)); 12 | 13 | % ====================== YOUR CODE HERE ====================== 14 | % Instructions: First, for each feature dimension, compute the mean 15 | % of the feature and subtract it from the dataset, 16 | % storing the mean value in mu. Next, compute the 17 | % standard deviation of each feature and divide 18 | % each feature by it's standard deviation, storing 19 | % the standard deviation in sigma. 20 | % 21 | % Note that X is a matrix where each column is a 22 | % feature and each row is an example. You need 23 | % to perform the normalization separately for 24 | % each feature. 25 | % 26 | % Hint: You might find the 'mean' and 'std' functions useful. 27 | % 28 | 29 | 30 | 31 | 32 | mu = mean(X); 33 | sigma = std(X); 34 | 35 | for i=1:size(X)(2) 36 | X_norm(:,i)=(X(:,i)-mu(i))/sigma(i); 37 | end; 38 | 39 | % ============================================================ 40 | 41 | end 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/gradientDescent.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENT Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENT(X, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | 10 | for iter = 1:num_iters 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Perform a single gradient step on the parameter vector 14 | % theta. 15 | % 16 | % Hint: While debugging, it can be useful to print out the values 17 | % of the cost function (computeCost) and gradient here. 
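%
% The single vectorized update performed below is the standard batch
% gradient-descent step (a sketch, assuming X carries the bias column):
%
%   theta := theta - (alpha/m) * X' * (X*theta - y)
%
% which updates every component of theta simultaneously from the same
% pre-update value of theta.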
18 | % 19 | 20 | theta-=alpha/m*((X*theta-y)'*X)(:); 21 | 22 | % ============================================================ 23 | 24 | % Save the cost J in every iteration 25 | J_history(iter) = computeCost(X, y, theta); 26 | 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/gradientDescentMulti.m: -------------------------------------------------------------------------------- 1 | function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters) 2 | %GRADIENTDESCENTMULTI Performs gradient descent to learn theta 3 | % theta = GRADIENTDESCENTMULTI(x, y, theta, alpha, num_iters) updates theta by 4 | % taking num_iters gradient steps with learning rate alpha 5 | 6 | % Initialize some useful values 7 | m = length(y); % number of training examples 8 | J_history = zeros(num_iters, 1); 9 | 10 | for iter = 1:num_iters 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Perform a single gradient step on the parameter vector 14 | % theta. 15 | % 16 | % Hint: While debugging, it can be useful to print out the values 17 | % of the cost function (computeCostMulti) and gradient here. 18 | % 19 | 20 | theta-=alpha/m*((X*theta-y)'*X)(:); 21 | 22 | % ============================================================ 23 | 24 | % Save the cost J in every iteration 25 | J_history(iter) = computeCostMulti(X, y, theta); 26 | 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 
40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex1/ex1/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. 
The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
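%
% example (hypothetical option names, shown only to illustrate usage):
%   opt = varargin2struct('maxiter', 100, 'tol', 1e-6);
%   % opt.maxiter is 100 and opt.tol is 1e-6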
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | JcHZl2ipcxgpuBxm 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex1/ex1/warmUpExercise.m: -------------------------------------------------------------------------------- 1 | function A = warmUpExercise() 2 | %WARMUPEXERCISE Example function in octave 3 | % A = WARMUPEXERCISE() is an example function that returns the 5x5 identity matrix 4 | 5 | A = []; 6 | % ============= YOUR CODE HERE ============== 7 | % Instructions: Return the 5x5 identity matrix 8 | % In octave, we return values by defining which variables 9 | % represent the return values (at the top of the file) 10 | % and then set them accordingly. 11 | 12 | 13 | 14 | A=eye(5); 15 | 16 | 17 | 18 | % =========================================== 19 | 20 | 21 | end 22 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/costFunction.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = costFunction(theta, X, y) 2 | %COSTFUNCTION Compute cost and gradient for logistic regression 3 | % J = COSTFUNCTION(theta, X, y) computes the cost of using theta as the 4 | % parameter for logistic regression and the gradient of the cost 5 | % w.r.t. to the parameters. 6 | 7 | % Initialize some useful values 8 | m = length(y); % number of training examples 9 | 10 | % You need to return the following variables correctly 11 | J = 0; 12 | grad = zeros(size(theta)); 13 | 14 | % ====================== YOUR CODE HERE ====================== 15 | % Instructions: Compute the cost of a particular choice of theta. 16 | % You should set J to the cost. 17 | % Compute the partial derivatives and set grad to the partial 18 | % derivatives of the cost w.r.t. each parameter in theta 19 | % 20 | % Note: grad should have the same dimensions as theta 21 | % 22 | 23 | 24 | h=sigmoid(X*theta); 25 | J=-(y'*log(h)+(1-y')*log(1-h))./m; 26 | grad=(h-y)'*X./m; 27 | 28 | 29 | % ============================================================= 30 | 31 | end 32 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/costFunctionReg.m: -------------------------------------------------------------------------------- 1 | function [J, grad] = costFunctionReg(theta, X, y, lambda) 2 | %COSTFUNCTIONREG Compute cost and gradient for logistic regression with regularization 3 | % J = COSTFUNCTIONREG(theta, X, y, lambda) computes the cost of using 4 | % theta as the parameter for regularized logistic regression and the 5 | % gradient of the cost w.r.t. to the parameters. 
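%
% For reference, the regularized cost evaluated further below is
% (theta(1), the intercept term, is deliberately left unregularized):
%
%   J(theta) = -(1/m) * ( y'*log(h) + (1-y)'*log(1-h) )
%              + lambda/(2*m) * sum( theta(2:end).^2 )
%
% where h = sigmoid(X*theta).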
6 | 7 | % Initialize some useful values 8 | m = length(y); % number of training examples 9 | 10 | % You need to return the following variables correctly 11 | J = 0; 12 | grad = zeros(size(theta)); 13 | 14 | % ====================== YOUR CODE HERE ====================== 15 | % Instructions: Compute the cost of a particular choice of theta. 16 | % You should set J to the cost. 17 | % Compute the partial derivatives and set grad to the partial 18 | % derivatives of the cost w.r.t. each parameter in theta 19 | 20 | 21 | h=sigmoid(X*theta); 22 | J=-(y'*log(h)+(1-y')*log(1-h))./m+lambda/2./m*(theta'*theta-theta(1)^2); 23 | grad(1)=(h-y)'*X(:,1)./m; 24 | grad(2:end)=(h-y)'*X(:,2:end)./m+theta(2:end)'.*lambda/m; 25 | 26 | 27 | 28 | 29 | % ============================================================= 30 | 31 | end 32 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 
12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex2/ex2/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
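%
% example (illustrative field names only):
%   s = mergestruct(struct('a',1), struct('b',2));   % s.a is 1, s.b is 2
%   % if both inputs define the same field, the value from s2 wins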
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i3 matrix, where the first column is all-ones 10 | 11 | % Plot Data 12 | plotData(X(:,2:3), y); 13 | hold on 14 | 15 | if size(X, 2) <= 3 16 | % Only need 2 points to define a line, so choose two endpoints 17 | plot_x = [min(X(:,2))-2, max(X(:,2))+2]; 18 | 19 | % Calculate the decision boundary line 20 | plot_y = (-1./theta(3)).*(theta(2).*plot_x + theta(1)); 21 | 22 | % Plot, and adjust axes for better viewing 23 | plot(plot_x, plot_y) 24 | 25 | % Legend, specific for the exercise 26 | legend('Admitted', 'Not admitted', 'Decision Boundary') 27 | axis([30, 100, 30, 100]) 28 | else 29 | % Here is the grid range 30 | u = linspace(-1, 1.5, 50); 31 | v = linspace(-1, 1.5, 50); 32 | 33 | z = zeros(length(u), length(v)); 34 | % Evaluate z = theta*x over the grid 35 | for i = 1:length(u) 36 | for j = 1:length(v) 37 | z(i,j) = mapFeature(u(i), v(j))*theta; 38 | end 39 | end 40 | z = z'; % important to transpose z before calling contour 41 | 42 | % Plot z = 0 43 | % Notice you need to specify the range [0, 0] 44 | contour(u, v, z, [0, 0], 'LineWidth', 2) 45 | end 46 | hold off 47 | 48 | end 49 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/predict.m: -------------------------------------------------------------------------------- 1 | function p = predict(theta, X) 2 | %PREDICT Predict whether the label is 0 or 1 using learned logistic 3 | %regression parameters theta 4 | % p = PREDICT(theta, X) computes the predictions for X using a 5 | % threshold at 0.5 (i.e., if sigmoid(theta'*x) >= 0.5, predict 1) 6 | 7 | m = size(X, 1); % Number of training examples 8 | 9 | % You need to return the following variables correctly 10 | p = zeros(m, 1); 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % 
Instructions: Complete the following code to make predictions using 14 | % your learned logistic regression parameters. 15 | % You should set p to a vector of 0's and 1's 16 | % 17 | 18 | p=sigmoid(X*theta)>=0.5; 19 | 20 | 21 | 22 | 23 | 24 | % ========================================================================= 25 | 26 | 27 | end 28 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/sigmoid.m: -------------------------------------------------------------------------------- 1 | function g = sigmoid(z) 2 | %SIGMOID Compute sigmoid function 3 | % g = SIGMOID(z) computes the sigmoid of z. 4 | 5 | % You need to return the following variables correctly 6 | g = zeros(size(z)); 7 | 8 | % ====================== YOUR CODE HERE ====================== 9 | % Instructions: Compute the sigmoid of each value of z (z can be a matrix, 10 | % vector or scalar). 11 | 12 | g=1./(1.+exp(-z)); 13 | 14 | % ============================================================= 15 | 16 | end 17 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/submit.m: -------------------------------------------------------------------------------- 1 | function submit() 2 | addpath('./lib'); 3 | 4 | conf.assignmentSlug = 'logistic-regression'; 5 | conf.itemName = 'Logistic Regression'; 6 | conf.partArrays = { ... 7 | { ... 8 | '1', ... 9 | { 'sigmoid.m' }, ... 10 | 'Sigmoid Function', ... 11 | }, ... 12 | { ... 13 | '2', ... 14 | { 'costFunction.m' }, ... 15 | 'Logistic Regression Cost', ... 16 | }, ... 17 | { ... 18 | '3', ... 19 | { 'costFunction.m' }, ... 20 | 'Logistic Regression Gradient', ... 21 | }, ... 22 | { ... 23 | '4', ... 24 | { 'predict.m' }, ... 25 | 'Predict', ... 26 | }, ... 27 | { ... 28 | '5', ... 29 | { 'costFunctionReg.m' }, ... 30 | 'Regularized Logistic Regression Cost', ... 31 | }, ... 32 | { ... 33 | '6', ... 34 | { 'costFunctionReg.m' }, ... 35 | 'Regularized Logistic Regression Gradient', ... 36 | }, ... 
37 | }; 38 | conf.output = @output; 39 | 40 | submitWithConfiguration(conf); 41 | end 42 | 43 | function out = output(partId, auxstring) 44 | % Random Test Cases 45 | X = [ones(20,1) (exp(1) * sin(1:1:20))' (exp(0.5) * cos(1:1:20))']; 46 | y = sin(X(:,1) + X(:,2)) > 0; 47 | if partId == '1' 48 | out = sprintf('%0.5f ', sigmoid(X)); 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', costFunction([0.25 0.5 -0.5]', X, y)); 51 | elseif partId == '3' 52 | [cost, grad] = costFunction([0.25 0.5 -0.5]', X, y); 53 | out = sprintf('%0.5f ', grad); 54 | elseif partId == '4' 55 | out = sprintf('%0.5f ', predict([0.25 0.5 -0.5]', X)); 56 | elseif partId == '5' 57 | out = sprintf('%0.5f ', costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1)); 58 | elseif partId == '6' 59 | [cost, grad] = costFunctionReg([0.25 0.5 -0.5]', X, y, 0.1); 60 | out = sprintf('%0.5f ', grad); 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex2/ex2/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 4.2.1, Tue Jan 30 19:27:38 2018 CST 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | XAdX1C4biTZ56sct 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 
43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/ex3data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex3/ex3/ex3data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/ex3weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex3/ex3/ex3weights.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. 
Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex3/ex3/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 0; 37 | Xm = [ -1 -1 ; -1 -2 ; -2 -1 ; -2 -2 ; ... 38 | 1 1 ; 1 2 ; 2 1 ; 2 2 ; ... 39 | -1 1 ; -1 2 ; -2 1 ; -2 2 ; ... 40 | 1 -1 ; 1 -2 ; -2 -1 ; -2 -2 ]; 41 | ym = [ 1 1 1 1 2 2 2 2 3 3 3 3 4 4 4 4 ]'; 42 | t1 = sin(reshape(1:2:24, 4, 3)); 43 | t2 = cos(reshape(1:2:40, 4, 5)); 44 | 45 | if partId == '1' 46 | [J, grad] = lrCostFunction([0.25 0.5 -0.5]', X, y, 0.1); 47 | out = sprintf('%0.5f ', J); 48 | out = [out sprintf('%0.5f ', grad)]; 49 | elseif partId == '2' 50 | out = sprintf('%0.5f ', oneVsAll(Xm, ym, 4, 0.1)); 51 | elseif partId == '3' 52 | out = sprintf('%0.5f ', predictOneVsAll(t1, Xm)); 53 | elseif partId == '4' 54 | out = sprintf('%0.5f ', predict(t1, t2, Xm)); 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex3/ex3/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 4.2.1, Sat Jan 27 17:35:35 2018 CST 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | 75uVhxLOcNSdA5Yl 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient. 4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. 
Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient.It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. (i.e., numgrad(i) should 12 | % be the (approximately) the partial derivative of J with respect 13 | % to theta(i).) 14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/debugInitializeWeights.m: -------------------------------------------------------------------------------- 1 | function W = debugInitializeWeights(fan_out, fan_in) 2 | %DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in 3 | %incoming connections and fan_out outgoing connections using a fixed 4 | %strategy, this will help you later in debugging 5 | % W = DEBUGINITIALIZEWEIGHTS(fan_in, fan_out) initializes the weights 6 | % of a layer with fan_in incoming connections and fan_out outgoing 7 | % connections using a fix set of values 8 | % 9 | % Note that W should be set to a matrix of size(1 + fan_in, fan_out) as 10 | % the first row of W handles the "bias" terms 11 | % 12 | 13 | % Set W to zeros 14 | W = zeros(fan_out, 1 + fan_in); 15 | 16 | % Initialize W using "sin", this ensures that W is always of the same 17 | % values and will be useful for debugging 18 | W = reshape(sin(1:numel(W)), size(W)) / 10; 19 | 20 | % ========================================================================= 21 | 22 | end 23 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 
42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/ex4data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex4/ex4/ex4data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/ex4weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex4/ex4/ex4weights.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. 
Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex4/ex4/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 
15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex4/ex4/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | 1HVqhVNgx23bmTZo 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/ex5data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex5/ex5/ex5data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 7 | 8 | mu = mean(X); 9 | X_norm = bsxfun(@minus, X, mu); 10 | 11 | sigma = std(X_norm); 12 | X_norm = bsxfun(@rdivide, X_norm, sigma); 13 | 14 | 15 | % ============================================================ 16 | 17 | end 18 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex5/ex5/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
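% (Illustrative note, not part of the original help text.) A minimal usage
% sketch with hypothetical parameter names:
%   opt = varargin2struct('tol', 1e-6, struct('maxiter', 50));
%   % opt.tol == 1e-6 and opt.maxiter == 50 (the struct argument is merged in)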
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | KuC0zDuGPFpZ7eVm 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex5/ex5/trainLinearReg.m: -------------------------------------------------------------------------------- 1 | function [theta] = trainLinearReg(X, y, lambda) 2 | %TRAINLINEARREG Trains linear regression given a dataset (X, y) and a 3 | %regularization parameter lambda 4 | % [theta] = TRAINLINEARREG (X, y, lambda) trains linear regression using 5 | % the dataset (X, y) and regularization parameter lambda. Returns the 6 | % trained parameters theta. 7 | % 8 | 9 | % Initialize Theta 10 | initial_theta = zeros(size(X, 2), 1); 11 | 12 | % Create "short hand" for the cost function to be minimized 13 | costFunction = @(t) linearRegCostFunction(X, y, t, lambda); 14 | 15 | % Now, costFunction is a function that takes in only one argument 16 | options = optimset('MaxIter', 200, 'GradObj', 'on'); 17 | 18 | % Minimize using fmincg 19 | theta = fmincg(costFunction, initial_theta, options); 20 | 21 | end 22 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/C_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/C_1.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/C_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/C_10.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/C_100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/C_100.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/dataset3Params.m: -------------------------------------------------------------------------------- 1 | function [C, sigma] = dataset3Params(X, y, Xval, yval) 2 | %DATASET3PARAMS returns your choice of C and sigma for Part 3 of the exercise 3 | %where you select the optimal (C, sigma) learning parameters to use for SVM 4 | %with RBF kernel 5 | % [C, sigma] = DATASET3PARAMS(X, y, Xval, yval) returns your choice of C and 6 | % sigma. You should complete this function to return the optimal C and 7 | % sigma based on a cross-validation set. 8 | % 9 | 10 | % You need to return the following variables correctly. 
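% (Comment added for clarity; not in the original file.) The assignments just
% below set the required default return values. Later in this function an
% early "return" keeps C = 1 and sigma = 0.1, so the cross-validation grid
% search that follows it is skipped.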
11 | C = 1; 12 | sigma = 0.3; 13 | 14 | % ====================== YOUR CODE HERE ====================== 15 | % Instructions: Fill in this function to return the optimal C and sigma 16 | % learning parameters found using the cross validation set. 17 | % You can use svmPredict to predict the labels on the cross 18 | % validation set. For example, 19 | % predictions = svmPredict(model, Xval); 20 | % will return the predictions on the cross validation set. 21 | % 22 | % Note: You can compute the prediction error using 23 | % mean(double(predictions ~= yval)) 24 | % 25 | sigma=0.1; 26 | return; 27 | _C=[0.01,0.03,0.05,0.1,0.2,0.3,1,3]; 28 | _sigma=[0.01,0.03,0.05,0.1,0.2,0.3,1,3]; 29 | min_err=1e10; 30 | for i=1:length(_C) 31 | for j=1:length(_sigma) 32 | model=svmTrain(X,y,_C(i),@(x1,x2)gaussianKernel(x1,x2,_sigma(j))); 33 | pred=svmPredict(model,Xval); 34 | err=mean(double(pred~=yval)); 35 | if(err Anyone knows how much it costs to host a web portal ? 2 | > 3 | Well, it depends on how many visitors you're expecting. 4 | This can be anywhere from less than 10 bucks a month to a couple of $100. 5 | You should checkout http://www.rackspace.com/ or perhaps Amazon EC2 6 | if youre running something big.. 7 | 8 | To unsubscribe yourself from this mailing list, send an email to: 9 | groupname-unsubscribe@egroups.com 10 | 11 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/emailSample2.txt: -------------------------------------------------------------------------------- 1 | Folks, 2 | 3 | my first time posting - have a bit of Unix experience, but am new to Linux. 4 | 5 | 6 | Just got a new PC at home - Dell box with Windows XP. Added a second hard disk 7 | for Linux. Partitioned the disk and have installed Suse 7.2 from CD, which went 8 | fine except it didn't pick up my monitor. 9 | 10 | I have a Dell branded E151FPp 15" LCD flat panel monitor and a nVidia GeForce4 11 | Ti4200 video card, both of which are probably too new to feature in Suse's default 12 | set. I downloaded a driver from the nVidia website and installed it using RPM. 13 | Then I ran Sax2 (as was recommended in some postings I found on the net), but 14 | it still doesn't feature my video card in the available list. What next? 15 | 16 | Another problem. I have a Dell branded keyboard and if I hit Caps-Lock twice, 17 | the whole machine crashes (in Linux, not Windows) - even the on/off switch is 18 | inactive, leaving me to reach for the power cable instead. 19 | 20 | If anyone can help me in any way with these probs., I'd be really grateful - 21 | I've searched the 'net but have run out of ideas. 22 | 23 | Or should I be going for a different version of Linux such as RedHat? Opinions 24 | welcome. 25 | 26 | Thanks a lot, 27 | Peter 28 | 29 | -- 30 | Irish Linux Users' Group: ilug@linux.ie 31 | http://www.linux.ie/mailman/listinfo/ilug for (un)subscription information. 
32 | List maintainer: listmaster@linux.ie 33 | 34 | 35 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/ex6data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/ex6data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/ex6data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/ex6data2.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/ex6data3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/ex6data3.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/gaussianKernel.m: -------------------------------------------------------------------------------- 1 | function sim = gaussianKernel(x1, x2, sigma) 2 | %RBFKERNEL returns a radial basis function kernel between x1 and x2 3 | % sim = gaussianKernel(x1, x2) returns a gaussian kernel between x1 and x2 4 | % and returns the value in sim 5 | 6 | % Ensure that x1 and x2 are column vectors 7 | x1 = x1(:); x2 = x2(:); 8 | 9 | % You need to return the following variables correctly. 10 | sim = 0; 11 | 12 | % ====================== YOUR CODE HERE ====================== 13 | % Instructions: Fill in this function to return the similarity between x1 14 | % and x2 computed using a Gaussian kernel with bandwidth 15 | % sigma 16 | % 17 | % 18 | 19 | sim=exp(-sum((x1-x2).^2)/2/sigma/sigma); 20 | 21 | % ============================================================= 22 | 23 | end 24 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/getVocabList.m: -------------------------------------------------------------------------------- 1 | function vocabList = getVocabList() 2 | %GETVOCABLIST reads the fixed vocabulary list in vocab.txt and returns a 3 | %cell array of the words 4 | % vocabList = GETVOCABLIST() reads the fixed vocabulary list in vocab.txt 5 | % and returns a cell array of the words in vocabList. 6 | 7 | 8 | %% Read the fixed vocabulary list 9 | fid = fopen('vocab.txt'); 10 | 11 | % Store all dictionary words in cell array vocab{} 12 | n = 1899; % Total number of words in the dictionary 13 | 14 | % For ease of implementation, we use a struct to map the strings => integers 15 | % In practice, you'll want to use some form of hashmap 16 | vocabList = cell(n, 1); 17 | for i = 1:n 18 | % Word Index (can ignore since it will be = i) 19 | fscanf(fid, '%d', 1); 20 | % Actual Word 21 | vocabList{i} = fscanf(fid, '%s', 1); 22 | end 23 | fclose(fid); 24 | 25 | end 26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex6/ex6/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i= 0) = 1; 51 | pred(p < 0) = 0; 52 | 53 | end 54 | 55 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 4.2.1, Mon Jan 29 19:48:55 2018 CST 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | JYxO2KrtmE20jzkL 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/visualizeBoundary.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundary(X, y, model, varargin) 2 | %VISUALIZEBOUNDARY plots a non-linear decision boundary learned by the SVM 3 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a non-linear decision 4 | % boundary learned by the SVM and overlays the data on it 5 | 6 | % Plot the training data on top of the boundary 7 | plotData(X, y) 8 | 9 | % Make classification predictions over a grid of values 10 | x1plot = linspace(min(X(:,1)), max(X(:,1)), 100)'; 11 | x2plot = linspace(min(X(:,2)), max(X(:,2)), 100)'; 12 | [X1, X2] = meshgrid(x1plot, x2plot); 13 | vals = zeros(size(X1)); 14 | for i = 1:size(X1, 2) 15 | this_X = [X1(:, i), X2(:, i)]; 16 | vals(:, i) = svmPredict(model, this_X); 17 | end 18 | 19 | % Plot the SVM boundary 20 | hold on 21 | contour(X1, X2, vals, [0.5 0.5], 'b'); 22 | hold off; 23 | 24 | end 25 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex6/ex6/visualizeBoundaryLinear.m: -------------------------------------------------------------------------------- 1 | function visualizeBoundaryLinear(X, y, model) 2 | %VISUALIZEBOUNDARYLINEAR plots a linear decision boundary learned by the 3 | %SVM 4 | % VISUALIZEBOUNDARYLINEAR(X, y, model) plots a linear decision boundary 5 | % learned by the SVM and overlays the data on it 6 | 7 | w = model.w; 8 | b = model.b; 9 | xp = linspace(min(X(:,1)), max(X(:,1)), 100); 10 | yp = - (w(1)*xp + b)/w(2); 11 | plotData(X, y); 12 | hold on; 13 | plot(xp, yp, '-b'); 14 | hold off 15 | 16 | end 17 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/bird_small.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/bird_small.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/bird_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/bird_small.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/computeCentroids.m: 
-------------------------------------------------------------------------------- 1 | function centroids = computeCentroids(X, idx, K) 2 | %COMPUTECENTROIDS returns the new centroids by computing the means of the 3 | %data points assigned to each centroid. 4 | % centroids = COMPUTECENTROIDS(X, idx, K) returns the new centroids by 5 | % computing the means of the data points assigned to each centroid. It is 6 | % given a dataset X where each row is a single data point, a vector 7 | % idx of centroid assignments (i.e. each entry in range [1..K]) for each 8 | % example, and K, the number of centroids. You should return a matrix 9 | % centroids, where each row of centroids is the mean of the data points 10 | % assigned to it. 11 | % 12 | 13 | % Useful variables 14 | [m n] = size(X); 15 | 16 | % You need to return the following variables correctly. 17 | centroids = zeros(K, n); 18 | 19 | 20 | % ====================== YOUR CODE HERE ====================== 21 | % Instructions: Go over every centroid and compute mean of all points that 22 | % belong to it. Concretely, the row vector centroids(i, :) 23 | % should contain the mean of the data points assigned to 24 | % centroid i. 25 | % 26 | % Note: You can use a for-loop over the centroids to compute this. 27 | % 28 | 29 | cnt=zeros(size(idx)); 30 | for i=1:m 31 | cnt(idx(i))+=1; 32 | centroids(idx(i),:)+=X(i,:); 33 | end; 34 | for i=1:K 35 | centroids(i,:)/=cnt(i); 36 | end; 37 | 38 | % ============================================================= 39 | 40 | 41 | end 42 | 43 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/displayData.m: -------------------------------------------------------------------------------- 1 | function [h, display_array] = displayData(X, example_width) 2 | %DISPLAYDATA Display 2D data in a nice grid 3 | % [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data 4 | % stored in X in a nice grid. It returns the figure handle h and the 5 | % displayed array if requested. 6 | 7 | % Set example_width automatically if not passed in 8 | if ~exist('example_width', 'var') || isempty(example_width) 9 | example_width = round(sqrt(size(X, 2))); 10 | end 11 | 12 | % Gray Image 13 | colormap(gray); 14 | 15 | % Compute rows, cols 16 | [m n] = size(X); 17 | example_height = (n / example_width); 18 | 19 | % Compute number of items to display 20 | display_rows = floor(sqrt(m)); 21 | display_cols = ceil(m / display_rows); 22 | 23 | % Between images padding 24 | pad = 1; 25 | 26 | % Setup blank display 27 | display_array = - ones(pad + display_rows * (example_height + pad), ... 28 | pad + display_cols * (example_width + pad)); 29 | 30 | % Copy each example into a patch on the display array 31 | curr_ex = 1; 32 | for j = 1:display_rows 33 | for i = 1:display_cols 34 | if curr_ex > m, 35 | break; 36 | end 37 | % Copy the patch 38 | 39 | % Get the max value of the patch 40 | max_val = max(abs(X(curr_ex, :))); 41 | display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ... 42 | pad + (i - 1) * (example_width + pad) + (1:example_width)) = ... 
43 | reshape(X(curr_ex, :), example_height, example_width) / max_val; 44 | curr_ex = curr_ex + 1; 45 | end 46 | if curr_ex > m, 47 | break; 48 | end 49 | end 50 | 51 | % Display Image 52 | h = imagesc(display_array, [-1 1]); 53 | 54 | % Do not show axis 55 | axis image off 56 | 57 | drawnow; 58 | 59 | end 60 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/drawLine.m: -------------------------------------------------------------------------------- 1 | function drawLine(p1, p2, varargin) 2 | %DRAWLINE Draws a line from point p1 to point p2 3 | % DRAWLINE(p1, p2) Draws a line from point p1 to point p2 and holds the 4 | % current figure 5 | 6 | plot([p1(1) p2(1)], [p1(2) p2(2)], varargin{:}); 7 | 8 | end -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/ex7data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/ex7data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/ex7data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/ex7data2.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/ex7faces.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/ex7faces.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/face_100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/face_100.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/face_36.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/face_36.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/featureNormalize.m: -------------------------------------------------------------------------------- 1 | function [X_norm, mu, sigma] = featureNormalize(X) 2 | %FEATURENORMALIZE Normalizes the features in X 3 | % FEATURENORMALIZE(X) returns a normalized version of X where 4 | % the mean value of each feature is 0 and the standard deviation 5 | % is 1. This is often a good preprocessing step to do when 6 | % working with learning algorithms. 
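% (Illustrative note, not part of the original help text.) For example, with
%   X = [1 2; 3 4; 5 6]
% this function returns mu = [3 4], sigma = [2 2], and
%   X_norm = [-1 -1; 0 0; 1 1]
% since std() here uses the sample (n-1) normalization.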
7 | 8 | mu = mean(X); 9 | X_norm = bsxfun(@minus, X, mu); 10 | 11 | sigma = std(X_norm); 12 | X_norm = bsxfun(@rdivide, X_norm, sigma); 13 | 14 | 15 | % ============================================================ 16 | 17 | end 18 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/findClosestCentroids.m: -------------------------------------------------------------------------------- 1 | function idx = findClosestCentroids(X, centroids) 2 | %FINDCLOSESTCENTROIDS computes the centroid memberships for every example 3 | % idx = FINDCLOSESTCENTROIDS (X, centroids) returns the closest centroids 4 | % in idx for a dataset X where each row is a single example. idx = m x 1 5 | % vector of centroid assignments (i.e. each entry in range [1..K]) 6 | % 7 | 8 | % Set K 9 | K = size(centroids, 1); 10 | 11 | % You need to return the following variables correctly. 12 | idx = zeros(size(X,1), 1); 13 | 14 | % ====================== YOUR CODE HERE ====================== 15 | % Instructions: Go over every example, find its closest centroid, and store 16 | % the index inside idx at the appropriate location. 17 | % Concretely, idx(i) should contain the index of the centroid 18 | % closest to example i. Hence, it should be a value in the 19 | % range 1..K 20 | % 21 | % Note: You can use a for-loop over the examples to compute this. 22 | % 23 | 24 | for i=1:size(X,1) 25 | [_,idx(i)]=min(sum((centroids-X(i,:)).^2,2)); 26 | end; 27 | 28 | % ============================================================= 29 | 30 | end 31 | 32 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/kMeansInitCentroids.m: -------------------------------------------------------------------------------- 1 | function centroids = kMeansInitCentroids(X, K) 2 | %KMEANSINITCENTROIDS This function initializes K centroids that are to be 3 | %used in K-Means on the dataset X 4 | % centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be 5 | % used with the K-Means on the dataset X 6 | % 7 | 8 | % You should return this values correctly 9 | centroids = zeros(K, size(X, 2)); 10 | 11 | % ====================== YOUR CODE HERE ====================== 12 | % Instructions: You should set centroids to randomly chosen examples from 13 | % the dataset X 14 | % 15 | 16 | % Randomly reorder the indices of examples 17 | randidx = randperm(size(X, 1)); 18 | % Take the first K examples as centroids 19 | centroids = X(randidx(1:K), :); 20 | 21 | % ============================================================= 22 | 23 | end 24 | 25 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/kmeans.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/kmeans.png -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex7/ex7/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex7/ex7/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | sFAy9ZwHrM5CyptH 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/checkCostFunction.m: -------------------------------------------------------------------------------- 1 | function checkCostFunction(lambda) 2 | %CHECKCOSTFUNCTION Creates a collaborative filering problem 3 | %to check your cost function and gradients 4 | % CHECKCOSTFUNCTION(lambda) Creates a collaborative filering problem 5 | % to check your cost function and gradients, it will output the 6 | % analytical gradients produced by your code and the numerical gradients 7 | % (computed using computeNumericalGradient). These two gradient 8 | % computations should result in very similar values. 9 | 10 | % Set lambda 11 | if ~exist('lambda', 'var') || isempty(lambda) 12 | lambda = 0; 13 | end 14 | 15 | %% Create small problem 16 | X_t = rand(4, 3); 17 | Theta_t = rand(5, 3); 18 | 19 | % Zap out most entries 20 | Y = X_t * Theta_t'; 21 | Y(rand(size(Y)) > 0.5) = 0; 22 | R = zeros(size(Y)); 23 | R(Y ~= 0) = 1; 24 | 25 | %% Run Gradient Checking 26 | X = randn(size(X_t)); 27 | Theta = randn(size(Theta_t)); 28 | num_users = size(Y, 2); 29 | num_movies = size(Y, 1); 30 | num_features = size(Theta_t, 2); 31 | 32 | numgrad = computeNumericalGradient( ... 33 | @(t) cofiCostFunc(t, Y, R, num_users, num_movies, ... 34 | num_features, lambda), [X(:); Theta(:)]); 35 | 36 | [cost, grad] = cofiCostFunc([X(:); Theta(:)], Y, R, num_users, ... 37 | num_movies, num_features, lambda); 38 | 39 | disp([numgrad grad]); 40 | fprintf(['The above two columns you get should be very similar.\n' ... 41 | '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']); 42 | 43 | diff = norm(numgrad-grad)/norm(numgrad+grad); 44 | fprintf(['If your cost function implementation is correct, then \n' ... 45 | 'the relative difference will be small (less than 1e-9). \n' ... 46 | '\nRelative Difference: %g\n'], diff); 47 | 48 | end -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/computeNumericalGradient.m: -------------------------------------------------------------------------------- 1 | function numgrad = computeNumericalGradient(J, theta) 2 | %COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences" 3 | %and gives us a numerical estimate of the gradient. 4 | % numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical 5 | % gradient of the function J around theta. Calling y = J(theta) should 6 | % return the function value at theta. 7 | 8 | % Notes: The following code implements numerical gradient checking, and 9 | % returns the numerical gradient.It sets numgrad(i) to (a numerical 10 | % approximation of) the partial derivative of J with respect to the 11 | % i-th input argument, evaluated at theta. 
(i.e., numgrad(i) should 12 | % be the (approximately) the partial derivative of J with respect 13 | % to theta(i).) 14 | % 15 | 16 | numgrad = zeros(size(theta)); 17 | perturb = zeros(size(theta)); 18 | e = 1e-4; 19 | for p = 1:numel(theta) 20 | % Set perturbation vector 21 | perturb(p) = e; 22 | loss1 = J(theta - perturb); 23 | loss2 = J(theta + perturb); 24 | % Compute Numerical Gradient 25 | numgrad(p) = (loss2 - loss1) / (2*e); 26 | perturb(p) = 0; 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/estimateGaussian.m: -------------------------------------------------------------------------------- 1 | function [mu sigma2] = estimateGaussian(X) 2 | %ESTIMATEGAUSSIAN This function estimates the parameters of a 3 | %Gaussian distribution using the data in X 4 | % [mu sigma2] = estimateGaussian(X), 5 | % The input X is the dataset with each n-dimensional data point in one row 6 | % The output is an n-dimensional vector mu, the mean of the data set 7 | % and the variances sigma^2, an n x 1 vector 8 | % 9 | 10 | % Useful variables 11 | [m, n] = size(X); 12 | 13 | % You should return these values correctly 14 | mu = zeros(n, 1); 15 | sigma2 = zeros(n, 1); 16 | 17 | % ====================== YOUR CODE HERE ====================== 18 | % Instructions: Compute the mean of the data and the variances 19 | % In particular, mu(i) should contain the mean of 20 | % the data for the i-th feature and sigma2(i) 21 | % should contain variance of the i-th feature. 22 | % 23 | 24 | mu=mean(X)(:); 25 | sigma2=var(X)(:)*(m-1)/m; 26 | 27 | % ============================================================= 28 | 29 | 30 | end 31 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/ex8_movieParams.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex8/ex8/ex8_movieParams.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/ex8_movies.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex8/ex8/ex8_movies.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/ex8data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex8/ex8/ex8data1.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/ex8data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex8/ex8/ex8data2.mat -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/AUTHORS.txt: -------------------------------------------------------------------------------- 1 | The author of "jsonlab" toolbox is Qianqian Fang. Qianqian 2 | is currently an Assistant Professor at Massachusetts General Hospital, 3 | Harvard Medical School. 
4 | 5 | Address: Martinos Center for Biomedical Imaging, 6 | Massachusetts General Hospital, 7 | Harvard Medical School 8 | Bldg 149, 13th St, Charlestown, MA 02129, USA 9 | URL: http://nmr.mgh.harvard.edu/~fangq/ 10 | Email: or 11 | 12 | 13 | The script loadjson.m was built upon previous works by 14 | 15 | - Nedialko Krouchev: http://www.mathworks.com/matlabcentral/fileexchange/25713 16 | date: 2009/11/02 17 | - François Glineur: http://www.mathworks.com/matlabcentral/fileexchange/23393 18 | date: 2009/03/22 19 | - Joel Feenstra: http://www.mathworks.com/matlabcentral/fileexchange/20565 20 | date: 2008/07/03 21 | 22 | 23 | This toolbox contains patches submitted by the following contributors: 24 | 25 | - Blake Johnson 26 | part of revision 341 27 | 28 | - Niclas Borlin 29 | various fixes in revision 394, including 30 | - loadjson crashes for all-zero sparse matrix. 31 | - loadjson crashes for empty sparse matrix. 32 | - Non-zero size of 0-by-N and N-by-0 empty matrices is lost after savejson/loadjson. 33 | - loadjson crashes for sparse real column vector. 34 | - loadjson crashes for sparse complex column vector. 35 | - Data is corrupted by savejson for sparse real row vector. 36 | - savejson crashes for sparse complex row vector. 37 | 38 | - Yul Kang 39 | patches for svn revision 415. 40 | - savejson saves an empty cell array as [] instead of null 41 | - loadjson differentiates an empty struct from an empty array 42 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/LICENSE_BSD.txt: -------------------------------------------------------------------------------- 1 | Copyright 2011-2015 Qianqian Fang . All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are 4 | permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this list of 7 | conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this list 10 | of conditions and the following disclaimer in the documentation and/or other materials 11 | provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY EXPRESS OR IMPLIED 14 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND 15 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS 16 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 17 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 19 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 21 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | 23 | The views and conclusions contained in the software and documentation are those of the 24 | authors and should not be interpreted as representing official policies, either expressed 25 | or implied, of the copyright holders. 
26 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/README.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/Coursera/machine-learning-ex8/ex8/lib/jsonlab/README.txt -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/jsonopt.m: -------------------------------------------------------------------------------- 1 | function val=jsonopt(key,default,varargin) 2 | % 3 | % val=jsonopt(key,default,optstruct) 4 | % 5 | % setting options based on a struct. The struct can be produced 6 | % by varargin2struct from a list of 'param','value' pairs 7 | % 8 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 9 | % 10 | % $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $ 11 | % 12 | % input: 13 | % key: a string with which one look up a value from a struct 14 | % default: if the key does not exist, return default 15 | % optstruct: a struct where each sub-field is a key 16 | % 17 | % output: 18 | % val: if key exists, val=optstruct.key; otherwise val=default 19 | % 20 | % license: 21 | % BSD, see LICENSE_BSD.txt files for details 22 | % 23 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 24 | % 25 | 26 | val=default; 27 | if(nargin<=2) return; end 28 | opt=varargin{1}; 29 | if(isstruct(opt) && isfield(opt,key)) 30 | val=getfield(opt,key); 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/mergestruct.m: -------------------------------------------------------------------------------- 1 | function s=mergestruct(s1,s2) 2 | % 3 | % s=mergestruct(s1,s2) 4 | % 5 | % merge two struct objects into one 6 | % 7 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 8 | % date: 2012/12/22 9 | % 10 | % input: 11 | % s1,s2: a struct object, s1 and s2 can not be arrays 12 | % 13 | % output: 14 | % s: the merged struct object. fields in s1 and s2 will be combined in s. 15 | % 16 | % license: 17 | % BSD, see LICENSE_BSD.txt files for details 18 | % 19 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 20 | % 21 | 22 | if(~isstruct(s1) || ~isstruct(s2)) 23 | error('input parameters contain non-struct'); 24 | end 25 | if(length(s1)>1 || length(s2)>1) 26 | error('can not merge struct arrays'); 27 | end 28 | fn=fieldnames(s2); 29 | s=s1; 30 | for i=1:length(fn) 31 | s=setfield(s,fn{i},getfield(s2,fn{i})); 32 | end 33 | 34 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/lib/jsonlab/varargin2struct.m: -------------------------------------------------------------------------------- 1 | function opt=varargin2struct(varargin) 2 | % 3 | % opt=varargin2struct('param1',value1,'param2',value2,...) 4 | % or 5 | % opt=varargin2struct(...,optstruct,...) 6 | % 7 | % convert a series of input parameters into a structure 8 | % 9 | % authors:Qianqian Fang (fangq nmr.mgh.harvard.edu) 10 | % date: 2012/12/22 11 | % 12 | % input: 13 | % 'param', value: the input parameters should be pairs of a string and a value 14 | % optstruct: if a parameter is a struct, the fields will be merged to the output struct 15 | % 16 | % output: 17 | % opt: a struct where opt.param1=value1, opt.param2=value2 ... 
18 | % 19 | % license: 20 | % BSD, see LICENSE_BSD.txt files for details 21 | % 22 | % -- this function is part of jsonlab toolbox (http://iso2mesh.sf.net/cgi-bin/index.cgi?jsonlab) 23 | % 24 | 25 | len=length(varargin); 26 | opt=struct; 27 | if(len==0) return; end 28 | i=1; 29 | while(i<=len) 30 | if(isstruct(varargin{i})) 31 | opt=mergestruct(opt,varargin{i}); 32 | elseif(ischar(varargin{i}) && i bestF1 37 | bestF1 = F1; 38 | bestEpsilon = epsilon; 39 | end 40 | end 41 | 42 | end 43 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/token.mat: -------------------------------------------------------------------------------- 1 | # Created by Octave 4.2.1, Tue Jan 30 12:12:52 2018 CST 2 | # name: email 3 | # type: sq_string 4 | # elements: 1 5 | # length: 30 6 | wengjy16@mails.tsinghua.edu.cn 7 | 8 | 9 | # name: token 10 | # type: sq_string 11 | # elements: 1 12 | # length: 16 13 | IAuGiu4upiFvy9zV 14 | 15 | 16 | -------------------------------------------------------------------------------- /Coursera/machine-learning-ex8/ex8/visualizeFit.m: -------------------------------------------------------------------------------- 1 | function visualizeFit(X, mu, sigma2) 2 | %VISUALIZEFIT Visualize the dataset and its estimated distribution. 3 | % VISUALIZEFIT(X, mu, sigma2) This visualization shows you the 4 | % probability density function of the Gaussian distribution. Each example 5 | % has a location (x1, x2) that depends on its feature values. 6 | % 7 | 8 | [X1,X2] = meshgrid(0:.5:35); 9 | Z = multivariateGaussian([X1(:) X2(:)],mu,sigma2); 10 | Z = reshape(Z,size(X1)); 11 | 12 | plot(X(:, 1), X(:, 2),'bx'); 13 | hold on; 14 | % Do not plot if there are infinities 15 | if (sum(isinf(Z)) == 0) 16 | contour(X1, X2, Z, 10.^(-20:3:0)'); 17 | end 18 | hold off; 19 | 20 | end -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Machine Learning 2 | 3 | My mentor said my fundamentals were shaky (for example, I did not know / had forgotten what regularization is) and asked me to relearn the basics from scratch, hence this repo. 4 | 5 | [Machine Learning (Andrew Ng)](https://www.coursera.org/learn/machine-learning): stored under the Coursera folder, containing all lecture materials and programming assignments; 6 | 7 | [CS231n](http://cs231n.github.io): stored under the cs231n folder, containing all lecture materials and programming assignments (using PyTorch) 8 | 9 | The `notes` folder contains personal study notes and takeaways. 10 | 11 | The markdown files are best opened with Typora. 12 | -------------------------------------------------------------------------------- /cs231n/assignment1/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | .env/* 4 | -------------------------------------------------------------------------------- /cs231n/assignment1/README.md: -------------------------------------------------------------------------------- 1 | Details about this assignment can be found [on the course webpage](http://cs231n.github.io/), under Assignment #1 of Spring 2017. 2 | -------------------------------------------------------------------------------- /cs231n/assignment1/collectSubmission.sh: -------------------------------------------------------------------------------- 1 | rm -f assignment1.zip 2 | zip -r assignment1.zip .
-x "*.git*" "*cs231n/datasets*" "*.ipynb_checkpoints*" "*README.md" "*collectSubmission.sh" "*requirements.txt" ".env/*" 3 | -------------------------------------------------------------------------------- /cs231n/assignment1/cs231n/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment1/cs231n/__init__.py -------------------------------------------------------------------------------- /cs231n/assignment1/cs231n/classifiers/__init__.py: -------------------------------------------------------------------------------- 1 | from cs231n.classifiers.k_nearest_neighbor import * 2 | from cs231n.classifiers.linear_classifier import * 3 | -------------------------------------------------------------------------------- /cs231n/assignment1/cs231n/datasets/.gitignore: -------------------------------------------------------------------------------- 1 | cifar-10-batches-py/* 2 | tiny-imagenet-100-A* 3 | tiny-imagenet-100-B* 4 | tiny-100-A-pretrained/* 5 | -------------------------------------------------------------------------------- /cs231n/assignment1/cs231n/datasets/get_datasets.sh: -------------------------------------------------------------------------------- 1 | # Get CIFAR10 2 | wget http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 3 | tar -xzvf cifar-10-python.tar.gz 4 | rm cifar-10-python.tar.gz 5 | -------------------------------------------------------------------------------- /cs231n/assignment1/cs231n/nn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment1/cs231n/nn.png -------------------------------------------------------------------------------- /cs231n/assignment1/frameworkpython: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # what real Python executable to use 4 | #PYVER=2.7 5 | #PATHTOPYTHON=/usr/local/bin/ 6 | #PYTHON=${PATHTOPYTHON}python${PYVER} 7 | 8 | PYTHON=$(which $(readlink .env/bin/python)) # only works with python3 9 | 10 | # find the root of the virtualenv, it should be the parent of the dir this script is in 11 | ENV=`$PYTHON -c "import os; print(os.path.abspath(os.path.join(os.path.dirname(\"$0\"), '..')))"` 12 | 13 | # now run Python with the virtualenv set as Python's HOME 14 | export PYTHONHOME=$ENV 15 | exec $PYTHON "$@" 16 | -------------------------------------------------------------------------------- /cs231n/assignment1/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython==0.23.4 2 | Jinja2==2.8 3 | MarkupSafe==0.23 4 | Pillow==6.2.0 5 | Pygments==2.0.2 6 | appnope==0.1.0 7 | argparse==1.2.1 8 | backports-abc==0.4 9 | backports.ssl-match-hostname==3.5.0.1 10 | certifi==2015.11.20.1 11 | cycler==0.10.0 12 | decorator==4.0.6 13 | future==0.16.0 14 | gnureadline==6.3.3 15 | ipykernel==4.2.2 16 | ipython==4.0.1 17 | ipython-genutils==0.1.0 18 | ipywidgets==4.1.1 19 | jsonschema==2.5.1 20 | jupyter==1.0.0 21 | jupyter-client==4.1.1 22 | jupyter-console==4.0.3 23 | jupyter-core==4.0.6 24 | matplotlib==2.0.0 25 | mistune==0.7.1 26 | nbconvert==4.1.0 27 | nbformat==4.0.1 28 | notebook==5.7.8 29 | numpy==1.10.4 30 | path.py==8.1.2 31 | pexpect==4.0.1 32 | pickleshare==0.5 33 | ptyprocess==0.5 34 | pyparsing==2.0.7 35 | python-dateutil==2.4.2 36 | pytz==2015.7 37 | 
pyzmq==15.1.0 38 | qtconsole==4.1.1 39 | scipy==0.16.1 40 | simplegeneric==0.8.1 41 | singledispatch==3.4.0.3 42 | six==1.10.0 43 | terminado==0.5 44 | tornado==4.3 45 | traitlets==4.0.0 46 | -------------------------------------------------------------------------------- /cs231n/assignment1/setup_googlecloud.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This is the set-up script for Google Cloud. 4 | sudo apt-get update 5 | sudo apt-get install libncurses5-dev 6 | sudo apt-get install python-dev 7 | sudo apt-get install python-pip 8 | sudo apt-get install libjpeg8-dev 9 | sudo ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib 10 | pip install pillow 11 | sudo apt-get build-dep python-imaging 12 | sudo apt-get install libjpeg8 libjpeg62-dev libfreetype6 libfreetype6-dev 13 | sudo pip install virtualenv 14 | #virtualenv .env # Create a virtual environment 15 | #source .env/bin/activate # Activate the virtual environment 16 | #pip install -r requirements.txt # Install dependencies 17 | #deactivate 18 | echo "**************************************************" 19 | echo "***** End of Google Cloud Set-up Script ********" 20 | echo "**************************************************" 21 | echo "" 22 | echo "If you had no errors, You can proceed to work with your virtualenv as normal." 23 | echo "(run 'source .env/bin/activate' in your assignment directory to load the venv," 24 | echo " and run 'deactivate' to exit the venv. See assignment handout for details.)" 25 | -------------------------------------------------------------------------------- /cs231n/assignment1/start_ipython_osx.sh: -------------------------------------------------------------------------------- 1 | # Assume the virtualenv is called .env 2 | 3 | cp frameworkpython .env/bin 4 | .env/bin/frameworkpython -m IPython notebook 5 | -------------------------------------------------------------------------------- /cs231n/assignment2/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | .env/* 4 | -------------------------------------------------------------------------------- /cs231n/assignment2/collectSubmission.sh: -------------------------------------------------------------------------------- 1 | files="BatchNormalization.ipynb 2 | ConvolutionalNetworks.ipynb 3 | Dropout.ipynb 4 | FullyConnectedNets.ipynb 5 | PyTorch.ipynb 6 | TensorFlow.ipynb" 7 | 8 | for file in $files 9 | do 10 | if [ ! -f $file ]; then 11 | echo "Required notebook $file not found." 12 | exit 0 13 | fi 14 | done 15 | 16 | rm -f assignment2.zip 17 | zip -r assignment2.zip . 
-x "*.git*" "*cs231n/datasets*" "*.ipynb_checkpoints*" "*README.md" "*collectSubmission.sh" "*requirements.txt" ".env/*" "*.pyc" "*cs231n/build/*" 18 | -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | im2col_cython.c 3 | im2col_cython.so 4 | -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/cs231n/__init__.py -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/classifiers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/cs231n/classifiers/__init__.py -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/datasets/.gitignore: -------------------------------------------------------------------------------- 1 | cifar-10-batches-py/* 2 | tiny-imagenet-100-A* 3 | tiny-imagenet-100-B* 4 | tiny-100-A-pretrained/* 5 | -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/datasets/get_datasets.sh: -------------------------------------------------------------------------------- 1 | # Get CIFAR10 2 | wget http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz 3 | tar -xzvf cifar-10-python.tar.gz 4 | rm cifar-10-python.tar.gz 5 | -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/im2col_cython.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/cs231n/im2col_cython.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/im2col_cython.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/cs231n/im2col_cython.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /cs231n/assignment2/cs231n/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Build import cythonize 4 | import numpy 5 | 6 | extensions = [ 7 | Extension('im2col_cython', ['im2col_cython.pyx'], 8 | include_dirs = [numpy.get_include()] 9 | ), 10 | ] 11 | 12 | setup( 13 | ext_modules = cythonize(extensions), 14 | ) 15 | -------------------------------------------------------------------------------- /cs231n/assignment2/frameworkpython: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # what real Python executable to use 4 | #PYVER=2.7 5 | #PATHTOPYTHON=/usr/local/bin/ 6 | #PYTHON=${PATHTOPYTHON}python${PYVER} 7 | 8 | PYTHON=$(which $(readlink .env/bin/python)) # only works with python3 9 | 
10 | # find the root of the virtualenv, it should be the parent of the dir this script is in 11 | ENV=`$PYTHON -c "import os; print(os.path.abspath(os.path.join(os.path.dirname(\"$0\"), '..')))"` 12 | 13 | # now run Python with the virtualenv set as Python's HOME 14 | export PYTHONHOME=$ENV 15 | exec $PYTHON "$@" 16 | -------------------------------------------------------------------------------- /cs231n/assignment2/kitten.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/kitten.jpg -------------------------------------------------------------------------------- /cs231n/assignment2/puppy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment2/puppy.jpg -------------------------------------------------------------------------------- /cs231n/assignment2/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython==0.25.2 2 | Jinja2==2.8 3 | MarkupSafe==0.23 4 | Pillow==6.2.0 5 | Pygments==2.0.2 6 | appnope==0.1.0 7 | argparse==1.2.1 8 | backports-abc==0.4 9 | backports.ssl-match-hostname==3.5.0.1 10 | certifi==2015.11.20.1 11 | cycler==0.10.0 12 | decorator==4.0.6 13 | future==0.16.0 14 | gnureadline==6.3.3 15 | h5py==2.7.0 16 | ipykernel==4.2.2 17 | ipython==4.0.1 18 | ipython-genutils==0.1.0 19 | ipywidgets==4.1.1 20 | jsonschema==2.5.1 21 | jupyter==1.0.0 22 | jupyter-client==4.1.1 23 | jupyter-console==4.0.3 24 | jupyter-core==4.0.6 25 | matplotlib==2.0.0 26 | mistune==0.7.1 27 | nbconvert==4.1.0 28 | nbformat==4.0.1 29 | nltk==3.4.5 30 | notebook==5.7.8 31 | numpy==1.12.1 32 | path.py==8.1.2 33 | pexpect==4.0.1 34 | pickleshare==0.5 35 | ptyprocess==0.5 36 | pyparsing==2.0.7 37 | python-dateutil==2.4.2 38 | pytz==2015.7 39 | pyzmq==15.1.0 40 | qtconsole==4.1.1 41 | scipy==0.19.0 42 | simplegeneric==0.8.1 43 | singledispatch==3.4.0.3 44 | site==0.0.1 45 | six==1.10.0 46 | terminado==0.5 47 | tornado==4.3 48 | traitlets==4.0.0 49 | -------------------------------------------------------------------------------- /cs231n/assignment2/start_ipython_osx.sh: -------------------------------------------------------------------------------- 1 | # Assume the virtualenv is called .env 2 | 3 | cp frameworkpython .env/bin 4 | .env/bin/frameworkpython -m IPython notebook 5 | -------------------------------------------------------------------------------- /cs231n/assignment3/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | .env/* 4 | -------------------------------------------------------------------------------- /cs231n/assignment3/collectSubmission.sh: -------------------------------------------------------------------------------- 1 | files="GANs-PyTorch.ipynb 2 | GANs-TensorFlow.ipynb 3 | LSTM_Captioning.ipynb 4 | NetworkVisualization-PyTorch.ipynb 5 | NetworkVisualization-TensorFlow.ipynb 6 | RNN_Captioning.ipynb 7 | StyleTransfer-PyTorch.ipynb 8 | StyleTransfer-TensorFlow.ipynb" 9 | 10 | for file in $files 11 | do 12 | if [ ! -f $file ]; then 13 | echo "Required notebook $file not found." 14 | exit 0 15 | fi 16 | done 17 | 18 | 19 | rm -f assignment3.zip 20 | zip -r assignment3.zip . 
-x "*.git" "*cs231n/datasets*" "*.ipynb_checkpoints*" "*README.md" "*collectSubmission.sh" "*requirements.txt" ".env/*" "*.pyc" "*cs231n/build/*" 21 | -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | im2col_cython.c 3 | im2col_cython.so 4 | -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/cs231n/__init__.py -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/classifiers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/cs231n/classifiers/__init__.py -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/datasets/.gitignore: -------------------------------------------------------------------------------- 1 | coco_captioning 2 | get_coco_captioning.sh 3 | get_squeezenet_tf.sh 4 | squeezenet.ckpt.data-00000-of-00001 5 | squeezenet.ckpt.meta 6 | imagenet_val_25.npz 7 | squeezenet.ckpt.index 8 | MNIST_data 9 | -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/datasets/get_assignment3_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ./get_coco_captioning.sh 3 | ./get_squeezenet_tf.sh 4 | ./get_imagenet_val.sh 5 | 6 | -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/datasets/get_imagenet_val.sh: -------------------------------------------------------------------------------- 1 | wget http://cs231n.stanford.edu/imagenet_val_25.npz 2 | -------------------------------------------------------------------------------- /cs231n/assignment3/cs231n/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Build import cythonize 4 | import numpy 5 | 6 | extensions = [ 7 | Extension('im2col_cython', ['im2col_cython.pyx'], 8 | include_dirs = [numpy.get_include()] 9 | ), 10 | ] 11 | 12 | setup( 13 | ext_modules = cythonize(extensions), 14 | ) 15 | -------------------------------------------------------------------------------- /cs231n/assignment3/example_styletransfer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/example_styletransfer.png -------------------------------------------------------------------------------- /cs231n/assignment3/frameworkpython: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # what real Python executable to use 4 | #PYVER=2.7 5 | #PATHTOPYTHON=/usr/local/bin/ 6 | #PYTHON=${PATHTOPYTHON}python${PYVER} 7 | 8 | PYTHON=$(which $(readlink .env/bin/python)) # only works with python3 9 | 10 | # find the root of the virtualenv, it should be the parent of the dir this script is in 11 | ENV=`$PYTHON -c 
"import os; print(os.path.abspath(os.path.join(os.path.dirname(\"$0\"), '..')))"` 12 | 13 | # now run Python with the virtualenv set as Python's HOME 14 | export PYTHONHOME=$ENV 15 | exec $PYTHON "$@" 16 | -------------------------------------------------------------------------------- /cs231n/assignment3/gan-checks-tf.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/gan-checks-tf.npz -------------------------------------------------------------------------------- /cs231n/assignment3/gan_outputs_pytorch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/gan_outputs_pytorch.png -------------------------------------------------------------------------------- /cs231n/assignment3/gan_outputs_tf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/gan_outputs_tf.png -------------------------------------------------------------------------------- /cs231n/assignment3/kitten.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/kitten.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/requirements.txt: -------------------------------------------------------------------------------- 1 | Cython==0.25.2 2 | Jinja2==2.8 3 | MarkupSafe==0.23 4 | Pillow==6.2.0 5 | Pygments==2.0.2 6 | appnope==0.1.0 7 | argparse==1.2.1 8 | backports-abc==0.4 9 | backports.ssl-match-hostname==3.5.0.1 10 | certifi==2015.11.20.1 11 | cycler==0.10.0 12 | decorator==4.0.6 13 | future==0.16.0 14 | gnureadline==6.3.3 15 | h5py==2.7.0 16 | ipykernel==4.2.2 17 | ipython==4.0.1 18 | ipython-genutils==0.1.0 19 | ipywidgets==4.1.1 20 | jsonschema==2.5.1 21 | jupyter==1.0.0 22 | jupyter-client==4.1.1 23 | jupyter-console==4.0.3 24 | jupyter-core==4.0.6 25 | matplotlib==2.0.0 26 | mistune==0.7.1 27 | nbconvert==4.1.0 28 | nbformat==4.0.1 29 | nltk==3.4.5 30 | notebook==5.7.8 31 | numpy==1.12.1 32 | path.py==8.1.2 33 | pexpect==4.0.1 34 | pickleshare==0.5 35 | ptyprocess==0.5 36 | pyparsing==2.0.7 37 | python-dateutil==2.4.2 38 | pytz==2015.7 39 | pyzmq==15.1.0 40 | qtconsole==4.1.1 41 | scipy==0.19.0 42 | simplegeneric==0.8.1 43 | singledispatch==3.4.0.3 44 | site==0.0.1 45 | six==1.10.0 46 | terminado==0.5 47 | tornado==4.3 48 | traitlets==4.0.0 49 | h5py==2.7.0 50 | -------------------------------------------------------------------------------- /cs231n/assignment3/sky.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/sky.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/start_ipython_osx.sh: -------------------------------------------------------------------------------- 1 | # Assume the virtualenv is called .env 2 | 3 | cp frameworkpython .env/bin 4 | .env/bin/frameworkpython -m IPython notebook 5 | -------------------------------------------------------------------------------- 
/cs231n/assignment3/style-transfer-checks-tf.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/style-transfer-checks-tf.npz -------------------------------------------------------------------------------- /cs231n/assignment3/style-transfer-checks.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/style-transfer-checks.npz -------------------------------------------------------------------------------- /cs231n/assignment3/styles/composition_vii.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/styles/composition_vii.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/styles/muse.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/styles/muse.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/styles/starry_night.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/styles/starry_night.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/styles/the_scream.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/styles/the_scream.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/styles/tubingen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/assignment3/styles/tubingen.jpg -------------------------------------------------------------------------------- /cs231n/assignment3/where_are_my_drivers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Checking for CUDA and installing." 3 | # Check for CUDA and try to install. 4 | if ! dpkg-query -W cuda; then 5 | # The 16.04 installer works with 16.10. 
6 | curl -O http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_8.0.61-1_amd64.deb 7 | dpkg -i ./cuda-repo-ubuntu1604_8.0.61-1_amd64.deb 8 | apt-get update 9 | apt-get install cuda -y 10 | fi 11 | -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_image_caption.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_image_caption.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_lstm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_lstm.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_rnn_layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_rnn_layer.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_rnn_layer2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_rnn_layer2.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_rnn_layer3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_rnn_layer3.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l10_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l10_summary.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l11_fft.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l11_fft.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l11_im2col.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l11_im2col.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l11_stack_cnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l11_stack_cnn.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l11_transfer_learning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l11_transfer_learning.png 
-------------------------------------------------------------------------------- /cs231n/notes/Images/l13_cascades.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_cascades.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_hypercolumns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_hypercolumns.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_multi_scale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_multi_scale.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_refinement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_refinement.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_semantic_segmentation_cnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_semantic_segmentation_cnn.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_similar_to_rcnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_similar_to_rcnn.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_soft_attentation_for_caption.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_soft_attentation_for_caption.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_soft_vs_hard1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_soft_vs_hard1.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_soft_vs_hard2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_soft_vs_hard2.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l13_upsampling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l13_upsampling.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l2_deep_learning_pipline.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l2_deep_learning_pipline.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l2_traditional_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l2_traditional_pipeline.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l3_softmax_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l3_softmax_function.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l3_softmax_loss_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l3_softmax_loss_function.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l3_svm_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l3_svm_loss.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l3_svm_loss_with_regularization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l3_svm_loss_with_regularization.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l4_activation_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l4_activation_function.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l4_backpropagation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l4_backpropagation.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l4_nerual.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l4_nerual.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l5_batch_normalization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l5_batch_normalization.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l5_parameters_initialization.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l5_parameters_initialization.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l6_dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l6_dropout.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l7_convolutional_layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l7_convolutional_layer.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l7_pooling_layer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l7_pooling_layer.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l7_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l7_summary.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_computer_vision_tasks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_computer_vision_tasks.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_localization_as_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_localization_as_regression.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_overfeat_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_overfeat_1.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_overfeat_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_overfeat_2.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_recap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_recap.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l8_selective_search.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l8_selective_search.png 
-------------------------------------------------------------------------------- /cs231n/notes/Images/l9_deconvolution_approaches.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_deconvolution_approaches.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_deep_dream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_deep_dream.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_image_gradient.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_image_gradient.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_image_reconstructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_image_reconstructure.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_occlusion_experiments.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_occlusion_experiments.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_optimization_to_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_optimization_to_image.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_t_sne.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_t_sne.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_visualize_activations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_visualize_activations.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_visualize_deconvolution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_visualize_deconvolution.png -------------------------------------------------------------------------------- /cs231n/notes/Images/l9_visualize_filers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_visualize_filers.png -------------------------------------------------------------------------------- 
/cs231n/notes/Images/l9_visualize_patches.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/cs231n/notes/Images/l9_visualize_patches.png
--------------------------------------------------------------------------------
/cs231n/notes/L10_Recurrent_Neural_Networks.md:
--------------------------------------------------------------------------------
# L10: Recurrent Neural Networks


## 1. Vanilla RNN layer

The key idea of an RNN is that it keeps information from earlier time steps, which makes it well suited to sequences (the vanilla update is $h_t=\tanh(W_{hh}h_{t-1}+W_{xh}x_t)$). An RNN layer looks like this:

![l10_rnn_layer.png](./Images/l10_rnn_layer.png)

![l10_rnn_layer2.png](./Images/l10_rnn_layer2.png)

A network with one RNN layer and one output layer:

![l10_rnn_layer3.png](./Images/l10_rnn_layer3.png)


## 2. Image Captioning

![l10_image_caption.png](./Images/l10_image_caption.png)

## 3. LSTM

Vanilla RNNs are prone to exploding and vanishing gradients during training; LSTMs work better in practice.

An LSTM also retains information over longer spans than a vanilla RNN; f is the forget gate.

![l10_lstm.png](./Images/l10_lstm.png)


## 4. Summary

![l10_summary.png](./Images/l10_summary.png)

--------------------------------------------------------------------------------
/cs231n/notes/L11_CNNs_in_practice.md:
--------------------------------------------------------------------------------
# L11: CNNs in practice


## 1. Making the most of your data

### 1.1 Data augmentation

1. Horizontal flips

2. Random crops/scales

3. Color jitter

4. And so on

### 1.2 Transfer learning

![l11_transfer_learning.png](./Images/l11_transfer_learning.png)


## 2. All about convolution

### 2.1 How to arrange them

It has been shown that stacking small kernels in a deeper network works better than using large kernels in a shallow one, and it also saves memory.

![l11_stack_cnn.png](./Images/l11_stack_cnn.png)

### 2.2 How to compute them fast

There are three ways to compute convolutions:

im2col, FFT, and fast algorithms.

#### 2.2.1 im2col

Unroll the feature maps and kernels into matrices and perform the convolution as a matrix multiplication. This is efficient, easy to implement, and the most commonly used approach; its drawback is high memory consumption.

![l11_im2col.png](./Images/l11_im2col.png)

#### 2.2.2 FFT

Transform the feature maps and kernels into the frequency domain with the FFT, multiply them there, then apply the inverse FFT.

![l11_fft.png](./Images/l11_fft.png)

#### 2.2.3 Fast algorithms

These are more involved (I haven't fully understood them) and are not widely used.

--------------------------------------------------------------------------------
/cs231n/notes/L14_Videos_and_Unspervised_Learning.md:
--------------------------------------------------------------------------------
# L14: Videos and Unsupervised Learning


## 1.1 Videos

I didn't really follow this part...


## 1.2 Unsupervised Learning

### 1.2.1 Autoencoder

Didn't fully understand this either.
--------------------------------------------------------------------------------
/cs231n/notes/L1_Introduction.md:
--------------------------------------------------------------------------------
# L1: Introduction


Fei-Fei Li introduces the development of deep learning and computer vision, and the course requirements.
--------------------------------------------------------------------------------
/cs231n/notes/L2_Image_Classification_Pipeline.md:
--------------------------------------------------------------------------------
# L2: Image Classification Pipeline


## 0. Image Classification

We are given a training set of labeled images and asked to predict labels on a test set. It is common to report the accuracy of the predictions (the fraction of correctly predicted images).


## 1. First Classifier: Nearest Neighbor Classifier

Predict the label of each test image from the nearest images in the training set.
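As a rough illustration (not the assignment's reference implementation), here is a minimal NumPy sketch of a 1-nearest-neighbor classifier under L2 distance; the array shapes and toy data are assumptions made up for the example.

```
import numpy as np

def nearest_neighbor_predict(X_train, y_train, X_test):
    """Label each test image with the label of its closest training image
    under squared L2 (Euclidean) distance.

    X_train: (N, D) training images, flattened to D pixels each
    y_train: (N,)   training labels
    X_test:  (M, D) test images, flattened to D pixels each
    """
    preds = np.empty(X_test.shape[0], dtype=y_train.dtype)
    for i, x in enumerate(X_test):
        dists = np.sum((X_train - x) ** 2, axis=1)  # distance to every training image
        preds[i] = y_train[np.argmin(dists)]
    return preds

# Toy usage: four 3-pixel "images" from two classes.
X_train = np.array([[0., 0., 0.], [1., 1., 1.], [9., 9., 9.], [8., 8., 8.]])
y_train = np.array([0, 0, 1, 1])
X_test = np.array([[0.5, 0.5, 0.5], [8.5, 9.0, 8.0]])
print(nearest_neighbor_predict(X_train, y_train, X_test))  # -> [0 1]
```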
## 2. Tune Hyperparameters: Cross-validation

We saw that the choice of distance and the value of k are hyperparameters that are tuned using a validation set, or through cross-validation if the dataset is small.


## 3. Linear Classification

### 3.1 Linear classifier

Formula:

    f(x, W, b) = W*x + b

### 3.2 Interpreting a Linear Classifier

Each row of W acts roughly like an average image (a template) of its class.


## 4. Image Classification Pipeline

Traditional pipeline:

![l2_traditional_pipeline](./Images/l2_traditional_pipeline.png)

Deep learning pipeline:

![l2_deep_learning_pipline](./Images/l2_deep_learning_pipline.png)
--------------------------------------------------------------------------------
/cs231n/notes/L3_Loss_Functions_and_Optimization.md:
--------------------------------------------------------------------------------
# L3: Loss Functions and Optimization


## 1. SVM loss

SVM loss function:

![svm_loss](./Images/l3_svm_loss.png)

With regularization:

![l3_svm_loss_with_regularization](./Images/l3_svm_loss_with_regularization.png)

Adding regularization keeps the model from becoming overly complex and therefore helps prevent overfitting, because overly complex parameters are penalized in the loss function.


## 2. Softmax loss

Softmax function:

![l3_softmax_function](./Images/l3_softmax_function.png)

Softmax loss function (cross-entropy loss):

![l3_softmax_loss_function](./Images/l3_softmax_loss_function.png)


## 3. Optimization: Mini-batch Gradient Descent

Vanilla minibatch gradient descent (lecture pseudocode; `sample_training_data` and `evaluate_gradient` are placeholders):

```
while True:
    data_batch = sample_training_data(data, batch_size)  # sample a minibatch
    weights_grad = evaluate_gradient(loss_function, data_batch, weights)
    weights += - step_size * weights_grad  # parameter update
```

We will later look at fancier update rules (momentum, Adagrad, RMSProp, Adam, ...).


## 4. Image Features

Color Histogram

HOG/SIFT features

Bag of Words

--------------------------------------------------------------------------------
/cs231n/notes/L4_Backpropagation_and_Neural_Networks.md:
--------------------------------------------------------------------------------
# L4: Backpropagation and Neural Networks Part 1


## 1. Backpropagation

![backpropagation](./Images/l4_backpropagation.png)

If x is a vector, the gradient is a Jacobian matrix.

If a value branches out to several consumers, the gradients flowing back along the branches are simply added.

Implementation:

```
class Multiply(object):
    '''
    - forward: compute the result of the operation and save any intermediates
      needed for the gradient computation
    - backward: apply the chain rule to compute the gradient of the loss
      function with respect to the inputs
    '''

    @staticmethod
    def forward(x1, x2):
        y = x1 * x2
        cache = (x1, x2, y)
        return y, cache

    @staticmethod
    def backward(dy, cache):
        x1, x2, y = cache
        dx1 = x2 * dy  # local gradient x2 times upstream gradient dy
        dx2 = x1 * dy  # local gradient x1 times upstream gradient dy
        return dx1, dx2
```

The code for a two-layer fully connected network can be found in Homework/assignment1/cs231n/classifiers/neural_net.py.
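To sanity-check a backward pass like this one, it helps to compare it against a numerical gradient. A minimal check using the multiply gate above (the test values and tolerance are arbitrary choices for illustration):

```
# Analytic gradient from the gate's backward pass.
y, cache = Multiply.forward(3.0, -4.0)
dx1, dx2 = Multiply.backward(1.0, cache)  # upstream gradient dy = 1.0

# Centered-difference numerical gradient with respect to x1.
h = 1e-5
num_dx1 = (Multiply.forward(3.0 + h, -4.0)[0] -
           Multiply.forward(3.0 - h, -4.0)[0]) / (2 * h)

print(dx1, num_dx1)  # both should be -4.0, the value of x2
assert abs(dx1 - num_dx1) < 1e-6
```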
## 2. Neural Networks

### 2.1 Neural Networks

![l4_nerual](./Images/l4_nerual.png)

Networks:

    f = W*x              # 1-layer network
    f = W2*max(0, W1*x)  # 2-layer network

### 2.2 Activation Function

Sigmoid function:

![l4_activation_function](./Images/l4_activation_function.png)

Practical advice:

- Use ReLU by default, and take care to tune the learning rate.
- Try Leaky ReLU / Maxout / ELU.
- You can try tanh, but don't expect much from it.
- Don't use sigmoid! Don't use sigmoid! Don't use sigmoid!

--------------------------------------------------------------------------------
/cs231n/notes/L5_Training_Neural_Networks_part_1.md:
--------------------------------------------------------------------------------
# L5: Training Neural Networks Part 1


## 1. Data Preprocessing

Normalize the data:

    X -= np.mean(X, axis=0)
    X /= np.std(X, axis=0)

For images, a common choice is to subtract the mean image from each input image.


## 2. Parameter Initialization

### 2.1 Small random numbers

Initialize from a zero-mean Gaussian with a small standard deviation:

    W = 0.01 * np.random.randn(D, H)

This works for shallow networks, but once the network gets deep it tends to leave many units inactive (dead).

### 2.2 Other methods

Parameter initialization is a research area of its own, with a lot of related work.

![l5_parameters_initialization](./Images/l5_parameters_initialization.png)


## 3. Batch Normalization

Batch normalization layers can noticeably improve the network's accuracy.

Batch normalization obtains the mean and standard deviation differently at training and test time: during training they are computed from the mini-batch and running estimates are kept; at test time the stored estimates are used.

![l5_batch_normalization.png](./Images/l5_batch_normalization.png)


## 4. Hyperparameter Optimization

Randomly sample hyperparameters, in log space when appropriate.

--------------------------------------------------------------------------------
/cs231n/notes/L7_Convoluational_Neural_Networks.md:
--------------------------------------------------------------------------------
# L7: Convolutional Neural Networks


## 1. Convolutional Layer

At its core there are two ideas: sparse connectivity and parameter sharing.

The convolution operation itself is shown in the figure; the dimensions of the new feature maps after convolution are as follows (with input width $W$, filter size $F$, padding $P$ and stride $S$, the output width is $(W-F+2P)/S+1$):

![l7_convolutional_layer.png](./Images/l7_convolutional_layer.png)


## 2. Pooling Layer

This is simply a downsampling layer; max pooling is the most common choice.

![l7_pooling_layer.png](./Images/l7_pooling_layer.png)


## 3. Classic CNNs

Some well-known CNNs:

LeNet: the earliest CNN, Yann LeCun, 1990

AlexNet: ImageNet 2012 winner, Alex Krizhevsky, Hinton

ZF Net: ILSVRC 2013 winner

GoogLeNet: ILSVRC 2014 winner, Google

VGGNet: ILSVRC 2014 runner-up

ResNet: ILSVRC 2015 winner, MSRA


## 4. Summary

![l7_summary.png](./Images/l7_summary.png)
--------------------------------------------------------------------------------
/notes/anomaly_detection.md:
--------------------------------------------------------------------------------
# Anomaly Detection

## Algorithm

Assume all $n$ features are mutually independent and Gaussian-distributed. Then:

1. For each individual feature:
$$
p(x;\mu,\sigma^2)=\frac{1}{\sqrt{2\pi\sigma^2}}e^{-\frac{(x-\mu)^2}{2\sigma^2}}
$$

2. For the full feature vector:
$$
p(x)=\prod_{j=1}^np(x_j;\mu_j,\sigma_j^2)=\prod_{j=1}^n\frac{1}{\sqrt{2\pi\sigma_j^2}}\exp(-\frac{(x_j-\mu_j)^2}{2\sigma_j^2})
$$
Flag an anomaly if $p(x)<\epsilon$.

Limitation: the Gaussians are all axis-aligned, so this model cannot handle cases where features are correlated and the anomaly only shows up in their combination; such features have to be added by hand.
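A minimal NumPy sketch of this independent-Gaussian model (fit the per-feature parameters, evaluate $p(x)$, compare with $\epsilon$); the toy data and threshold are made up for illustration, and in practice $\epsilon$ would be chosen on a labeled cross-validation set:

```
import numpy as np

def fit_gaussian(X):
    """Per-feature mean and variance from a data matrix X of shape (m, n)."""
    return X.mean(axis=0), X.var(axis=0)

def density(X, mu, sigma2):
    """p(x) = product over features of the univariate Gaussian densities."""
    p = np.exp(-(X - mu) ** 2 / (2 * sigma2)) / np.sqrt(2 * np.pi * sigma2)
    return p.prod(axis=1)

# Toy usage: fit on 500 "normal" points, then score one typical and one anomalous example.
rng = np.random.default_rng(0)
X = rng.normal(loc=[5.0, 10.0], scale=[1.0, 2.0], size=(500, 2))
mu, sigma2 = fit_gaussian(X)

X_new = np.array([[5.1, 9.8],     # typical point
                  [0.0, 25.0]])   # obvious outlier
epsilon = 1e-4
print(density(X_new, mu, sigma2) < epsilon)  # -> [False  True]
```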
## Multivariate Gaussian distribution

1. Fit the model $p(x)$ by setting
$$
\mu=\frac{1}{m}\sum_{i=1}^m x^{(i)}
$$

$$
\Sigma=\frac{1}{m}\sum_{i=1}^m (x^{(i)}-\mu)(x^{(i)}-\mu)^T
$$

2. Given a new example $x$, compute
$$
p(x)=\frac{1}{(2\pi)^{\frac{n}{2}}|\Sigma|^{\frac{1}{2}}}\exp(-\frac{1}{2}(x-\mu)^T\Sigma^{-1}(x-\mu))
$$
Flag an anomaly if $p(x)<\epsilon$.

The univariate Gaussian model is a special case of the multivariate one. Use the multivariate version only when $m\gg n$ (computing $\Sigma^{-1}$ costs $O(n^3)$).

--------------------------------------------------------------------------------
/notes/cs231n.md:
--------------------------------------------------------------------------------
# cs231n

- L2 distance: rotation invariance

- Computational graphs (for matrix derivatives, check the dimensions):

| Gate     | Explanation          |
| -------- | -------------------- |
| Add Gate | Gradient distributor |
| Max Gate | Gradient Router      |
| Mul Gate | Gradient Switcher    |

- Tip for detecting explosions in the solver: if the cost is ever > 3 * the original cost, break out early
--------------------------------------------------------------------------------
/notes/cv.md:
--------------------------------------------------------------------------------
# CV field

[TOC]

## Detection & Localization

Classification problem.

Two losses: one for classification, one for the location of the predicted box.

### R-CNN

RoI (regions of interest)

Problems:

- Ad hoc training objectives
  - Fine-tune network with softmax classifier (log loss)
  - Train post-hoc linear SVMs (hinge loss)
  - Train post-hoc bounding-box regressions (least squares)
- Training is slow (84h), takes a lot of disk space
- Inference (detection) is slow
  - 47s / image with VGG16 [Simonyan & Zisserman, ICLR15]
  - Fixed by SPP-net [He et al., ECCV14]

### Fast R-CNN

RoI Pooling

### Faster R-CNN

Insert a Region Proposal Network (RPN) to predict proposals from features.

Jointly train with 4 losses:

1. RPN classify object / not object
2. RPN regress box coordinates
3. Final classification score (object classes)
4. Final box coordinates

### YOLO / SSD

Detection without proposals.

Within each grid cell:

- Regress from each of the B base boxes to a final box with 5 numbers: (dx, dy, dh, dw, confidence)
- Predict scores for each of C classes (including background as a class)


## Segmentation

### Semantic segmentation

Classify every pixel.

A symmetric network architecture that puts downsampling and upsampling together; the middle of the network captures high-level feature information.

### Instance Segmentation

Multi-class pixel classification.

#### Mask R-CNN


## Tracking

--------------------------------------------------------------------------------
/notes/inside_cnn.md:
--------------------------------------------------------------------------------
# Visualizing and Understanding

- Visualize the first-layer filters
- Use the last-layer features for kNN
- Dimensionality reduction on the last layer (t-SNE)
- Inspect how strongly intermediate layers respond to an input image
- Occlude parts of the image and watch how the classification error changes
- Backpropagate the gradient to the image pixels (saliency maps): segmentation without supervision
- Gradient ascent: optimize the image pixels to maximize a class score (it actually works better to optimize an FC-layer code instead of raw pixels) => adversarial examples
--------------------------------------------------------------------------------
/notes/kmeans.md:
--------------------------------------------------------------------------------
# K-means

1. Randomly shuffle the data and take the first $K$ points as the initial cluster centroids.
2. Assign each point to its nearest centroid.
3. Recompute each centroid as the mean of all points assigned to it.
4. If not yet converged, go back to step 2 (a sketch of these steps follows).
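A minimal NumPy sketch of these four steps (the toy data, the convergence test, and the lack of empty-cluster handling are simplifications for illustration):

```
import numpy as np

def kmeans(X, K, max_iters=100, seed=0):
    """Tiny K-means: X has shape (m, n); returns (centroids, assignments)."""
    rng = np.random.default_rng(seed)
    # 1. Shuffle and take the first K points as the initial centroids.
    centroids = X[rng.permutation(len(X))[:K]].copy()
    for _ in range(max_iters):
        # 2. Assign each point to its nearest centroid.
        dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
        idx = dists.argmin(axis=1)
        # 3. Move each centroid to the mean of its assigned points
        #    (empty clusters are not handled in this sketch).
        new_centroids = np.array([X[idx == k].mean(axis=0) for k in range(K)])
        # 4. Stop once the centroids no longer move.
        if np.allclose(new_centroids, centroids):
            break
        centroids = new_centroids
    return centroids, idx

# Toy usage: two well-separated blobs.
rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0.0, 0.5, (50, 2)), rng.normal(5.0, 0.5, (50, 2))])
centroids, idx = kmeans(X, K=2)
print(np.round(centroids, 1))  # roughly [0, 0] and [5, 5], in some order
```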
![](pic/kmeans.png)

--------------------------------------------------------------------------------
/notes/pca.md:
--------------------------------------------------------------------------------
# PCA

[TOC]

## Algorithm

1. Feature-normalize the data.

2. Compute the covariance matrix of the data:
$$
\Sigma=\frac{1}{m}X^TX
$$
where $X$ is the data matrix with examples in rows, and $m$ is the number of examples. Note that $\Sigma$ is an $n \times n$ matrix and not the summation operator.

3. Run SVD on it to compute the principal components: `[U,S,V]=svd(Sigma);`

4. Project the data: `Z=X*U(:,1:K);`

5. Reconstruct the data: `X_rec=Z*U(:,1:K)';` (an equivalent NumPy sketch follows)
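The snippets above are Octave; as a rough cross-check, here is an equivalent NumPy sketch of the same steps, with toy data made up for the example:

```
import numpy as np

def pca_fit(X):
    """Return normalization stats, principal components U (columns), and singular values S."""
    mu, sigma = X.mean(axis=0), X.std(axis=0)
    X_norm = (X - mu) / sigma
    Sigma = X_norm.T @ X_norm / X.shape[0]  # n x n covariance matrix
    U, S, _ = np.linalg.svd(Sigma)
    return mu, sigma, U, S

def project(X, mu, sigma, U, K):
    return ((X - mu) / sigma) @ U[:, :K]        # Z = X * U(:, 1:K)

def reconstruct(Z, mu, sigma, U, K):
    return (Z @ U[:, :K].T) * sigma + mu        # X_rec = Z * U(:, 1:K)', then un-normalize

# Toy usage: correlated 2-D data projected onto its first principal component.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2)) @ np.array([[2.0, 0.0], [1.5, 0.3]])
mu, sigma, U, S = pca_fit(X)
Z = project(X, mu, sigma, U, K=1)
X_rec = reconstruct(Z, mu, sigma, U, K=1)
print("variance retained with K=1:", S[0] / S.sum())
```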
## Choosing the number of principal components

$$
\frac{\sum_{i=1}^m||x^{(i)}-x_{approx}^{(i)}||^2}{\sum_{i=1}^m||x^{(i)}||^2}\le0.01
$$

means that 99% of the variance is retained. This is equivalent to
$$
\frac{\sum_{i=1}^kS_{ii}}{\sum_{i=1}^nS_{ii}}\ge 0.99
$$

## Performance

![](pic/pca.png)

![](../Coursera/machine-learning-ex7/ex7/face_100.png)



**Bad use of PCA: To prevent overfitting**

*This might work OK, but isn't a good way to address overfitting. Use regularization instead.*
--------------------------------------------------------------------------------
/notes/pic/bias1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/bias1.png
--------------------------------------------------------------------------------
/notes/pic/bias2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/bias2.png
--------------------------------------------------------------------------------
/notes/pic/cnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/cnn.png
--------------------------------------------------------------------------------
/notes/pic/cnn2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/cnn2.png
--------------------------------------------------------------------------------
/notes/pic/kmeans.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/kmeans.png
--------------------------------------------------------------------------------
/notes/pic/nn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/nn.png
--------------------------------------------------------------------------------
/notes/pic/nn2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/nn2.png
--------------------------------------------------------------------------------
/notes/pic/pca.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/pca.png
--------------------------------------------------------------------------------
/notes/pic/select_lambda.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/select_lambda.png
--------------------------------------------------------------------------------
/notes/pic/svm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/svm.png
--------------------------------------------------------------------------------
/notes/pic/var1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/var1.png
--------------------------------------------------------------------------------
/notes/pic/var2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Trinkle23897/ml/a7b4535a21aca2ca62275f141c21e0446d1abb12/notes/pic/var2.png
--------------------------------------------------------------------------------
/notes/svm.md:
--------------------------------------------------------------------------------
# SVM

> "A pinnacle of mathematical beauty."
>
> But Ng's lecture barely explains it at all...
>
> TODO

[TOC]

## Problem Formulation

![](pic/svm.png)

- $C=\frac{1}{\lambda}$
- The two lines at the bottom are the constraints.


## Kernel

### Linear Kernel

= No kernel

### Gaussian kernel

$$
f=\text{K}(x,l)=\exp(-\frac{||x-l||^2}{2\sigma^2})
$$

Its value lies in $[0,1]$; the more similar $x$ and the landmark $l$ are, the closer it is to 1.

- Large $\sigma^2$: features $f_i$ vary more smoothly. Higher bias, lower variance.
- Small $\sigma^2$: features $f_i$ vary less smoothly. Lower bias, higher variance.

See `machine-learning-ex6/ex6/*.png` for how different choices of $C$ and $\sigma^2$ behave.

### Polynomial kernel

$$
f=\text{K}(x,l)=(x^Tl+c)^p
$$

### Other kernels

String kernel, chi-square kernel, histogram intersection kernel, ...
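A small NumPy sketch of the kernels listed above (the sample vectors and hyperparameter values are arbitrary choices for illustration):

```
import numpy as np

def linear_kernel(x, l):
    return x @ l                      # "no kernel": plain inner product

def gaussian_kernel(x, l, sigma=0.3):
    return np.exp(-np.sum((x - l) ** 2) / (2 * sigma ** 2))

def polynomial_kernel(x, l, c=1.0, p=2):
    return (x @ l + c) ** p

x = np.array([1.0, 2.0, 1.0])
l_near = np.array([1.0, 2.0, 1.1])    # similar landmark   -> Gaussian kernel close to 1
l_far = np.array([-3.0, 5.0, 9.0])    # dissimilar landmark -> Gaussian kernel close to 0
print(gaussian_kernel(x, l_near), gaussian_kernel(x, l_far))
print(linear_kernel(x, l_near), polynomial_kernel(x, l_near))
```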
--------------------------------------------------------------------------------
/notes/unsupervised.md:
--------------------------------------------------------------------------------
# Unsupervised learning

[TOC]

## Generative Models

### PixelRNN and PixelCNN

Model the distribution of each pixel conditioned on the neighboring, previously generated pixels.

### VAE

Train an encoder and a decoder; feed a chosen latent feature vector into the middle, and the decoder outputs an image.

### GAN

$$
\displaystyle\min_{\theta_g}\max_{\theta_d}[\mathbb{E}_{x\sim p_{data}}\log{D_{\theta_d}(x)}+\mathbb{E}_{z\sim p(z)}\log(1-D_{\theta_d}(G_{\theta_g}(z)))]
$$

where:

- The discriminator outputs the likelihood in (0,1) that an image is real
- $D_{\theta_d}(x)$: discriminator output for real data x
- $D_{\theta_d}(G_{\theta_g}(z))$: discriminator output for generated fake data G(z)
- The discriminator ($\theta_d$) wants to maximize the objective so that D(x) is close to 1 (real) and D(G(z)) is close to 0 (fake)
- The generator ($\theta_g$) wants to minimize the objective so that D(G(z)) is close to 1 (the discriminator is fooled into thinking the generated G(z) is real)

DCGAN adds CNNs: Radford et al., "Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks", ICLR 2016.

https://github.com/hindupuravinash/the-gan-zoo

See also: https://github.com/soumith/ganhacks for tips and tricks for training GANs


## RL

--------------------------------------------------------------------------------
/pytorch/.gitignore:
--------------------------------------------------------------------------------
hymenoptera_data*

--------------------------------------------------------------------------------