├── Exercise1 ├── .ipynb_checkpoints │ └── exercise1-checkpoint.ipynb ├── Data │ ├── ex1data1.txt │ └── ex1data2.txt ├── Figures │ ├── cost_function.png │ ├── dataset1.png │ ├── learning_rate.png │ └── regression_result.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise1.ipynb ├── token.pkl └── utils.py ├── Exercise2 ├── .ipynb_checkpoints │ └── exercise2-checkpoint.ipynb ├── Data │ ├── ex2data1.txt │ └── ex2data2.txt ├── Figures │ ├── decision_boundary1.png │ ├── decision_boundary2.png │ ├── decision_boundary3.png │ └── decision_boundary4.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise2.ipynb ├── token.pkl └── utils.py ├── Exercise3 ├── .ipynb_checkpoints │ └── exercise3-checkpoint.ipynb ├── Data │ ├── ex3data1.mat │ └── ex3weights.mat ├── Figures │ └── neuralnetwork.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise3.ipynb ├── token.pkl └── utils.py ├── Exercise4 ├── .ipynb_checkpoints │ └── exercise4-checkpoint.ipynb ├── Data │ ├── ex4data1.mat │ └── ex4weights.mat ├── Figures │ ├── ex4-backpropagation.png │ └── neural_network.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise4.ipynb ├── token.pkl └── utils.py ├── Exercise5 ├── .ipynb_checkpoints │ └── exercise5-checkpoint.ipynb ├── Data │ ├── ex4data1.mat │ ├── ex4weights.mat │ └── ex5data1.mat ├── Figures │ ├── cross_validation.png │ ├── learning_curve.png │ ├── learning_curve_random.png │ ├── linear_fit.png │ ├── polynomial_learning_curve.png │ ├── polynomial_learning_curve_reg_1.png │ ├── polynomial_regression.png │ ├── polynomial_regression_reg_1.png │ └── polynomial_regression_reg_100.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise5.ipynb ├── token.pkl └── utils.py ├── Exercise6 ├── .ipynb_checkpoints │ └── exercise6-checkpoint.ipynb ├── Data │ ├── emailSample1.txt │ ├── emailSample2.txt │ ├── ex6data1.mat │ ├── ex6data2.mat │ ├── ex6data3.mat │ ├── spamSample1.txt │ ├── spamSample2.txt │ ├── spamTest.mat │ ├── spamTrain.mat │ └── vocab.txt ├── Figures │ 
├── dataset1.png │ ├── dataset2.png │ ├── dataset3.png │ ├── email.png │ ├── email_cleaned.png │ ├── svm_c1.png │ ├── svm_c100.png │ ├── svm_dataset2.png │ ├── svm_dataset3_best.png │ ├── svm_predictors.png │ ├── vocab.png │ └── word_indices.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise6.ipynb ├── token.pkl └── utils.py ├── Exercise7 ├── .ipynb_checkpoints │ └── exercise7-checkpoint.ipynb ├── Data │ ├── bird_small.mat │ ├── bird_small.png │ ├── ex7data1.mat │ ├── ex7data2.mat │ └── ex7faces.mat ├── Figures │ ├── bird_compression.png │ ├── faces.png │ ├── faces_original.png │ ├── faces_reconstructed.png │ ├── kmeans_result.png │ ├── pca_components.png │ └── pca_reconstruction.png ├── None0000000.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise7.ipynb ├── token.pkl └── utils.py ├── Exercise8 ├── .ipynb_checkpoints │ └── exercise8-checkpoint.ipynb ├── Data │ ├── ex8_movieParams.mat │ ├── ex8_movies.mat │ ├── ex8data1.mat │ ├── ex8data2.mat │ └── movie_ids.txt ├── Figures │ └── gaussian_fit.png ├── __pycache__ │ └── utils.cpython-36.pyc ├── exercise8.ipynb ├── token.pkl └── utils.py ├── README.md ├── machinelearning.jpg ├── requirements.txt └── submission.py /Exercise1/Data/ex1data1.txt: -------------------------------------------------------------------------------- 1 | 6.1101,17.592 2 | 5.5277,9.1302 3 | 8.5186,13.662 4 | 7.0032,11.854 5 | 5.8598,6.8233 6 | 8.3829,11.886 7 | 7.4764,4.3483 8 | 8.5781,12 9 | 6.4862,6.5987 10 | 5.0546,3.8166 11 | 5.7107,3.2522 12 | 14.164,15.505 13 | 5.734,3.1551 14 | 8.4084,7.2258 15 | 5.6407,0.71618 16 | 5.3794,3.5129 17 | 6.3654,5.3048 18 | 5.1301,0.56077 19 | 6.4296,3.6518 20 | 7.0708,5.3893 21 | 6.1891,3.1386 22 | 20.27,21.767 23 | 5.4901,4.263 24 | 6.3261,5.1875 25 | 5.5649,3.0825 26 | 18.945,22.638 27 | 12.828,13.501 28 | 10.957,7.0467 29 | 13.176,14.692 30 | 22.203,24.147 31 | 5.2524,-1.22 32 | 6.5894,5.9966 33 | 9.2482,12.134 34 | 5.8918,1.8495 35 | 8.2111,6.5426 36 | 7.9334,4.5623 37 | 8.0959,4.1164 38 
| 5.6063,3.3928 39 | 12.836,10.117 40 | 6.3534,5.4974 41 | 5.4069,0.55657 42 | 6.8825,3.9115 43 | 11.708,5.3854 44 | 5.7737,2.4406 45 | 7.8247,6.7318 46 | 7.0931,1.0463 47 | 5.0702,5.1337 48 | 5.8014,1.844 49 | 11.7,8.0043 50 | 5.5416,1.0179 51 | 7.5402,6.7504 52 | 5.3077,1.8396 53 | 7.4239,4.2885 54 | 7.6031,4.9981 55 | 6.3328,1.4233 56 | 6.3589,-1.4211 57 | 6.2742,2.4756 58 | 5.6397,4.6042 59 | 9.3102,3.9624 60 | 9.4536,5.4141 61 | 8.8254,5.1694 62 | 5.1793,-0.74279 63 | 21.279,17.929 64 | 14.908,12.054 65 | 18.959,17.054 66 | 7.2182,4.8852 67 | 8.2951,5.7442 68 | 10.236,7.7754 69 | 5.4994,1.0173 70 | 20.341,20.992 71 | 10.136,6.6799 72 | 7.3345,4.0259 73 | 6.0062,1.2784 74 | 7.2259,3.3411 75 | 5.0269,-2.6807 76 | 6.5479,0.29678 77 | 7.5386,3.8845 78 | 5.0365,5.7014 79 | 10.274,6.7526 80 | 5.1077,2.0576 81 | 5.7292,0.47953 82 | 5.1884,0.20421 83 | 6.3557,0.67861 84 | 9.7687,7.5435 85 | 6.5159,5.3436 86 | 8.5172,4.2415 87 | 9.1802,6.7981 88 | 6.002,0.92695 89 | 5.5204,0.152 90 | 5.0594,2.8214 91 | 5.7077,1.8451 92 | 7.6366,4.2959 93 | 5.8707,7.2029 94 | 5.3054,1.9869 95 | 8.2934,0.14454 96 | 13.394,9.0551 97 | 5.4369,0.61705 98 | -------------------------------------------------------------------------------- /Exercise1/Data/ex1data2.txt: -------------------------------------------------------------------------------- 1 | 2104,3,399900 2 | 1600,3,329900 3 | 2400,3,369000 4 | 1416,2,232000 5 | 3000,4,539900 6 | 1985,4,299900 7 | 1534,3,314900 8 | 1427,3,198999 9 | 1380,3,212000 10 | 1494,3,242500 11 | 1940,4,239999 12 | 2000,3,347000 13 | 1890,3,329999 14 | 4478,5,699900 15 | 1268,3,259900 16 | 2300,4,449900 17 | 1320,2,299900 18 | 1236,3,199900 19 | 2609,4,499998 20 | 3031,4,599000 21 | 1767,3,252900 22 | 1888,2,255000 23 | 1604,3,242900 24 | 1962,4,259900 25 | 3890,3,573900 26 | 1100,3,249900 27 | 1458,3,464500 28 | 2526,3,469000 29 | 2200,3,475000 30 | 2637,3,299900 31 | 1839,2,349900 32 | 1000,1,169900 33 | 2040,4,314900 34 | 3137,3,579900 35 | 1811,4,285900 36 
| 1437,3,249900 37 | 1239,3,229900 38 | 2132,4,345000 39 | 4215,4,549000 40 | 2162,4,287000 41 | 1664,2,368500 42 | 2238,3,329900 43 | 2567,4,314000 44 | 1200,3,299000 45 | 852,2,179900 46 | 1852,4,299900 47 | 1203,3,239500 48 | -------------------------------------------------------------------------------- /Exercise1/Figures/cost_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/Figures/cost_function.png -------------------------------------------------------------------------------- /Exercise1/Figures/dataset1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/Figures/dataset1.png -------------------------------------------------------------------------------- /Exercise1/Figures/learning_rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/Figures/learning_rate.png -------------------------------------------------------------------------------- /Exercise1/Figures/regression_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/Figures/regression_result.png -------------------------------------------------------------------------------- /Exercise1/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Exercise1/token.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise1/token.pkl -------------------------------------------------------------------------------- /Exercise1/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | sys.path.append('..') 4 | 5 | from submission import SubmissionBase 6 | 7 | 8 | class Grader(SubmissionBase): 9 | X1 = np.column_stack((np.ones(20), np.exp(1) + np.exp(2) * np.linspace(0.1, 2, 20))) 10 | Y1 = X1[:, 1] + np.sin(X1[:, 0]) + np.cos(X1[:, 1]) 11 | X2 = np.column_stack((X1, X1[:, 1]**0.5, X1[:, 1]**0.25)) 12 | Y2 = np.power(Y1, 0.5) + Y1 13 | 14 | def __init__(self): 15 | part_names = ['Warm up exercise', 16 | 'Computing Cost (for one variable)', 17 | 'Gradient Descent (for one variable)', 18 | 'Feature Normalization', 19 | 'Computing Cost (for multiple variables)', 20 | 'Gradient Descent (for multiple variables)', 21 | 'Normal Equations'] 22 | super().__init__('linear-regression', part_names) 23 | 24 | def __iter__(self): 25 | for part_id in range(1, 8): 26 | try: 27 | func = self.functions[part_id] 28 | 29 | # Each part has different expected arguments/different function 30 | if part_id == 1: 31 | res = func() 32 | elif part_id == 2: 33 | res = func(self.X1, self.Y1, np.array([0.5, -0.5])) 34 | elif part_id == 3: 35 | res = func(self.X1, self.Y1, np.array([0.5, -0.5]), 0.01, 10) 36 | elif part_id == 4: 37 | res = func(self.X2[:, 1:4]) 38 | elif part_id == 5: 39 | res = func(self.X2, self.Y2, np.array([0.1, 0.2, 0.3, 0.4])) 40 | elif 
part_id == 6: 41 | res = func(self.X2, self.Y2, np.array([-0.1, -0.2, -0.3, -0.4]), 0.01, 10) 42 | elif part_id == 7: 43 | res = func(self.X2, self.Y2) 44 | else: 45 | raise KeyError 46 | yield part_id, res 47 | except KeyError: 48 | yield part_id, 0 49 | -------------------------------------------------------------------------------- /Exercise2/Data/ex2data1.txt: -------------------------------------------------------------------------------- 1 | 34.62365962451697,78.0246928153624,0 2 | 30.28671076822607,43.89499752400101,0 3 | 35.84740876993872,72.90219802708364,0 4 | 60.18259938620976,86.30855209546826,1 5 | 79.0327360507101,75.3443764369103,1 6 | 45.08327747668339,56.3163717815305,0 7 | 61.10666453684766,96.51142588489624,1 8 | 75.02474556738889,46.55401354116538,1 9 | 76.09878670226257,87.42056971926803,1 10 | 84.43281996120035,43.53339331072109,1 11 | 95.86155507093572,38.22527805795094,0 12 | 75.01365838958247,30.60326323428011,0 13 | 82.30705337399482,76.48196330235604,1 14 | 69.36458875970939,97.71869196188608,1 15 | 39.53833914367223,76.03681085115882,0 16 | 53.9710521485623,89.20735013750205,1 17 | 69.07014406283025,52.74046973016765,1 18 | 67.94685547711617,46.67857410673128,0 19 | 70.66150955499435,92.92713789364831,1 20 | 76.97878372747498,47.57596364975532,1 21 | 67.37202754570876,42.83843832029179,0 22 | 89.67677575072079,65.79936592745237,1 23 | 50.534788289883,48.85581152764205,0 24 | 34.21206097786789,44.20952859866288,0 25 | 77.9240914545704,68.9723599933059,1 26 | 62.27101367004632,69.95445795447587,1 27 | 80.1901807509566,44.82162893218353,1 28 | 93.114388797442,38.80067033713209,0 29 | 61.83020602312595,50.25610789244621,0 30 | 38.78580379679423,64.99568095539578,0 31 | 61.379289447425,72.80788731317097,1 32 | 85.40451939411645,57.05198397627122,1 33 | 52.10797973193984,63.12762376881715,0 34 | 52.04540476831827,69.43286012045222,1 35 | 40.23689373545111,71.16774802184875,0 36 | 54.63510555424817,52.21388588061123,0 37 | 
33.91550010906887,98.86943574220611,0 38 | 64.17698887494485,80.90806058670817,1 39 | 74.78925295941542,41.57341522824434,0 40 | 34.1836400264419,75.2377203360134,0 41 | 83.90239366249155,56.30804621605327,1 42 | 51.54772026906181,46.85629026349976,0 43 | 94.44336776917852,65.56892160559052,1 44 | 82.36875375713919,40.61825515970618,0 45 | 51.04775177128865,45.82270145776001,0 46 | 62.22267576120188,52.06099194836679,0 47 | 77.19303492601364,70.45820000180959,1 48 | 97.77159928000232,86.7278223300282,1 49 | 62.07306379667647,96.76882412413983,1 50 | 91.56497449807442,88.69629254546599,1 51 | 79.94481794066932,74.16311935043758,1 52 | 99.2725269292572,60.99903099844988,1 53 | 90.54671411399852,43.39060180650027,1 54 | 34.52451385320009,60.39634245837173,0 55 | 50.2864961189907,49.80453881323059,0 56 | 49.58667721632031,59.80895099453265,0 57 | 97.64563396007767,68.86157272420604,1 58 | 32.57720016809309,95.59854761387875,0 59 | 74.24869136721598,69.82457122657193,1 60 | 71.79646205863379,78.45356224515052,1 61 | 75.3956114656803,85.75993667331619,1 62 | 35.28611281526193,47.02051394723416,0 63 | 56.25381749711624,39.26147251058019,0 64 | 30.05882244669796,49.59297386723685,0 65 | 44.66826172480893,66.45008614558913,0 66 | 66.56089447242954,41.09209807936973,0 67 | 40.45755098375164,97.53518548909936,1 68 | 49.07256321908844,51.88321182073966,0 69 | 80.27957401466998,92.11606081344084,1 70 | 66.74671856944039,60.99139402740988,1 71 | 32.72283304060323,43.30717306430063,0 72 | 64.0393204150601,78.03168802018232,1 73 | 72.34649422579923,96.22759296761404,1 74 | 60.45788573918959,73.09499809758037,1 75 | 58.84095621726802,75.85844831279042,1 76 | 99.82785779692128,72.36925193383885,1 77 | 47.26426910848174,88.47586499559782,1 78 | 50.45815980285988,75.80985952982456,1 79 | 60.45555629271532,42.50840943572217,0 80 | 82.22666157785568,42.71987853716458,0 81 | 88.9138964166533,69.80378889835472,1 82 | 94.83450672430196,45.69430680250754,1 83 | 
67.31925746917527,66.58935317747915,1 84 | 57.23870631569862,59.51428198012956,1 85 | 80.36675600171273,90.96014789746954,1 86 | 68.46852178591112,85.59430710452014,1 87 | 42.0754545384731,78.84478600148043,0 88 | 75.47770200533905,90.42453899753964,1 89 | 78.63542434898018,96.64742716885644,1 90 | 52.34800398794107,60.76950525602592,0 91 | 94.09433112516793,77.15910509073893,1 92 | 90.44855097096364,87.50879176484702,1 93 | 55.48216114069585,35.57070347228866,0 94 | 74.49269241843041,84.84513684930135,1 95 | 89.84580670720979,45.35828361091658,1 96 | 83.48916274498238,48.38028579728175,1 97 | 42.2617008099817,87.10385094025457,1 98 | 99.31500880510394,68.77540947206617,1 99 | 55.34001756003703,64.9319380069486,1 100 | 74.77589300092767,89.52981289513276,1 101 | -------------------------------------------------------------------------------- /Exercise2/Data/ex2data2.txt: -------------------------------------------------------------------------------- 1 | 0.051267,0.69956,1 2 | -0.092742,0.68494,1 3 | -0.21371,0.69225,1 4 | -0.375,0.50219,1 5 | -0.51325,0.46564,1 6 | -0.52477,0.2098,1 7 | -0.39804,0.034357,1 8 | -0.30588,-0.19225,1 9 | 0.016705,-0.40424,1 10 | 0.13191,-0.51389,1 11 | 0.38537,-0.56506,1 12 | 0.52938,-0.5212,1 13 | 0.63882,-0.24342,1 14 | 0.73675,-0.18494,1 15 | 0.54666,0.48757,1 16 | 0.322,0.5826,1 17 | 0.16647,0.53874,1 18 | -0.046659,0.81652,1 19 | -0.17339,0.69956,1 20 | -0.47869,0.63377,1 21 | -0.60541,0.59722,1 22 | -0.62846,0.33406,1 23 | -0.59389,0.005117,1 24 | -0.42108,-0.27266,1 25 | -0.11578,-0.39693,1 26 | 0.20104,-0.60161,1 27 | 0.46601,-0.53582,1 28 | 0.67339,-0.53582,1 29 | -0.13882,0.54605,1 30 | -0.29435,0.77997,1 31 | -0.26555,0.96272,1 32 | -0.16187,0.8019,1 33 | -0.17339,0.64839,1 34 | -0.28283,0.47295,1 35 | -0.36348,0.31213,1 36 | -0.30012,0.027047,1 37 | -0.23675,-0.21418,1 38 | -0.06394,-0.18494,1 39 | 0.062788,-0.16301,1 40 | 0.22984,-0.41155,1 41 | 0.2932,-0.2288,1 42 | 0.48329,-0.18494,1 43 | 0.64459,-0.14108,1 44 | 
0.46025,0.012427,1 45 | 0.6273,0.15863,1 46 | 0.57546,0.26827,1 47 | 0.72523,0.44371,1 48 | 0.22408,0.52412,1 49 | 0.44297,0.67032,1 50 | 0.322,0.69225,1 51 | 0.13767,0.57529,1 52 | -0.0063364,0.39985,1 53 | -0.092742,0.55336,1 54 | -0.20795,0.35599,1 55 | -0.20795,0.17325,1 56 | -0.43836,0.21711,1 57 | -0.21947,-0.016813,1 58 | -0.13882,-0.27266,1 59 | 0.18376,0.93348,0 60 | 0.22408,0.77997,0 61 | 0.29896,0.61915,0 62 | 0.50634,0.75804,0 63 | 0.61578,0.7288,0 64 | 0.60426,0.59722,0 65 | 0.76555,0.50219,0 66 | 0.92684,0.3633,0 67 | 0.82316,0.27558,0 68 | 0.96141,0.085526,0 69 | 0.93836,0.012427,0 70 | 0.86348,-0.082602,0 71 | 0.89804,-0.20687,0 72 | 0.85196,-0.36769,0 73 | 0.82892,-0.5212,0 74 | 0.79435,-0.55775,0 75 | 0.59274,-0.7405,0 76 | 0.51786,-0.5943,0 77 | 0.46601,-0.41886,0 78 | 0.35081,-0.57968,0 79 | 0.28744,-0.76974,0 80 | 0.085829,-0.75512,0 81 | 0.14919,-0.57968,0 82 | -0.13306,-0.4481,0 83 | -0.40956,-0.41155,0 84 | -0.39228,-0.25804,0 85 | -0.74366,-0.25804,0 86 | -0.69758,0.041667,0 87 | -0.75518,0.2902,0 88 | -0.69758,0.68494,0 89 | -0.4038,0.70687,0 90 | -0.38076,0.91886,0 91 | -0.50749,0.90424,0 92 | -0.54781,0.70687,0 93 | 0.10311,0.77997,0 94 | 0.057028,0.91886,0 95 | -0.10426,0.99196,0 96 | -0.081221,1.1089,0 97 | 0.28744,1.087,0 98 | 0.39689,0.82383,0 99 | 0.63882,0.88962,0 100 | 0.82316,0.66301,0 101 | 0.67339,0.64108,0 102 | 1.0709,0.10015,0 103 | -0.046659,-0.57968,0 104 | -0.23675,-0.63816,0 105 | -0.15035,-0.36769,0 106 | -0.49021,-0.3019,0 107 | -0.46717,-0.13377,0 108 | -0.28859,-0.060673,0 109 | -0.61118,-0.067982,0 110 | -0.66302,-0.21418,0 111 | -0.59965,-0.41886,0 112 | -0.72638,-0.082602,0 113 | -0.83007,0.31213,0 114 | -0.72062,0.53874,0 115 | -0.59389,0.49488,0 116 | -0.48445,0.99927,0 117 | -0.0063364,0.99927,0 118 | 0.63265,-0.030612,0 119 | -------------------------------------------------------------------------------- /Exercise2/Figures/decision_boundary1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/Figures/decision_boundary1.png -------------------------------------------------------------------------------- /Exercise2/Figures/decision_boundary2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/Figures/decision_boundary2.png -------------------------------------------------------------------------------- /Exercise2/Figures/decision_boundary3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/Figures/decision_boundary3.png -------------------------------------------------------------------------------- /Exercise2/Figures/decision_boundary4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/Figures/decision_boundary4.png -------------------------------------------------------------------------------- /Exercise2/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Exercise2/token.pkl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise2/token.pkl -------------------------------------------------------------------------------- /Exercise2/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from matplotlib import pyplot 4 | 5 | sys.path.append('..') 6 | from submission import SubmissionBase 7 | 8 | 9 | def mapFeature(X1, X2, degree=6): 10 | """ 11 | Maps the two input features to quadratic features used in the regularization exercise. 12 | 13 | Returns a new feature array with more features, comprising of 14 | X1, X2, X1.^2, X2.^2, X1*X2, X1*X2.^2, etc.. 15 | 16 | Parameters 17 | ---------- 18 | X1 : array_like 19 | A vector of shape (m, 1), containing one feature for all examples. 20 | 21 | X2 : array_like 22 | A vector of shape (m, 1), containing a second feature for all examples. 23 | Inputs X1, X2 must be the same size. 24 | 25 | degree: int, optional 26 | The polynomial degree. 27 | 28 | Returns 29 | ------- 30 | : array_like 31 | A matrix of of m rows, and columns depend on the degree of polynomial. 32 | """ 33 | if X1.ndim > 0: 34 | out = [np.ones(X1.shape[0])] 35 | else: 36 | out = [np.ones(1)] 37 | 38 | for i in range(1, degree + 1): 39 | for j in range(i + 1): 40 | out.append((X1 ** (i - j)) * (X2 ** j)) 41 | 42 | if X1.ndim > 0: 43 | return np.stack(out, axis=1) 44 | else: 45 | return np.array(out) 46 | 47 | 48 | def plotDecisionBoundary(plotData, theta, X, y): 49 | """ 50 | Plots the data points X and y into a new figure with the decision boundary defined by theta. 51 | Plots the data points with * for the positive examples and o for the negative examples. 52 | 53 | Parameters 54 | ---------- 55 | plotData : func 56 | A function reference for plotting the X, y data. 57 | 58 | theta : array_like 59 | Parameters for logistic regression. A vector of shape (n+1, ). 
60 | 61 | X : array_like 62 | The input dataset. X is assumed to be a either: 63 | 1) Mx3 matrix, where the first column is an all ones column for the intercept. 64 | 2) MxN, N>3 matrix, where the first column is all ones. 65 | 66 | y : array_like 67 | Vector of data labels of shape (m, ). 68 | """ 69 | # make sure theta is a numpy array 70 | theta = np.array(theta) 71 | 72 | # Plot Data (remember first column in X is the intercept) 73 | plotData(X[:, 1:3], y) 74 | 75 | if X.shape[1] <= 3: 76 | # Only need 2 points to define a line, so choose two endpoints 77 | plot_x = np.array([np.min(X[:, 1]) - 2, np.max(X[:, 1]) + 2]) 78 | 79 | # Calculate the decision boundary line 80 | plot_y = (-1. / theta[2]) * (theta[1] * plot_x + theta[0]) 81 | 82 | # Plot, and adjust axes for better viewing 83 | pyplot.plot(plot_x, plot_y) 84 | 85 | # Legend, specific for the exercise 86 | pyplot.legend(['Admitted', 'Not admitted', 'Decision Boundary']) 87 | pyplot.xlim([30, 100]) 88 | pyplot.ylim([30, 100]) 89 | else: 90 | # Here is the grid range 91 | u = np.linspace(-1, 1.5, 50) 92 | v = np.linspace(-1, 1.5, 50) 93 | 94 | z = np.zeros((u.size, v.size)) 95 | # Evaluate z = theta*x over the grid 96 | for i, ui in enumerate(u): 97 | for j, vj in enumerate(v): 98 | z[i, j] = np.dot(mapFeature(ui, vj), theta) 99 | 100 | z = z.T # important to transpose z before calling contour 101 | # print(z) 102 | 103 | # Plot z = 0 104 | pyplot.contour(u, v, z, levels=[0], linewidths=2, colors='g') 105 | pyplot.contourf(u, v, z, levels=[np.min(z), 0, np.max(z)], cmap='Greens', alpha=0.4) 106 | 107 | 108 | class Grader(SubmissionBase): 109 | X = np.stack([np.ones(20), 110 | np.exp(1) * np.sin(np.arange(1, 21)), 111 | np.exp(0.5) * np.cos(np.arange(1, 21))], axis=1) 112 | 113 | y = (np.sin(X[:, 0] + X[:, 1]) > 0).astype(float) 114 | 115 | def __init__(self): 116 | part_names = ['Sigmoid Function', 117 | 'Logistic Regression Cost', 118 | 'Logistic Regression Gradient', 119 | 'Predict', 120 | 'Regularized 
Logistic Regression Cost', 121 | 'Regularized Logistic Regression Gradient'] 122 | super().__init__('logistic-regression', part_names) 123 | 124 | def __iter__(self): 125 | for part_id in range(1, 7): 126 | try: 127 | func = self.functions[part_id] 128 | 129 | # Each part has different expected arguments/different function 130 | if part_id == 1: 131 | res = func(self.X) 132 | elif part_id == 2: 133 | res = func(np.array([0.25, 0.5, -0.5]), self.X, self.y) 134 | elif part_id == 3: 135 | J, grad = func(np.array([0.25, 0.5, -0.5]), self.X, self.y) 136 | res = grad 137 | elif part_id == 4: 138 | res = func(np.array([0.25, 0.5, -0.5]), self.X) 139 | elif part_id == 5: 140 | res = func(np.array([0.25, 0.5, -0.5]), self.X, self.y, 0.1) 141 | elif part_id == 6: 142 | res = func(np.array([0.25, 0.5, -0.5]), self.X, self.y, 0.1)[1] 143 | else: 144 | raise KeyError 145 | yield part_id, res 146 | except KeyError: 147 | yield part_id, 0 148 | -------------------------------------------------------------------------------- /Exercise3/Data/ex3data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise3/Data/ex3data1.mat -------------------------------------------------------------------------------- /Exercise3/Data/ex3weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise3/Data/ex3weights.mat -------------------------------------------------------------------------------- /Exercise3/Figures/neuralnetwork.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise3/Figures/neuralnetwork.png -------------------------------------------------------------------------------- /Exercise3/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise3/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Exercise3/token.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise3/token.pkl -------------------------------------------------------------------------------- /Exercise3/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from matplotlib import pyplot 4 | 5 | sys.path.append('..') 6 | from submission import SubmissionBase 7 | 8 | 9 | def displayData(X, example_width=None, figsize=(10, 10)): 10 | """ 11 | Displays 2D data stored in X in a nice grid. 
12 | """ 13 | # Compute rows, cols 14 | if X.ndim == 2: 15 | m, n = X.shape 16 | elif X.ndim == 1: 17 | n = X.size 18 | m = 1 19 | X = X[None] # Promote to a 2 dimensional array 20 | else: 21 | raise IndexError('Input X should be 1 or 2 dimensional.') 22 | 23 | example_width = example_width or int(np.round(np.sqrt(n))) 24 | example_height = n / example_width 25 | 26 | # Compute number of items to display 27 | display_rows = int(np.floor(np.sqrt(m))) 28 | display_cols = int(np.ceil(m / display_rows)) 29 | 30 | fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize) 31 | fig.subplots_adjust(wspace=0.025, hspace=0.025) 32 | 33 | ax_array = [ax_array] if m == 1 else ax_array.ravel() 34 | 35 | for i, ax in enumerate(ax_array): 36 | ax.imshow(X[i].reshape(example_width, example_width, order='F'), 37 | cmap='Greys', extent=[0, 1, 0, 1]) 38 | ax.axis('off') 39 | 40 | 41 | def sigmoid(z): 42 | """ 43 | Computes the sigmoid of z. 44 | """ 45 | return 1.0 / (1.0 + np.exp(-z)) 46 | 47 | 48 | class Grader(SubmissionBase): 49 | # Random Test Cases 50 | X = np.stack([np.ones(20), 51 | np.exp(1) * np.sin(np.arange(1, 21)), 52 | np.exp(0.5) * np.cos(np.arange(1, 21))], axis=1) 53 | 54 | y = (np.sin(X[:, 0] + X[:, 1]) > 0).astype(float) 55 | 56 | Xm = np.array([[-1, -1], 57 | [-1, -2], 58 | [-2, -1], 59 | [-2, -2], 60 | [1, 1], 61 | [1, 2], 62 | [2, 1], 63 | [2, 2], 64 | [-1, 1], 65 | [-1, 2], 66 | [-2, 1], 67 | [-2, 2], 68 | [1, -1], 69 | [1, -2], 70 | [-2, -1], 71 | [-2, -2]]) 72 | ym = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]) 73 | 74 | t1 = np.sin(np.reshape(np.arange(1, 25, 2), (4, 3), order='F')) 75 | t2 = np.cos(np.reshape(np.arange(1, 41, 2), (4, 5), order='F')) 76 | 77 | def __init__(self): 78 | part_names = ['Regularized Logistic Regression', 79 | 'One-vs-All Classifier Training', 80 | 'One-vs-All Classifier Prediction', 81 | 'Neural Network Prediction Function'] 82 | 83 | 
super().__init__('multi-class-classification-and-neural-networks', part_names) 84 | 85 | def __iter__(self): 86 | for part_id in range(1, 5): 87 | try: 88 | func = self.functions[part_id] 89 | 90 | # Each part has different expected arguments/different function 91 | if part_id == 1: 92 | res = func(np.array([0.25, 0.5, -0.5]), self.X, self.y, 0.1) 93 | res = np.hstack(res).tolist() 94 | elif part_id == 2: 95 | res = func(self.Xm, self.ym, 4, 0.1) 96 | elif part_id == 3: 97 | res = func(self.t1, self.Xm) + 1 98 | elif part_id == 4: 99 | res = func(self.t1, self.t2, self.Xm) + 1 100 | else: 101 | raise KeyError 102 | yield part_id, res 103 | except KeyError: 104 | yield part_id, 0 105 | -------------------------------------------------------------------------------- /Exercise4/Data/ex4data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/Data/ex4data1.mat -------------------------------------------------------------------------------- /Exercise4/Data/ex4weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/Data/ex4weights.mat -------------------------------------------------------------------------------- /Exercise4/Figures/ex4-backpropagation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/Figures/ex4-backpropagation.png -------------------------------------------------------------------------------- /Exercise4/Figures/neural_network.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/Figures/neural_network.png -------------------------------------------------------------------------------- /Exercise4/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Exercise4/token.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise4/token.pkl -------------------------------------------------------------------------------- /Exercise4/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from matplotlib import pyplot 4 | 5 | sys.path.append('..') 6 | from submission import SubmissionBase 7 | 8 | 9 | def displayData(X, example_width=None, figsize=(10, 10)): 10 | """ 11 | Displays 2D data stored in X in a nice grid. 
12 | """ 13 | # Compute rows, cols 14 | if X.ndim == 2: 15 | m, n = X.shape 16 | elif X.ndim == 1: 17 | n = X.size 18 | m = 1 19 | X = X[None] # Promote to a 2 dimensional array 20 | else: 21 | raise IndexError('Input X should be 1 or 2 dimensional.') 22 | 23 | example_width = example_width or int(np.round(np.sqrt(n))) 24 | example_height = n / example_width 25 | 26 | # Compute number of items to display 27 | display_rows = int(np.floor(np.sqrt(m))) 28 | display_cols = int(np.ceil(m / display_rows)) 29 | 30 | fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize) 31 | fig.subplots_adjust(wspace=0.025, hspace=0.025) 32 | 33 | ax_array = [ax_array] if m == 1 else ax_array.ravel() 34 | 35 | for i, ax in enumerate(ax_array): 36 | # Display Image 37 | h = ax.imshow(X[i].reshape(example_width, example_width, order='F'), 38 | cmap='Greys', extent=[0, 1, 0, 1]) 39 | ax.axis('off') 40 | 41 | 42 | def predict(Theta1, Theta2, X): 43 | """ 44 | Predict the label of an input given a trained neural network 45 | Outputs the predicted label of X given the trained weights of a neural 46 | network(Theta1, Theta2) 47 | """ 48 | # Useful values 49 | m = X.shape[0] 50 | num_labels = Theta2.shape[0] 51 | 52 | # You need to return the following variables correctly 53 | p = np.zeros(m) 54 | h1 = sigmoid(np.dot(np.concatenate([np.ones((m, 1)), X], axis=1), Theta1.T)) 55 | h2 = sigmoid(np.dot(np.concatenate([np.ones((m, 1)), h1], axis=1), Theta2.T)) 56 | p = np.argmax(h2, axis=1) 57 | return p 58 | 59 | 60 | def debugInitializeWeights(fan_out, fan_in): 61 | """ 62 | Initialize the weights of a layer with fan_in incoming connections and fan_out outgoings 63 | connections using a fixed strategy. This will help you later in debugging. 64 | 65 | Note that W should be set a matrix of size (1+fan_in, fan_out) as the first row of W handles 66 | the "bias" terms. 67 | 68 | Parameters 69 | ---------- 70 | fan_out : int 71 | The number of outgoing connections. 
72 | 73 | fan_in : int 74 | The number of incoming connections. 75 | 76 | Returns 77 | ------- 78 | W : array_like (1+fan_in, fan_out) 79 | The initialized weights array given the dimensions. 80 | """ 81 | # Initialize W using "sin". This ensures that W is always of the same values and will be 82 | # useful for debugging 83 | W = np.sin(np.arange(1, 1 + (1+fan_in)*fan_out))/10.0 84 | W = W.reshape(fan_out, 1+fan_in, order='F') 85 | return W 86 | 87 | 88 | def computeNumericalGradient(J, theta, e=1e-4): 89 | """ 90 | Computes the gradient using "finite differences" and gives us a numerical estimate of the 91 | gradient. 92 | 93 | Parameters 94 | ---------- 95 | J : func 96 | The cost function which will be used to estimate its numerical gradient. 97 | 98 | theta : array_like 99 | The one dimensional unrolled network parameters. The numerical gradient is computed at 100 | those given parameters. 101 | 102 | e : float (optional) 103 | The value to use for epsilon for computing the finite difference. 104 | 105 | Notes 106 | ----- 107 | The following code implements numerical gradient checking, and 108 | returns the numerical gradient. It sets `numgrad[i]` to (a numerical 109 | approximation of) the partial derivative of J with respect to the 110 | i-th input argument, evaluated at theta. (i.e., `numgrad[i]` should 111 | be the (approximately) the partial derivative of J with respect 112 | to theta[i].) 113 | """ 114 | numgrad = np.zeros(theta.shape) 115 | perturb = np.diag(e * np.ones(theta.shape)) 116 | for i in range(theta.size): 117 | loss1, _ = J(theta - perturb[:, i]) 118 | loss2, _ = J(theta + perturb[:, i]) 119 | numgrad[i] = (loss2 - loss1)/(2*e) 120 | return numgrad 121 | 122 | 123 | def checkNNGradients(nnCostFunction, lambda_=0): 124 | """ 125 | Creates a small neural network to check the backpropagation gradients. 
It will output the 126 | analytical gradients produced by your backprop code and the numerical gradients 127 | (computed using computeNumericalGradient). These two gradient computations should result in 128 | very similar values. 129 | 130 | Parameters 131 | ---------- 132 | nnCostFunction : func 133 | A reference to the cost function implemented by the student. 134 | 135 | lambda_ : float (optional) 136 | The regularization parameter value. 137 | """ 138 | input_layer_size = 3 139 | hidden_layer_size = 5 140 | num_labels = 3 141 | m = 5 142 | 143 | # We generate some 'random' test data 144 | Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size) 145 | Theta2 = debugInitializeWeights(num_labels, hidden_layer_size) 146 | 147 | # Reusing debugInitializeWeights to generate X 148 | X = debugInitializeWeights(m, input_layer_size - 1) 149 | y = np.arange(1, 1+m) % num_labels 150 | # print(y) 151 | # Unroll parameters 152 | nn_params = np.concatenate([Theta1.ravel(), Theta2.ravel()]) 153 | 154 | # short hand for cost function 155 | costFunc = lambda p: nnCostFunction(p, input_layer_size, hidden_layer_size, 156 | num_labels, X, y, lambda_) 157 | cost, grad = costFunc(nn_params) 158 | numgrad = computeNumericalGradient(costFunc, nn_params) 159 | 160 | # Visually examine the two gradient computations.The two columns you get should be very similar. 161 | print(np.stack([numgrad, grad], axis=1)) 162 | print('The above two columns you get should be very similar.') 163 | print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\n') 164 | 165 | # Evaluate the norm of the difference between two the solutions. If you have a correct 166 | # implementation, and assuming you used e = 0.0001 in computeNumericalGradient, then diff 167 | # should be less than 1e-9. 
168 | diff = np.linalg.norm(numgrad - grad)/np.linalg.norm(numgrad + grad) 169 | 170 | print('If your backpropagation implementation is correct, then \n' 171 | 'the relative difference will be small (less than 1e-9). \n' 172 | 'Relative Difference: %g' % diff) 173 | 174 | 175 | def sigmoid(z): 176 | """ 177 | Computes the sigmoid of z. 178 | """ 179 | return 1.0 / (1.0 + np.exp(-z)) 180 | 181 | 182 | class Grader(SubmissionBase): 183 | X = np.reshape(3 * np.sin(np.arange(1, 31)), (3, 10), order='F') 184 | Xm = np.reshape(np.sin(np.arange(1, 33)), (16, 2), order='F') / 5 185 | ym = np.arange(1, 17) % 4 186 | t1 = np.sin(np.reshape(np.arange(1, 25, 2), (4, 3), order='F')) 187 | t2 = np.cos(np.reshape(np.arange(1, 41, 2), (4, 5), order='F')) 188 | t = np.concatenate([t1.ravel(), t2.ravel()], axis=0) 189 | 190 | def __init__(self): 191 | part_names = ['Feedforward and Cost Function', 192 | 'Regularized Cost Function', 193 | 'Sigmoid Gradient', 194 | 'Neural Network Gradient (Backpropagation)', 195 | 'Regularized Gradient'] 196 | super().__init__('neural-network-learning', part_names) 197 | 198 | def __iter__(self): 199 | for part_id in range(1, 6): 200 | try: 201 | func = self.functions[part_id] 202 | 203 | # Each part has different expected arguments/different function 204 | if part_id == 1: 205 | res = func(self.t, 2, 4, 4, self.Xm, self.ym, 0)[0] 206 | elif part_id == 2: 207 | res = func(self.t, 2, 4, 4, self.Xm, self.ym, 1.5) 208 | elif part_id == 3: 209 | res = func(self.X, ) 210 | elif part_id == 4: 211 | J, grad = func(self.t, 2, 4, 4, self.Xm, self.ym, 0) 212 | grad1 = np.reshape(grad[:12], (4, 3)) 213 | grad2 = np.reshape(grad[12:], (4, 5)) 214 | grad = np.concatenate([grad1.ravel('F'), grad2.ravel('F')]) 215 | res = np.hstack([J, grad]).tolist() 216 | elif part_id == 5: 217 | J, grad = func(self.t, 2, 4, 4, self.Xm, self.ym, 1.5) 218 | grad1 = np.reshape(grad[:12], (4, 3)) 219 | grad2 = np.reshape(grad[12:], (4, 5)) 220 | grad = 
np.concatenate([grad1.ravel('F'), grad2.ravel('F')]) 221 | res = np.hstack([J, grad]).tolist() 222 | else: 223 | raise KeyError 224 | yield part_id, res 225 | except KeyError: 226 | yield part_id, 0 227 | -------------------------------------------------------------------------------- /Exercise5/Data/ex4data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Data/ex4data1.mat -------------------------------------------------------------------------------- /Exercise5/Data/ex4weights.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Data/ex4weights.mat -------------------------------------------------------------------------------- /Exercise5/Data/ex5data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Data/ex5data1.mat -------------------------------------------------------------------------------- /Exercise5/Figures/cross_validation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/cross_validation.png -------------------------------------------------------------------------------- /Exercise5/Figures/learning_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/learning_curve.png 
-------------------------------------------------------------------------------- /Exercise5/Figures/learning_curve_random.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/learning_curve_random.png -------------------------------------------------------------------------------- /Exercise5/Figures/linear_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/linear_fit.png -------------------------------------------------------------------------------- /Exercise5/Figures/polynomial_learning_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/polynomial_learning_curve.png -------------------------------------------------------------------------------- /Exercise5/Figures/polynomial_learning_curve_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/polynomial_learning_curve_reg_1.png -------------------------------------------------------------------------------- /Exercise5/Figures/polynomial_regression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/polynomial_regression.png -------------------------------------------------------------------------------- 
/Exercise5/Figures/polynomial_regression_reg_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/polynomial_regression_reg_1.png -------------------------------------------------------------------------------- /Exercise5/Figures/polynomial_regression_reg_100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/Figures/polynomial_regression_reg_100.png -------------------------------------------------------------------------------- /Exercise5/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /Exercise5/token.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise5/token.pkl -------------------------------------------------------------------------------- /Exercise5/utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | from scipy import optimize 4 | from matplotlib import pyplot 5 | 6 | sys.path.append('..') 7 | from submission import SubmissionBase 8 | 9 | 10 | def trainLinearReg(linearRegCostFunction, X, y, lambda_=0.0, maxiter=200): 11 | """ 12 | Trains linear regression using scipy's optimize.minimize. 
13 | 14 | Parameters 15 | ---------- 16 | X : array_like 17 | The dataset with shape (m x n+1). The bias term is assumed to be concatenated. 18 | 19 | y : array_like 20 | Function values at each datapoint. A vector of shape (m,). 21 | 22 | lambda_ : float, optional 23 | The regularization parameter. 24 | 25 | maxiter : int, optional 26 | Maximum number of iteration for the optimization algorithm. 27 | 28 | Returns 29 | ------- 30 | theta : array_like 31 | The parameters for linear regression. This is a vector of shape (n+1,). 32 | """ 33 | # Initialize Theta 34 | initial_theta = np.zeros(X.shape[1]) 35 | 36 | # Create "short hand" for the cost function to be minimized 37 | costFunction = lambda t: linearRegCostFunction(X, y, t, lambda_) 38 | 39 | # Now, costFunction is a function that takes in only one argument 40 | options = {'maxiter': maxiter} 41 | 42 | # Minimize using scipy 43 | res = optimize.minimize(costFunction, initial_theta, jac=True, method='TNC', options=options) 44 | return res.x 45 | 46 | 47 | def featureNormalize(X): 48 | """ 49 | Normalizes the features in X returns a normalized version of X where the mean value of each 50 | feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when 51 | working with learning algorithms. 52 | 53 | Parameters 54 | ---------- 55 | X : array_like 56 | An dataset which is a (m x n) matrix, where m is the number of examples, 57 | and n is the number of dimensions for each example. 58 | 59 | Returns 60 | ------- 61 | X_norm : array_like 62 | The normalized input dataset. 63 | 64 | mu : array_like 65 | A vector of size n corresponding to the mean for each dimension across all examples. 66 | 67 | sigma : array_like 68 | A vector of size n corresponding to the standard deviations for each dimension across 69 | all examples. 
70 | """ 71 | mu = np.mean(X, axis=0) 72 | X_norm = X - mu 73 | 74 | sigma = np.std(X_norm, axis=0, ddof=1) 75 | X_norm /= sigma 76 | return X_norm, mu, sigma 77 | 78 | 79 | def plotFit(polyFeatures, min_x, max_x, mu, sigma, theta, p): 80 | """ 81 | Plots a learned polynomial regression fit over an existing figure. 82 | Also works with linear regression. 83 | Plots the learned polynomial fit with power p and feature normalization (mu, sigma). 84 | 85 | Parameters 86 | ---------- 87 | polyFeatures : func 88 | A function which generators polynomial features from a single feature. 89 | 90 | min_x : float 91 | The minimum value for the feature. 92 | 93 | max_x : float 94 | The maximum value for the feature. 95 | 96 | mu : float 97 | The mean feature value over the training dataset. 98 | 99 | sigma : float 100 | The feature standard deviation of the training dataset. 101 | 102 | theta : array_like 103 | The parameters for the trained polynomial linear regression. 104 | 105 | p : int 106 | The polynomial order. 
107 | """ 108 | # We plot a range slightly bigger than the min and max values to get 109 | # an idea of how the fit will vary outside the range of the data points 110 | x = np.arange(min_x - 15, max_x + 25, 0.05).reshape(-1, 1) 111 | 112 | # Map the X values 113 | X_poly = polyFeatures(x, p) 114 | X_poly -= mu 115 | X_poly /= sigma 116 | 117 | # Add ones 118 | X_poly = np.concatenate([np.ones((x.shape[0], 1)), X_poly], axis=1) 119 | 120 | # Plot 121 | pyplot.plot(x, np.dot(X_poly, theta), '--', lw=2) 122 | 123 | 124 | class Grader(SubmissionBase): 125 | # Random test cases 126 | X = np.vstack([np.ones(10), 127 | np.sin(np.arange(1, 15, 1.5)), 128 | np.cos(np.arange(1, 15, 1.5))]).T 129 | y = np.sin(np.arange(1, 31, 3)) 130 | Xval = np.vstack([np.ones(10), 131 | np.sin(np.arange(0, 14, 1.5)), 132 | np.cos(np.arange(0, 14, 1.5))]).T 133 | yval = np.sin(np.arange(1, 11)) 134 | 135 | def __init__(self): 136 | part_names = ['Regularized Linear Regression Cost Function', 137 | 'Regularized Linear Regression Gradient', 138 | 'Learning Curve', 139 | 'Polynomial Feature Mapping', 140 | 'Validation Curve'] 141 | super().__init__('regularized-linear-regression-and-bias-variance', part_names) 142 | 143 | def __iter__(self): 144 | for part_id in range(1, 6): 145 | try: 146 | func = self.functions[part_id] 147 | # Each part has different expected arguments/different function 148 | if part_id == 1: 149 | res = func(self.X, self.y, np.array([0.1, 0.2, 0.3]), 0.5) 150 | elif part_id == 2: 151 | theta = np.array([0.1, 0.2, 0.3]) 152 | res = func(self.X, self.y, theta, 0.5)[1] 153 | elif part_id == 3: 154 | res = np.hstack(func(self.X, self.y, self.Xval, self.yval, 1)).tolist() 155 | elif part_id == 4: 156 | res = func(self.X[1, :].reshape(-1, 1), 8) 157 | elif part_id == 5: 158 | res = np.hstack(func(self.X, self.y, self.Xval, self.yval)).tolist() 159 | else: 160 | raise KeyError 161 | except KeyError: 162 | yield part_id, 0 163 | yield part_id, res 164 | 165 | 
-------------------------------------------------------------------------------- /Exercise6/Data/emailSample1.txt: -------------------------------------------------------------------------------- 1 | > Anyone knows how much it costs to host a web portal ? 2 | > 3 | Well, it depends on how many visitors you're expecting. 4 | This can be anywhere from less than 10 bucks a month to a couple of $100. 5 | You should checkout http://www.rackspace.com/ or perhaps Amazon EC2 6 | if youre running something big.. 7 | 8 | To unsubscribe yourself from this mailing list, send an email to: 9 | groupname-unsubscribe@egroups.com 10 | 11 | -------------------------------------------------------------------------------- /Exercise6/Data/emailSample2.txt: -------------------------------------------------------------------------------- 1 | Folks, 2 | 3 | my first time posting - have a bit of Unix experience, but am new to Linux. 4 | 5 | 6 | Just got a new PC at home - Dell box with Windows XP. Added a second hard disk 7 | for Linux. Partitioned the disk and have installed Suse 7.2 from CD, which went 8 | fine except it didn't pick up my monitor. 9 | 10 | I have a Dell branded E151FPp 15" LCD flat panel monitor and a nVidia GeForce4 11 | Ti4200 video card, both of which are probably too new to feature in Suse's default 12 | set. I downloaded a driver from the nVidia website and installed it using RPM. 13 | Then I ran Sax2 (as was recommended in some postings I found on the net), but 14 | it still doesn't feature my video card in the available list. What next? 15 | 16 | Another problem. I have a Dell branded keyboard and if I hit Caps-Lock twice, 17 | the whole machine crashes (in Linux, not Windows) - even the on/off switch is 18 | inactive, leaving me to reach for the power cable instead. 19 | 20 | If anyone can help me in any way with these probs., I'd be really grateful - 21 | I've searched the 'net but have run out of ideas. 
22 | 23 | Or should I be going for a different version of Linux such as RedHat? Opinions 24 | welcome. 25 | 26 | Thanks a lot, 27 | Peter 28 | 29 | -- 30 | Irish Linux Users' Group: ilug@linux.ie 31 | http://www.linux.ie/mailman/listinfo/ilug for (un)subscription information. 32 | List maintainer: listmaster@linux.ie 33 | 34 | 35 | -------------------------------------------------------------------------------- /Exercise6/Data/ex6data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Data/ex6data1.mat -------------------------------------------------------------------------------- /Exercise6/Data/ex6data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Data/ex6data2.mat -------------------------------------------------------------------------------- /Exercise6/Data/ex6data3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Data/ex6data3.mat -------------------------------------------------------------------------------- /Exercise6/Data/spamSample1.txt: -------------------------------------------------------------------------------- 1 | Do You Want To Make $1000 Or More Per Week? 2 | 3 | 4 | 5 | If you are a motivated and qualified individual - I 6 | will personally demonstrate to you a system that will 7 | make you $1,000 per week or more! This is NOT mlm. 8 | 9 | 10 | 11 | Call our 24 hour pre-recorded number to get the 12 | details. 13 | 14 | 15 | 16 | 000-456-789 17 | 18 | 19 | 20 | I need people who want to make serious money. 
Make 21 | the call and get the facts. 22 | 23 | Invest 2 minutes in yourself now! 24 | 25 | 26 | 27 | 000-456-789 28 | 29 | 30 | 31 | Looking forward to your call and I will introduce you 32 | to people like yourself who 33 | are currently making $10,000 plus per week! 34 | 35 | 36 | 37 | 000-456-789 38 | 39 | 40 | 41 | 3484lJGv6-241lEaN9080lRmS6-271WxHo7524qiyT5-438rjUv5615hQcf0-662eiDB9057dMtVl72 42 | 43 | -------------------------------------------------------------------------------- /Exercise6/Data/spamSample2.txt: -------------------------------------------------------------------------------- 1 | Best Buy Viagra Generic Online 2 | 3 | Viagra 100mg x 60 Pills $125, Free Pills & Reorder Discount, Top Selling 100% Quality & Satisfaction guaranteed! 4 | 5 | We accept VISA, Master & E-Check Payments, 90000+ Satisfied Customers! 6 | http://medphysitcstech.ru 7 | 8 | 9 | -------------------------------------------------------------------------------- /Exercise6/Data/spamTest.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Data/spamTest.mat -------------------------------------------------------------------------------- /Exercise6/Data/spamTrain.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Data/spamTrain.mat -------------------------------------------------------------------------------- /Exercise6/Data/vocab.txt: -------------------------------------------------------------------------------- 1 | 1 aa 2 | 2 ab 3 | 3 abil 4 | 4 abl 5 | 5 about 6 | 6 abov 7 | 7 absolut 8 | 8 abus 9 | 9 ac 10 | 10 accept 11 | 11 access 12 | 12 accord 13 | 13 account 14 | 14 achiev 15 | 15 acquir 16 | 16 across 17 | 17 act 18 | 18 
action 19 | 19 activ 20 | 20 actual 21 | 21 ad 22 | 22 adam 23 | 23 add 24 | 24 addit 25 | 25 address 26 | 26 administr 27 | 27 adult 28 | 28 advanc 29 | 29 advantag 30 | 30 advertis 31 | 31 advic 32 | 32 advis 33 | 33 ae 34 | 34 af 35 | 35 affect 36 | 36 affili 37 | 37 afford 38 | 38 africa 39 | 39 after 40 | 40 ag 41 | 41 again 42 | 42 against 43 | 43 agenc 44 | 44 agent 45 | 45 ago 46 | 46 agre 47 | 47 agreement 48 | 48 aid 49 | 49 air 50 | 50 al 51 | 51 alb 52 | 52 align 53 | 53 all 54 | 54 allow 55 | 55 almost 56 | 56 alon 57 | 57 along 58 | 58 alreadi 59 | 59 alsa 60 | 60 also 61 | 61 altern 62 | 62 although 63 | 63 alwai 64 | 64 am 65 | 65 amaz 66 | 66 america 67 | 67 american 68 | 68 among 69 | 69 amount 70 | 70 amp 71 | 71 an 72 | 72 analysi 73 | 73 analyst 74 | 74 and 75 | 75 ani 76 | 76 anim 77 | 77 announc 78 | 78 annual 79 | 79 annuiti 80 | 80 anoth 81 | 81 answer 82 | 82 anti 83 | 83 anumb 84 | 84 anybodi 85 | 85 anymor 86 | 86 anyon 87 | 87 anyth 88 | 88 anywai 89 | 89 anywher 90 | 90 aol 91 | 91 ap 92 | 92 apolog 93 | 93 app 94 | 94 appar 95 | 95 appear 96 | 96 appl 97 | 97 appli 98 | 98 applic 99 | 99 appreci 100 | 100 approach 101 | 101 approv 102 | 102 apt 103 | 103 ar 104 | 104 archiv 105 | 105 area 106 | 106 aren 107 | 107 argument 108 | 108 arial 109 | 109 arm 110 | 110 around 111 | 111 arrai 112 | 112 arriv 113 | 113 art 114 | 114 articl 115 | 115 artist 116 | 116 as 117 | 117 ascii 118 | 118 ask 119 | 119 asset 120 | 120 assist 121 | 121 associ 122 | 122 assum 123 | 123 assur 124 | 124 at 125 | 125 atol 126 | 126 attach 127 | 127 attack 128 | 128 attempt 129 | 129 attent 130 | 130 attornei 131 | 131 attract 132 | 132 audio 133 | 133 aug 134 | 134 august 135 | 135 author 136 | 136 auto 137 | 137 autom 138 | 138 automat 139 | 139 avail 140 | 140 averag 141 | 141 avoid 142 | 142 awai 143 | 143 awar 144 | 144 award 145 | 145 ba 146 | 146 babi 147 | 147 back 148 | 148 background 149 | 149 backup 150 | 150 bad 151 | 151 balanc 152 | 152 ban 153 | 
153 bank 154 | 154 bar 155 | 155 base 156 | 156 basenumb 157 | 157 basi 158 | 158 basic 159 | 159 bb 160 | 160 bc 161 | 161 bd 162 | 162 be 163 | 163 beat 164 | 164 beberg 165 | 165 becaus 166 | 166 becom 167 | 167 been 168 | 168 befor 169 | 169 begin 170 | 170 behalf 171 | 171 behavior 172 | 172 behind 173 | 173 believ 174 | 174 below 175 | 175 benefit 176 | 176 best 177 | 177 beta 178 | 178 better 179 | 179 between 180 | 180 bf 181 | 181 big 182 | 182 bill 183 | 183 billion 184 | 184 bin 185 | 185 binari 186 | 186 bit 187 | 187 black 188 | 188 blank 189 | 189 block 190 | 190 blog 191 | 191 blood 192 | 192 blue 193 | 193 bnumber 194 | 194 board 195 | 195 bodi 196 | 196 boi 197 | 197 bonu 198 | 198 book 199 | 199 boot 200 | 200 border 201 | 201 boss 202 | 202 boston 203 | 203 botan 204 | 204 both 205 | 205 bottl 206 | 206 bottom 207 | 207 boundari 208 | 208 box 209 | 209 brain 210 | 210 brand 211 | 211 break 212 | 212 brian 213 | 213 bring 214 | 214 broadcast 215 | 215 broker 216 | 216 browser 217 | 217 bug 218 | 218 bui 219 | 219 build 220 | 220 built 221 | 221 bulk 222 | 222 burn 223 | 223 bush 224 | 224 busi 225 | 225 but 226 | 226 button 227 | 227 by 228 | 228 byte 229 | 229 ca 230 | 230 cabl 231 | 231 cach 232 | 232 calcul 233 | 233 california 234 | 234 call 235 | 235 came 236 | 236 camera 237 | 237 campaign 238 | 238 can 239 | 239 canada 240 | 240 cannot 241 | 241 canon 242 | 242 capabl 243 | 243 capillari 244 | 244 capit 245 | 245 car 246 | 246 card 247 | 247 care 248 | 248 career 249 | 249 carri 250 | 250 cartridg 251 | 251 case 252 | 252 cash 253 | 253 cat 254 | 254 catch 255 | 255 categori 256 | 256 caus 257 | 257 cb 258 | 258 cc 259 | 259 cd 260 | 260 ce 261 | 261 cell 262 | 262 cent 263 | 263 center 264 | 264 central 265 | 265 centuri 266 | 266 ceo 267 | 267 certain 268 | 268 certainli 269 | 269 cf 270 | 270 challeng 271 | 271 chanc 272 | 272 chang 273 | 273 channel 274 | 274 char 275 | 275 charact 276 | 276 charg 277 | 277 charset 278 | 278 chat 279 | 
279 cheap 280 | 280 check 281 | 281 cheer 282 | 282 chief 283 | 283 children 284 | 284 china 285 | 285 chip 286 | 286 choic 287 | 287 choos 288 | 288 chri 289 | 289 citi 290 | 290 citizen 291 | 291 civil 292 | 292 claim 293 | 293 class 294 | 294 classifi 295 | 295 clean 296 | 296 clear 297 | 297 clearli 298 | 298 click 299 | 299 client 300 | 300 close 301 | 301 clue 302 | 302 cnet 303 | 303 cnumber 304 | 304 co 305 | 305 code 306 | 306 collect 307 | 307 colleg 308 | 308 color 309 | 309 com 310 | 310 combin 311 | 311 come 312 | 312 comfort 313 | 313 command 314 | 314 comment 315 | 315 commentari 316 | 316 commerci 317 | 317 commiss 318 | 318 commit 319 | 319 common 320 | 320 commun 321 | 321 compani 322 | 322 compar 323 | 323 comparison 324 | 324 compat 325 | 325 compet 326 | 326 competit 327 | 327 compil 328 | 328 complet 329 | 329 comprehens 330 | 330 comput 331 | 331 concentr 332 | 332 concept 333 | 333 concern 334 | 334 condit 335 | 335 conf 336 | 336 confer 337 | 337 confid 338 | 338 confidenti 339 | 339 config 340 | 340 configur 341 | 341 confirm 342 | 342 conflict 343 | 343 confus 344 | 344 congress 345 | 345 connect 346 | 346 consid 347 | 347 consolid 348 | 348 constitut 349 | 349 construct 350 | 350 consult 351 | 351 consum 352 | 352 contact 353 | 353 contain 354 | 354 content 355 | 355 continu 356 | 356 contract 357 | 357 contribut 358 | 358 control 359 | 359 conveni 360 | 360 convers 361 | 361 convert 362 | 362 cool 363 | 363 cooper 364 | 364 copi 365 | 365 copyright 366 | 366 core 367 | 367 corpor 368 | 368 correct 369 | 369 correspond 370 | 370 cost 371 | 371 could 372 | 372 couldn 373 | 373 count 374 | 374 countri 375 | 375 coupl 376 | 376 cours 377 | 377 court 378 | 378 cover 379 | 379 coverag 380 | 380 crash 381 | 381 creat 382 | 382 creativ 383 | 383 credit 384 | 384 critic 385 | 385 cross 386 | 386 cultur 387 | 387 current 388 | 388 custom 389 | 389 cut 390 | 390 cv 391 | 391 da 392 | 392 dagga 393 | 393 dai 394 | 394 daili 395 | 395 dan 396 | 396 
danger 397 | 397 dark 398 | 398 data 399 | 399 databas 400 | 400 datapow 401 | 401 date 402 | 402 dave 403 | 403 david 404 | 404 dc 405 | 405 de 406 | 406 dead 407 | 407 deal 408 | 408 dear 409 | 409 death 410 | 410 debt 411 | 411 decad 412 | 412 decid 413 | 413 decis 414 | 414 declar 415 | 415 declin 416 | 416 decor 417 | 417 default 418 | 418 defend 419 | 419 defens 420 | 420 defin 421 | 421 definit 422 | 422 degre 423 | 423 delai 424 | 424 delet 425 | 425 deliv 426 | 426 deliveri 427 | 427 dell 428 | 428 demand 429 | 429 democrat 430 | 430 depart 431 | 431 depend 432 | 432 deposit 433 | 433 describ 434 | 434 descript 435 | 435 deserv 436 | 436 design 437 | 437 desir 438 | 438 desktop 439 | 439 despit 440 | 440 detail 441 | 441 detect 442 | 442 determin 443 | 443 dev 444 | 444 devel 445 | 445 develop 446 | 446 devic 447 | 447 di 448 | 448 dial 449 | 449 did 450 | 450 didn 451 | 451 diet 452 | 452 differ 453 | 453 difficult 454 | 454 digit 455 | 455 direct 456 | 456 directli 457 | 457 director 458 | 458 directori 459 | 459 disabl 460 | 460 discount 461 | 461 discov 462 | 462 discoveri 463 | 463 discuss 464 | 464 disk 465 | 465 displai 466 | 466 disposit 467 | 467 distanc 468 | 468 distribut 469 | 469 dn 470 | 470 dnumber 471 | 471 do 472 | 472 doc 473 | 473 document 474 | 474 doe 475 | 475 doer 476 | 476 doesn 477 | 477 dollar 478 | 478 dollarac 479 | 479 dollarnumb 480 | 480 domain 481 | 481 don 482 | 482 done 483 | 483 dont 484 | 484 doubl 485 | 485 doubt 486 | 486 down 487 | 487 download 488 | 488 dr 489 | 489 draw 490 | 490 dream 491 | 491 drive 492 | 492 driver 493 | 493 drop 494 | 494 drug 495 | 495 due 496 | 496 dure 497 | 497 dvd 498 | 498 dw 499 | 499 dynam 500 | 500 ea 501 | 501 each 502 | 502 earli 503 | 503 earlier 504 | 504 earn 505 | 505 earth 506 | 506 easi 507 | 507 easier 508 | 508 easili 509 | 509 eat 510 | 510 eb 511 | 511 ebai 512 | 512 ec 513 | 513 echo 514 | 514 econom 515 | 515 economi 516 | 516 ed 517 | 517 edg 518 | 518 edit 519 | 519 
editor 520 | 520 educ 521 | 521 eff 522 | 522 effect 523 | 523 effici 524 | 524 effort 525 | 525 either 526 | 526 el 527 | 527 electron 528 | 528 elimin 529 | 529 els 530 | 530 email 531 | 531 emailaddr 532 | 532 emerg 533 | 533 empir 534 | 534 employ 535 | 535 employe 536 | 536 en 537 | 537 enabl 538 | 538 encod 539 | 539 encourag 540 | 540 end 541 | 541 enemi 542 | 542 enenkio 543 | 543 energi 544 | 544 engin 545 | 545 english 546 | 546 enhanc 547 | 547 enjoi 548 | 548 enough 549 | 549 ensur 550 | 550 enter 551 | 551 enterpris 552 | 552 entertain 553 | 553 entir 554 | 554 entri 555 | 555 enumb 556 | 556 environ 557 | 557 equal 558 | 558 equip 559 | 559 equival 560 | 560 error 561 | 561 especi 562 | 562 essenti 563 | 563 establish 564 | 564 estat 565 | 565 estim 566 | 566 et 567 | 567 etc 568 | 568 euro 569 | 569 europ 570 | 570 european 571 | 571 even 572 | 572 event 573 | 573 eventu 574 | 574 ever 575 | 575 everi 576 | 576 everyon 577 | 577 everyth 578 | 578 evid 579 | 579 evil 580 | 580 exactli 581 | 581 exampl 582 | 582 excel 583 | 583 except 584 | 584 exchang 585 | 585 excit 586 | 586 exclus 587 | 587 execut 588 | 588 exercis 589 | 589 exist 590 | 590 exmh 591 | 591 expand 592 | 592 expect 593 | 593 expens 594 | 594 experi 595 | 595 expert 596 | 596 expir 597 | 597 explain 598 | 598 explor 599 | 599 express 600 | 600 extend 601 | 601 extens 602 | 602 extra 603 | 603 extract 604 | 604 extrem 605 | 605 ey 606 | 606 fa 607 | 607 face 608 | 608 fact 609 | 609 factor 610 | 610 fail 611 | 611 fair 612 | 612 fall 613 | 613 fals 614 | 614 famili 615 | 615 faq 616 | 616 far 617 | 617 fast 618 | 618 faster 619 | 619 fastest 620 | 620 fat 621 | 621 father 622 | 622 favorit 623 | 623 fax 624 | 624 fb 625 | 625 fd 626 | 626 featur 627 | 627 feder 628 | 628 fee 629 | 629 feed 630 | 630 feedback 631 | 631 feel 632 | 632 femal 633 | 633 few 634 | 634 ffffff 635 | 635 ffnumber 636 | 636 field 637 | 637 fight 638 | 638 figur 639 | 639 file 640 | 640 fill 641 | 641 film 642 | 
642 filter 643 | 643 final 644 | 644 financ 645 | 645 financi 646 | 646 find 647 | 647 fine 648 | 648 finish 649 | 649 fire 650 | 650 firewal 651 | 651 firm 652 | 652 first 653 | 653 fit 654 | 654 five 655 | 655 fix 656 | 656 flag 657 | 657 flash 658 | 658 flow 659 | 659 fnumber 660 | 660 focu 661 | 661 folder 662 | 662 folk 663 | 663 follow 664 | 664 font 665 | 665 food 666 | 666 for 667 | 667 forc 668 | 668 foreign 669 | 669 forev 670 | 670 forget 671 | 671 fork 672 | 672 form 673 | 673 format 674 | 674 former 675 | 675 fortun 676 | 676 forward 677 | 677 found 678 | 678 foundat 679 | 679 four 680 | 680 franc 681 | 681 free 682 | 682 freedom 683 | 683 french 684 | 684 freshrpm 685 | 685 fri 686 | 686 fridai 687 | 687 friend 688 | 688 from 689 | 689 front 690 | 690 ftoc 691 | 691 ftp 692 | 692 full 693 | 693 fulli 694 | 694 fun 695 | 695 function 696 | 696 fund 697 | 697 further 698 | 698 futur 699 | 699 ga 700 | 700 gain 701 | 701 game 702 | 702 gari 703 | 703 garrigu 704 | 704 gave 705 | 705 gcc 706 | 706 geek 707 | 707 gener 708 | 708 get 709 | 709 gif 710 | 710 gift 711 | 711 girl 712 | 712 give 713 | 713 given 714 | 714 global 715 | 715 gnome 716 | 716 gnu 717 | 717 gnupg 718 | 718 go 719 | 719 goal 720 | 720 god 721 | 721 goe 722 | 722 gold 723 | 723 gone 724 | 724 good 725 | 725 googl 726 | 726 got 727 | 727 govern 728 | 728 gpl 729 | 729 grand 730 | 730 grant 731 | 731 graphic 732 | 732 great 733 | 733 greater 734 | 734 ground 735 | 735 group 736 | 736 grow 737 | 737 growth 738 | 738 gt 739 | 739 guarante 740 | 740 guess 741 | 741 gui 742 | 742 guid 743 | 743 ha 744 | 744 hack 745 | 745 had 746 | 746 half 747 | 747 ham 748 | 748 hand 749 | 749 handl 750 | 750 happen 751 | 751 happi 752 | 752 hard 753 | 753 hardwar 754 | 754 hat 755 | 755 hate 756 | 756 have 757 | 757 haven 758 | 758 he 759 | 759 head 760 | 760 header 761 | 761 headlin 762 | 762 health 763 | 763 hear 764 | 764 heard 765 | 765 heart 766 | 766 heaven 767 | 767 hei 768 | 768 height 769 | 769 
held 770 | 770 hello 771 | 771 help 772 | 772 helvetica 773 | 773 her 774 | 774 herba 775 | 775 here 776 | 776 hermio 777 | 777 hettinga 778 | 778 hi 779 | 779 high 780 | 780 higher 781 | 781 highli 782 | 782 highlight 783 | 783 him 784 | 784 histori 785 | 785 hit 786 | 786 hold 787 | 787 home 788 | 788 honor 789 | 789 hope 790 | 790 host 791 | 791 hot 792 | 792 hour 793 | 793 hous 794 | 794 how 795 | 795 howev 796 | 796 hp 797 | 797 html 798 | 798 http 799 | 799 httpaddr 800 | 800 huge 801 | 801 human 802 | 802 hundr 803 | 803 ibm 804 | 804 id 805 | 805 idea 806 | 806 ident 807 | 807 identifi 808 | 808 idnumb 809 | 809 ie 810 | 810 if 811 | 811 ignor 812 | 812 ii 813 | 813 iii 814 | 814 iiiiiiihnumberjnumberhnumberjnumberhnumb 815 | 815 illeg 816 | 816 im 817 | 817 imag 818 | 818 imagin 819 | 819 immedi 820 | 820 impact 821 | 821 implement 822 | 822 import 823 | 823 impress 824 | 824 improv 825 | 825 in 826 | 826 inc 827 | 827 includ 828 | 828 incom 829 | 829 increas 830 | 830 incred 831 | 831 inde 832 | 832 independ 833 | 833 index 834 | 834 india 835 | 835 indian 836 | 836 indic 837 | 837 individu 838 | 838 industri 839 | 839 info 840 | 840 inform 841 | 841 initi 842 | 842 inlin 843 | 843 innov 844 | 844 input 845 | 845 insert 846 | 846 insid 847 | 847 instal 848 | 848 instanc 849 | 849 instant 850 | 850 instead 851 | 851 institut 852 | 852 instruct 853 | 853 insur 854 | 854 int 855 | 855 integr 856 | 856 intel 857 | 857 intellig 858 | 858 intend 859 | 859 interact 860 | 860 interest 861 | 861 interfac 862 | 862 intern 863 | 863 internet 864 | 864 interview 865 | 865 into 866 | 866 intro 867 | 867 introduc 868 | 868 inumb 869 | 869 invest 870 | 870 investig 871 | 871 investor 872 | 872 invok 873 | 873 involv 874 | 874 ip 875 | 875 ireland 876 | 876 irish 877 | 877 is 878 | 878 island 879 | 879 isn 880 | 880 iso 881 | 881 isp 882 | 882 issu 883 | 883 it 884 | 884 item 885 | 885 itself 886 | 886 jabber 887 | 887 jame 888 | 888 java 889 | 889 jim 890 | 890 
jnumberiiiiiiihepihepihf 891 | 891 job 892 | 892 joe 893 | 893 john 894 | 894 join 895 | 895 journal 896 | 896 judg 897 | 897 judgment 898 | 898 jul 899 | 899 juli 900 | 900 jump 901 | 901 june 902 | 902 just 903 | 903 justin 904 | 904 keep 905 | 905 kei 906 | 906 kept 907 | 907 kernel 908 | 908 kevin 909 | 909 keyboard 910 | 910 kid 911 | 911 kill 912 | 912 kind 913 | 913 king 914 | 914 kingdom 915 | 915 knew 916 | 916 know 917 | 917 knowledg 918 | 918 known 919 | 919 la 920 | 920 lack 921 | 921 land 922 | 922 languag 923 | 923 laptop 924 | 924 larg 925 | 925 larger 926 | 926 largest 927 | 927 laser 928 | 928 last 929 | 929 late 930 | 930 later 931 | 931 latest 932 | 932 launch 933 | 933 law 934 | 934 lawrenc 935 | 935 le 936 | 936 lead 937 | 937 leader 938 | 938 learn 939 | 939 least 940 | 940 leav 941 | 941 left 942 | 942 legal 943 | 943 lender 944 | 944 length 945 | 945 less 946 | 946 lesson 947 | 947 let 948 | 948 letter 949 | 949 level 950 | 950 lib 951 | 951 librari 952 | 952 licens 953 | 953 life 954 | 954 lifetim 955 | 955 light 956 | 956 like 957 | 957 limit 958 | 958 line 959 | 959 link 960 | 960 linux 961 | 961 list 962 | 962 listen 963 | 963 littl 964 | 964 live 965 | 965 ll 966 | 966 lo 967 | 967 load 968 | 968 loan 969 | 969 local 970 | 970 locat 971 | 971 lock 972 | 972 lockergnom 973 | 973 log 974 | 974 long 975 | 975 longer 976 | 976 look 977 | 977 lose 978 | 978 loss 979 | 979 lost 980 | 980 lot 981 | 981 love 982 | 982 low 983 | 983 lower 984 | 984 lowest 985 | 985 lt 986 | 986 ma 987 | 987 mac 988 | 988 machin 989 | 989 made 990 | 990 magazin 991 | 991 mai 992 | 992 mail 993 | 993 mailer 994 | 994 main 995 | 995 maintain 996 | 996 major 997 | 997 make 998 | 998 maker 999 | 999 male 1000 | 1000 man 1001 | 1001 manag 1002 | 1002 mani 1003 | 1003 manual 1004 | 1004 manufactur 1005 | 1005 map 1006 | 1006 march 1007 | 1007 margin 1008 | 1008 mark 1009 | 1009 market 1010 | 1010 marshal 1011 | 1011 mass 1012 | 1012 master 1013 | 1013 match 1014 | 1014 
materi 1015 | 1015 matter 1016 | 1016 matthia 1017 | 1017 mayb 1018 | 1018 me 1019 | 1019 mean 1020 | 1020 measur 1021 | 1021 mechan 1022 | 1022 media 1023 | 1023 medic 1024 | 1024 meet 1025 | 1025 member 1026 | 1026 membership 1027 | 1027 memori 1028 | 1028 men 1029 | 1029 mention 1030 | 1030 menu 1031 | 1031 merchant 1032 | 1032 messag 1033 | 1033 method 1034 | 1034 mh 1035 | 1035 michael 1036 | 1036 microsoft 1037 | 1037 middl 1038 | 1038 might 1039 | 1039 mike 1040 | 1040 mile 1041 | 1041 militari 1042 | 1042 million 1043 | 1043 mime 1044 | 1044 mind 1045 | 1045 mine 1046 | 1046 mini 1047 | 1047 minimum 1048 | 1048 minut 1049 | 1049 miss 1050 | 1050 mistak 1051 | 1051 mobil 1052 | 1052 mode 1053 | 1053 model 1054 | 1054 modem 1055 | 1055 modifi 1056 | 1056 modul 1057 | 1057 moment 1058 | 1058 mon 1059 | 1059 mondai 1060 | 1060 monei 1061 | 1061 monitor 1062 | 1062 month 1063 | 1063 monthli 1064 | 1064 more 1065 | 1065 morn 1066 | 1066 mortgag 1067 | 1067 most 1068 | 1068 mostli 1069 | 1069 mother 1070 | 1070 motiv 1071 | 1071 move 1072 | 1072 movi 1073 | 1073 mpnumber 1074 | 1074 mr 1075 | 1075 ms 1076 | 1076 msg 1077 | 1077 much 1078 | 1078 multi 1079 | 1079 multipart 1080 | 1080 multipl 1081 | 1081 murphi 1082 | 1082 music 1083 | 1083 must 1084 | 1084 my 1085 | 1085 myself 1086 | 1086 name 1087 | 1087 nation 1088 | 1088 natur 1089 | 1089 nbsp 1090 | 1090 near 1091 | 1091 nearli 1092 | 1092 necessari 1093 | 1093 need 1094 | 1094 neg 1095 | 1095 net 1096 | 1096 netscap 1097 | 1097 network 1098 | 1098 never 1099 | 1099 new 1100 | 1100 newslett 1101 | 1101 next 1102 | 1102 nextpart 1103 | 1103 nice 1104 | 1104 nigeria 1105 | 1105 night 1106 | 1106 no 1107 | 1107 nobodi 1108 | 1108 non 1109 | 1109 none 1110 | 1110 nor 1111 | 1111 normal 1112 | 1112 north 1113 | 1113 not 1114 | 1114 note 1115 | 1115 noth 1116 | 1116 notic 1117 | 1117 now 1118 | 1118 nt 1119 | 1119 null 1120 | 1120 number 1121 | 1121 numbera 1122 | 1122 numberam 1123 | 1123 numberanumb 1124 | 1124 
numberb 1125 | 1125 numberbit 1126 | 1126 numberc 1127 | 1127 numbercb 1128 | 1128 numbercbr 1129 | 1129 numbercfont 1130 | 1130 numbercli 1131 | 1131 numbercnumb 1132 | 1132 numbercp 1133 | 1133 numberctd 1134 | 1134 numberd 1135 | 1135 numberdari 1136 | 1136 numberdnumb 1137 | 1137 numberenumb 1138 | 1138 numberf 1139 | 1139 numberfb 1140 | 1140 numberff 1141 | 1141 numberffont 1142 | 1142 numberfp 1143 | 1143 numberftd 1144 | 1144 numberk 1145 | 1145 numberm 1146 | 1146 numbermb 1147 | 1147 numberp 1148 | 1148 numberpd 1149 | 1149 numberpm 1150 | 1150 numberpx 1151 | 1151 numberst 1152 | 1152 numberth 1153 | 1153 numbertnumb 1154 | 1154 numberx 1155 | 1155 object 1156 | 1156 oblig 1157 | 1157 obtain 1158 | 1158 obvious 1159 | 1159 occur 1160 | 1160 oct 1161 | 1161 octob 1162 | 1162 of 1163 | 1163 off 1164 | 1164 offer 1165 | 1165 offic 1166 | 1166 offici 1167 | 1167 often 1168 | 1168 oh 1169 | 1169 ok 1170 | 1170 old 1171 | 1171 on 1172 | 1172 onc 1173 | 1173 onli 1174 | 1174 onlin 1175 | 1175 open 1176 | 1176 oper 1177 | 1177 opinion 1178 | 1178 opportun 1179 | 1179 opt 1180 | 1180 optim 1181 | 1181 option 1182 | 1182 or 1183 | 1183 order 1184 | 1184 org 1185 | 1185 organ 1186 | 1186 origin 1187 | 1187 os 1188 | 1188 osdn 1189 | 1189 other 1190 | 1190 otherwis 1191 | 1191 our 1192 | 1192 out 1193 | 1193 outlook 1194 | 1194 output 1195 | 1195 outsid 1196 | 1196 over 1197 | 1197 own 1198 | 1198 owner 1199 | 1199 oz 1200 | 1200 pacif 1201 | 1201 pack 1202 | 1202 packag 1203 | 1203 page 1204 | 1204 pai 1205 | 1205 paid 1206 | 1206 pain 1207 | 1207 palm 1208 | 1208 panel 1209 | 1209 paper 1210 | 1210 paragraph 1211 | 1211 parent 1212 | 1212 part 1213 | 1213 parti 1214 | 1214 particip 1215 | 1215 particular 1216 | 1216 particularli 1217 | 1217 partit 1218 | 1218 partner 1219 | 1219 pass 1220 | 1220 password 1221 | 1221 past 1222 | 1222 patch 1223 | 1223 patent 1224 | 1224 path 1225 | 1225 pattern 1226 | 1226 paul 1227 | 1227 payment 1228 | 1228 pc 1229 | 1229 peac 
1230 | 1230 peopl 1231 | 1231 per 1232 | 1232 percent 1233 | 1233 percentag 1234 | 1234 perfect 1235 | 1235 perfectli 1236 | 1236 perform 1237 | 1237 perhap 1238 | 1238 period 1239 | 1239 perl 1240 | 1240 perman 1241 | 1241 permiss 1242 | 1242 person 1243 | 1243 pgp 1244 | 1244 phone 1245 | 1245 photo 1246 | 1246 php 1247 | 1247 phrase 1248 | 1248 physic 1249 | 1249 pick 1250 | 1250 pictur 1251 | 1251 piec 1252 | 1252 piiiiiiii 1253 | 1253 pipe 1254 | 1254 pjnumber 1255 | 1255 place 1256 | 1256 plai 1257 | 1257 plain 1258 | 1258 plan 1259 | 1259 planet 1260 | 1260 plant 1261 | 1261 planta 1262 | 1262 platform 1263 | 1263 player 1264 | 1264 pleas 1265 | 1265 plu 1266 | 1266 plug 1267 | 1267 pm 1268 | 1268 pocket 1269 | 1269 point 1270 | 1270 polic 1271 | 1271 polici 1272 | 1272 polit 1273 | 1273 poor 1274 | 1274 pop 1275 | 1275 popul 1276 | 1276 popular 1277 | 1277 port 1278 | 1278 posit 1279 | 1279 possibl 1280 | 1280 post 1281 | 1281 potenti 1282 | 1282 pound 1283 | 1283 powel 1284 | 1284 power 1285 | 1285 powershot 1286 | 1286 practic 1287 | 1287 pre 1288 | 1288 predict 1289 | 1289 prefer 1290 | 1290 premium 1291 | 1291 prepar 1292 | 1292 present 1293 | 1293 presid 1294 | 1294 press 1295 | 1295 pretti 1296 | 1296 prevent 1297 | 1297 previou 1298 | 1298 previous 1299 | 1299 price 1300 | 1300 principl 1301 | 1301 print 1302 | 1302 printabl 1303 | 1303 printer 1304 | 1304 privaci 1305 | 1305 privat 1306 | 1306 prize 1307 | 1307 pro 1308 | 1308 probabl 1309 | 1309 problem 1310 | 1310 procedur 1311 | 1311 process 1312 | 1312 processor 1313 | 1313 procmail 1314 | 1314 produc 1315 | 1315 product 1316 | 1316 profession 1317 | 1317 profil 1318 | 1318 profit 1319 | 1319 program 1320 | 1320 programm 1321 | 1321 progress 1322 | 1322 project 1323 | 1323 promis 1324 | 1324 promot 1325 | 1325 prompt 1326 | 1326 properti 1327 | 1327 propos 1328 | 1328 proprietari 1329 | 1329 prospect 1330 | 1330 protect 1331 | 1331 protocol 1332 | 1332 prove 1333 | 1333 proven 1334 | 1334 provid 
1335 | 1335 proxi 1336 | 1336 pub 1337 | 1337 public 1338 | 1338 publish 1339 | 1339 pudg 1340 | 1340 pull 1341 | 1341 purchas 1342 | 1342 purpos 1343 | 1343 put 1344 | 1344 python 1345 | 1345 qnumber 1346 | 1346 qualifi 1347 | 1347 qualiti 1348 | 1348 quarter 1349 | 1349 question 1350 | 1350 quick 1351 | 1351 quickli 1352 | 1352 quit 1353 | 1353 quot 1354 | 1354 radio 1355 | 1355 ragga 1356 | 1356 rais 1357 | 1357 random 1358 | 1358 rang 1359 | 1359 rate 1360 | 1360 rather 1361 | 1361 ratio 1362 | 1362 razor 1363 | 1363 razornumb 1364 | 1364 re 1365 | 1365 reach 1366 | 1366 read 1367 | 1367 reader 1368 | 1368 readi 1369 | 1369 real 1370 | 1370 realiz 1371 | 1371 realli 1372 | 1372 reason 1373 | 1373 receiv 1374 | 1374 recent 1375 | 1375 recipi 1376 | 1376 recommend 1377 | 1377 record 1378 | 1378 red 1379 | 1379 redhat 1380 | 1380 reduc 1381 | 1381 refer 1382 | 1382 refin 1383 | 1383 reg 1384 | 1384 regard 1385 | 1385 region 1386 | 1386 regist 1387 | 1387 regul 1388 | 1388 regular 1389 | 1389 rel 1390 | 1390 relat 1391 | 1391 relationship 1392 | 1392 releas 1393 | 1393 relev 1394 | 1394 reliabl 1395 | 1395 remain 1396 | 1396 rememb 1397 | 1397 remot 1398 | 1398 remov 1399 | 1399 replac 1400 | 1400 repli 1401 | 1401 report 1402 | 1402 repositori 1403 | 1403 repres 1404 | 1404 republ 1405 | 1405 request 1406 | 1406 requir 1407 | 1407 research 1408 | 1408 reserv 1409 | 1409 resid 1410 | 1410 resourc 1411 | 1411 respect 1412 | 1412 respond 1413 | 1413 respons 1414 | 1414 rest 1415 | 1415 result 1416 | 1416 retail 1417 | 1417 return 1418 | 1418 reveal 1419 | 1419 revenu 1420 | 1420 revers 1421 | 1421 review 1422 | 1422 revok 1423 | 1423 rh 1424 | 1424 rich 1425 | 1425 right 1426 | 1426 risk 1427 | 1427 road 1428 | 1428 robert 1429 | 1429 rock 1430 | 1430 role 1431 | 1431 roll 1432 | 1432 rom 1433 | 1433 roman 1434 | 1434 room 1435 | 1435 root 1436 | 1436 round 1437 | 1437 rpm 1438 | 1438 rss 1439 | 1439 rule 1440 | 1440 run 1441 | 1441 sa 1442 | 1442 safe 1443 | 1443 
sai 1444 | 1444 said 1445 | 1445 sale 1446 | 1446 same 1447 | 1447 sampl 1448 | 1448 san 1449 | 1449 saou 1450 | 1450 sat 1451 | 1451 satellit 1452 | 1452 save 1453 | 1453 saw 1454 | 1454 scan 1455 | 1455 schedul 1456 | 1456 school 1457 | 1457 scienc 1458 | 1458 score 1459 | 1459 screen 1460 | 1460 script 1461 | 1461 se 1462 | 1462 search 1463 | 1463 season 1464 | 1464 second 1465 | 1465 secret 1466 | 1466 section 1467 | 1467 secur 1468 | 1468 see 1469 | 1469 seed 1470 | 1470 seek 1471 | 1471 seem 1472 | 1472 seen 1473 | 1473 select 1474 | 1474 self 1475 | 1475 sell 1476 | 1476 seminar 1477 | 1477 send 1478 | 1478 sender 1479 | 1479 sendmail 1480 | 1480 senior 1481 | 1481 sens 1482 | 1482 sensit 1483 | 1483 sent 1484 | 1484 sep 1485 | 1485 separ 1486 | 1486 septemb 1487 | 1487 sequenc 1488 | 1488 seri 1489 | 1489 serif 1490 | 1490 seriou 1491 | 1491 serv 1492 | 1492 server 1493 | 1493 servic 1494 | 1494 set 1495 | 1495 setup 1496 | 1496 seven 1497 | 1497 seventh 1498 | 1498 sever 1499 | 1499 sex 1500 | 1500 sexual 1501 | 1501 sf 1502 | 1502 shape 1503 | 1503 share 1504 | 1504 she 1505 | 1505 shell 1506 | 1506 ship 1507 | 1507 shop 1508 | 1508 short 1509 | 1509 shot 1510 | 1510 should 1511 | 1511 show 1512 | 1512 side 1513 | 1513 sign 1514 | 1514 signatur 1515 | 1515 signific 1516 | 1516 similar 1517 | 1517 simpl 1518 | 1518 simpli 1519 | 1519 sinc 1520 | 1520 sincer 1521 | 1521 singl 1522 | 1522 sit 1523 | 1523 site 1524 | 1524 situat 1525 | 1525 six 1526 | 1526 size 1527 | 1527 skeptic 1528 | 1528 skill 1529 | 1529 skin 1530 | 1530 skip 1531 | 1531 sleep 1532 | 1532 slow 1533 | 1533 small 1534 | 1534 smart 1535 | 1535 smoke 1536 | 1536 smtp 1537 | 1537 snumber 1538 | 1538 so 1539 | 1539 social 1540 | 1540 societi 1541 | 1541 softwar 1542 | 1542 sold 1543 | 1543 solut 1544 | 1544 solv 1545 | 1545 some 1546 | 1546 someon 1547 | 1547 someth 1548 | 1548 sometim 1549 | 1549 son 1550 | 1550 song 1551 | 1551 soni 1552 | 1552 soon 1553 | 1553 sorri 1554 | 1554 sort 1555 | 
1555 sound 1556 | 1556 sourc 1557 | 1557 south 1558 | 1558 space 1559 | 1559 spain 1560 | 1560 spam 1561 | 1561 spamassassin 1562 | 1562 spamd 1563 | 1563 spammer 1564 | 1564 speak 1565 | 1565 spec 1566 | 1566 special 1567 | 1567 specif 1568 | 1568 specifi 1569 | 1569 speech 1570 | 1570 speed 1571 | 1571 spend 1572 | 1572 sponsor 1573 | 1573 sport 1574 | 1574 spot 1575 | 1575 src 1576 | 1576 ssh 1577 | 1577 st 1578 | 1578 stabl 1579 | 1579 staff 1580 | 1580 stai 1581 | 1581 stand 1582 | 1582 standard 1583 | 1583 star 1584 | 1584 start 1585 | 1585 state 1586 | 1586 statement 1587 | 1587 statu 1588 | 1588 step 1589 | 1589 steve 1590 | 1590 still 1591 | 1591 stock 1592 | 1592 stop 1593 | 1593 storag 1594 | 1594 store 1595 | 1595 stori 1596 | 1596 strategi 1597 | 1597 stream 1598 | 1598 street 1599 | 1599 string 1600 | 1600 strip 1601 | 1601 strong 1602 | 1602 structur 1603 | 1603 studi 1604 | 1604 stuff 1605 | 1605 stupid 1606 | 1606 style 1607 | 1607 subject 1608 | 1608 submit 1609 | 1609 subscrib 1610 | 1610 subscript 1611 | 1611 substanti 1612 | 1612 success 1613 | 1613 such 1614 | 1614 suffer 1615 | 1615 suggest 1616 | 1616 suit 1617 | 1617 sum 1618 | 1618 summari 1619 | 1619 summer 1620 | 1620 sun 1621 | 1621 super 1622 | 1622 suppli 1623 | 1623 support 1624 | 1624 suppos 1625 | 1625 sure 1626 | 1626 surpris 1627 | 1627 suse 1628 | 1628 suspect 1629 | 1629 sweet 1630 | 1630 switch 1631 | 1631 system 1632 | 1632 tab 1633 | 1633 tabl 1634 | 1634 tablet 1635 | 1635 tag 1636 | 1636 take 1637 | 1637 taken 1638 | 1638 talk 1639 | 1639 tape 1640 | 1640 target 1641 | 1641 task 1642 | 1642 tax 1643 | 1643 teach 1644 | 1644 team 1645 | 1645 tech 1646 | 1646 technic 1647 | 1647 techniqu 1648 | 1648 technolog 1649 | 1649 tel 1650 | 1650 telecom 1651 | 1651 telephon 1652 | 1652 tell 1653 | 1653 temperatur 1654 | 1654 templ 1655 | 1655 ten 1656 | 1656 term 1657 | 1657 termin 1658 | 1658 terror 1659 | 1659 terrorist 1660 | 1660 test 1661 | 1661 texa 1662 | 1662 text 1663 | 1663 
than 1664 | 1664 thank 1665 | 1665 that 1666 | 1666 the 1667 | 1667 thei 1668 | 1668 their 1669 | 1669 them 1670 | 1670 themselv 1671 | 1671 then 1672 | 1672 theori 1673 | 1673 there 1674 | 1674 therefor 1675 | 1675 these 1676 | 1676 thi 1677 | 1677 thing 1678 | 1678 think 1679 | 1679 thinkgeek 1680 | 1680 third 1681 | 1681 those 1682 | 1682 though 1683 | 1683 thought 1684 | 1684 thousand 1685 | 1685 thread 1686 | 1686 threat 1687 | 1687 three 1688 | 1688 through 1689 | 1689 thu 1690 | 1690 thursdai 1691 | 1691 ti 1692 | 1692 ticket 1693 | 1693 tim 1694 | 1694 time 1695 | 1695 tip 1696 | 1696 tire 1697 | 1697 titl 1698 | 1698 tm 1699 | 1699 to 1700 | 1700 todai 1701 | 1701 togeth 1702 | 1702 token 1703 | 1703 told 1704 | 1704 toll 1705 | 1705 tom 1706 | 1706 toner 1707 | 1707 toni 1708 | 1708 too 1709 | 1709 took 1710 | 1710 tool 1711 | 1711 top 1712 | 1712 topic 1713 | 1713 total 1714 | 1714 touch 1715 | 1715 toward 1716 | 1716 track 1717 | 1717 trade 1718 | 1718 tradit 1719 | 1719 traffic 1720 | 1720 train 1721 | 1721 transact 1722 | 1722 transfer 1723 | 1723 travel 1724 | 1724 treat 1725 | 1725 tree 1726 | 1726 tri 1727 | 1727 trial 1728 | 1728 trick 1729 | 1729 trip 1730 | 1730 troubl 1731 | 1731 true 1732 | 1732 truli 1733 | 1733 trust 1734 | 1734 truth 1735 | 1735 try 1736 | 1736 tue 1737 | 1737 tuesdai 1738 | 1738 turn 1739 | 1739 tv 1740 | 1740 two 1741 | 1741 type 1742 | 1742 uk 1743 | 1743 ultim 1744 | 1744 un 1745 | 1745 under 1746 | 1746 understand 1747 | 1747 unfortun 1748 | 1748 uniqu 1749 | 1749 unison 1750 | 1750 unit 1751 | 1751 univers 1752 | 1752 unix 1753 | 1753 unless 1754 | 1754 unlik 1755 | 1755 unlimit 1756 | 1756 unseen 1757 | 1757 unsolicit 1758 | 1758 unsubscrib 1759 | 1759 until 1760 | 1760 up 1761 | 1761 updat 1762 | 1762 upgrad 1763 | 1763 upon 1764 | 1764 urgent 1765 | 1765 url 1766 | 1766 us 1767 | 1767 usa 1768 | 1768 usag 1769 | 1769 usb 1770 | 1770 usd 1771 | 1771 usdollarnumb 1772 | 1772 useless 1773 | 1773 user 1774 | 1774 usr 
1775 | 1775 usual 1776 | 1776 util 1777 | 1777 vacat 1778 | 1778 valid 1779 | 1779 valu 1780 | 1780 valuabl 1781 | 1781 var 1782 | 1782 variabl 1783 | 1783 varieti 1784 | 1784 variou 1785 | 1785 ve 1786 | 1786 vendor 1787 | 1787 ventur 1788 | 1788 veri 1789 | 1789 verifi 1790 | 1790 version 1791 | 1791 via 1792 | 1792 video 1793 | 1793 view 1794 | 1794 virtual 1795 | 1795 visa 1796 | 1796 visit 1797 | 1797 visual 1798 | 1798 vnumber 1799 | 1799 voic 1800 | 1800 vote 1801 | 1801 vs 1802 | 1802 vulner 1803 | 1803 wa 1804 | 1804 wai 1805 | 1805 wait 1806 | 1806 wake 1807 | 1807 walk 1808 | 1808 wall 1809 | 1809 want 1810 | 1810 war 1811 | 1811 warm 1812 | 1812 warn 1813 | 1813 warranti 1814 | 1814 washington 1815 | 1815 wasn 1816 | 1816 wast 1817 | 1817 watch 1818 | 1818 water 1819 | 1819 we 1820 | 1820 wealth 1821 | 1821 weapon 1822 | 1822 web 1823 | 1823 weblog 1824 | 1824 websit 1825 | 1825 wed 1826 | 1826 wednesdai 1827 | 1827 week 1828 | 1828 weekli 1829 | 1829 weight 1830 | 1830 welcom 1831 | 1831 well 1832 | 1832 went 1833 | 1833 were 1834 | 1834 west 1835 | 1835 what 1836 | 1836 whatev 1837 | 1837 when 1838 | 1838 where 1839 | 1839 whether 1840 | 1840 which 1841 | 1841 while 1842 | 1842 white 1843 | 1843 whitelist 1844 | 1844 who 1845 | 1845 whole 1846 | 1846 whose 1847 | 1847 why 1848 | 1848 wi 1849 | 1849 wide 1850 | 1850 width 1851 | 1851 wife 1852 | 1852 will 1853 | 1853 william 1854 | 1854 win 1855 | 1855 window 1856 | 1856 wing 1857 | 1857 winner 1858 | 1858 wireless 1859 | 1859 wish 1860 | 1860 with 1861 | 1861 within 1862 | 1862 without 1863 | 1863 wnumberp 1864 | 1864 woman 1865 | 1865 women 1866 | 1866 won 1867 | 1867 wonder 1868 | 1868 word 1869 | 1869 work 1870 | 1870 worker 1871 | 1871 world 1872 | 1872 worldwid 1873 | 1873 worri 1874 | 1874 worst 1875 | 1875 worth 1876 | 1876 would 1877 | 1877 wouldn 1878 | 1878 write 1879 | 1879 written 1880 | 1880 wrong 1881 | 1881 wrote 1882 | 1882 www 1883 | 1883 ximian 1884 | 1884 xml 1885 | 1885 xp 1886 | 
1886 yahoo 1887 | 1887 ye 1888 | 1888 yeah 1889 | 1889 year 1890 | 1890 yesterdai 1891 | 1891 yet 1892 | 1892 york 1893 | 1893 you 1894 | 1894 young 1895 | 1895 your 1896 | 1896 yourself 1897 | 1897 zdnet 1898 | 1898 zero 1899 | 1899 zip 1900 | -------------------------------------------------------------------------------- /Exercise6/Figures/dataset1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/dataset1.png -------------------------------------------------------------------------------- /Exercise6/Figures/dataset2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/dataset2.png -------------------------------------------------------------------------------- /Exercise6/Figures/dataset3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/dataset3.png -------------------------------------------------------------------------------- /Exercise6/Figures/email.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/email.png -------------------------------------------------------------------------------- /Exercise6/Figures/email_cleaned.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/email_cleaned.png -------------------------------------------------------------------------------- /Exercise6/Figures/svm_c1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/svm_c1.png -------------------------------------------------------------------------------- /Exercise6/Figures/svm_c100.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/svm_c100.png -------------------------------------------------------------------------------- /Exercise6/Figures/svm_dataset2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/svm_dataset2.png -------------------------------------------------------------------------------- /Exercise6/Figures/svm_dataset3_best.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/svm_dataset3_best.png -------------------------------------------------------------------------------- /Exercise6/Figures/svm_predictors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise6/Figures/svm_predictors.png 
def plotData(X, y, grid=False):
    """
    Scatter-plot a binary-labeled 2-D dataset on the current figure.

    Positive examples (y == 1) are drawn as black-edged 'X' markers and
    negative examples (y == 0) as yellow-filled circles.

    Parameters
    ----------
    X : numpy ndarray
        An (M x 2) matrix; each row is one example's two features.

    y : numpy ndarray
        The data labels, expected to be 1 or 0.

    grid : bool (Optional)
        Whether to display the plot grid. False by default.
    """
    # Boolean masks selecting each class.
    positive_mask = y == 1
    negative_mask = y == 0

    # Draw the two classes with distinct markers.
    pyplot.plot(X[positive_mask, 0], X[positive_mask, 1], 'X', mew=1, ms=10, mec='k')
    pyplot.plot(X[negative_mask, 0], X[negative_mask, 1], 'o', mew=1, mfc='y', ms=10, mec='k')
    pyplot.grid(grid)
def svmTrain(X, Y, C, kernelFunction, tol=1e-3, max_passes=5, args=()):
    """
    Trains an SVM classifier using a simplified version of the SMO algorithm.

    Parameters
    ----------
    X : numpy ndarray
        (m x n) Matrix of training examples. Each row is a training example, and the
        jth column holds the jth feature.

    Y : numpy ndarray
        (m, ) A vector (1-D numpy array) containing 1 for positive examples and 0 for negative examples.

    C : float
        The standard SVM regularization parameter.

    kernelFunction : func
        A function handle which computes the kernel. The function should accept two vectors as
        inputs, and returns a scalar as output.

    tol : float, optional
        Tolerance value used for determining equality of floating point numbers.

    max_passes : int, optional
        Controls the number of iterations over the dataset (without changes to alpha)
        before the algorithm quits.

    args : tuple
        Extra arguments required for the kernel function, such as the sigma parameter for a
        Gaussian kernel.

    Returns
    -------
    model : dict
        The trained SVM model, with keys 'X', 'y', 'kernelFunction', 'b',
        'args', 'alphas' and 'w'.

    Notes
    -----
    This is a simplified version of the SMO algorithm for training SVMs. In practice, if
    you want to train an SVM classifier, we recommend using an optimized package such as:

    - LIBSVM (http://www.csie.ntu.edu.tw/~cjlin/libsvm/)
    - SVMLight (http://svmlight.joachims.org/)
    - scikit-learn (http://scikit-learn.org/stable/modules/svm.html) which contains python wrappers
      for the LIBSVM library.
    """
    # make sure data is signed int; astype() copies, so the caller's Y is
    # not mutated by the 0 -> -1 remapping below
    Y = Y.astype(int)
    # Dataset size parameters
    m, n = X.shape

    passes = 0          # consecutive full sweeps with no alpha change
    E = np.zeros(m)     # prediction errors E[i] = f(x_i) - y_i
    alphas = np.zeros(m)
    b = 0               # intercept term

    # Map 0 to -1 (SMO works with labels in {-1, +1})
    Y[Y == 0] = -1

    # Pre-compute the Kernel Matrix since our dataset is small
    # (in practice, optimized SVM packages that handle large datasets
    # gracefully will **not** do this)

    # We have implemented the optimized vectorized version of the Kernels here so
    # that the SVM training will run faster.
    # NOTE(review): dispatch is by the kernel function's __name__, so a
    # renamed or wrapped kernel silently falls into the generic loop below.
    if kernelFunction.__name__ == 'linearKernel':
        # Vectorized computation for the linear kernel
        # This is equivalent to computing the kernel on every pair of examples
        K = np.dot(X, X.T)
    elif kernelFunction.__name__ == 'gaussianKernel':
        # vectorized RBF Kernel
        # This is equivalent to computing the kernel on every pair of examples:
        # ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 <x_i, x_j>
        X2 = np.sum(X**2, axis=1)
        K = X2 + X2[:, None] - 2 * np.dot(X, X.T)

        if len(args) > 0:
            # args[0] is sigma, the RBF bandwidth
            K /= 2*args[0]**2

        K = np.exp(-K)
    else:
        # Generic kernel: fill the symmetric Gram matrix pairwise
        K = np.zeros((m, m))
        for i in range(m):
            for j in range(i, m):
                K[i, j] = kernelFunction(X[i, :], X[j, :])
                K[j, i] = K[i, j]

    while passes < max_passes:
        num_changed_alphas = 0
        for i in range(m):
            # Error on example i under the current model
            E[i] = b + np.sum(alphas * Y * K[:, i]) - Y[i]

            # KKT-violation check: only optimize alphas that violate the
            # conditions by more than tol
            if (Y[i]*E[i] < -tol and alphas[i] < C) or (Y[i]*E[i] > tol and alphas[i] > 0):
                # select the alpha_j randomly (any index != i)
                j = np.random.choice(list(range(i)) + list(range(i+1, m)), size=1)[0]

                E[j] = b + np.sum(alphas * Y * K[:, j]) - Y[j]

                alpha_i_old = alphas[i]
                alpha_j_old = alphas[j]

                # Box-constraint bounds L <= alpha_j <= H keeping
                # alpha_i*y_i + alpha_j*y_j constant
                if Y[i] == Y[j]:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                else:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])

                if L == H:
                    continue

                # Second derivative of the objective along the constraint line
                eta = 2 * K[i, j] - K[i, i] - K[j, j]

                # objective function positive definite, there will be a minimum along the direction
                # of linear equality constrain, and eta will be greater than zero
                # we are actually computing -eta here (so we skip of eta >= 0)
                if eta >= 0:
                    continue

                # Unconstrained optimum for alpha_j, then clip to [L, H]
                alphas[j] -= Y[j] * (E[i] - E[j])/eta
                alphas[j] = max(L, min(H, alphas[j]))

                # Skip negligible updates, restoring alpha_j
                if abs(alphas[j] - alpha_j_old) < tol:
                    alphas[j] = alpha_j_old
                    continue

                # alpha_i moves in the opposite direction to preserve the
                # linear constraint
                alphas[i] += Y[i]*Y[j]*(alpha_j_old - alphas[j])

                # Candidate intercepts after the update.
                # NOTE(review): the CS229 simplified-SMO handout uses K[i, i]
                # for the first correction term of b1; this code uses K[i, j]
                # — confirm against the original course implementation.
                b1 = b - E[i] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \
                    - Y[j] * (alphas[j] - alpha_j_old) * K[i, j]

                b2 = b - E[j] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \
                    - Y[j] * (alphas[j] - alpha_j_old) * K[j, j]

                # Pick b so the KKT conditions hold for non-bound alphas
                if 0 < alphas[i] < C:
                    b = b1
                elif 0 < alphas[j] < C:
                    b = b2
                else:
                    b = (b1 + b2)/2

                num_changed_alphas += 1
        # Count consecutive sweeps without progress; any change resets
        if num_changed_alphas == 0:
            passes += 1
        else:
            passes = 0

    # Keep only the support vectors (alpha > 0) in the returned model;
    # 'w' sums over all alphas, but zero alphas contribute nothing
    idx = alphas > 0
    model = {'X': X[idx, :],
             'y': Y[idx],
             'kernelFunction': kernelFunction,
             'b': b,
             'args': args,
             'alphas': alphas[idx],
             'w': np.dot(alphas * Y, X)}
    return model
208 | 209 | Returns 210 | ------- 211 | pred : array_like 212 | A (m,) sized vector of predictions {0, 1} values. 213 | """ 214 | # check if we are getting a vector. If so, then assume we only need to do predictions 215 | # for a single example 216 | if X.ndim == 1: 217 | X = X[np.newaxis, :] 218 | 219 | m = X.shape[0] 220 | p = np.zeros(m) 221 | pred = np.zeros(m) 222 | 223 | if model['kernelFunction'].__name__ == 'linearKernel': 224 | # we can use the weights and bias directly if working with the linear kernel 225 | p = np.dot(X, model['w']) + model['b'] 226 | elif model['kernelFunction'].__name__ == 'gaussianKernel': 227 | # vectorized RBF Kernel 228 | # This is equivalent to computing the kernel on every pair of examples 229 | X1 = np.sum(X**2, 1) 230 | X2 = np.sum(model['X']**2, 1) 231 | K = X2 + X1[:, None] - 2 * np.dot(X, model['X'].T) 232 | 233 | if len(model['args']) > 0: 234 | K /= 2*model['args'][0]**2 235 | 236 | K = np.exp(-K) 237 | p = np.dot(K, model['alphas']*model['y']) + model['b'] 238 | else: 239 | # other non-linear kernel 240 | for i in range(m): 241 | predictions = 0 242 | for j in range(model['X'].shape[0]): 243 | predictions += model['alphas'][j] * model['y'][j] \ 244 | * model['kernelFunction'](X[i, :], model['X'][j, :]) 245 | p[i] = predictions 246 | 247 | pred[p >= 0] = 1 248 | return pred 249 | 250 | 251 | def linearKernel(x1, x2): 252 | """ 253 | Returns a linear kernel between x1 and x2. 254 | 255 | Parameters 256 | ---------- 257 | x1 : numpy ndarray 258 | A 1-D vector. 259 | 260 | x2 : numpy ndarray 261 | A 1-D vector of same size as x1. 262 | 263 | Returns 264 | ------- 265 | : float 266 | The scalar amplitude. 267 | """ 268 | return np.dot(x1, x2) 269 | 270 | 271 | def visualizeBoundaryLinear(X, y, model): 272 | """ 273 | Plots a linear decision boundary learned by the SVM. 274 | 275 | Parameters 276 | ---------- 277 | X : array_like 278 | (m x 2) The training data with two features (to plot in a 2-D plane). 
279 | 280 | y : array_like 281 | (m, ) The data labels. 282 | 283 | model : dict 284 | Dictionary of model variables learned by SVM. 285 | """ 286 | w, b = model['w'], model['b'] 287 | xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100) 288 | yp = -(w[0] * xp + b)/w[1] 289 | 290 | plotData(X, y) 291 | pyplot.plot(xp, yp, '-b') 292 | 293 | 294 | def visualizeBoundary(X, y, model): 295 | """ 296 | Plots a non-linear decision boundary learned by the SVM and overlays the data on it. 297 | 298 | Parameters 299 | ---------- 300 | X : array_like 301 | (m x 2) The training data with two features (to plot in a 2-D plane). 302 | 303 | y : array_like 304 | (m, ) The data labels. 305 | 306 | model : dict 307 | Dictionary of model variables learned by SVM. 308 | """ 309 | plotData(X, y) 310 | 311 | # make classification predictions over a grid of values 312 | x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100) 313 | x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100) 314 | X1, X2 = np.meshgrid(x1plot, x2plot) 315 | 316 | vals = np.zeros(X1.shape) 317 | for i in range(X1.shape[1]): 318 | this_X = np.stack((X1[:, i], X2[:, i]), axis=1) 319 | vals[:, i] = svmPredict(model, this_X) 320 | 321 | pyplot.contour(X1, X2, vals, colors='y', linewidths=2) 322 | pyplot.pcolormesh(X1, X2, vals, cmap='YlGnBu', alpha=0.25, edgecolors='None', lw=0) 323 | pyplot.grid(False) 324 | 325 | 326 | def getVocabList(): 327 | """ 328 | Reads the fixed vocabulary list in vocab.txt and returns a cell array of the words 329 | % vocabList = GETVOCABLIST() reads the fixed vocabulary list in vocab.txt 330 | % and returns a cell array of the words in vocabList. 
331 | 332 | :return: 333 | """ 334 | vocabList = np.genfromtxt(join('Data', 'vocab.txt'), dtype=object) 335 | return list(vocabList[:, 1].astype(str)) 336 | 337 | 338 | class PorterStemmer: 339 | """ 340 | Porter Stemming Algorithm 341 | 342 | This is the Porter stemming algorithm, ported to Python from the 343 | version coded up in ANSI C by the author. It may be be regarded 344 | as canonical, in that it follows the algorithm presented in 345 | 346 | Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14, 347 | no. 3, pp 130-137, 348 | 349 | only differing from it at the points maked --DEPARTURE-- below. 350 | 351 | See also http://www.tartarus.org/~martin/PorterStemmer 352 | 353 | The algorithm as described in the paper could be exactly replicated 354 | by adjusting the points of DEPARTURE, but this is barely necessary, 355 | because (a) the points of DEPARTURE are definitely improvements, and 356 | (b) no encoding of the Porter stemmer I have seen is anything like 357 | as exact as this version, even with the points of DEPARTURE! 358 | 359 | Vivake Gupta (v@nano.com) 360 | 361 | Release 1: January 2001 362 | 363 | Further adjustments by Santiago Bruno (bananabruno@gmail.com) 364 | to allow word input not restricted to one word per line, leading 365 | to: 366 | 367 | release 2: July 2008 368 | """ 369 | def __init__(self): 370 | """ 371 | The main part of the stemming algorithm starts here. 372 | b is a buffer holding a word to be stemmed. The letters are in b[k0], 373 | b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is 374 | readjusted downwards as the stemming progresses. Zero termination is 375 | not in fact used in the algorithm. 376 | 377 | Note that only lower case sequences are stemmed. Forcing to lower case 378 | should be done before stem(...) is called. 
379 | """ 380 | self.b = "" # buffer for word to be stemmed 381 | self.k = 0 382 | self.k0 = 0 383 | self.j = 0 # j is a general offset into the string 384 | 385 | def cons(self, i): 386 | """cons(i) is TRUE <=> b[i] is a consonant.""" 387 | if self.b[i] in 'aeiou': 388 | return 0 389 | if self.b[i] == 'y': 390 | if i == self.k0: 391 | return 1 392 | else: 393 | return not self.cons(i - 1) 394 | return 1 395 | 396 | def m(self): 397 | """ 398 | m() measures the number of consonant sequences between k0 and j. 399 | if c is a consonant sequence and v a vowel sequence, and <..> 400 | indicates arbitrary presence, 401 | 402 | gives 0 403 | vc gives 1 404 | vcvc gives 2 405 | vcvcvc gives 3 406 | .... 407 | """ 408 | n = 0 409 | i = self.k0 410 | while 1: 411 | if i > self.j: 412 | return n 413 | if not self.cons(i): 414 | break 415 | i = i + 1 416 | i = i + 1 417 | while 1: 418 | while 1: 419 | if i > self.j: 420 | return n 421 | if self.cons(i): 422 | break 423 | i = i + 1 424 | i = i + 1 425 | n = n + 1 426 | while 1: 427 | if i > self.j: 428 | return n 429 | if not self.cons(i): 430 | break 431 | i = i + 1 432 | i = i + 1 433 | 434 | def vowelinstem(self): 435 | """vowelinstem() is TRUE <=> k0,...j contains a vowel""" 436 | for i in range(self.k0, self.j + 1): 437 | if not self.cons(i): 438 | return 1 439 | return 0 440 | 441 | def doublec(self, j): 442 | """ doublec(j) is TRUE <=> j,(j-1) contain a double consonant. """ 443 | if j < (self.k0 + 1): 444 | return 0 445 | if self.b[j] != self.b[j-1]: 446 | return 0 447 | return self.cons(j) 448 | 449 | def cvc(self, i): 450 | """ 451 | cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant 452 | and also if the second c is not w,x or y. this is used when trying to 453 | restore an e at the end of a short e.g. 454 | 455 | cav(e), lov(e), hop(e), crim(e), but 456 | snow, box, tray. 
457 | """ 458 | if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2): 459 | return 0 460 | ch = self.b[i] 461 | if ch in 'wxy': 462 | return 0 463 | return 1 464 | 465 | def ends(self, s): 466 | """ends(s) is TRUE <=> k0,...k ends with the string s.""" 467 | length = len(s) 468 | if s[length - 1] != self.b[self.k]: # tiny speed-up 469 | return 0 470 | if length > (self.k - self.k0 + 1): 471 | return 0 472 | if self.b[self.k-length+1:self.k+1] != s: 473 | return 0 474 | self.j = self.k - length 475 | return 1 476 | 477 | def setto(self, s): 478 | """setto(s) sets (j+1),...k to the characters in the string s, readjusting k.""" 479 | length = len(s) 480 | self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:] 481 | self.k = self.j + length 482 | 483 | def r(self, s): 484 | """r(s) is used further down.""" 485 | if self.m() > 0: 486 | self.setto(s) 487 | 488 | def step1ab(self): 489 | """step1ab() gets rid of plurals and -ed or -ing. e.g. 490 | 491 | caresses -> caress 492 | ponies -> poni 493 | ties -> ti 494 | caress -> caress 495 | cats -> cat 496 | 497 | feed -> feed 498 | agreed -> agree 499 | disabled -> disable 500 | 501 | matting -> mat 502 | mating -> mate 503 | meeting -> meet 504 | milling -> mill 505 | messing -> mess 506 | 507 | meetings -> meet 508 | """ 509 | if self.b[self.k] == 's': 510 | if self.ends("sses"): 511 | self.k = self.k - 2 512 | elif self.ends("ies"): 513 | self.setto("i") 514 | elif self.b[self.k - 1] != 's': 515 | self.k = self.k - 1 516 | if self.ends("eed"): 517 | if self.m() > 0: 518 | self.k = self.k - 1 519 | elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem(): 520 | self.k = self.j 521 | if self.ends("at"): 522 | self.setto("ate") 523 | elif self.ends("bl"): 524 | self.setto("ble") 525 | elif self.ends("iz"): 526 | self.setto("ize") 527 | elif self.doublec(self.k): 528 | self.k = self.k - 1 529 | ch = self.b[self.k] 530 | if ch in 'lsz': 531 | self.k += 1 532 | elif self.m() == 1 and 
self.cvc(self.k): 533 | self.setto("e") 534 | 535 | def step1c(self): 536 | """step1c() turns terminal y to i when there is another vowel in the stem.""" 537 | if self.ends("y") and self.vowelinstem(): 538 | self.b = self.b[:self.k] + 'i' + self.b[self.k+1:] 539 | 540 | def step2(self): 541 | """step2() maps double suffices to single ones. 542 | so -ization ( = -ize plus -ation) maps to -ize etc. note that the 543 | string before the suffix must give m() > 0. 544 | """ 545 | if self.b[self.k - 1] == 'a': 546 | if self.ends("ational"): self.r("ate") 547 | elif self.ends("tional"): self.r("tion") 548 | elif self.b[self.k - 1] == 'c': 549 | if self.ends("enci"): self.r("ence") 550 | elif self.ends("anci"): self.r("ance") 551 | elif self.b[self.k - 1] == 'e': 552 | if self.ends("izer"): self.r("ize") 553 | elif self.b[self.k - 1] == 'l': 554 | if self.ends("bli"): self.r("ble") # --DEPARTURE-- 555 | # To match the published algorithm, replace this phrase with 556 | # if self.ends("abli"): self.r("able") 557 | elif self.ends("alli"): self.r("al") 558 | elif self.ends("entli"): self.r("ent") 559 | elif self.ends("eli"): self.r("e") 560 | elif self.ends("ousli"): self.r("ous") 561 | elif self.b[self.k - 1] == 'o': 562 | if self.ends("ization"): self.r("ize") 563 | elif self.ends("ation"): self.r("ate") 564 | elif self.ends("ator"): self.r("ate") 565 | elif self.b[self.k - 1] == 's': 566 | if self.ends("alism"): self.r("al") 567 | elif self.ends("iveness"): self.r("ive") 568 | elif self.ends("fulness"): self.r("ful") 569 | elif self.ends("ousness"): self.r("ous") 570 | elif self.b[self.k - 1] == 't': 571 | if self.ends("aliti"): self.r("al") 572 | elif self.ends("iviti"): self.r("ive") 573 | elif self.ends("biliti"): self.r("ble") 574 | elif self.b[self.k - 1] == 'g': # --DEPARTURE-- 575 | if self.ends("logi"): self.r("log") 576 | # To match the published algorithm, delete this phrase 577 | 578 | def step3(self): 579 | """step3() dels with -ic-, -full, -ness etc. 
similar strategy to step2.""" 580 | if self.b[self.k] == 'e': 581 | if self.ends("icate"): self.r("ic") 582 | elif self.ends("ative"): self.r("") 583 | elif self.ends("alize"): self.r("al") 584 | elif self.b[self.k] == 'i': 585 | if self.ends("iciti"): self.r("ic") 586 | elif self.b[self.k] == 'l': 587 | if self.ends("ical"): self.r("ic") 588 | elif self.ends("ful"): self.r("") 589 | elif self.b[self.k] == 's': 590 | if self.ends("ness"): self.r("") 591 | 592 | def step4(self): 593 | """step4() takes off -ant, -ence etc., in context vcvc.""" 594 | if self.b[self.k - 1] == 'a': 595 | if self.ends("al"): pass 596 | else: return 597 | elif self.b[self.k - 1] == 'c': 598 | if self.ends("ance"): pass 599 | elif self.ends("ence"): pass 600 | else: return 601 | elif self.b[self.k - 1] == 'e': 602 | if self.ends("er"): pass 603 | else: return 604 | elif self.b[self.k - 1] == 'i': 605 | if self.ends("ic"): pass 606 | else: return 607 | elif self.b[self.k - 1] == 'l': 608 | if self.ends("able"): pass 609 | elif self.ends("ible"): pass 610 | else: return 611 | elif self.b[self.k - 1] == 'n': 612 | if self.ends("ant"): pass 613 | elif self.ends("ement"): pass 614 | elif self.ends("ment"): pass 615 | elif self.ends("ent"): pass 616 | else: return 617 | elif self.b[self.k - 1] == 'o': 618 | if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass 619 | elif self.ends("ou"): pass 620 | # takes care of -ous 621 | else: return 622 | elif self.b[self.k - 1] == 's': 623 | if self.ends("ism"): pass 624 | else: return 625 | elif self.b[self.k - 1] == 't': 626 | if self.ends("ate"): pass 627 | elif self.ends("iti"): pass 628 | else: return 629 | elif self.b[self.k - 1] == 'u': 630 | if self.ends("ous"): pass 631 | else: return 632 | elif self.b[self.k - 1] == 'v': 633 | if self.ends("ive"): pass 634 | else: return 635 | elif self.b[self.k - 1] == 'z': 636 | if self.ends("ize"): pass 637 | else: return 638 | else: 639 | return 640 | if self.m() > 1: 641 | self.k = 
self.j 642 | 643 | def step5(self): 644 | """step5() removes a final -e if m() > 1, and changes -ll to -l if 645 | m() > 1. 646 | """ 647 | self.j = self.k 648 | if self.b[self.k] == 'e': 649 | a = self.m() 650 | if a > 1 or (a == 1 and not self.cvc(self.k-1)): 651 | self.k = self.k - 1 652 | if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1: 653 | self.k = self.k -1 654 | 655 | def stem(self, p, i=0, j=None): 656 | """In stem(p,i,j), p is a char pointer, and the string to be stemmed 657 | is from p[i] to p[j] inclusive. Typically i is zero and j is the 658 | offset to the last character of a string, (p[j+1] == '\0'). The 659 | stemmer adjusts the characters p[i] ... p[j] and returns the new 660 | end-point of the string, k. Stemming never increases word length, so 661 | i <= k <= j. To turn the stemmer into a module, declare 'stem' as 662 | extern, and delete the remainder of this file. 663 | """ 664 | # copy the parameters into statics 665 | self.b = p 666 | self.k = j or len(p) - 1 667 | self.k0 = i 668 | if self.k <= self.k0 + 1: 669 | return self.b # --DEPARTURE-- 670 | 671 | # With this line, strings of length 1 or 2 don't go through the 672 | # stemming process, although no mention is made of this in the 673 | # published algorithm. Remove the line to match the published 674 | # algorithm. 
675 | 676 | self.step1ab() 677 | self.step1c() 678 | self.step2() 679 | self.step3() 680 | self.step4() 681 | self.step5() 682 | return self.b[self.k0:self.k+1] 683 | 684 | 685 | class Grader(SubmissionBase): 686 | # Random Test Cases 687 | x1 = np.sin(np.arange(1, 11)) 688 | x2 = np.cos(np.arange(1, 11)) 689 | ec = 'the quick brown fox jumped over the lazy dog' 690 | wi = np.abs(np.round(x1 * 1863)).astype(int) 691 | wi = np.concatenate([wi, wi]) 692 | 693 | def __init__(self): 694 | part_names = ['Gaussian Kernel', 695 | 'Parameters (C, sigma) for Dataset 3', 696 | 'Email Processing', 697 | 'Email Feature Extraction'] 698 | super().__init__('support-vector-machines', part_names) 699 | 700 | def __iter__(self): 701 | for part_id in range(1, 5): 702 | try: 703 | func = self.functions[part_id] 704 | # Each part has different expected arguments/different function 705 | if part_id == 1: 706 | res = func(self.x1, self.x2, 2) 707 | elif part_id == 2: 708 | res = np.hstack(func()).tolist() 709 | elif part_id == 3: 710 | # add one to be compatible with matlab grader 711 | res = [ind+1 for ind in func(self.ec, False)] 712 | elif part_id == 4: 713 | res = func(self.wi) 714 | else: 715 | raise KeyError 716 | yield part_id, res 717 | except KeyError: 718 | yield part_id, 0 719 | -------------------------------------------------------------------------------- /Exercise7/Data/bird_small.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Data/bird_small.mat -------------------------------------------------------------------------------- /Exercise7/Data/bird_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Data/bird_small.png 
-------------------------------------------------------------------------------- /Exercise7/Data/ex7data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Data/ex7data1.mat -------------------------------------------------------------------------------- /Exercise7/Data/ex7data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Data/ex7data2.mat -------------------------------------------------------------------------------- /Exercise7/Data/ex7faces.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Data/ex7faces.mat -------------------------------------------------------------------------------- /Exercise7/Figures/bird_compression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/bird_compression.png -------------------------------------------------------------------------------- /Exercise7/Figures/faces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/faces.png -------------------------------------------------------------------------------- /Exercise7/Figures/faces_original.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/faces_original.png -------------------------------------------------------------------------------- /Exercise7/Figures/faces_reconstructed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/faces_reconstructed.png -------------------------------------------------------------------------------- /Exercise7/Figures/kmeans_result.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/kmeans_result.png -------------------------------------------------------------------------------- /Exercise7/Figures/pca_components.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/pca_components.png -------------------------------------------------------------------------------- /Exercise7/Figures/pca_reconstruction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/Figures/pca_reconstruction.png -------------------------------------------------------------------------------- /Exercise7/None0000000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/None0000000.png 
--------------------------------------------------------------------------------
/Exercise7/__pycache__/utils.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/__pycache__/utils.cpython-36.pyc
--------------------------------------------------------------------------------
/Exercise7/token.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise7/token.pkl
--------------------------------------------------------------------------------
/Exercise7/utils.py:
--------------------------------------------------------------------------------
import sys
import numpy as np
from matplotlib import pyplot
from matplotlib.animation import FuncAnimation
import matplotlib as mpl

sys.path.append('..')
from submission import SubmissionBase


def displayData(X, example_width=None, figsize=(10, 10)):
    """
    Displays 2D data in a nice grid.

    Parameters
    ----------
    X : array_like
        The input data of size (m x n) where m is the number of examples and n is the number of
        features.

    example_width : int, optional
        The width of each 2-D image in pixels. If not provided, the image is assumed to be square,
        and the width is the floor of the square root of total number of pixels.

    figsize : tuple, optional
        A 2-element tuple indicating the width and height of figure in inches.
    """
    # Compute rows, cols
    if X.ndim == 2:
        m, n = X.shape
    elif X.ndim == 1:
        n = X.size
        m = 1
        X = X[None]  # Promote to a 2 dimensional array
    else:
        raise IndexError('Input X should be 1 or 2 dimensional.')

    example_width = example_width or int(np.round(np.sqrt(n)))
    example_height = int(n / example_width)

    # Compute number of items to display (roughly square grid of images)
    display_rows = int(np.floor(np.sqrt(m)))
    display_cols = int(np.ceil(m / display_rows))

    fig, ax_array = pyplot.subplots(display_rows, display_cols, figsize=figsize)
    fig.subplots_adjust(wspace=0.025, hspace=0.025)

    # pyplot.subplots returns a bare Axes for a 1x1 grid; normalize to a flat list
    ax_array = [ax_array] if m == 1 else ax_array.ravel()

    for i, ax in enumerate(ax_array):
        # order='F' (column-major) matches the MATLAB-style flattening of the images
        ax.imshow(X[i].reshape(example_height, example_width, order='F'), cmap='gray')
        ax.axis('off')


def featureNormalize(X):
    """
    Normalizes the features in X returns a normalized version of X where the mean value of each
    feature is 0 and the standard deviation is 1. This is often a good preprocessing step to do when
    working with learning algorithms.

    Parameters
    ----------
    X : array_like
        An dataset which is a (m x n) matrix, where m is the number of examples,
        and n is the number of dimensions for each example.

    Returns
    -------
    X_norm : array_like
        The normalized input dataset.

    mu : array_like
        A vector of size n corresponding to the mean for each dimension across all examples.

    sigma : array_like
        A vector of size n corresponding to the standard deviations for each dimension across
        all examples.
    """
    mu = np.mean(X, axis=0)
    X_norm = X - mu

    # ddof=1: sample standard deviation, matching MATLAB's std()
    sigma = np.std(X_norm, axis=0, ddof=1)
    X_norm /= sigma
    return X_norm, mu, sigma


def plotProgresskMeans(i, X, centroid_history, idx_history):
    """
    A helper function that displays the progress of k-Means as it is running. It is intended for use
    only with 2D data. It plots data points with colors assigned to each centroid. With the
    previous centroids, it also plots a line between the previous locations and current locations
    of the centroids.

    Parameters
    ----------
    i : int
        Current iteration number of k-means. Used for matplotlib animation function.

    X : array_like
        The dataset, which is a matrix (m x n). Note since the plot only supports 2D data, n should
        be equal to 2.

    centroid_history : list
        A list of computed centroids for all iteration.

    idx_history : list
        A list of computed assigned indices for all iterations.
    """
    K = centroid_history[0].shape[0]
    # redraw the whole figure each frame (called repeatedly by FuncAnimation)
    pyplot.gcf().clf()
    cmap = pyplot.cm.rainbow
    norm = mpl.colors.Normalize(vmin=0, vmax=2)

    for k in range(K):
        # trajectory of centroid k over iterations 0..i
        current = np.stack([c[k, :] for c in centroid_history[:i+1]], axis=0)
        pyplot.plot(current[:, 0], current[:, 1],
                    '-Xk',
                    mec='k',
                    lw=2,
                    ms=10,
                    mfc=cmap(norm(k)),
                    mew=2)

    pyplot.scatter(X[:, 0], X[:, 1],
                   c=idx_history[i],
                   cmap=cmap,
                   marker='o',
                   s=8**2,
                   linewidths=1,)
    pyplot.grid(False)
    pyplot.title('Iteration number %d' % (i+1))


def runkMeans(X, centroids, findClosestCentroids, computeCentroids,
              max_iters=10, plot_progress=False):
    """
    Runs the K-means algorithm.

    Parameters
    ----------
    X : array_like
        The data set of size (m, n). Each row of X is a single example of n dimensions. The
        data set is a total of m examples.

    centroids : array_like
        Initial centroid location for each clusters. This is a matrix of size (K, n). K is the total
        number of clusters and n is the dimensions of each data point.

    findClosestCentroids : func
        A function (implemented by student) reference which computes the cluster assignment for
        each example.

    computeCentroids : func
        A function(implemented by student) reference which computes the centroid of each cluster.

    max_iters : int, optional
        Specifies the total number of iterations of K-Means to execute.

    plot_progress : bool, optional
        A flag that indicates if the function should also plot its progress as the learning happens.
        This is set to false by default.

    Returns
    -------
    centroids : array_like
        A (K x n) matrix of the computed (updated) centroids.
    idx : array_like
        A vector of size (m,) for cluster assignment for each example in the dataset. Each entry
        in idx is within the range [0 ... K-1].

    anim : FuncAnimation, optional
        A matplotlib animation object which can be used to embed a video within the jupyter
        notebook. This is only returned if `plot_progress` is `True`.
    """
    K = centroids.shape[0]
    idx = None
    idx_history = []
    centroid_history = []

    # alternate assignment and update steps for max_iters iterations
    for i in range(max_iters):
        idx = findClosestCentroids(X, centroids)

        if plot_progress:
            idx_history.append(idx)
            centroid_history.append(centroids)

        centroids = computeCentroids(X, idx, K)

    if plot_progress:
        fig = pyplot.figure()
        anim = FuncAnimation(fig, plotProgresskMeans,
                             frames=max_iters,
                             interval=500,
                             repeat_delay=2,
                             fargs=(X, centroid_history, idx_history))
        # NOTE: three return values in this branch, two otherwise
        return centroids, idx, anim

    return centroids, idx


class Grader(SubmissionBase):
    # Random Test Cases: deterministic fixtures fed to the student-implemented
    # functions being graded.
    X = np.sin(np.arange(1, 166)).reshape(15, 11, order='F')
    Z = np.cos(np.arange(1, 122)).reshape(11, 11, order='F')
    C = Z[:5, :]
    idx = np.arange(1, 16) % 3

    def __init__(self):
        part_names = ['Find Closest Centroids (k-Means)',
                      'Compute Centroid Means (k-Means)',
                      'PCA',
                      'Project Data (PCA)',
                      'Recover Data (PCA)']
        super().__init__('k-means-clustering-and-pca', part_names)

    def __iter__(self):
        # Yields (part_id, result) pairs; a part with no registered function
        # yields 0 so the submission can still proceed.
        for part_id in range(1, 6):
            try:
                func = self.functions[part_id]
                # Each part has different expected arguments/different function
                if part_id == 1:
                    # 1-based indices to be compatible with the MATLAB grader
                    res = 1 + func(self.X, self.C)
                elif part_id == 2:
                    res = func(self.X, self.idx, 3)
                elif part_id == 3:
                    U, S = func(self.X)
                    res = np.hstack([U.ravel('F'), np.diag(S).ravel('F')]).tolist()
                elif part_id == 4:
                    res = func(self.X, self.Z, 5)
                elif part_id == 5:
                    res = func(self.X[:, :5], self.Z, 5)
                else:
                    raise KeyError
                yield part_id, res
            except KeyError:
                yield part_id, 0
--------------------------------------------------------------------------------
/Exercise8/Data/ex8_movieParams.mat:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise8/Data/ex8_movieParams.mat -------------------------------------------------------------------------------- /Exercise8/Data/ex8_movies.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise8/Data/ex8_movies.mat -------------------------------------------------------------------------------- /Exercise8/Data/ex8data1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise8/Data/ex8data1.mat -------------------------------------------------------------------------------- /Exercise8/Data/ex8data2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise8/Data/ex8data2.mat -------------------------------------------------------------------------------- /Exercise8/Data/movie_ids.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/suraggupta/coursera-machine-learning-solutions-python/ef75456369d22d18edbc41ec27900a75256d1e3b/Exercise8/Data/movie_ids.txt -------------------------------------------------------------------------------- /Exercise8/Figures/gaussian_fit.png: -------------------------------------------------------------------------------- 
def normalizeRatings(Y, R):
    """
    Preprocess data by subtracting mean rating for every movie (every row).

    Parameters
    ----------
    Y : array_like
        The user ratings for all movies. A matrix of shape (num_movies x num_users).

    R : array_like
        Indicator matrix for movies rated by users. A matrix of shape (num_movies x num_users).

    Returns
    -------
    Ynorm : array_like
        A matrix of same shape as Y, after mean normalization. Entries for
        unrated (movie, user) pairs remain 0.

    Ymean : array_like
        A vector of shape (num_movies, ) containing the mean rating for each movie.
        Movies with no ratings at all get a mean of 0.
    """
    m, n = Y.shape
    Ymean = np.zeros(m)
    Ynorm = np.zeros(Y.shape)

    for i in range(m):
        idx = R[i, :] == 1
        # Guard against movies nobody rated: np.mean of an empty slice
        # returns NaN (with a RuntimeWarning), which would poison Ymean
        # and propagate into downstream predictions. Leave such rows at 0.
        if np.any(idx):
            Ymean[i] = np.mean(Y[i, idx])
            Ynorm[i, idx] = Y[i, idx] - Ymean[i]

    return Ynorm, Ymean
92 | """ 93 | numgrad = np.zeros(theta.shape) 94 | perturb = np.diag(e * np.ones(theta.shape)) 95 | for i in range(theta.size): 96 | loss1, _ = J(theta - perturb[:, i]) 97 | loss2, _ = J(theta + perturb[:, i]) 98 | numgrad[i] = (loss2 - loss1)/(2*e) 99 | return numgrad 100 | 101 | 102 | def checkCostFunction(cofiCostFunc, lambda_=0.): 103 | """ 104 | Creates a collaborative filtering problem to check your cost function and gradients. 105 | It will output the analytical gradients produced by your code and the numerical gradients 106 | (computed using computeNumericalGradient). These two gradient computations should result 107 | in very similar values. 108 | 109 | Parameters 110 | ---------- 111 | cofiCostFunc: func 112 | Implementation of the cost function. 113 | 114 | lambda_ : float, optional 115 | The regularization parameter. 116 | """ 117 | # Create small problem 118 | X_t = np.random.rand(4, 3) 119 | Theta_t = np.random.rand(5, 3) 120 | 121 | # Zap out most entries 122 | Y = np.dot(X_t, Theta_t.T) 123 | Y[np.random.rand(*Y.shape) > 0.5] = 0 124 | R = np.zeros(Y.shape) 125 | R[Y != 0] = 1 126 | 127 | # Run Gradient Checking 128 | X = np.random.randn(*X_t.shape) 129 | Theta = np.random.randn(*Theta_t.shape) 130 | num_movies, num_users = Y.shape 131 | num_features = Theta_t.shape[1] 132 | 133 | params = np.concatenate([X.ravel(), Theta.ravel()]) 134 | numgrad = computeNumericalGradient( 135 | lambda x: cofiCostFunc(x, Y, R, num_users, num_movies, num_features, lambda_), params) 136 | 137 | cost, grad = cofiCostFunc(params, Y, R, num_users,num_movies, num_features, lambda_) 138 | 139 | print(np.stack([numgrad, grad], axis=1)) 140 | print('\nThe above two columns you get should be very similar.' 
def multivariateGaussian(X, mu, Sigma2):
    """
    Computes the probability density function of the multivariate gaussian distribution.

    Parameters
    ----------
    X : array_like
        The dataset of shape (m x n). Where there are m examples of n-dimensions.

    mu : array_like
        A vector of shape (n,) contains the means for each dimension (feature).

    Sigma2 : array_like
        Either a vector of shape (n,) containing the variances of independent features
        (i.e. it is the diagonal of the correlation matrix), or the full
        correlation matrix of shape (n x n) which can represent dependent features.

    Returns
    ------
    p : array_like
        A vector of shape (m,) which contains the computed probabilities at each of the
        provided examples.
    """
    k = mu.size

    # A 1-D Sigma2 holds only the per-feature variances: expand it into
    # the corresponding diagonal covariance matrix.
    cov = np.diag(Sigma2) if Sigma2.ndim == 1 else Sigma2

    centered = X - mu
    # Density = (2*pi)^(-k/2) * |cov|^(-1/2) * exp(-1/2 * x' cov^-1 x),
    # evaluated row-wise; pinv tolerates a (near-)singular covariance.
    norm_const = (2 * np.pi) ** (- k / 2) * np.linalg.det(cov) ** (-0.5)
    quad_form = np.sum(np.dot(centered, np.linalg.pinv(cov)) * centered, axis=1)
    return norm_const * np.exp(-0.5 * quad_form)
class Grader(SubmissionBase):
    """Grader for the anomaly detection and recommender systems exercise.

    Registered part functions are evaluated against fixed deterministic
    test fixtures; iteration yields (part_id, result) pairs for submission.
    """

    # Random Test Cases (deterministic trig-based fixtures)
    n_u = 3  # users
    n_m = 4  # movies
    n = 5    # features
    X = np.sin(np.arange(1, 1 + n_m * n)).reshape(n_m, n, order='F')
    Theta = np.cos(np.arange(1, 1 + n_u * n)).reshape(n_u, n, order='F')
    Y = np.sin(np.arange(1, 1 + 2 * n_m * n_u, 2)).reshape(n_m, n_u, order='F')
    R = Y > 0.5
    # NOTE: pval must be built from Y *before* the mask below zeroes it out.
    pval = np.concatenate([abs(Y.ravel('F')), [0.001], [1]])
    Y = Y * R  # set 'Y' values to 0 for movies not reviewed

    yval = np.concatenate([R.ravel('F'), [1], [0]])

    params = np.concatenate([X.ravel(), Theta.ravel()])

    def __init__(self):
        part_names = ['Estimate Gaussian Parameters',
                      'Select Threshold',
                      'Collaborative Filtering Cost',
                      'Collaborative Filtering Gradient',
                      'Regularized Cost',
                      'Regularized Gradient']
        super().__init__('anomaly-detection-and-recommender-systems', part_names)

    def _grad_to_list(self, grad):
        # Split the unrolled gradient back into its X- and Theta-parts and
        # serialize both in column-major (Fortran) order for the grader.
        split = self.n_m * self.n
        xgrad = grad[:split].reshape(self.n_m, self.n)
        thetagrad = grad[split:].reshape(self.n_u, self.n)
        return np.hstack([xgrad.ravel('F'), thetagrad.ravel('F')]).tolist()

    def __iter__(self):
        for part_id in range(1, 7):
            try:
                func = self.functions[part_id]

                # Each part has different expected arguments/different function
                if part_id == 1:
                    res = np.hstack(func(self.X)).tolist()
                elif part_id == 2:
                    res = np.hstack(func(self.yval, self.pval)).tolist()
                elif part_id == 3:
                    res, _ = func(self.params, self.Y, self.R, self.n_u, self.n_m, self.n)
                elif part_id == 4:
                    _, grad = func(self.params, self.Y, self.R, self.n_u, self.n_m, self.n, 0)
                    res = self._grad_to_list(grad)
                elif part_id == 5:
                    res, _ = func(self.params, self.Y, self.R, self.n_u, self.n_m, self.n, 1.5)
                elif part_id == 6:
                    _, grad = func(self.params, self.Y, self.R, self.n_u, self.n_m, self.n, 1.5)
                    res = self._grad_to_list(grad)
                else:
                    raise KeyError
                yield part_id, res
            except KeyError:
                # Part not implemented/registered: report a zero placeholder.
                yield part_id, 0
Credit for the template for the Python submission goes to [@dibgerge](https://github.com/dibgerge).
class SubmissionBase:
    """Base class for submitting programming-assignment solutions to Coursera.

    Subclasses register per-part solution functions via item assignment
    (``grader[part_id] = func``) and override ``__iter__`` to yield
    ``(part_id, result)`` pairs, which ``grade`` serializes and posts.
    """

    submit_url = 'https://www-origin.coursera.org/api/' \
                 'onDemandProgrammingImmediateFormSubmissions.v1'
    save_file = 'token.pkl'

    def __init__(self, assignment_slug, part_names):
        self.assignment_slug = assignment_slug
        self.part_names = part_names
        self.login = None
        self.token = None
        self.functions = OrderedDict()
        self.args = dict()

    def grade(self):
        """Evaluate every registered part, submit, and print the score table."""
        print('\nSubmitting Solutions | Programming Exercise %s\n' % self.assignment_slug)
        self.login_prompt()

        # Evaluate the different parts of exercise
        parts = OrderedDict()
        for part_id, result in self:
            parts[str(part_id)] = {'output': sprintf('%0.5f ', result)}
        _, raw_response = self.request(parts)
        response = json.loads(raw_response)

        # if an error was returned, print it and stop
        if 'errorMessage' in response:
            print(response['errorMessage'])
            return

        # Print the grading table
        row = '%43s | %9s | %-s'
        print(row % ('Part Name', 'Score', 'Feedback'))
        print(row % ('---------', '-----', '--------'))
        for part in parts:
            feedback = response['partFeedbacks'][part]
            evaluation = response['partEvaluations'][part]
            score = '%d / %3d' % (evaluation['score'], evaluation['maxScore'])
            print(row % (self.part_names[int(part) - 1], score, feedback))
        overall = response['evaluation']
        total_score = '%d / %d' % (overall['score'], overall['maxScore'])
        print(' --------------------------------')
        print('%43s | %9s | %-s\n' % (' ', total_score, ' '))

    def login_prompt(self):
        """Restore cached login/token if the user accepts, else prompt anew."""
        if os.path.isfile(self.save_file):
            with open(self.save_file, 'rb') as f:
                cached_login, cached_token = pickle.load(f)
            answer = input('Use token from last successful submission (%s)? (Y/n): ' % cached_login)

            if answer == '' or answer[0] in ('Y', 'y'):
                self.login, self.token = cached_login, cached_token
                return
            else:
                # User declined the cached credentials: discard them.
                os.remove(self.save_file)

        self.login = input('Login (email address): ')
        self.token = input('Token: ')

        # Save the entered credentials
        if not os.path.isfile(self.save_file):
            with open(self.save_file, 'wb') as f:
                pickle.dump((self.login, self.token), f)

    def request(self, parts):
        """POST the serialized parts; return (0, raw response bytes)."""
        payload = {
            'assignmentSlug': self.assignment_slug,
            'secret': self.token,
            'parts': parts,
            'submitterEmail': self.login}

        encoded = urlencode({'jsonBody': json.dumps(payload)}).encode("utf-8")
        f = urlopen(self.submit_url, encoded)
        try:
            return 0, f.read()
        finally:
            f.close()

    def __iter__(self):
        # Base implementation yields registered part ids only; subclasses
        # override this to yield (part_id, result) pairs for grading.
        yield from self.functions

    def __setitem__(self, key, value):
        self.functions[key] = value