├── DeepLearning ├── Week1 │ ├── .hidden │ ├── C2_W1_Lab01_Neurons_and_Layers.ipynb │ ├── C2_W1_Lab02_CoffeeRoasting_TF.ipynb │ ├── C2_W1_Lab03_CoffeeRoasting_Numpy.ipynb │ ├── C2_W1_Trying.ipynb │ ├── CoffeRoasting_ML.ipynb │ ├── deeplearning.mplstyle │ ├── images │ │ ├── C2_W1_CoffeeRoasting.png │ │ ├── C2_W1_L3_Lab01_3Neurons.PNG │ │ ├── C2_W1_MatrixMatrix.PNG │ │ ├── C2_W1_NeuronsAndLayers.png │ │ ├── C2_W1_RoastingDecision.PNG │ │ ├── C2_W1_RoastingNetwork.PNG │ │ ├── C2_W1_VectorMatrix.PNG │ │ ├── C2_W1_dense.PNG │ │ ├── C2_W1_dense2.PNG │ │ └── C2_W1_dense3.png │ ├── lab_coffee_utils.py │ ├── lab_neurons_utils.py │ └── lab_utils_common.py ├── Week1Assignment │ ├── .ipynb_checkpoints │ │ └── C2_W1_Assignment-checkpoint.ipynb │ ├── C2_W1_Assignment.ipynb │ ├── Understanding.ipynb │ ├── __pycache__ │ │ ├── autils.cpython-310.pyc │ │ ├── autils.cpython-37.pyc │ │ ├── public_tests.cpython-310.pyc │ │ └── public_tests.cpython-37.pyc │ ├── archive │ │ ├── .ipynb_checkpoints │ │ │ ├── 20230130_C2_W1_Assignment-checkpoint.ipynb │ │ │ └── C2_W1_Assignment-Copy1-checkpoint.ipynb │ │ ├── 20230130_C2_W1_Assignment.ipynb │ │ └── C2_W1_Assignment-Copy1.ipynb │ ├── autils.py │ ├── data │ │ ├── X.npy │ │ └── y.npy │ ├── dig.ipynb │ ├── images │ │ ├── C2_W1_Assign1.PNG │ │ ├── C2_W1_Assign1_BroadcastIndexes.PNG │ │ ├── C2_W1_Assign1_BroadcastMatrix.PNG │ │ ├── C2_W1_Assign1_Broadcasting.gif │ │ ├── C2_W1_Assign1_VectorAdd.PNG │ │ ├── C2_W1_CoffeeRoasting.png │ │ ├── C2_W1_L3_Lab01_3Neurons.PNG │ │ ├── C2_W1_MatrixMatrix.PNG │ │ ├── C2_W1_NeuronsAndLayers.png │ │ ├── C2_W1_RoastingDecision.PNG │ │ ├── C2_W1_RoastingNetwork.PNG │ │ ├── C2_W1_VectorMatrix.PNG │ │ ├── C2_W1_dense.PNG │ │ └── C2_W1_dense2.PNG │ ├── public_tests.py │ └── utils.py ├── Week2 │ ├── .ipynb_checkpoints │ │ ├── C2_W2_Multiclass_TF-checkpoint.ipynb │ │ ├── C2_W2_Relu-checkpoint.ipynb │ │ └── C2_W2_SoftMax-checkpoint.ipynb │ ├── C2_W2_Multiclass_TF.ipynb │ ├── C2_W2_Relu.ipynb │ ├── C2_W2_SoftMax.ipynb │ ├── __pycache__ │ │ ├── autils.cpython-310.pyc │ │ ├── autils.cpython-37.pyc │ │ ├── lab_utils_common.cpython-310.pyc │ │ ├── lab_utils_common.cpython-37.pyc │ │ ├── lab_utils_multiclass.cpython-37.pyc │ │ ├── lab_utils_multiclass_TF.cpython-310.pyc │ │ ├── lab_utils_multiclass_TF.cpython-37.pyc │ │ ├── lab_utils_relu.cpython-310.pyc │ │ ├── lab_utils_relu.cpython-37.pyc │ │ └── lab_utils_softmax.cpython-37.pyc │ ├── archive │ │ ├── .ipynb_checkpoints │ │ │ ├── C2_W2_SoftMax-Copy1-checkpoint.ipynb │ │ │ └── C2_W2_SoftMax-Copy2-checkpoint.ipynb │ │ ├── C2_W2_SoftMax-Copy1.ipynb │ │ └── C2_W2_SoftMax-Copy2.ipynb │ ├── autils.py │ ├── betaversion │ │ ├── C2_W2_Multiclass_TF.ipynb │ │ ├── C2_W2_Relu.ipynb │ │ ├── C2_W2_SoftMax-Copy1.ipynb │ │ ├── C2_W2_SoftMax.ipynb │ │ ├── autils.py │ │ ├── deeplearning.mplstyle │ │ ├── images │ │ │ ├── C2_W2_NNSoftmax.PNG │ │ │ ├── C2_W2_ReLU_Graph.png │ │ │ ├── C2_W2_ReLU_Network.png │ │ │ ├── C2_W2_ReLU_Plot.png │ │ │ ├── C2_W2_ReLu.png │ │ │ ├── C2_W2_SoftMaxCost.png │ │ │ ├── C2_W2_SoftMaxNN.png │ │ │ ├── C2_W2_Softmax.png │ │ │ ├── C2_W2_SoftmaxReg_NN.png │ │ │ ├── C2_W2_Softmax_Header.PNG │ │ │ ├── C2_W2_mclass_header.png │ │ │ ├── C2_W2_mclass_lab_network.PNG │ │ │ ├── C2_W2_mclass_layer1.png │ │ │ ├── C2_W2_mclass_layer2.png │ │ │ ├── C2_W2_mclass_relu.png │ │ │ ├── C2_W2_smallnetwork.png │ │ │ └── C2_W2_softmax_accurate.png │ │ ├── lab_utils_common.py │ │ ├── lab_utils_multiclass.py │ │ ├── lab_utils_multiclass_TF.py │ │ ├── lab_utils_relu.py │ │ └── lab_utils_softmax.py │ ├── 
deeplearning.mplstyle │ ├── images │ │ ├── C2_W2_Assigment_NN.png │ │ ├── C2_W2_BinaryVsMultiClass.png │ │ ├── C2_W2_GradDesc.png │ │ ├── C2_W2_NNSoftmax.PNG │ │ ├── C2_W2_ReLU_Graph.png │ │ ├── C2_W2_ReLU_Network.png │ │ ├── C2_W2_ReLU_Plot.png │ │ ├── C2_W2_ReLu.png │ │ ├── C2_W2_SoftMaxCost.png │ │ ├── C2_W2_SoftMaxNN.png │ │ ├── C2_W2_Softmax.png │ │ ├── C2_W2_SoftmaxReg_NN.png │ │ ├── C2_W2_Softmax_Header.PNG │ │ ├── C2_W2_mclass_header.png │ │ ├── C2_W2_mclass_lab_network.PNG │ │ ├── C2_W2_mclass_layer1.png │ │ ├── C2_W2_mclass_layer2.png │ │ ├── C2_W2_mclass_relu.png │ │ ├── C2_W2_smallnetwork.png │ │ └── C2_W2_softmax_accurate.png │ ├── lab_utils_common.py │ ├── lab_utils_multiclass.py │ ├── lab_utils_multiclass_TF.py │ ├── lab_utils_relu.py │ └── lab_utils_softmax.py ├── Week2Assignment │ ├── .ipynb_checkpoints │ │ └── C2_W2_Assignment-checkpoint.ipynb │ ├── C2_W2_Assignment.ipynb │ ├── __pycache__ │ │ ├── autils.cpython-37.pyc │ │ ├── lab_utils_common.cpython-37.pyc │ │ ├── lab_utils_softmax.cpython-37.pyc │ │ └── public_tests.cpython-37.pyc │ ├── autils.py │ ├── data │ │ ├── X.npy │ │ └── y.npy │ ├── deeplearning.mplstyle │ ├── images │ │ ├── C2_W2_Assigment_NN.png │ │ ├── C2_W2_BinaryVsMultiClass.png │ │ ├── C2_W2_NNSoftmax.PNG │ │ ├── C2_W2_ReLu.png │ │ ├── C2_W2_SoftMaxCost.png │ │ ├── C2_W2_SoftMaxNN.png │ │ ├── C2_W2_Softmax.png │ │ ├── C2_W2_SoftmaxReg_NN.png │ │ └── C2_W2_smallnetwork.png │ ├── lab_utils_common.py │ ├── lab_utils_softmax.py │ ├── logs │ │ └── train │ │ │ ├── events.out.tfevents.1645828646.ed92f2b0de47.31.186.v2 │ │ │ ├── events.out.tfevents.1645828646.ed92f2b0de47.profile-empty │ │ │ ├── events.out.tfevents.1647805546.30f1e0ee4ffd.31.186.v2 │ │ │ └── plugins │ │ │ └── profile │ │ │ ├── 2022-02-25_22-37-26 │ │ │ └── local.trace │ │ │ └── 2022-03-20_19-45-46 │ │ │ └── local.trace │ └── public_tests.py ├── Week2Optional │ ├── .ipynb_checkpoints │ │ ├── C2_W2_Backprop-checkpoint.ipynb │ │ └── C2_W2_Derivatives-checkpoint.ipynb │ ├── C2_W2_Backprop.ipynb │ ├── C2_W2_Derivatives.ipynb │ ├── __pycache__ │ │ └── lab_utils_backprop.cpython-37.pyc │ ├── images │ │ ├── C2_W2_BP_network0.PNG │ │ ├── C2_W2_BP_network0_a.PNG │ │ ├── C2_W2_BP_network0_diff.PNG │ │ ├── C2_W2_BP_network0_j.PNG │ │ ├── C2_W2_BP_network1.PNG │ │ ├── C2_W2_BP_network1_a.PNG │ │ ├── C2_W2_BP_network1_c.PNG │ │ ├── C2_W2_BP_network1_d.PNG │ │ └── C2_W2_BP_network1_jdsq.PNG │ └── lab_utils_backprop.py ├── Week3 │ ├── .ipynb_checkpoints │ │ ├── C2W3_Lab_01_Model_Evaluation_and_Selection-checkpoint.ipynb │ │ └── C2W3_Lab_02_Diagnosing_Bias_and_Variance-checkpoint.ipynb │ ├── C2W3_Lab_01_Model_Evaluation_and_Selection.ipynb │ ├── C2W3_Lab_02_Diagnosing_Bias_and_Variance.ipynb │ ├── __pycache__ │ │ └── utils.cpython-37.pyc │ ├── data │ │ ├── c2w3_lab2_data1.csv │ │ ├── c2w3_lab2_data2.csv │ │ ├── c2w3_lab2_data3.csv │ │ ├── c2w3_lab2_data4.csv │ │ ├── data_w3_ex1.csv │ │ └── data_w3_ex2.csv │ ├── deeplearning.mplstyle │ ├── images │ │ ├── C2_W3_BiasVariance.png │ │ ├── C2_W3_NN_Arch.png │ │ └── C2_W3_poly.png │ └── utils.py ├── Week3Assignment │ ├── .ipynb_checkpoints │ │ └── C2_W3_Assignment-checkpoint.ipynb │ ├── C2_W3_Assignment.ipynb │ ├── __pycache__ │ │ ├── assigment_utils.cpython-37.pyc │ │ └── public_tests_a1.cpython-37.pyc │ ├── archive │ │ └── assigment_utils.py │ ├── assigment_utils.py │ ├── deeplearning.mplstyle │ ├── images │ │ ├── C2_W3_BiasVarianceDegree.png │ │ ├── C2_W3_Compute_Cost_linear.png │ │ ├── C2_W3_TrainingVsNew.png │ │ ├── C2_W3_TrainingVsNew_Slide.png │ │ ├── 
C2_W4_degree.png │ │ └── C2_W4_justright.PNG │ ├── public_tests_a1.py │ └── utils.py ├── Week4 │ ├── .ipynb_checkpoints │ │ └── C2_W4_Decision_Tree_with_Markdown-checkpoint.ipynb │ ├── C2_W4_Decision_Tree_with_Markdown.ipynb │ ├── __pycache__ │ │ ├── public_tests.cpython-37.pyc │ │ └── utils.cpython-37.pyc │ ├── images │ │ ├── 0.png │ │ ├── 1.png │ │ ├── 2.png │ │ ├── 3.png │ │ ├── 4.png │ │ ├── 5.png │ │ ├── 6.png │ │ ├── 7.png │ │ ├── 8.png │ │ └── 9.png │ ├── public_tests.py │ └── utils.py └── Week4Assignment │ ├── .ipynb_checkpoints │ ├── C2_W4_Lab_01_Decision_Trees-checkpoint.ipynb │ └── C2_W4_Lab_02_Tree_Ensemble-checkpoint.ipynb │ ├── C2_W4_Lab_01_Decision_Trees.ipynb │ ├── C2_W4_Lab_02_Tree_Ensemble.ipynb │ ├── __pycache__ │ └── utils.cpython-37.pyc │ ├── deeplearning.mplstyle │ ├── heart.csv │ ├── images │ ├── 0.png │ ├── 1.png │ ├── 2.png │ ├── 3.png │ ├── 4.png │ ├── 5.png │ ├── 6.png │ ├── 7.png │ ├── 8.png │ └── 9.png │ └── utils.py ├── README.md └── Supervised-Machine-Learning-Regression-and-Classification ├── README.md ├── Week1 ├── .ipynb_checkpoints │ └── The best classifier-checkpoint.ipynb ├── C1_Gradient_Descent_Soln.ipynb ├── C1_W1_Lab04_Cost_function_Soln.ipynb ├── CostFunction.png ├── Gradient Descent soltuion.png ├── Optional Lab_Model_Representation.ipynb └── Python and Jupyter Notebooks.ipynb ├── Week2 ├── C1_W2_Lab01_Python_Numpy_Vectorization_Soln.ipynb ├── C1_W2_Lab02_Multiple_Variable_Soln.ipynb ├── C1_W2_Lab03_Feature_Scaling_and_Learning_Rate_Soln.ipynb ├── C1_W2_Lab04_FeatEng_PolyReg_Soln.ipynb ├── C1_W2_Lab05_Sklearn_GD_Soln.ipynb ├── C1_W2_Linear_Regression.ipynb ├── MachineLearning_Steps.png ├── Trying-C1_W2_Linearmodel.ipynb ├── Trying_2nd_Lab.ipynb ├── deeplearning.mplstyle ├── lab_utils_common.py └── lab_utils_multi.py ├── Week3 ├── C1_W3_Lab01_Classification_Soln.ipynb ├── C1_W3_Lab02_Sigmoid_function_Soln.ipynb ├── C1_W3_Lab03_Decision_Boundary_Soln.ipynb ├── C1_W3_Lab04_LogisticLoss_Soln.ipynb ├── C1_W3_Lab05_Cost_Function_Soln.ipynb ├── C1_W3_Lab06_Gradient_Descent_Soln.ipynb ├── C1_W3_Lab07_Scikit_Learn_Soln.ipynb ├── C1_W3_Lab08_Overfitting_Soln.ipynb ├── C1_W3_Lab09_Regularization_Soln.ipynb ├── Cost_FunctionInLogistic.ipynb ├── Gradientdescent.ipynb ├── ImportingSklearn.ipynb ├── deeplearning.mplstyle ├── lab_utils_common.py ├── plt_logistic_loss.py ├── plt_one_addpt_onclick.py ├── plt_overfit.py ├── plt_quad_logistic.py ├── test.ipynb ├── tryingLogisiticRegression.ipynb └── trying_lab01.ipynb └── Week3Assignment ├── C1_W3_Logistic_Regression.ipynb ├── PracticseLab.ipynb ├── data ├── ex2data1.txt └── ex2data2.txt ├── images ├── figure 1.png ├── figure 2.png ├── figure 3.png ├── figure 4.png ├── figure 5.png └── figure 6.png ├── public_tests.py ├── test_utils.py └── utils.py /DeepLearning/Week1/.hidden: -------------------------------------------------------------------------------- 1 | betaVersions/ 2 | Hided -------------------------------------------------------------------------------- /DeepLearning/Week1/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 
'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. 
Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_CoffeeRoasting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_CoffeeRoasting.png -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_L3_Lab01_3Neurons.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_L3_Lab01_3Neurons.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_MatrixMatrix.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_MatrixMatrix.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_NeuronsAndLayers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_NeuronsAndLayers.png -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_RoastingDecision.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_RoastingDecision.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_RoastingNetwork.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_RoastingNetwork.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_VectorMatrix.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_VectorMatrix.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_dense.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_dense.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_dense2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_dense2.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1/images/C2_W1_dense3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1/images/C2_W1_dense3.png -------------------------------------------------------------------------------- /DeepLearning/Week1/lab_coffee_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | plt.style.use('./deeplearning.mplstyle') 4 | import tensorflow as tf 5 | from tensorflow.keras.activations import sigmoid 6 | from matplotlib import cm 7 | import matplotlib.colors as colors 8 | from lab_utils_common import dlc 9 | 10 | def load_coffee_data(): 11 | """ Creates a coffee roasting data set. 
12 | roasting duration: 12-15 minutes is best 13 | temperature range: 175-260C is best 14 | """ 15 | rng = np.random.default_rng(2) 16 | X = rng.random(400).reshape(-1,2) 17 | X[:,1] = X[:,1] * 4 + 11.5 # 12-15 min is best 18 | X[:,0] = X[:,0] * (285-150) + 150 # 350-500 F (175-260 C) is best 19 | Y = np.zeros(len(X)) 20 | 21 | i=0 22 | for t,d in X: 23 | y = -3/(260-175)*t + 21 24 | if (t > 175 and t < 260 and d > 12 and d < 15 and d<=y ): 25 | Y[i] = 1 26 | else: 27 | Y[i] = 0 28 | i += 1 29 | 30 | return (X, Y.reshape(-1,1)) 31 | 32 | def plt_roast(X,Y): 33 | Y = Y.reshape(-1,) 34 | colormap = np.array(['r', 'b']) 35 | fig, ax = plt.subplots(1,1,) 36 | ax.scatter(X[Y==1,0],X[Y==1,1], s=70, marker='x', c='red', label="Good Roast" ) 37 | ax.scatter(X[Y==0,0],X[Y==0,1], s=100, marker='o', facecolors='none', 38 | edgecolors=dlc["dldarkblue"],linewidth=1, label="Bad Roast") 39 | tr = np.linspace(175,260,50) 40 | ax.plot(tr, (-3/85) * tr + 21, color=dlc["dlpurple"],linewidth=1) 41 | ax.axhline(y=12,color=dlc["dlpurple"],linewidth=1) 42 | ax.axvline(x=175,color=dlc["dlpurple"],linewidth=1) 43 | ax.set_title(f"Coffee Roasting", size=16) 44 | ax.set_xlabel("Temperature \n(Celsius)",size=12) 45 | ax.set_ylabel("Duration \n(minutes)",size=12) 46 | ax.legend(loc='upper right') 47 | plt.show() 48 | 49 | def plt_prob(ax,fwb): 50 | """ plots a decision boundary but include shading to indicate the probability """ 51 | #setup useful ranges and common linspaces 52 | x0_space = np.linspace(150, 285 , 40) 53 | x1_space = np.linspace(11.5, 15.5 , 40) 54 | 55 | # get probability for x0,x1 ranges 56 | tmp_x0,tmp_x1 = np.meshgrid(x0_space,x1_space) 57 | z = np.zeros_like(tmp_x0) 58 | for i in range(tmp_x0.shape[0]): 59 | for j in range(tmp_x1.shape[1]): 60 | x = np.array([[tmp_x0[i,j],tmp_x1[i,j]]]) 61 | z[i,j] = fwb(x) 62 | 63 | 64 | cmap = plt.get_cmap('Blues') 65 | new_cmap = truncate_colormap(cmap, 0.0, 0.5) 66 | pcm = ax.pcolormesh(tmp_x0, tmp_x1, z, 67 | norm=cm.colors.Normalize(vmin=0, vmax=1), 68 | cmap=new_cmap, shading='nearest', alpha = 0.9) 69 | ax.figure.colorbar(pcm, ax=ax) 70 | 71 | def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): 72 | """ truncates color map """ 73 | new_cmap = colors.LinearSegmentedColormap.from_list( 74 | 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), 75 | cmap(np.linspace(minval, maxval, n))) 76 | return new_cmap 77 | 78 | def plt_layer(X,Y,W1,b1,norm_l): 79 | Y = Y.reshape(-1,) 80 | fig,ax = plt.subplots(1,W1.shape[1], figsize=(16,4)) 81 | for i in range(W1.shape[1]): 82 | layerf= lambda x : sigmoid(np.dot(norm_l(x),W1[:,i]) + b1[i]) 83 | plt_prob(ax[i], layerf) 84 | ax[i].scatter(X[Y==1,0],X[Y==1,1], s=70, marker='x', c='red', label="Good Roast" ) 85 | ax[i].scatter(X[Y==0,0],X[Y==0,1], s=100, marker='o', facecolors='none', 86 | edgecolors=dlc["dldarkblue"],linewidth=1, label="Bad Roast") 87 | tr = np.linspace(175,260,50) 88 | ax[i].plot(tr, (-3/85) * tr + 21, color=dlc["dlpurple"],linewidth=2) 89 | ax[i].axhline(y= 12, color=dlc["dlpurple"], linewidth=2) 90 | ax[i].axvline(x=175, color=dlc["dlpurple"], linewidth=2) 91 | ax[i].set_title(f"Layer 1, unit {i}") 92 | ax[i].set_xlabel("Temperature \n(Celsius)",size=12) 93 | ax[0].set_ylabel("Duration \n(minutes)",size=12) 94 | plt.show() 95 | 96 | def plt_network(X,Y,netf): 97 | fig, ax = plt.subplots(1,2,figsize=(16,4)) 98 | Y = Y.reshape(-1,) 99 | plt_prob(ax[0], netf) 100 | ax[0].scatter(X[Y==1,0],X[Y==1,1], s=70, marker='x', c='red', label="Good Roast" ) 101 | 
ax[0].scatter(X[Y==0,0],X[Y==0,1], s=100, marker='o', facecolors='none', 102 | edgecolors=dlc["dldarkblue"],linewidth=1, label="Bad Roast") 103 | ax[0].plot(X[:,0], (-3/85) * X[:,0] + 21, color=dlc["dlpurple"],linewidth=1) 104 | ax[0].axhline(y= 12, color=dlc["dlpurple"], linewidth=1) 105 | ax[0].axvline(x=175, color=dlc["dlpurple"], linewidth=1) 106 | ax[0].set_xlabel("Temperature \n(Celsius)",size=12) 107 | ax[0].set_ylabel("Duration \n(minutes)",size=12) 108 | ax[0].legend(loc='upper right') 109 | ax[0].set_title(f"network probability") 110 | 111 | ax[1].plot(X[:,0], (-3/85) * X[:,0] + 21, color=dlc["dlpurple"],linewidth=1) 112 | ax[1].axhline(y= 12, color=dlc["dlpurple"], linewidth=1) 113 | ax[1].axvline(x=175, color=dlc["dlpurple"], linewidth=1) 114 | fwb = netf(X) 115 | yhat = (fwb > 0.5).astype(int) 116 | ax[1].scatter(X[yhat[:,0]==1,0],X[yhat[:,0]==1,1], s=70, marker='x', c='orange', label="Predicted Good Roast" ) 117 | ax[1].scatter(X[yhat[:,0]==0,0],X[yhat[:,0]==0,1], s=100, marker='o', facecolors='none', 118 | edgecolors=dlc["dldarkblue"],linewidth=1, label="Bad Roast") 119 | ax[1].set_title(f"network decision") 120 | ax[1].set_xlabel("Temperature \n(Celsius)",size=12) 121 | ax[1].set_ylabel("Duration \n(minutes)",size=12) 122 | ax[1].legend(loc='upper right') 123 | 124 | 125 | def plt_output_unit(W,b): 126 | """ plots a single unit function with 3 inputs """ 127 | steps = 10 128 | fig = plt.figure() 129 | ax = fig.add_subplot(projection='3d') 130 | x_ = np.linspace(0., 1., steps) 131 | y_ = np.linspace(0., 1., steps) 132 | z_ = np.linspace(0., 1., steps) 133 | x, y, z = np.meshgrid(x_, y_, z_, indexing='ij') 134 | d = np.zeros((steps,steps,steps)) 135 | cmap = plt.get_cmap('Blues') 136 | for i in range(steps): 137 | for j in range(steps): 138 | for k in range(steps): 139 | v = np.array([x[i,j,k],y[i,j,k],z[i,j,k]]) 140 | d[i,j,k] = tf.keras.activations.sigmoid(np.dot(v,W[:,0])+b).numpy() 141 | pcm = ax.scatter(x, y, z, c=d, cmap=cmap, alpha = 1 ) 142 | ax.set_xlabel("unit 0"); 143 | ax.set_ylabel("unit 1"); 144 | ax.set_zlabel("unit 2"); 145 | ax.view_init(30, -120) 146 | ax.figure.colorbar(pcm, ax=ax) 147 | ax.set_title(f"Layer 2, output unit") 148 | 149 | plt.show() -------------------------------------------------------------------------------- /DeepLearning/Week1/lab_neurons_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | plt.style.use('./deeplearning.mplstyle') 4 | from matplotlib import cm 5 | import matplotlib.colors as colors 6 | from lab_utils_common import dlc 7 | 8 | def plt_prob_1d(ax,fwb): 9 | """ plots a decision boundary but include shading to indicate the probability """ 10 | #setup useful ranges and common linspaces 11 | x_space = np.linspace(0, 5 , 50) 12 | y_space = np.linspace(0, 1 , 50) 13 | 14 | # get probability for x range, extend to y 15 | z = np.zeros((len(x_space),len(y_space))) 16 | for i in range(len(x_space)): 17 | x = np.array([[x_space[i]]]) 18 | z[:,i] = fwb(x) 19 | 20 | cmap = plt.get_cmap('Blues') 21 | new_cmap = truncate_colormap(cmap, 0.0, 0.5) 22 | pcm = ax.pcolormesh(x_space, y_space, z, 23 | norm=cm.colors.Normalize(vmin=0, vmax=1), 24 | cmap=new_cmap, shading='nearest', alpha = 0.9) 25 | ax.figure.colorbar(pcm, ax=ax) 26 | 27 | def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): 28 | """ truncates color map """ 29 | new_cmap = colors.LinearSegmentedColormap.from_list( 30 | 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, 
a=minval, b=maxval), 31 | cmap(np.linspace(minval, maxval, n))) 32 | return new_cmap 33 | 34 | 35 | def sigmoidnp(z): 36 | """ 37 | Compute the sigmoid of z 38 | 39 | Parameters 40 | ---------- 41 | z : array_like 42 | A scalar or numpy array of any size. 43 | 44 | Returns 45 | ------- 46 | g : array_like 47 | sigmoid(z) 48 | """ 49 | z = np.clip( z, -500, 500 ) # protect against overflow 50 | g = 1.0/(1.0+np.exp(-z)) 51 | 52 | return g 53 | 54 | def plt_linear(X_train, Y_train, prediction_tf, prediction_np): 55 | fig, ax = plt.subplots(1,2, figsize=(16,4)) 56 | ax[0].scatter(X_train, Y_train, marker='x', c='r', label="Data Points") 57 | ax[0].plot(X_train, prediction_tf, c=dlc['dlblue'], label="model output") 58 | ax[0].text(1.6,350,r"y=$200 x + 100$", fontsize='xx-large', color=dlc['dlmagenta']) 59 | ax[0].legend(fontsize='xx-large') 60 | ax[0].set_ylabel('Price (in 1000s of dollars)', fontsize='xx-large') 61 | ax[0].set_xlabel('Size (1000 sqft)', fontsize='xx-large') 62 | ax[0].set_title("Tensorflow prediction",fontsize='xx-large') 63 | 64 | ax[1].scatter(X_train, Y_train, marker='x', c='r', label="Data Points") 65 | ax[1].plot(X_train, prediction_np, c=dlc['dlblue'], label="model output") 66 | ax[1].text(1.6,350,r"y=$200 x + 100$", fontsize='xx-large', color=dlc['dlmagenta']) 67 | ax[1].legend(fontsize='xx-large') 68 | ax[1].set_ylabel('Price (in 1000s of dollars)', fontsize='xx-large') 69 | ax[1].set_xlabel('Size (1000 sqft)', fontsize='xx-large') 70 | ax[1].set_title("Numpy prediction",fontsize='xx-large') 71 | plt.show() 72 | 73 | 74 | def plt_logistic(X_train, Y_train, model, set_w, set_b, pos, neg): 75 | fig,ax = plt.subplots(1,2,figsize=(16,4)) 76 | 77 | layerf= lambda x : model.predict(x) 78 | plt_prob_1d(ax[0], layerf) 79 | 80 | ax[0].scatter(X_train[pos], Y_train[pos], marker='x', s=80, c = 'red', label="y=1") 81 | ax[0].scatter(X_train[neg], Y_train[neg], marker='o', s=100, label="y=0", facecolors='none', 82 | edgecolors=dlc["dlblue"],lw=3) 83 | 84 | ax[0].set_ylim(-0.08,1.1) 85 | ax[0].set_xlim(-0.5,5.5) 86 | ax[0].set_ylabel('y', fontsize=16) 87 | ax[0].set_xlabel('x', fontsize=16) 88 | ax[0].set_title('Tensorflow Model', fontsize=20) 89 | ax[0].legend(fontsize=16) 90 | 91 | layerf= lambda x : sigmoidnp(np.dot(set_w,x.reshape(1,1)) + set_b) 92 | plt_prob_1d(ax[1], layerf) 93 | 94 | ax[1].scatter(X_train[pos], Y_train[pos], marker='x', s=80, c = 'red', label="y=1") 95 | ax[1].scatter(X_train[neg], Y_train[neg], marker='o', s=100, label="y=0", facecolors='none', 96 | edgecolors=dlc["dlblue"],lw=3) 97 | 98 | ax[1].set_ylim(-0.08,1.1) 99 | ax[1].set_xlim(-0.5,5.5) 100 | ax[1].set_ylabel('y', fontsize=16) 101 | ax[1].set_xlabel('x', fontsize=16) 102 | ax[1].set_title('Numpy Model', fontsize=20) 103 | ax[1].legend(fontsize=16) 104 | plt.show() 105 | -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/__pycache__/autils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/__pycache__/autils.cpython-310.pyc -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/__pycache__/autils.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/__pycache__/autils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/__pycache__/public_tests.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/__pycache__/public_tests.cpython-310.pyc -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/__pycache__/public_tests.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/__pycache__/public_tests.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/autils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def load_data(): 4 | X = np.load("data/X.npy") 5 | y = np.load("data/y.npy") 6 | X = X[0:1000] 7 | y = y[0:1000] 8 | return X, y 9 | 10 | def load_weights(): 11 | w1 = np.load("data/w1.npy") 12 | b1 = np.load("data/b1.npy") 13 | w2 = np.load("data/w2.npy") 14 | b2 = np.load("data/b2.npy") 15 | return w1, b1, w2, b2 16 | 17 | def sigmoid(x): 18 | return 1. / (1. + np.exp(-x)) 19 | -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/data/X.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/data/X.npy -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/data/y.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/data/y.npy -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_Assign1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_Assign1.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_Assign1_BroadcastIndexes.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_Assign1_BroadcastIndexes.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_Assign1_BroadcastMatrix.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_Assign1_BroadcastMatrix.PNG -------------------------------------------------------------------------------- 
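A minimal usage sketch for the Week1Assignment/autils.py helpers shown above, assuming it is run from DeepLearning/Week1Assignment/ so that data/X.npy and data/y.npy resolve; the 400-features-per-example input shape matches what public_tests.py asserts for this assignment, but the exact array shapes are otherwise an assumption here.

import numpy as np
from autils import load_data, sigmoid

# load_data() reads data/X.npy and data/y.npy and keeps the first 1000 examples of each.
X, y = load_data()
print("X:", X.shape, "y:", y.shape)

# sigmoid() is elementwise, so it accepts scalars or arrays; sigmoid(0) == 0.5.
z = np.array([-1.0, 0.0, 1.0])
print(sigmoid(z))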
/DeepLearning/Week1Assignment/images/C2_W1_Assign1_Broadcasting.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_Assign1_Broadcasting.gif -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_Assign1_VectorAdd.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_Assign1_VectorAdd.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_CoffeeRoasting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_CoffeeRoasting.png -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_L3_Lab01_3Neurons.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_L3_Lab01_3Neurons.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_MatrixMatrix.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_MatrixMatrix.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_NeuronsAndLayers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_NeuronsAndLayers.png -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_RoastingDecision.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_RoastingDecision.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_RoastingNetwork.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_RoastingNetwork.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_VectorMatrix.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_VectorMatrix.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_dense.PNG: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_dense.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/images/C2_W1_dense2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week1Assignment/images/C2_W1_dense2.PNG -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/public_tests.py: -------------------------------------------------------------------------------- 1 | # UNIT TESTS 2 | from tensorflow.keras.activations import sigmoid 3 | from tensorflow.keras.layers import Dense 4 | 5 | import numpy as np 6 | 7 | def test_c1(target): 8 | assert len(target.layers) == 3, \ 9 | f"Wrong number of layers. Expected 3 but got {len(target.layers)}" 10 | assert target.input.shape.as_list() == [None, 400], \ 11 | f"Wrong input shape. Expected [None, 400] but got {target.input.shape.as_list()}" 12 | i = 0 13 | expected = [[Dense, [None, 25], sigmoid], 14 | [Dense, [None, 15], sigmoid], 15 | [Dense, [None, 1], sigmoid]] 16 | 17 | for layer in target.layers: 18 | assert type(layer) == expected[i][0], \ 19 | f"Wrong type in layer {i}. Expected {expected[i][0]} but got {type(layer)}" 20 | assert layer.output.shape.as_list() == expected[i][1], \ 21 | f"Wrong number of units in layer {i}. Expected {expected[i][1]} but got {layer.output.shape.as_list()}" 22 | assert layer.activation == expected[i][2], \ 23 | f"Wrong activation in layer {i}. Expected {expected[i][2]} but got {layer.activation}" 24 | i = i + 1 25 | 26 | print("\033[92mAll tests passed!") 27 | 28 | def test_c2(target): 29 | 30 | def linear(a): 31 | return a 32 | 33 | def linear_times3(a): 34 | return a * 3 35 | 36 | x_tst = np.array([1., 2., 3., 4.]) # (1 examples, 4 features) 37 | W_tst = np.array([[1., 2.], [1., 2.], [1., 2.], [1., 2.]]) # (4 input features, 2 output features) 38 | b_tst = np.array([0., 0.]) # (2 features) 39 | 40 | A_tst = target(x_tst, W_tst, b_tst, linear) 41 | assert A_tst.shape[0] == len(b_tst) 42 | assert np.allclose(A_tst, [10., 20.]), \ 43 | "Wrong output. Check the dot product" 44 | 45 | b_tst = np.array([3., 5.]) # (2 features) 46 | 47 | A_tst = target(x_tst, W_tst, b_tst, linear) 48 | assert np.allclose(A_tst, [13., 25.]), \ 49 | "Wrong output. Check the bias term in the formula" 50 | 51 | A_tst = target(x_tst, W_tst, b_tst, linear_times3) 52 | assert np.allclose(A_tst, [39., 75.]), \ 53 | "Wrong output. Did you apply the activation function at the end?" 54 | 55 | print("\033[92mAll tests passed!") 56 | 57 | def test_c3(target): 58 | 59 | def linear(a): 60 | return a 61 | 62 | def linear_times3(a): 63 | return a * 3 64 | 65 | x_tst = np.array([1., 2., 3., 4.]) # (1 examples, 3 features) 66 | W_tst = np.array([[1., 2.], [1., 2.], [1., 2.], [1., 2.]]) # (3 input features, 2 output features) 67 | b_tst = np.array([0., 0.]) # (2 features) 68 | 69 | A_tst = target(x_tst, W_tst, b_tst, linear) 70 | assert A_tst.shape[0] == len(b_tst) 71 | assert np.allclose(A_tst, [10., 20.]), \ 72 | "Wrong output. 
Check the dot product" 73 | 74 | b_tst = np.array([3., 5.]) # (2 features) 75 | 76 | A_tst = target(x_tst, W_tst, b_tst, linear) 77 | assert np.allclose(A_tst, [13., 25.]), \ 78 | "Wrong output. Check the bias term in the formula" 79 | 80 | A_tst = target(x_tst, W_tst, b_tst, linear_times3) 81 | assert np.allclose(A_tst, [39., 75.]), \ 82 | "Wrong output. Did you apply the activation function at the end?" 83 | 84 | x_tst = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]]) # (2 examples, 4 features) 85 | W_tst = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12]]) # (3 input features, 2 output features) 86 | b_tst = np.array([0., 0., 0.]) # (2 features) 87 | 88 | A_tst = target(x_tst, W_tst, b_tst, linear) 89 | assert A_tst.shape == (2, 3) 90 | assert np.allclose(A_tst, [[ 70., 80., 90.], [158., 184., 210.]]), \ 91 | "Wrong output. Check the dot product" 92 | 93 | b_tst = np.array([3., 5., 6]) # (3 features) 94 | 95 | A_tst = target(x_tst, W_tst, b_tst, linear) 96 | assert np.allclose(A_tst, [[ 73., 85., 96.], [161., 189., 216.]]), \ 97 | "Wrong output. Check the bias term in the formula" 98 | 99 | A_tst = target(x_tst, W_tst, b_tst, linear_times3) 100 | assert np.allclose(A_tst, [[ 219., 255., 288.], [483., 567., 648.]]), \ 101 | "Wrong output. Did you apply the activation function at the end?" 102 | 103 | print("\033[92mAll tests passed!") 104 | -------------------------------------------------------------------------------- /DeepLearning/Week1Assignment/utils.py: -------------------------------------------------------------------------------- 1 | # C2_W1 Utilities 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from sklearn.datasets import make_blobs 5 | 6 | def sigmoid(x): 7 | return 1 / (1 + np.exp(-x)) 8 | 9 | # Plot multi-class training points 10 | def plot_mc_data(X, y, class_labels=None, legend=False,size=40): 11 | classes = np.unique(y) 12 | for i in classes: 13 | label = class_labels[i] if class_labels else "class {}".format(i) 14 | idx = np.where(y == i) 15 | plt.scatter(X[idx, 0], X[idx, 1], cmap=plt.cm.Paired, 16 | edgecolor='black', s=size, label=label) 17 | if legend: plt.legend() 18 | 19 | 20 | #Plot a multi-class categorical decision boundary 21 | # This version handles a non-vector prediction (adds a for-loop over points) 22 | def plot_cat_decision_boundary(X,predict , class_labels=None, legend=False, vector=True): 23 | 24 | # create a mesh to points to plot 25 | x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 26 | y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 27 | h = max(x_max-x_min, y_max-y_min)/200 28 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), 29 | np.arange(y_min, y_max, h)) 30 | points = np.c_[xx.ravel(), yy.ravel()] 31 | 32 | #make predictions for each point in mesh 33 | if vector: 34 | Z = predict(points) 35 | else: 36 | Z = np.zeros((len(points),)) 37 | for i in range(len(points)): 38 | Z[i] = predict(points[i].reshape(1,2)) 39 | Z = Z.reshape(xx.shape) 40 | 41 | #contour plot highlights boundaries between values - classes in this case 42 | plt.figure() 43 | plt.contour(xx, yy, Z, colors='g') 44 | plt.axis('tight') -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/autils.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/autils.cpython-310.pyc 
-------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/autils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/autils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_common.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_common.cpython-310.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_common.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_multiclass.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_multiclass.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_multiclass_TF.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_multiclass_TF.cpython-310.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_multiclass_TF.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_multiclass_TF.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_relu.cpython-310.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_relu.cpython-310.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_relu.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_relu.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2/__pycache__/lab_utils_softmax.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/__pycache__/lab_utils_softmax.cpython-37.pyc 
-------------------------------------------------------------------------------- /DeepLearning/Week2/autils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import tensorflow as tf 4 | from tensorflow.keras.models import Sequential 5 | from tensorflow.keras.layers import Dense 6 | from tensorflow.keras.activations import linear, relu, sigmoid 7 | 8 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0', dldarkblue = '#0D5BDC', dlmedblue='#4285F4') 9 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'; dldarkblue = '#0D5BDC'; dlmedblue='#4285F4' 10 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple] 11 | plt.style.use('deeplearning.mplstyle') 12 | 13 | 14 | def load_data(): 15 | X = np.load("data/X.npy") 16 | y = np.load("data/y.npy") 17 | return X, y 18 | 19 | def plt_act_trio(): 20 | X = np.linspace(-5,5,100) 21 | fig,ax = plt.subplots(1,3, figsize=(6,2)) 22 | widgvis(fig) 23 | ax[0].plot(X,tf.keras.activations.linear(X)) 24 | ax[0].axvline(0, lw=0.3, c="black") 25 | ax[0].axhline(0, lw=0.3, c="black") 26 | ax[0].set_title("Linear") 27 | ax[1].plot(X,tf.keras.activations.sigmoid(X)) 28 | ax[1].axvline(0, lw=0.3, c="black") 29 | ax[1].axhline(0, lw=0.3, c="black") 30 | ax[1].set_title("Sigmoid") 31 | ax[2].plot(X,tf.keras.activations.relu(X)) 32 | ax[2].axhline(0, lw=0.3, c="black") 33 | ax[2].axvline(0, lw=0.3, c="black") 34 | ax[2].set_title("ReLu") 35 | fig.suptitle("Common Activation Functions", fontsize=14) 36 | fig.tight_layout(pad=0.2) 37 | plt.show() 38 | 39 | def widgvis(fig): 40 | fig.canvas.toolbar_visible = False 41 | fig.canvas.header_visible = False 42 | fig.canvas.footer_visible = False 43 | 44 | def plt_ex1(): 45 | X = np.linspace(0,2*np.pi, 100) 46 | y = np.cos(X)+1 47 | y[50:100]=0 48 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 49 | widgvis(fig) 50 | ax.set_title("Target") 51 | ax.set_xlabel("x") 52 | ax.set_ylabel("y") 53 | ax.plot(X,y) 54 | fig.tight_layout(pad=0.1) 55 | plt.show() 56 | return(X,y) 57 | 58 | def plt_ex2(): 59 | X = np.linspace(0,2*np.pi, 100) 60 | y = np.cos(X)+1 61 | y[0:49]=0 62 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 63 | widgvis(fig) 64 | ax.set_title("Target") 65 | ax.set_xlabel("x") 66 | ax.set_ylabel("y") 67 | ax.plot(X,y) 68 | fig.tight_layout(pad=0.1) 69 | plt.show() 70 | return(X,y) 71 | 72 | def gen_data(): 73 | X = np.linspace(0,2*np.pi, 100) 74 | y = np.cos(X)+1 75 | X=X.reshape(-1,1) 76 | return(X,y) 77 | 78 | def plt_dual(X,y,yhat): 79 | fig,ax = plt.subplots(1,2, figsize=(4,2)) 80 | widgvis(fig) 81 | ax[0].set_title("Target") 82 | ax[0].set_xlabel("x") 83 | ax[0].set_ylabel("y") 84 | ax[0].plot(X,y) 85 | ax[1].set_title("Prediction") 86 | ax[1].set_xlabel("x") 87 | ax[1].set_ylabel("y") 88 | ax[1].plot(X,y) 89 | ax[1].plot(X,yhat) 90 | fig.tight_layout(pad=0.1) 91 | plt.show() 92 | 93 | def plt_act1(X,y,z,a): 94 | fig,ax = plt.subplots(1,3, figsize=(6,2.5)) 95 | widgvis(fig) 96 | ax[0].plot(X,y,label="target") 97 | ax[0].axvline(0, lw=0.3, c="black") 98 | ax[0].axhline(0, lw=0.3, c="black") 99 | ax[0].set_title("y - target") 100 | ax[1].plot(X,y, label="target") 101 | ax[1].plot(X,z, c=dlc["dldarkred"],label="z") 102 | ax[1].axvline(0, lw=0.3, c="black") 103 | ax[1].axhline(0, lw=0.3, c="black") 104 | ax[1].set_title(r"$z = w \cdot x+b$") 105 | ax[1].legend(loc="upper center") 106 | ax[2].plot(X,y, 
label="target") 107 | ax[2].plot(X,a, c=dlc["dldarkred"],label="ReLu(z)") 108 | ax[2].axhline(0, lw=0.3, c="black") 109 | ax[2].axvline(0, lw=0.3, c="black") 110 | ax[2].set_title("max(0,z)") 111 | ax[2].legend() 112 | fig.suptitle("Role of Non-Linear Activation", fontsize=12) 113 | fig.tight_layout(pad=0.22) 114 | return(ax) 115 | 116 | 117 | def plt_add_notation(ax): 118 | ax[1].annotate(text = "matches\n here", xy =(1.5,1.0), 119 | xytext = (0.1,-1.5), fontsize=9, 120 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 121 | ax[1].annotate(text = "but not\n here", xy =(5,-2.5), 122 | xytext = (1,-3), fontsize=9, 123 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 124 | ax[2].annotate(text = "ReLu\n 'off'", xy =(2.6,0), 125 | xytext = (0.1,0.1), fontsize=9, 126 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 127 | 128 | def compile_fit(model,X,y): 129 | model.compile( 130 | loss=tf.keras.losses.MeanSquaredError(), 131 | optimizer=tf.keras.optimizers.Adam(0.01), 132 | ) 133 | 134 | model.fit( 135 | X,y, 136 | epochs=100, 137 | verbose = 0 138 | ) 139 | l1=model.get_layer("l1") 140 | l2=model.get_layer("l2") 141 | w1,b1 = l1.get_weights() 142 | w2,b2 = l2.get_weights() 143 | return(w1,b1,w2,b2) 144 | 145 | def plt_model(X,y,yhat_pre, yhat_post): 146 | fig,ax = plt.subplots(1,3, figsize=(8,2)) 147 | widgvis(fig) 148 | ax[0].set_title("Target") 149 | ax[0].set_xlabel("x") 150 | ax[0].set_ylabel("y") 151 | ax[0].plot(X,y) 152 | ax[1].set_title("Prediction, pre-training") 153 | ax[1].set_xlabel("x") 154 | ax[1].set_ylabel("y") 155 | ax[1].plot(X,y) 156 | ax[1].plot(X,yhat_pre) 157 | ax[2].set_title("Prediction, post-training") 158 | ax[2].set_xlabel("x") 159 | ax[2].set_ylabel("y") 160 | ax[2].plot(X,y) 161 | ax[2].plot(X,yhat_post) 162 | fig.tight_layout(pad=0.1) 163 | plt.show() 164 | 165 | def display_errors(model,X,y): 166 | f = model.predict(X) 167 | yhat = np.argmax(f, axis=1) 168 | doo = yhat != y[:,0] 169 | idxs = np.where(yhat != y[:,0])[0] 170 | if len(idxs) == 0: 171 | print("no errors found") 172 | else: 173 | cnt = min(8, len(idxs)) 174 | fig, ax = plt.subplots(1,cnt, figsize=(5,1.2)) 175 | fig.tight_layout(pad=0.13,rect=[0, 0.03, 1, 0.80]) #[left, bottom, right, top] 176 | widgvis(fig) 177 | 178 | for i in range(cnt): 179 | j = idxs[i] 180 | X_reshaped = X[j].reshape((20,20)).T 181 | 182 | # Display the image 183 | ax[i].imshow(X_reshaped, cmap='gray') 184 | 185 | # Predict using the Neural Network 186 | prediction = model.predict(X[j].reshape(1,400)) 187 | prediction_p = tf.nn.softmax(prediction) 188 | yhat = np.argmax(prediction_p) 189 | 190 | # Display the label above the image 191 | ax[i].set_title(f"{y[j,0]},{yhat}",fontsize=10) 192 | ax[i].set_axis_off() 193 | fig.suptitle("Label, yhat", fontsize=12) 194 | return(len(idxs)) 195 | 196 | def display_digit(X): 197 | """ display a single digit. The input is one digit (400,). 
""" 198 | fig, ax = plt.subplots(1,1, figsize=(0.5,0.5)) 199 | widgvis(fig) 200 | X_reshaped = X.reshape((20,20)).T 201 | # Display the image 202 | ax.imshow(X_reshaped, cmap='gray') 203 | plt.show() 204 | -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/autils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import tensorflow as tf 4 | from tensorflow.keras.models import Sequential 5 | from tensorflow.keras.layers import Dense 6 | from tensorflow.keras.activations import linear, relu, sigmoid 7 | 8 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0', dldarkblue = '#0D5BDC', dlmedblue='#4285F4') 9 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'; dldarkblue = '#0D5BDC'; dlmedblue='#4285F4' 10 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple] 11 | plt.style.use('./deeplearning.mplstyle') 12 | 13 | 14 | def load_data(): 15 | X = np.load("data/X.npy") 16 | y = np.load("data/y.npy") 17 | return X, y 18 | 19 | def plt_act_trio(): 20 | X = np.linspace(-5,5,100) 21 | fig,ax = plt.subplots(1,3, figsize=(6,2)) 22 | widgvis(fig) 23 | ax[0].plot(X,tf.keras.activations.linear(X)) 24 | ax[0].axvline(0, lw=0.3, c="black") 25 | ax[0].axhline(0, lw=0.3, c="black") 26 | ax[0].set_title("Linear") 27 | ax[1].plot(X,tf.keras.activations.sigmoid(X)) 28 | ax[1].axvline(0, lw=0.3, c="black") 29 | ax[1].axhline(0, lw=0.3, c="black") 30 | ax[1].set_title("Sigmoid") 31 | ax[2].plot(X,tf.keras.activations.relu(X)) 32 | ax[2].axhline(0, lw=0.3, c="black") 33 | ax[2].axvline(0, lw=0.3, c="black") 34 | ax[2].set_title("ReLu") 35 | fig.suptitle("Common Activation Functions", fontsize=14) 36 | fig.tight_layout(pad=0.2) 37 | plt.show() 38 | 39 | def widgvis(fig): 40 | fig.canvas.toolbar_visible = False 41 | fig.canvas.header_visible = False 42 | fig.canvas.footer_visible = False 43 | 44 | def plt_ex1(): 45 | X = np.linspace(0,2*np.pi, 100) 46 | y = np.cos(X)+1 47 | y[50:100]=0 48 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 49 | widgvis(fig) 50 | ax.set_title("Target") 51 | ax.set_xlabel("x") 52 | ax.set_ylabel("y") 53 | ax.plot(X,y) 54 | fig.tight_layout(pad=0.1) 55 | plt.show() 56 | return(X,y) 57 | 58 | def plt_ex2(): 59 | X = np.linspace(0,2*np.pi, 100) 60 | y = np.cos(X)+1 61 | y[0:49]=0 62 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 63 | widgvis(fig) 64 | ax.set_title("Target") 65 | ax.set_xlabel("x") 66 | ax.set_ylabel("y") 67 | ax.plot(X,y) 68 | fig.tight_layout(pad=0.1) 69 | plt.show() 70 | return(X,y) 71 | 72 | def gen_data(): 73 | X = np.linspace(0,2*np.pi, 100) 74 | y = np.cos(X)+1 75 | X=X.reshape(-1,1) 76 | return(X,y) 77 | 78 | def plt_dual(X,y,yhat): 79 | fig,ax = plt.subplots(1,2, figsize=(4,2)) 80 | widgvis(fig) 81 | ax[0].set_title("Target") 82 | ax[0].set_xlabel("x") 83 | ax[0].set_ylabel("y") 84 | ax[0].plot(X,y) 85 | ax[1].set_title("Prediction") 86 | ax[1].set_xlabel("x") 87 | ax[1].set_ylabel("y") 88 | ax[1].plot(X,y) 89 | ax[1].plot(X,yhat) 90 | fig.tight_layout(pad=0.1) 91 | plt.show() 92 | 93 | def plt_act1(X,y,z,a): 94 | fig,ax = plt.subplots(1,3, figsize=(6,2.5)) 95 | widgvis(fig) 96 | ax[0].plot(X,y,label="target") 97 | ax[0].axvline(0, lw=0.3, c="black") 98 | ax[0].axhline(0, lw=0.3, c="black") 99 | ax[0].set_title("y - target") 100 | ax[1].plot(X,y, label="target") 101 | ax[1].plot(X,z, 
c=dlc["dldarkred"],label="z") 102 | ax[1].axvline(0, lw=0.3, c="black") 103 | ax[1].axhline(0, lw=0.3, c="black") 104 | ax[1].set_title(r"$z = w \cdot x+b$") 105 | ax[1].legend(loc="upper center") 106 | ax[2].plot(X,y, label="target") 107 | ax[2].plot(X,a, c=dlc["dldarkred"],label="ReLu(z)") 108 | ax[2].axhline(0, lw=0.3, c="black") 109 | ax[2].axvline(0, lw=0.3, c="black") 110 | ax[2].set_title("max(0,z)") 111 | ax[2].legend() 112 | fig.suptitle("Role of Non-Linear Activation", fontsize=12) 113 | fig.tight_layout(pad=0.22) 114 | return(ax) 115 | 116 | 117 | def plt_add_notation(ax): 118 | ax[1].annotate(text = "matches\n here", xy =(1.5,1.0), 119 | xytext = (0.1,-1.5), fontsize=9, 120 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 121 | ax[1].annotate(text = "but not\n here", xy =(5,-2.5), 122 | xytext = (1,-3), fontsize=9, 123 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 124 | ax[2].annotate(text = "ReLu\n 'off'", xy =(2.6,0), 125 | xytext = (0.1,0.1), fontsize=9, 126 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 127 | 128 | def compile_fit(model,X,y): 129 | model.compile( 130 | loss=tf.keras.losses.MeanSquaredError(), 131 | optimizer=tf.keras.optimizers.Adam(0.01), 132 | ) 133 | 134 | model.fit( 135 | X,y, 136 | epochs=100, 137 | verbose = 0 138 | ) 139 | l1=model.get_layer("l1") 140 | l2=model.get_layer("l2") 141 | w1,b1 = l1.get_weights() 142 | w2,b2 = l2.get_weights() 143 | return(w1,b1,w2,b2) 144 | 145 | def plt_model(X,y,yhat_pre, yhat_post): 146 | fig,ax = plt.subplots(1,3, figsize=(8,2)) 147 | widgvis(fig) 148 | ax[0].set_title("Target") 149 | ax[0].set_xlabel("x") 150 | ax[0].set_ylabel("y") 151 | ax[0].plot(X,y) 152 | ax[1].set_title("Prediction, pre-training") 153 | ax[1].set_xlabel("x") 154 | ax[1].set_ylabel("y") 155 | ax[1].plot(X,y) 156 | ax[1].plot(X,yhat_pre) 157 | ax[2].set_title("Prediction, post-training") 158 | ax[2].set_xlabel("x") 159 | ax[2].set_ylabel("y") 160 | ax[2].plot(X,y) 161 | ax[2].plot(X,yhat_post) 162 | fig.tight_layout(pad=0.1) 163 | plt.show() 164 | 165 | def display_errors(model,X,y): 166 | f = model.predict(X) 167 | yhat = np.argmax(f, axis=1) 168 | doo = yhat != y[:,0] 169 | idxs = np.where(yhat != y[:,0])[0] 170 | if len(idxs) == 0: 171 | print("no errors found") 172 | else: 173 | cnt = min(8, len(idxs)) 174 | fig, ax = plt.subplots(1,cnt, figsize=(5,1.2)) 175 | fig.tight_layout(pad=0.13,rect=[0, 0.03, 1, 0.80]) #[left, bottom, right, top] 176 | widgvis(fig) 177 | 178 | for i in range(cnt): 179 | j = idxs[i] 180 | X_reshaped = X[j].reshape((20,20)).T 181 | 182 | # Display the image 183 | ax[i].imshow(X_reshaped, cmap='gray') 184 | 185 | # Predict using the Neural Network 186 | prediction = model.predict(X[j].reshape(1,400)) 187 | prediction_p = tf.nn.softmax(prediction) 188 | yhat = np.argmax(prediction_p) 189 | 190 | # Display the label above the image 191 | ax[i].set_title(f"{y[j,0]},{yhat}",fontsize=10) 192 | ax[i].set_axis_off() 193 | fig.suptitle("Label, yhat", fontsize=12) 194 | return(len(idxs)) 195 | 196 | def display_digit(X): 197 | """ display a single digit. The input is one digit (400,). 
""" 198 | fig, ax = plt.subplots(1,1, figsize=(0.5,0.5)) 199 | widgvis(fig) 200 | X_reshaped = X.reshape((20,20)).T 201 | # Display the image 202 | ax.imshow(X_reshaped, cmap='gray') 203 | plt.show() 204 | -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. 
bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_NNSoftmax.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_NNSoftmax.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Graph.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Network.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_ReLU_Plot.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_ReLu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_ReLu.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_SoftMaxCost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_SoftMaxCost.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_SoftMaxNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_SoftMaxNN.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_Softmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_Softmax.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_SoftmaxReg_NN.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_SoftmaxReg_NN.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_Softmax_Header.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_Softmax_Header.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_mclass_header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_mclass_header.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_mclass_lab_network.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_mclass_lab_network.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_mclass_layer1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_mclass_layer1.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_mclass_layer2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_mclass_layer2.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_mclass_relu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_mclass_relu.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_smallnetwork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_smallnetwork.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/images/C2_W2_softmax_accurate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/betaversion/images/C2_W2_softmax_accurate.png -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/lab_utils_multiclass.py: -------------------------------------------------------------------------------- 1 | # C2_W1 Utilities 2 | import numpy as np 3 | import 
matplotlib.pyplot as plt 4 | from sklearn.datasets import make_blobs 5 | 6 | def sigmoid(x): 7 | return 1 / (1 + np.exp(-x)) 8 | 9 | # Plot multi-class training points 10 | def plot_mc_data(X, y, class_labels=None, legend=False,size=40): 11 | classes = np.unique(y) 12 | for i in classes: 13 | label = class_labels[i] if class_labels else "class {}".format(i) 14 | idx = np.where(y == i) 15 | plt.scatter(X[idx, 0], X[idx, 1], cmap=plt.cm.Paired, 16 | edgecolor='black', s=size, label=label) 17 | if legend: plt.legend() 18 | 19 | 20 | #Plot a multi-class categorical decision boundary 21 | # This version handles a non-vector prediction (adds a for-loop over points) 22 | def plot_cat_decision_boundary(X,predict , class_labels=None, legend=False, vector=True): 23 | 24 | # create a mesh to points to plot 25 | x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 26 | y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 27 | h = max(x_max-x_min, y_max-y_min)/200 28 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), 29 | np.arange(y_min, y_max, h)) 30 | points = np.c_[xx.ravel(), yy.ravel()] 31 | print("points", points.shape) 32 | print("xx.shape", xx.shape) 33 | 34 | #make predictions for each point in mesh 35 | if vector: 36 | Z = predict(points) 37 | else: 38 | Z = np.zeros((len(points),)) 39 | for i in range(len(points)): 40 | Z[i] = predict(points[i].reshape(1,2)) 41 | Z = Z.reshape(xx.shape) 42 | 43 | #contour plot highlights boundaries between values - classes in this case 44 | plt.figure() 45 | plt.contour(xx, yy, Z, colors='g') 46 | plt.axis('tight') -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/lab_utils_relu.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from matplotlib.gridspec import GridSpec 4 | plt.style.use('./deeplearning.mplstyle') 5 | from matplotlib.widgets import Slider 6 | from lab_utils_common import dlc 7 | 8 | def widgvis(fig): 9 | fig.canvas.toolbar_visible = False 10 | fig.canvas.header_visible = False 11 | fig.canvas.footer_visible = False 12 | 13 | 14 | def plt_base(ax): 15 | X = np.linspace(0, 3, 3*100) 16 | y = np.r_[ -2*X[0:100]+2, 1*X[100:200]-3+2, 3*X[200:300]-7+2 ] 17 | w00 = -2 18 | b00 = 2 19 | w01 = 0 # 1 20 | b01 = 0 # -1 21 | w02 = 0 # 2 22 | b02 = 0 # -4 23 | ax[0].plot(X, y, color = dlc["dlblue"], label="target") 24 | arts = [] 25 | arts.extend( plt_yhat(ax[0], X, w00, b00, w01, b01, w02, b02) ) 26 | _ = plt_unit(ax[1], X, w00, b00) #Fixed 27 | arts.extend( plt_unit(ax[2], X, w01, b01) ) 28 | arts.extend( plt_unit(ax[3], X, w02, b02) ) 29 | return(X, arts) 30 | 31 | def plt_yhat(ax, X, w00, b00, w01, b01, w02, b02): 32 | yhat = np.maximum(0, np.dot(w00, X) + b00) + \ 33 | np.maximum(0, np.dot(w01, X) + b01) + \ 34 | np.maximum(0, np.dot(w02, X) + b02) 35 | lp = ax.plot(X, yhat, lw=2, color = dlc["dlorange"], label="a2") 36 | return(lp) 37 | 38 | def plt_unit(ax, X, w, b): 39 | z = np.dot(w,X) + b 40 | yhat = np.maximum(0,z) 41 | lpa = ax.plot(X, z, dlc["dlblue"], label="z") 42 | lpb = ax.plot(X, yhat, dlc["dlmagenta"], lw=1, label="a") 43 | return([lpa[0], lpb[0]]) 44 | 45 | # if output is need for debug, put this in a cell and call ahead of time. Output will be below that cell. 
46 | #from ipywidgets import Output #this line stays here 47 | #output = Output() #this line stays here 48 | #display(output) #this line goes in notebook 49 | 50 | def plt_relu_ex(): 51 | artists = [] 52 | 53 | fig = plt.figure() 54 | fig.suptitle("Explore Non-Linear Activation") 55 | 56 | gs = GridSpec(3, 2, width_ratios=[2, 1], height_ratios=[1, 1, 1]) 57 | ax1 = fig.add_subplot(gs[0:2,0]) 58 | ax2 = fig.add_subplot(gs[0,1]) 59 | ax3 = fig.add_subplot(gs[1,1]) 60 | ax4 = fig.add_subplot(gs[2,1]) 61 | ax = [ax1,ax2,ax3,ax4] 62 | 63 | widgvis(fig) 64 | #plt.subplots_adjust(bottom=0.35) 65 | 66 | axb2 = fig.add_axes([0.15, 0.10, 0.30, 0.03]) # [left, bottom, width, height] 67 | axw2 = fig.add_axes([0.15, 0.15, 0.30, 0.03]) 68 | axb1 = fig.add_axes([0.15, 0.20, 0.30, 0.03]) 69 | axw1 = fig.add_axes([0.15, 0.25, 0.30, 0.03]) 70 | 71 | sw1 = Slider(axw1, 'w1', -4.0, 4.0, valinit=0, valstep=0.1) 72 | sb1 = Slider(axb1, 'b1', -4.0, 4.0, valinit=0, valstep=0.1) 73 | sw2 = Slider(axw2, 'w2', -4.0, 4.0, valinit=0, valstep=0.1) 74 | sb2 = Slider(axb2, 'b2', -4.0, 4.0, valinit=0, valstep=0.1) 75 | 76 | X,lp = plt_base(ax) 77 | artists.extend( lp ) 78 | 79 | #@output.capture() 80 | def update(val): 81 | #print("-----------") 82 | #print(f"len artists {len(artists)}", artists) 83 | for i in range(len(artists)): 84 | artist = artists[i] 85 | #print("artist:", artist) 86 | artist.remove() 87 | artists.clear() 88 | #print(artists) 89 | w00 = -2 90 | b00 = 2 91 | w01 = sw1.val # 1 92 | b01 = sb1.val # -1 93 | w02 = sw2.val # 2 94 | b02 = sb2.val # -4 95 | artists.extend(plt_yhat(ax[0], X, w00, b00, w01, b01, w02, b02)) 96 | artists.extend(plt_unit(ax[2], X, w01, b01) ) 97 | artists.extend(plt_unit(ax[3], X, w02, b02) ) 98 | #fig.canvas.draw_idle() 99 | 100 | sw1.on_changed(update) 101 | sb1.on_changed(update) 102 | sw2.on_changed(update) 103 | sb2.on_changed(update) 104 | 105 | ax[0].set_title(" Match Target ") 106 | ax[0].legend() 107 | ax[0].set_xlabel("x") 108 | ax[1].set_title("Unit 0 (fixed) ") 109 | ax[1].legend() 110 | ax[2].set_title("Unit 1") 111 | ax[2].legend() 112 | ax[3].set_title("Unit 2") 113 | ax[3].legend() 114 | plt.tight_layout() 115 | 116 | plt.show() 117 | return([sw1,sw2,sb1,sb2,artists]) # returned to keep a live reference to sliders 118 | 119 | -------------------------------------------------------------------------------- /DeepLearning/Week2/betaversion/lab_utils_softmax.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | plt.style.use('./deeplearning.mplstyle') 4 | import tensorflow as tf 5 | from IPython.display import display, Markdown, Latex 6 | from matplotlib.widgets import Slider 7 | from lab_utils_common import dlc 8 | 9 | 10 | def plt_softmax(my_softmax): 11 | fig, ax = plt.subplots(1,2,figsize=(8,4)) 12 | plt.subplots_adjust(bottom=0.35) 13 | 14 | axz0 = fig.add_axes([0.15, 0.10, 0.30, 0.03]) # [left, bottom, width, height] 15 | axz1 = fig.add_axes([0.15, 0.15, 0.30, 0.03]) 16 | axz2 = fig.add_axes([0.15, 0.20, 0.30, 0.03]) 17 | axz3 = fig.add_axes([0.15, 0.25, 0.30, 0.03]) 18 | 19 | z3 = Slider(axz3, 'z3', 0.1, 10.0, valinit=4, valstep=0.1) 20 | z2 = Slider(axz2, 'z2', 0.1, 10.0, valinit=3, valstep=0.1) 21 | z1 = Slider(axz1, 'z1', 0.1, 10.0, valinit=2, valstep=0.1) 22 | z0 = Slider(axz0, 'z0', 0.1, 10.0, valinit=1, valstep=0.1) 23 | 24 | z = np.array(['z0','z1','z2','z3']) 25 | bar = ax[0].barh(z, height=0.6, width=[z0.val,z1.val,z2.val,z3.val], left=None, align='center') 26 
| bars = bar.get_children() 27 | ax[0].set_xlim([0,10]) 28 | ax[0].set_title("z input to softmax") 29 | 30 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 31 | anames = np.array(['a0','a1','a2','a3']) 32 | sbar = ax[1].barh(anames, height=0.6, width=a, left=None, align='center',color=dlc["dldarkred"]) 33 | sbars = sbar.get_children() 34 | ax[1].set_xlim([0,1]) 35 | ax[1].set_title("softmax(z)") 36 | 37 | def update(val): 38 | bars[0].set_width(z0.val) 39 | bars[1].set_width(z1.val) 40 | bars[2].set_width(z2.val) 41 | bars[3].set_width(z3.val) 42 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 43 | sbars[0].set_width(a[0]) 44 | sbars[1].set_width(a[1]) 45 | sbars[2].set_width(a[2]) 46 | sbars[3].set_width(a[3]) 47 | 48 | fig.canvas.draw_idle() 49 | 50 | z0.on_changed(update) 51 | z1.on_changed(update) 52 | z2.on_changed(update) 53 | z3.on_changed(update) 54 | 55 | plt.show() 56 | -------------------------------------------------------------------------------- /DeepLearning/Week2/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. 
Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_Assigment_NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_Assigment_NN.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_BinaryVsMultiClass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_BinaryVsMultiClass.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_GradDesc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_GradDesc.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_NNSoftmax.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_NNSoftmax.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_ReLU_Graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_ReLU_Graph.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_ReLU_Network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_ReLU_Network.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_ReLU_Plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_ReLU_Plot.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_ReLu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_ReLu.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_SoftMaxCost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_SoftMaxCost.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_SoftMaxNN.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_SoftMaxNN.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_Softmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_Softmax.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_SoftmaxReg_NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_SoftmaxReg_NN.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_Softmax_Header.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_Softmax_Header.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_mclass_header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_mclass_header.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_mclass_lab_network.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_mclass_lab_network.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_mclass_layer1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_mclass_layer1.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_mclass_layer2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_mclass_layer2.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_mclass_relu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_mclass_relu.png -------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_smallnetwork.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_smallnetwork.png 
-------------------------------------------------------------------------------- /DeepLearning/Week2/images/C2_W2_softmax_accurate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2/images/C2_W2_softmax_accurate.png -------------------------------------------------------------------------------- /DeepLearning/Week2/lab_utils_multiclass.py: -------------------------------------------------------------------------------- 1 | # C2_W1 Utilities 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from sklearn.datasets import make_blobs 5 | 6 | def sigmoid(x): 7 | return 1 / (1 + np.exp(-x)) 8 | 9 | # Plot multi-class training points 10 | def plot_mc_data(X, y, class_labels=None, legend=False,size=40): 11 | classes = np.unique(y) 12 | for i in classes: 13 | label = class_labels[i] if class_labels else "class {}".format(i) 14 | idx = np.where(y == i) 15 | plt.scatter(X[idx, 0], X[idx, 1], cmap=plt.cm.Paired, 16 | edgecolor='black', s=size, label=label) 17 | if legend: plt.legend() 18 | 19 | 20 | #Plot a multi-class categorical decision boundary 21 | # This version handles a non-vector prediction (adds a for-loop over points) 22 | def plot_cat_decision_boundary(X,predict , class_labels=None, legend=False, vector=True): 23 | 24 | # create a mesh to points to plot 25 | x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 26 | y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 27 | h = max(x_max-x_min, y_max-y_min)/200 28 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), 29 | np.arange(y_min, y_max, h)) 30 | points = np.c_[xx.ravel(), yy.ravel()] 31 | print("points", points.shape) 32 | print("xx.shape", xx.shape) 33 | 34 | #make predictions for each point in mesh 35 | if vector: 36 | Z = predict(points) 37 | else: 38 | Z = np.zeros((len(points),)) 39 | for i in range(len(points)): 40 | Z[i] = predict(points[i].reshape(1,2)) 41 | Z = Z.reshape(xx.shape) 42 | 43 | #contour plot highlights boundaries between values - classes in this case 44 | plt.figure() 45 | plt.contour(xx, yy, Z, colors='g') 46 | plt.axis('tight') -------------------------------------------------------------------------------- /DeepLearning/Week2/lab_utils_multiclass_TF.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import matplotlib as mpl 4 | import warnings 5 | from matplotlib import cm 6 | from matplotlib.patches import FancyArrowPatch 7 | from matplotlib.colors import ListedColormap, LinearSegmentedColormap 8 | import matplotlib.colors as colors 9 | from lab_utils_common import dlc 10 | from matplotlib import cm 11 | 12 | 13 | 14 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0', dldarkblue = '#0D5BDC') 15 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'; dldarkblue = '#0D5BDC' 16 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple] 17 | plt.style.use('deeplearning.mplstyle') 18 | 19 | dkcolors = plt.cm.Paired((1,3,7,9,5,11)) 20 | ltcolors = plt.cm.Paired((0,2,6,8,4,10)) 21 | dkcolors_map = mpl.colors.ListedColormap(dkcolors) 22 | ltcolors_map = mpl.colors.ListedColormap(ltcolors) 23 | 24 | #Plot a multi-class categorical decision boundary 25 | # This version handles a non-vector prediction (adds a for-loop over points) 26 | def 
plot_cat_decision_boundary_mc(ax, X, predict , class_labels=None, legend=False, vector=True): 27 | 28 | # create a mesh to points to plot 29 | x_min, x_max = X[:, 0].min()- 0.5, X[:, 0].max()+0.5 30 | y_min, y_max = X[:, 1].min()- 0.5, X[:, 1].max()+0.5 31 | h = max(x_max-x_min, y_max-y_min)/100 32 | xx, yy = np.meshgrid(np.arange(x_min, x_max, h), 33 | np.arange(y_min, y_max, h)) 34 | points = np.c_[xx.ravel(), yy.ravel()] 35 | #print("points", points.shape) 36 | #print("xx.shape", xx.shape) 37 | 38 | #make predictions for each point in mesh 39 | if vector: 40 | Z = predict(points) 41 | else: 42 | Z = np.zeros((len(points),)) 43 | for i in range(len(points)): 44 | Z[i] = predict(points[i].reshape(1,2)) 45 | Z = Z.reshape(xx.shape) 46 | 47 | #contour plot highlights boundaries between values - classes in this case 48 | ax.contour(xx, yy, Z, linewidths=1) 49 | #ax.axis('tight') 50 | 51 | 52 | def plt_mc_data(ax, X, y, classes, class_labels=None, map=plt.cm.Paired, 53 | legend=False, size=50, m='o', equal_xy = False): 54 | """ Plot multiclass data. Note, if equal_xy is True, setting ylim on the plot may not work """ 55 | for i in range(classes): 56 | idx = np.where(y == i) 57 | col = len(idx[0])*[i] 58 | label = class_labels[i] if class_labels else "c{}".format(i) 59 | # this didn't work on coursera but did in local version 60 | #ax.scatter(X[idx, 0], X[idx, 1], marker=m, 61 | # c=col, vmin=0, vmax=map.N, cmap=map, 62 | # s=size, label=label) 63 | ax.scatter(X[idx, 0], X[idx, 1], marker=m, 64 | color=map(col), vmin=0, vmax=map.N, 65 | s=size, label=label) 66 | if legend: ax.legend() 67 | if equal_xy: ax.axis("equal") 68 | 69 | def plt_mc(X_train,y_train,classes, centers, std): 70 | css = np.unique(y_train) 71 | fig,ax = plt.subplots(1,1,figsize=(3,3)) 72 | fig.canvas.toolbar_visible = False 73 | fig.canvas.header_visible = False 74 | fig.canvas.footer_visible = False 75 | plt_mc_data(ax, X_train,y_train,classes, map=dkcolors_map, legend=True, size=50, equal_xy = False) 76 | ax.set_title("Multiclass Data") 77 | ax.set_xlabel("x0") 78 | ax.set_ylabel("x1") 79 | #for c in css: 80 | # circ = plt.Circle(centers[c], 2*std, color=dkcolors_map(c), clip_on=False, fill=False, lw=0.5) 81 | # ax.add_patch(circ) 82 | plt.show() 83 | 84 | def plt_cat_mc(X_train, y_train, model, classes): 85 | #make a model for plotting routines to call 86 | model_predict = lambda Xl: np.argmax(model.predict(Xl),axis=1) 87 | 88 | fig,ax = plt.subplots(1,1, figsize=(3,3)) 89 | fig.canvas.toolbar_visible = False 90 | fig.canvas.header_visible = False 91 | fig.canvas.footer_visible = False 92 | 93 | #add the original data to the decison boundary 94 | plt_mc_data(ax, X_train,y_train, classes, map=dkcolors_map, legend=True) 95 | #plot the decison boundary. 
96 | plot_cat_decision_boundary_mc(ax, X_train, model_predict, vector=True) 97 | ax.set_title("model decision boundary") 98 | 99 | plt.xlabel(r'$x_0$'); 100 | plt.ylabel(r"$x_1$"); 101 | plt.show() 102 | 103 | 104 | def plt_prob_z(ax,fwb, x0_rng=(-8,8), x1_rng=(-5,4)): 105 | """ plots a decision boundary but include shading to indicate the probability 106 | and adds a conouter to show where z=0 107 | """ 108 | #setup useful ranges and common linspaces 109 | x0_space = np.linspace(x0_rng[0], x0_rng[1], 40) 110 | x1_space = np.linspace(x1_rng[0], x1_rng[1], 40) 111 | 112 | # get probability for x0,x1 ranges 113 | tmp_x0,tmp_x1 = np.meshgrid(x0_space,x1_space) 114 | z = np.zeros_like(tmp_x0) 115 | c = np.zeros_like(tmp_x0) 116 | for i in range(tmp_x0.shape[0]): 117 | for j in range(tmp_x1.shape[1]): 118 | x = np.array([[tmp_x0[i,j],tmp_x1[i,j]]]) 119 | z[i,j] = fwb(x) 120 | c[i,j] = 0. if z[i,j] == 0 else 1. 121 | with warnings.catch_warnings(): # suppress no contour warning 122 | warnings.simplefilter("ignore") 123 | #ax.contour(tmp_x0, tmp_x1, c, colors='b', linewidths=1) 124 | ax.contour(tmp_x0, tmp_x1, c, linewidths=1) 125 | 126 | cmap = plt.get_cmap('Blues') 127 | new_cmap = truncate_colormap(cmap, 0.0, 0.7) 128 | 129 | pcm = ax.pcolormesh(tmp_x0, tmp_x1, z, 130 | norm=cm.colors.Normalize(vmin=np.amin(z), vmax=np.amax(z)), 131 | cmap=new_cmap, shading='nearest', alpha = 0.9) 132 | ax.figure.colorbar(pcm, ax=ax) 133 | 134 | def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): 135 | """ truncates color map """ 136 | new_cmap = colors.LinearSegmentedColormap.from_list( 137 | 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), 138 | cmap(np.linspace(minval, maxval, n))) 139 | return new_cmap 140 | 141 | 142 | def plt_layer_relu(X, Y, W1, b1, classes): 143 | nunits = (W1.shape[1]) 144 | Y = Y.reshape(-1,) 145 | fig,ax = plt.subplots(1,W1.shape[1], figsize=(7,2.5)) 146 | fig.canvas.toolbar_visible = False 147 | fig.canvas.header_visible = False 148 | fig.canvas.footer_visible = False 149 | 150 | for i in range(nunits): 151 | layerf= lambda x : np.maximum(0,(np.dot(x,W1[:,i]) + b1[i])) 152 | plt_prob_z(ax[i], layerf) 153 | plt_mc_data(ax[i], X, Y, classes, map=dkcolors_map,legend=True, size=50, m='o') 154 | ax[i].set_title(f"Layer 1 Unit {i}") 155 | ax[i].set_ylabel(r"$x_1$",size=10) 156 | ax[i].set_xlabel(r"$x_0$",size=10) 157 | fig.tight_layout() 158 | plt.show() 159 | 160 | 161 | def plt_output_layer_linear(X, Y, W, b, classes, x0_rng=None, x1_rng=None): 162 | nunits = (W.shape[1]) 163 | Y = Y.reshape(-1,) 164 | fig,ax = plt.subplots(2,int(nunits/2), figsize=(7,5)) 165 | fig.canvas.toolbar_visible = False 166 | fig.canvas.header_visible = False 167 | fig.canvas.footer_visible = False 168 | for i,axi in enumerate(ax.flat): 169 | layerf = lambda x : np.dot(x,W[:,i]) + b[i] 170 | plt_prob_z(axi, layerf, x0_rng=x0_rng, x1_rng=x1_rng) 171 | plt_mc_data(axi, X, Y, classes, map=dkcolors_map,legend=True, size=50, m='o') 172 | axi.set_ylabel(r"$a^{[1]}_1$",size=9) 173 | axi.set_xlabel(r"$a^{[1]}_0$",size=9) 174 | axi.set_xlim(x0_rng) 175 | axi.set_ylim(x1_rng) 176 | axi.set_title(f"Linear Output Unit {i}") 177 | fig.tight_layout() 178 | plt.show() -------------------------------------------------------------------------------- /DeepLearning/Week2/lab_utils_relu.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from matplotlib.gridspec import GridSpec 4 | 
plt.style.use('deeplearning.mplstyle') 5 | from matplotlib.widgets import Slider 6 | from lab_utils_common import dlc 7 | 8 | def widgvis(fig): 9 | fig.canvas.toolbar_visible = False 10 | fig.canvas.header_visible = False 11 | fig.canvas.footer_visible = False 12 | 13 | 14 | def plt_base(ax): 15 | X = np.linspace(0, 3, 3*100) 16 | y = np.r_[ -2*X[0:100]+2, 1*X[100:200]-3+2, 3*X[200:300]-7+2 ] 17 | w00 = -2 18 | b00 = 2 19 | w01 = 0 # 1 20 | b01 = 0 # -1 21 | w02 = 0 # 2 22 | b02 = 0 # -4 23 | ax[0].plot(X, y, color = dlc["dlblue"], label="target") 24 | arts = [] 25 | arts.extend( plt_yhat(ax[0], X, w00, b00, w01, b01, w02, b02) ) 26 | _ = plt_unit(ax[1], X, w00, b00) #Fixed 27 | arts.extend( plt_unit(ax[2], X, w01, b01) ) 28 | arts.extend( plt_unit(ax[3], X, w02, b02) ) 29 | return(X, arts) 30 | 31 | def plt_yhat(ax, X, w00, b00, w01, b01, w02, b02): 32 | yhat = np.maximum(0, np.dot(w00, X) + b00) + \ 33 | np.maximum(0, np.dot(w01, X) + b01) + \ 34 | np.maximum(0, np.dot(w02, X) + b02) 35 | lp = ax.plot(X, yhat, lw=2, color = dlc["dlorange"], label="a2") 36 | return(lp) 37 | 38 | def plt_unit(ax, X, w, b): 39 | z = np.dot(w,X) + b 40 | yhat = np.maximum(0,z) 41 | lpa = ax.plot(X, z, dlc["dlblue"], label="z") 42 | lpb = ax.plot(X, yhat, dlc["dlmagenta"], lw=1, label="a") 43 | return([lpa[0], lpb[0]]) 44 | 45 | # if output is need for debug, put this in a cell and call ahead of time. Output will be below that cell. 46 | #from ipywidgets import Output #this line stays here 47 | #output = Output() #this line stays here 48 | #display(output) #this line goes in notebook 49 | 50 | def plt_relu_ex(): 51 | artists = [] 52 | 53 | fig = plt.figure() 54 | fig.suptitle("Explore Non-Linear Activation") 55 | 56 | gs = GridSpec(3, 2, width_ratios=[2, 1], height_ratios=[1, 1, 1]) 57 | ax1 = fig.add_subplot(gs[0:2,0]) 58 | ax2 = fig.add_subplot(gs[0,1]) 59 | ax3 = fig.add_subplot(gs[1,1]) 60 | ax4 = fig.add_subplot(gs[2,1]) 61 | ax = [ax1,ax2,ax3,ax4] 62 | 63 | widgvis(fig) 64 | #plt.subplots_adjust(bottom=0.35) 65 | 66 | axb2 = fig.add_axes([0.15, 0.10, 0.30, 0.03]) # [left, bottom, width, height] 67 | axw2 = fig.add_axes([0.15, 0.15, 0.30, 0.03]) 68 | axb1 = fig.add_axes([0.15, 0.20, 0.30, 0.03]) 69 | axw1 = fig.add_axes([0.15, 0.25, 0.30, 0.03]) 70 | 71 | sw1 = Slider(axw1, 'w1', -4.0, 4.0, valinit=0, valstep=0.1) 72 | sb1 = Slider(axb1, 'b1', -4.0, 4.0, valinit=0, valstep=0.1) 73 | sw2 = Slider(axw2, 'w2', -4.0, 4.0, valinit=0, valstep=0.1) 74 | sb2 = Slider(axb2, 'b2', -4.0, 4.0, valinit=0, valstep=0.1) 75 | 76 | X,lp = plt_base(ax) 77 | artists.extend( lp ) 78 | 79 | #@output.capture() 80 | def update(val): 81 | #print("-----------") 82 | #print(f"len artists {len(artists)}", artists) 83 | for i in range(len(artists)): 84 | artist = artists[i] 85 | #print("artist:", artist) 86 | artist.remove() 87 | artists.clear() 88 | #print(artists) 89 | w00 = -2 90 | b00 = 2 91 | w01 = sw1.val # 1 92 | b01 = sb1.val # -1 93 | w02 = sw2.val # 2 94 | b02 = sb2.val # -4 95 | artists.extend(plt_yhat(ax[0], X, w00, b00, w01, b01, w02, b02)) 96 | artists.extend(plt_unit(ax[2], X, w01, b01) ) 97 | artists.extend(plt_unit(ax[3], X, w02, b02) ) 98 | #fig.canvas.draw_idle() 99 | 100 | sw1.on_changed(update) 101 | sb1.on_changed(update) 102 | sw2.on_changed(update) 103 | sb2.on_changed(update) 104 | 105 | ax[0].set_title(" Match Target ") 106 | ax[0].legend() 107 | ax[0].set_xlabel("x") 108 | ax[1].set_title("Unit 0 (fixed) ") 109 | ax[1].legend() 110 | ax[2].set_title("Unit 1") 111 | ax[2].legend() 112 | 
ax[3].set_title("Unit 2") 113 | ax[3].legend() 114 | plt.tight_layout() 115 | 116 | plt.show() 117 | return([sw1,sw2,sb1,sb2,artists]) # returned to keep a live reference to sliders 118 | 119 | -------------------------------------------------------------------------------- /DeepLearning/Week2/lab_utils_softmax.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | plt.style.use('./deeplearning.mplstyle') 4 | import tensorflow as tf 5 | from IPython.display import display, Markdown, Latex 6 | from matplotlib.widgets import Slider 7 | from lab_utils_common import dlc 8 | 9 | 10 | def plt_softmax(my_softmax): 11 | fig, ax = plt.subplots(1,2,figsize=(8,4)) 12 | plt.subplots_adjust(bottom=0.35) 13 | 14 | axz0 = fig.add_axes([0.15, 0.10, 0.30, 0.03]) # [left, bottom, width, height] 15 | axz1 = fig.add_axes([0.15, 0.15, 0.30, 0.03]) 16 | axz2 = fig.add_axes([0.15, 0.20, 0.30, 0.03]) 17 | axz3 = fig.add_axes([0.15, 0.25, 0.30, 0.03]) 18 | 19 | z3 = Slider(axz3, 'z3', 0.1, 10.0, valinit=4, valstep=0.1) 20 | z2 = Slider(axz2, 'z2', 0.1, 10.0, valinit=3, valstep=0.1) 21 | z1 = Slider(axz1, 'z1', 0.1, 10.0, valinit=2, valstep=0.1) 22 | z0 = Slider(axz0, 'z0', 0.1, 10.0, valinit=1, valstep=0.1) 23 | 24 | z = np.array(['z0','z1','z2','z3']) 25 | bar = ax[0].barh(z, height=0.6, width=[z0.val,z1.val,z2.val,z3.val], left=None, align='center') 26 | bars = bar.get_children() 27 | ax[0].set_xlim([0,10]) 28 | ax[0].set_title("z input to softmax") 29 | 30 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 31 | anames = np.array(['a0','a1','a2','a3']) 32 | sbar = ax[1].barh(anames, height=0.6, width=a, left=None, align='center',color=dlc["dldarkred"]) 33 | sbars = sbar.get_children() 34 | ax[1].set_xlim([0,1]) 35 | ax[1].set_title("softmax(z)") 36 | 37 | def update(val): 38 | bars[0].set_width(z0.val) 39 | bars[1].set_width(z1.val) 40 | bars[2].set_width(z2.val) 41 | bars[3].set_width(z3.val) 42 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 43 | sbars[0].set_width(a[0]) 44 | sbars[1].set_width(a[1]) 45 | sbars[2].set_width(a[2]) 46 | sbars[3].set_width(a[3]) 47 | 48 | fig.canvas.draw_idle() 49 | 50 | z0.on_changed(update) 51 | z1.on_changed(update) 52 | z2.on_changed(update) 53 | z3.on_changed(update) 54 | 55 | plt.show() 56 | -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/__pycache__/autils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/__pycache__/autils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/__pycache__/lab_utils_common.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/__pycache__/lab_utils_common.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/__pycache__/lab_utils_softmax.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/__pycache__/lab_utils_softmax.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/__pycache__/public_tests.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/__pycache__/public_tests.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/autils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import tensorflow as tf 4 | from tensorflow.keras.models import Sequential 5 | from tensorflow.keras.layers import Dense 6 | from tensorflow.keras.activations import linear, relu, sigmoid 7 | 8 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0', dldarkblue = '#0D5BDC', dlmedblue='#4285F4') 9 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'; dldarkblue = '#0D5BDC'; dlmedblue='#4285F4' 10 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple] 11 | plt.style.use('./deeplearning.mplstyle') 12 | 13 | 14 | def load_data(): 15 | X = np.load("data/X.npy") 16 | y = np.load("data/y.npy") 17 | return X, y 18 | 19 | def plt_act_trio(): 20 | X = np.linspace(-5,5,100) 21 | fig,ax = plt.subplots(1,3, figsize=(6,2)) 22 | widgvis(fig) 23 | ax[0].plot(X,tf.keras.activations.linear(X)) 24 | ax[0].axvline(0, lw=0.3, c="black") 25 | ax[0].axhline(0, lw=0.3, c="black") 26 | ax[0].set_title("Linear") 27 | ax[1].plot(X,tf.keras.activations.sigmoid(X)) 28 | ax[1].axvline(0, lw=0.3, c="black") 29 | ax[1].axhline(0, lw=0.3, c="black") 30 | ax[1].set_title("Sigmoid") 31 | ax[2].plot(X,tf.keras.activations.relu(X)) 32 | ax[2].axhline(0, lw=0.3, c="black") 33 | ax[2].axvline(0, lw=0.3, c="black") 34 | ax[2].set_title("ReLu") 35 | fig.suptitle("Common Activation Functions", fontsize=14) 36 | fig.tight_layout(pad=0.2) 37 | plt.show() 38 | 39 | def widgvis(fig): 40 | fig.canvas.toolbar_visible = False 41 | fig.canvas.header_visible = False 42 | fig.canvas.footer_visible = False 43 | 44 | def plt_ex1(): 45 | X = np.linspace(0,2*np.pi, 100) 46 | y = np.cos(X)+1 47 | y[50:100]=0 48 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 49 | widgvis(fig) 50 | ax.set_title("Target") 51 | ax.set_xlabel("x") 52 | ax.set_ylabel("y") 53 | ax.plot(X,y) 54 | fig.tight_layout(pad=0.1) 55 | plt.show() 56 | return(X,y) 57 | 58 | def plt_ex2(): 59 | X = np.linspace(0,2*np.pi, 100) 60 | y = np.cos(X)+1 61 | y[0:49]=0 62 | fig,ax = plt.subplots(1,1, figsize=(2,2)) 63 | widgvis(fig) 64 | ax.set_title("Target") 65 | ax.set_xlabel("x") 66 | ax.set_ylabel("y") 67 | ax.plot(X,y) 68 | fig.tight_layout(pad=0.1) 69 | plt.show() 70 | return(X,y) 71 | 72 | def gen_data(): 73 | X = np.linspace(0,2*np.pi, 100) 74 | y = np.cos(X)+1 75 | X=X.reshape(-1,1) 76 | return(X,y) 77 | 78 | def plt_dual(X,y,yhat): 79 | fig,ax = plt.subplots(1,2, figsize=(4,2)) 80 | widgvis(fig) 81 | ax[0].set_title("Target") 82 | ax[0].set_xlabel("x") 83 | ax[0].set_ylabel("y") 84 | ax[0].plot(X,y) 85 | ax[1].set_title("Prediction") 86 | ax[1].set_xlabel("x") 87 | ax[1].set_ylabel("y") 88 | ax[1].plot(X,y) 89 | ax[1].plot(X,yhat) 90 | 
fig.tight_layout(pad=0.1) 91 | plt.show() 92 | 93 | def plt_act1(X,y,z,a): 94 | fig,ax = plt.subplots(1,3, figsize=(6,2.5)) 95 | widgvis(fig) 96 | ax[0].plot(X,y,label="target") 97 | ax[0].axvline(0, lw=0.3, c="black") 98 | ax[0].axhline(0, lw=0.3, c="black") 99 | ax[0].set_title("y - target") 100 | ax[1].plot(X,y, label="target") 101 | ax[1].plot(X,z, c=dlc["dldarkred"],label="z") 102 | ax[1].axvline(0, lw=0.3, c="black") 103 | ax[1].axhline(0, lw=0.3, c="black") 104 | ax[1].set_title(r"$z = w \cdot x+b$") 105 | ax[1].legend(loc="upper center") 106 | ax[2].plot(X,y, label="target") 107 | ax[2].plot(X,a, c=dlc["dldarkred"],label="ReLu(z)") 108 | ax[2].axhline(0, lw=0.3, c="black") 109 | ax[2].axvline(0, lw=0.3, c="black") 110 | ax[2].set_title("max(0,z)") 111 | ax[2].legend() 112 | fig.suptitle("Role of Non-Linear Activation", fontsize=12) 113 | fig.tight_layout(pad=0.22) 114 | return(ax) 115 | 116 | 117 | def plt_add_notation(ax): 118 | ax[1].annotate(text = "matches\n here", xy =(1.5,1.0), 119 | xytext = (0.1,-1.5), fontsize=9, 120 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 121 | ax[1].annotate(text = "but not\n here", xy =(5,-2.5), 122 | xytext = (1,-3), fontsize=9, 123 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 124 | ax[2].annotate(text = "ReLu\n 'off'", xy =(2.6,0), 125 | xytext = (0.1,0.1), fontsize=9, 126 | arrowprops=dict(facecolor=dlc["dlpurple"],width=2, headwidth=8)) 127 | 128 | def compile_fit(model,X,y): 129 | model.compile( 130 | loss=tf.keras.losses.MeanSquaredError(), 131 | optimizer=tf.keras.optimizers.Adam(0.01), 132 | ) 133 | 134 | model.fit( 135 | X,y, 136 | epochs=100, 137 | verbose = 0 138 | ) 139 | l1=model.get_layer("l1") 140 | l2=model.get_layer("l2") 141 | w1,b1 = l1.get_weights() 142 | w2,b2 = l2.get_weights() 143 | return(w1,b1,w2,b2) 144 | 145 | def plt_model(X,y,yhat_pre, yhat_post): 146 | fig,ax = plt.subplots(1,3, figsize=(8,2)) 147 | widgvis(fig) 148 | ax[0].set_title("Target") 149 | ax[0].set_xlabel("x") 150 | ax[0].set_ylabel("y") 151 | ax[0].plot(X,y) 152 | ax[1].set_title("Prediction, pre-training") 153 | ax[1].set_xlabel("x") 154 | ax[1].set_ylabel("y") 155 | ax[1].plot(X,y) 156 | ax[1].plot(X,yhat_pre) 157 | ax[2].set_title("Prediction, post-training") 158 | ax[2].set_xlabel("x") 159 | ax[2].set_ylabel("y") 160 | ax[2].plot(X,y) 161 | ax[2].plot(X,yhat_post) 162 | fig.tight_layout(pad=0.1) 163 | plt.show() 164 | 165 | def display_errors(model,X,y): 166 | f = model.predict(X) 167 | yhat = np.argmax(f, axis=1) 168 | doo = yhat != y[:,0] 169 | idxs = np.where(yhat != y[:,0])[0] 170 | if len(idxs) == 0: 171 | print("no errors found") 172 | else: 173 | cnt = min(8, len(idxs)) 174 | fig, ax = plt.subplots(1,cnt, figsize=(5,1.2)) 175 | fig.tight_layout(pad=0.13,rect=[0, 0.03, 1, 0.80]) #[left, bottom, right, top] 176 | widgvis(fig) 177 | 178 | for i in range(cnt): 179 | j = idxs[i] 180 | X_reshaped = X[j].reshape((20,20)).T 181 | 182 | # Display the image 183 | ax[i].imshow(X_reshaped, cmap='gray') 184 | 185 | # Predict using the Neural Network 186 | prediction = model.predict(X[j].reshape(1,400)) 187 | prediction_p = tf.nn.softmax(prediction) 188 | yhat = np.argmax(prediction_p) 189 | 190 | # Display the label above the image 191 | ax[i].set_title(f"{y[j,0]},{yhat}",fontsize=10) 192 | ax[i].set_axis_off() 193 | fig.suptitle("Label, yhat", fontsize=12) 194 | return(len(idxs)) 195 | 196 | def display_digit(X): 197 | """ display a single digit. The input is one digit (400,). 
""" 198 | fig, ax = plt.subplots(1,1, figsize=(0.5,0.5)) 199 | widgvis(fig) 200 | X_reshaped = X.reshape((20,20)).T 201 | # Display the image 202 | ax.imshow(X_reshaped, cmap='gray') 203 | plt.show() 204 | 205 | 206 | def plot_loss_tf(history): 207 | fig,ax = plt.subplots(1,1, figsize = (4,3)) 208 | widgvis(fig) 209 | ax.plot(history.history['loss'], label='loss') 210 | ax.set_ylim([0, 2]) 211 | ax.set_xlabel('Epoch') 212 | ax.set_ylabel('loss (cost)') 213 | ax.legend() 214 | ax.grid(True) 215 | plt.show() 216 | -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/data/X.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/data/X.npy -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/data/y.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/data/y.npy -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 
67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_Assigment_NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_Assigment_NN.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_BinaryVsMultiClass.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_BinaryVsMultiClass.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_NNSoftmax.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_NNSoftmax.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_ReLu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_ReLu.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_SoftMaxCost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_SoftMaxCost.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_SoftMaxNN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_SoftMaxNN.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_Softmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_Softmax.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_SoftmaxReg_NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_SoftmaxReg_NN.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/images/C2_W2_smallnetwork.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/images/C2_W2_smallnetwork.png -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/lab_utils_softmax.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | plt.style.use('./deeplearning.mplstyle') 4 | import tensorflow as tf 5 | from IPython.display import display, Markdown, Latex 6 | from matplotlib.widgets import Slider 7 | from lab_utils_common import dlc 8 | 9 | 10 | def plt_softmax(my_softmax): 11 | fig, ax = plt.subplots(1,2,figsize=(8,4)) 12 | plt.subplots_adjust(bottom=0.35) 13 | 14 | axz0 = fig.add_axes([0.15, 0.10, 0.30, 0.03]) # [left, bottom, width, height] 15 | axz1 = fig.add_axes([0.15, 0.15, 0.30, 0.03]) 16 | axz2 = fig.add_axes([0.15, 0.20, 0.30, 0.03]) 17 | axz3 = fig.add_axes([0.15, 0.25, 0.30, 0.03]) 18 | 19 | z3 = Slider(axz3, 'z3', 0.1, 10.0, valinit=4, valstep=0.1) 20 | z2 = Slider(axz2, 'z2', 0.1, 10.0, valinit=3, valstep=0.1) 21 | z1 = Slider(axz1, 'z1', 0.1, 10.0, valinit=2, valstep=0.1) 22 | z0 = Slider(axz0, 'z0', 0.1, 10.0, valinit=1, valstep=0.1) 23 | 24 | z = np.array(['z0','z1','z2','z3']) 25 | bar = ax[0].barh(z, height=0.6, width=[z0.val,z1.val,z2.val,z3.val], left=None, align='center') 26 | bars = bar.get_children() 27 | ax[0].set_xlim([0,10]) 28 | ax[0].set_title("z input to softmax") 29 | 30 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 31 | anames = np.array(['a0','a1','a2','a3']) 32 | sbar = ax[1].barh(anames, height=0.6, width=a, left=None, align='center',color=dlc["dldarkred"]) 33 | sbars = sbar.get_children() 34 | ax[1].set_xlim([0,1]) 35 | ax[1].set_title("softmax(z)") 36 | 37 | def update(val): 38 | bars[0].set_width(z0.val) 39 | bars[1].set_width(z1.val) 40 | bars[2].set_width(z2.val) 41 | bars[3].set_width(z3.val) 42 | a = my_softmax(np.array([z0.val,z1.val,z2.val,z3.val])) 43 | sbars[0].set_width(a[0]) 44 | sbars[1].set_width(a[1]) 45 | sbars[2].set_width(a[2]) 46 | sbars[3].set_width(a[3]) 47 | 48 | fig.canvas.draw_idle() 49 | 50 | z0.on_changed(update) 51 | z1.on_changed(update) 52 | z2.on_changed(update) 53 | z3.on_changed(update) 54 | 55 | plt.show() 56 | -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1645828646.ed92f2b0de47.31.186.v2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1645828646.ed92f2b0de47.31.186.v2 -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1645828646.ed92f2b0de47.profile-empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1645828646.ed92f2b0de47.profile-empty -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1647805546.30f1e0ee4ffd.31.186.v2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/logs/train/events.out.tfevents.1647805546.30f1e0ee4ffd.31.186.v2 -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/logs/train/plugins/profile/2022-02-25_22-37-26/local.trace: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/logs/train/plugins/profile/2022-02-25_22-37-26/local.trace -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/logs/train/plugins/profile/2022-03-20_19-45-46/local.trace: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Assignment/logs/train/plugins/profile/2022-03-20_19-45-46/local.trace -------------------------------------------------------------------------------- /DeepLearning/Week2Assignment/public_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.keras.models import Sequential 4 | from tensorflow.keras.layers import Dense 5 | from tensorflow.keras.activations import linear, sigmoid, relu 6 | 7 | def test_my_softmax(target): 8 | z = np.array([1., 2., 3., 4.]) 9 | a = target(z) 10 | atf = tf.nn.softmax(z) 11 | 12 | assert np.allclose(a, atf, atol=1e-10), f"Wrong values. Expected {atf}, got {a}" 13 | 14 | z = np.array([np.log(0.1)] * 10) 15 | a = target(z) 16 | atf = tf.nn.softmax(z) 17 | 18 | assert np.allclose(a, atf, atol=1e-10), f"Wrong values. Expected {atf}, got {a}" 19 | 20 | print("\033[92m All tests passed.") 21 | 22 | def test_model(target, classes, input_size): 23 | target.build(input_shape=(None,input_size)) 24 | 25 | assert len(target.layers) == 3, \ 26 | f"Wrong number of layers. Expected 3 but got {len(target.layers)}" 27 | assert target.input.shape.as_list() == [None, input_size], \ 28 | f"Wrong input shape. Expected [None, {input_size}] but got {target.input.shape.as_list()}" 29 | i = 0 30 | expected = [[Dense, [None, 25], relu], 31 | [Dense, [None, 15], relu], 32 | [Dense, [None, classes], linear]] 33 | 34 | for layer in target.layers: 35 | assert type(layer) == expected[i][0], \ 36 | f"Wrong type in layer {i}. Expected {expected[i][0]} but got {type(layer)}" 37 | assert layer.output.shape.as_list() == expected[i][1], \ 38 | f"Wrong number of units in layer {i}. Expected {expected[i][1]} but got {layer.output.shape.as_list()}" 39 | assert layer.activation == expected[i][2], \ 40 | f"Wrong activation in layer {i}. 
Expected {expected[i][2]} but got {layer.activation}" 41 | i = i + 1 42 | 43 | print("\033[92mAll tests passed!") 44 | -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/__pycache__/lab_utils_backprop.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/__pycache__/lab_utils_backprop.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network0.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network0.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network0_a.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network0_a.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network0_diff.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network0_diff.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network0_j.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network0_j.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network1.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network1_a.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network1_a.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network1_c.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network1_c.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network1_d.PNG: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network1_d.PNG -------------------------------------------------------------------------------- /DeepLearning/Week2Optional/images/C2_W2_BP_network1_jdsq.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week2Optional/images/C2_W2_BP_network1_jdsq.PNG -------------------------------------------------------------------------------- /DeepLearning/Week3/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week3/data/c2w3_lab2_data1.csv: -------------------------------------------------------------------------------- 1 | 1.000000000000000000e+03,1.287842045686397512e+02 2 | 1.030303030303030255e+03,1.734242769500015697e+02 3 | 1.060606060606060510e+03,1.802564336525880151e+02 4 | 1.090909090909090992e+03,2.446794756889501627e+02 5 | 1.121212121212121247e+03,1.434604977434445061e+02 6 | 1.151515151515151501e+03,2.045800309450459054e+02 7 | 1.181818181818181756e+03,2.454042948081866484e+02 8 | 1.212121212121212011e+03,2.008705650086419041e+02 9 | 1.242424242424242493e+03,1.824458483886727436e+02 10 | 1.272727272727272748e+03,2.506004560127234697e+02 11 | 1.303030303030303003e+03,2.153410502349354374e+02 12 | 1.333333333333333485e+03,1.757744721261364305e+02 13 | 1.363636363636363512e+03,2.275500270221369021e+02 14 | 1.393939393939393995e+03,1.971439758918511984e+02 15 | 1.424242424242424249e+03,2.372311235639274116e+02 16 | 1.454545454545454504e+03,1.787522909238361137e+02 17 | 1.484848484848484986e+03,2.433613668249439286e+02 18 | 1.515151515151515014e+03,2.338692884524472788e+02 19 | 1.545454545454545496e+03,1.693033877076997271e+02 20 | 1.575757575757575751e+03,2.277564416466448449e+02 21 | 1.606060606060606005e+03,2.284064186256713072e+02 22 | 1.636363636363636488e+03,2.073355108457962785e+02 23 | 1.666666666666666742e+03,1.815184879281909787e+02 24 | 1.696969696969696997e+03,2.394967629953531230e+02 25 | 1.727272727272727252e+03,2.679763092887683342e+02 26 | 1.757575757575757507e+03,2.578305999692003070e+02 27 | 1.787878787878787989e+03,2.544377753383037088e+02 28 | 1.818181818181818244e+03,2.031730194919663859e+02 29 | 1.848484848484848499e+03,2.308685249223791516e+02 30 | 1.878787878787878981e+03,2.184733346626521779e+02 31 | 1.909090909090909008e+03,3.191306166915554741e+02 32 | 1.939393939393939490e+03,3.091552082524627281e+02 33 | 1.969696969696969745e+03,2.397621893206443815e+02 34 | 2.000000000000000000e+03,3.128519835550055745e+02 35 | 2.030303030303030255e+03,2.168340839395049500e+02 36 | 2.060606060606060964e+03,3.148852710295237216e+02 37 | 2.090909090909090992e+03,2.742532491918257165e+02 38 | 2.121212121212121019e+03,3.046852985246734988e+02 39 | 2.151515151515151501e+03,2.756644469217490041e+02 40 | 2.181818181818181984e+03,2.723978121299111308e+02 41 | 2.212121212121212011e+03,2.934917335165571330e+02 42 | 2.242424242424242493e+03,2.943905941942586537e+02 43 | 2.272727272727272975e+03,3.405530995878719409e+02 44 | 2.303030303030303003e+03,3.400732521479328625e+02 45 | 
2.333333333333333485e+03,2.988662633827075297e+02 46 | 2.363636363636363967e+03,3.001550622135666231e+02 47 | 2.393939393939393995e+03,3.242976884247309499e+02 48 | 2.424242424242424022e+03,3.121859083247088051e+02 49 | 2.454545454545454504e+03,3.661108969477970732e+02 50 | 2.484848484848484986e+03,3.193909598966387762e+02 51 | 2.515151515151515014e+03,2.636618303870548061e+02 52 | 2.545454545454545496e+03,3.458403047642118509e+02 53 | 2.575757575757575978e+03,2.710460382983969794e+02 54 | 2.606060606060606005e+03,3.667018741908710808e+02 55 | 2.636363636363636488e+03,3.252192748593863598e+02 56 | 2.666666666666666970e+03,3.097501603150986966e+02 57 | 2.696969696969696997e+03,3.296858795406530476e+02 58 | 2.727272727272727025e+03,3.524245775113896002e+02 59 | 2.757575757575757962e+03,3.510404308014648223e+02 60 | 2.787878787878787989e+03,3.079269650788658055e+02 61 | 2.818181818181818016e+03,3.069943955153893853e+02 62 | 2.848484848484848499e+03,3.539077476721453195e+02 63 | 2.878787878787878981e+03,3.352324353289420742e+02 64 | 2.909090909090909008e+03,3.273924741931961648e+02 65 | 2.939393939393939490e+03,3.025989465693812690e+02 66 | 2.969696969696969973e+03,3.618025422089732501e+02 67 | 3.000000000000000000e+03,2.856008452297830331e+02 68 | 3.030303030303030482e+03,3.411536698052910310e+02 69 | 3.060606060606060510e+03,3.489797458150254670e+02 70 | 3.090909090909090992e+03,3.743855510791337338e+02 71 | 3.121212121212121474e+03,3.458251057024332908e+02 72 | 3.151515151515151501e+03,3.688809619499662631e+02 73 | 3.181818181818181984e+03,3.082625550618088823e+02 74 | 3.212121212121212011e+03,3.920723232707963462e+02 75 | 3.242424242424242493e+03,3.327976931997177985e+02 76 | 3.272727272727272975e+03,4.090678984547932941e+02 77 | 3.303030303030303003e+03,3.736736998711214710e+02 78 | 3.333333333333333485e+03,3.124181841457580617e+02 79 | 3.363636363636363967e+03,3.720158352114816580e+02 80 | 3.393939393939393995e+03,3.213539960630363908e+02 81 | 3.424242424242424477e+03,3.912140172368818298e+02 82 | 3.454545454545454504e+03,3.000950109054720087e+02 83 | 3.484848484848484986e+03,3.643170875165196776e+02 84 | 3.515151515151515468e+03,3.629992314574616898e+02 85 | 3.545454545454545496e+03,3.992162896635122138e+02 86 | 3.575757575757575978e+03,3.998255200898182693e+02 87 | 3.606060606060606005e+03,3.984808009785242575e+02 88 | 3.636363636363636488e+03,3.665970345461780084e+02 89 | 3.666666666666666970e+03,3.520949729358272293e+02 90 | 3.696969696969696997e+03,3.782772286849595957e+02 91 | 3.727272727272727479e+03,3.125773626416007573e+02 92 | 3.757575757575757507e+03,4.140512442839284404e+02 93 | 3.787878787878787989e+03,3.238766160021849032e+02 94 | 3.818181818181818471e+03,3.914460937947946491e+02 95 | 3.848484848484848499e+03,3.508151890986384842e+02 96 | 3.878787878787878981e+03,4.139147020133902402e+02 97 | 3.909090909090909008e+03,3.714006885964357139e+02 98 | 3.939393939393939490e+03,3.602809108836295877e+02 99 | 3.969696969696969973e+03,3.251339093444213404e+02 100 | 4.000000000000000000e+03,3.833239023012517350e+02 101 | -------------------------------------------------------------------------------- /DeepLearning/Week3/data/data_w3_ex1.csv: -------------------------------------------------------------------------------- 1 | 1.651000000000000227e+03,4.326452172406379759e+02 2 | 1.691816326530612287e+03,4.549355296196551421e+02 3 | 1.732632653061224573e+03,4.715252475759918411e+02 4 | 1.773448979591836860e+03,4.825063887519466448e+02 5 | 
1.814265306122449374e+03,4.683578863371971579e+02 6 | 1.855081632653061433e+03,4.821525306782943971e+02 7 | 1.895897959183673720e+03,5.400217555097202649e+02 8 | 1.936714285714286007e+03,5.345842671578692489e+02 9 | 1.977530612244898066e+03,5.583462076116229582e+02 10 | 2.018346938775510353e+03,5.664234447617513979e+02 11 | 2.059163265306122412e+03,5.813976511525488604e+02 12 | 2.099979591836734926e+03,5.964587372673581740e+02 13 | 2.140795918367346985e+03,5.967148316883482266e+02 14 | 2.181612244897959044e+03,6.194513901486195664e+02 15 | 2.222428571428571558e+03,6.165762649903032298e+02 16 | 2.263244897959184073e+03,6.531624598802053470e+02 17 | 2.304061224489796132e+03,6.665199210989537733e+02 18 | 2.344877551020408191e+03,6.705897593631339078e+02 19 | 2.385693877551020705e+03,6.690228872001849822e+02 20 | 2.426510204081633219e+03,6.789093230505670817e+02 21 | 2.467326530612245278e+03,7.074369643938030094e+02 22 | 2.508142857142857338e+03,7.107602881111782835e+02 23 | 2.548959183673469852e+03,7.451913410872418808e+02 24 | 2.589775510204081911e+03,7.298457618209563407e+02 25 | 2.630591836734693970e+03,7.438029215960094689e+02 26 | 2.671408163265306030e+03,7.382026910802763950e+02 27 | 2.712224489795918544e+03,7.729461222927953941e+02 28 | 2.753040816326530603e+03,7.722177055478060765e+02 29 | 2.793857142857142662e+03,7.842138118483546805e+02 30 | 2.834673469387755176e+03,7.764326081753612243e+02 31 | 2.875489795918367690e+03,8.047762635147317951e+02 32 | 2.916306122448979750e+03,8.332724588451544605e+02 33 | 2.957122448979591809e+03,8.256903699737844136e+02 34 | 2.997938775510204323e+03,8.210533253634432640e+02 35 | 3.038755102040816382e+03,8.338220373981649800e+02 36 | 3.079571428571428896e+03,8.330614217591135002e+02 37 | 3.120387755102041410e+03,8.256980744196746400e+02 38 | 3.161204081632653470e+03,8.435773369341511625e+02 39 | 3.202020408163265529e+03,8.693955170200721341e+02 40 | 3.242836734693877588e+03,8.515030147483425935e+02 41 | 3.283653061224490102e+03,8.631825574614074412e+02 42 | 3.324469387755102161e+03,8.530060131500782745e+02 43 | 3.365285714285714221e+03,8.771567537653886575e+02 44 | 3.406102040816326735e+03,8.637419958565878915e+02 45 | 3.446918367346938794e+03,8.746719415444281367e+02 46 | 3.487734693877551763e+03,8.777363361538990603e+02 47 | 3.528551020408163367e+03,8.741125977570638952e+02 48 | 3.569367346938775881e+03,8.828032952080189943e+02 49 | 3.610183673469387486e+03,9.108327208115878193e+02 50 | 3.651000000000000455e+03,8.974204630543912344e+02 51 | -------------------------------------------------------------------------------- /DeepLearning/Week3/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | lines.markersize: 12 5 | 6 | legend.fancybox: true 7 | 8 | # Verdana" for non-math text, 9 | # Cambria Math 10 | 11 | #Blue (Crayon-Aqua) 0096FF 12 | #Dark Red C00000 13 | #Orange (Apple Orange) FF9300 14 | #Black 000000 15 | #Magenta FF40FF 16 | #Purple 7030A0 17 | 18 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 19 | #axes.facecolor: f0f0f0 # grey 20 | axes.facecolor: ffffff # white 21 | axes.labelsize: large 22 | axes.axisbelow: true 23 | axes.grid: False 24 | axes.edgecolor: f0f0f0 25 | axes.linewidth: 3.0 26 | axes.titlesize: x-large 27 | 28 | patch.edgecolor: f0f0f0 29 | patch.linewidth: 0.5 30 | 31 | svg.fonttype: path 32 | 33 | 
grid.linestyle: - 34 | grid.linewidth: 1.0 35 | grid.color: cbcbcb 36 | 37 | xtick.major.size: 0 38 | xtick.minor.size: 0 39 | ytick.major.size: 0 40 | ytick.minor.size: 0 41 | 42 | savefig.edgecolor: f0f0f0 43 | savefig.facecolor: f0f0f0 44 | 45 | #figure.subplot.left: 0.08 46 | #figure.subplot.right: 0.95 47 | #figure.subplot.bottom: 0.07 48 | 49 | #figure.facecolor: f0f0f0 # grey 50 | figure.facecolor: ffffff # white 51 | figure.figsize: 12,8 52 | 53 | ## *************************************************************************** 54 | ## * FONT * 55 | ## *************************************************************************** 56 | ## The font properties used by `text.Text`. 57 | ## See https://matplotlib.org/api/font_manager_api.html for more information 58 | ## on font properties. The 6 font properties used for font matching are 59 | ## given below with their default values. 60 | ## 61 | ## The font.family property can take either a concrete font name (not supported 62 | ## when rendering text with usetex), or one of the following five generic 63 | ## values: 64 | ## - 'serif' (e.g., Times), 65 | ## - 'sans-serif' (e.g., Helvetica), 66 | ## - 'cursive' (e.g., Zapf-Chancery), 67 | ## - 'fantasy' (e.g., Western), and 68 | ## - 'monospace' (e.g., Courier). 69 | ## Each of these values has a corresponding default list of font names 70 | ## (font.serif, etc.); the first available font in the list is used. Note that 71 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 72 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 73 | ## Matplotlib and is thus guaranteed to be available; the other entries are 74 | ## left as examples of other possible values. 75 | ## 76 | ## The font.style property has three values: normal (or roman), italic 77 | ## or oblique. The oblique style will be used for italic, if it is not 78 | ## present. 79 | ## 80 | ## The font.variant property has two values: normal or small-caps. For 81 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 82 | ## to using a font size of 'smaller', or about 83%% of the current font 83 | ## size. 84 | ## 85 | ## The font.weight property has effectively 13 values: normal, bold, 86 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 87 | ## 400, and bold is 700. bolder and lighter are relative values with 88 | ## respect to the current weight. 89 | ## 90 | ## The font.stretch property has 11 values: ultra-condensed, 91 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 92 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 93 | ## property is not currently implemented. 94 | ## 95 | ## The font.size property is the default font size for text, given in points. 96 | ## 10 pt is the standard value. 97 | ## 98 | ## Note that font.size controls default text sizes. To configure 99 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 100 | ## settings for axes and ticks. 
Special text sizes can be defined 101 | ## relative to font.size, using the following values: xx-small, x-small, 102 | ## small, medium, large, x-large, xx-large, larger, or smaller 103 | 104 | 105 | font.family: sans-serif 106 | font.style: normal 107 | font.variant: normal 108 | font.weight: normal 109 | font.stretch: normal 110 | font.size: 12.0 111 | 112 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 113 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 114 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 115 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 116 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 117 | 118 | 119 | ## *************************************************************************** 120 | ## * TEXT * 121 | ## *************************************************************************** 122 | ## The text properties used by `text.Text`. 123 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 124 | ## for more information on text properties 125 | #text.color: black -------------------------------------------------------------------------------- /DeepLearning/Week3/images/C2_W3_BiasVariance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3/images/C2_W3_BiasVariance.png -------------------------------------------------------------------------------- /DeepLearning/Week3/images/C2_W3_NN_Arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3/images/C2_W3_NN_Arch.png -------------------------------------------------------------------------------- /DeepLearning/Week3/images/C2_W3_poly.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3/images/C2_W3_poly.png -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/__pycache__/assigment_utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/__pycache__/assigment_utils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/__pycache__/public_tests_a1.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/__pycache__/public_tests_a1.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/deeplearning.mplstyle: 
-------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. 
This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/images/C2_W3_BiasVarianceDegree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W3_BiasVarianceDegree.png -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/images/C2_W3_Compute_Cost_linear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W3_Compute_Cost_linear.png -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/images/C2_W3_TrainingVsNew.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W3_TrainingVsNew.png -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/images/C2_W3_TrainingVsNew_Slide.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W3_TrainingVsNew_Slide.png -------------------------------------------------------------------------------- 
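The deeplearning.mplstyle files included with each week are ordinary Matplotlib rc style sheets. As a minimal sketch (not part of the repository), this is how such a sheet is applied before plotting; it assumes the style file sits in the current working directory, which is how the lab utilities in this repo reference it:

import numpy as np
import matplotlib.pyplot as plt

# Load the course style sheet; colors, line widths and fonts come from the rc file.
plt.style.use('./deeplearning.mplstyle')

# Any plot created afterwards picks up those settings.
x = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots(1, 1, figsize=(4, 3))
ax.plot(x, np.cos(x) + 1)   # drawn with the first cycler color (0096FF)
ax.set_title("deeplearning.mplstyle demo")
plt.show()

This mirrors the plt.style.use('./deeplearning.mplstyle') call made at import time by autils.py and lab_utils_softmax.py above.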
/DeepLearning/Week3Assignment/images/C2_W4_degree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W4_degree.png -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/images/C2_W4_justright.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week3Assignment/images/C2_W4_justright.PNG -------------------------------------------------------------------------------- /DeepLearning/Week3Assignment/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn import datasets 3 | 4 | 5 | def load_data(): 6 | iris = datasets.load_iris() 7 | X = iris.data[:, :2] # we only take the first two features. 8 | y = iris.target 9 | 10 | X = X[y != 2] # only two classes 11 | y = y[y != 2] 12 | return X, y -------------------------------------------------------------------------------- /DeepLearning/Week4/__pycache__/public_tests.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/__pycache__/public_tests.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week4/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week4/images/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/0.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/1.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/2.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/3.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/4.png 
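For reference, the load_data helper in DeepLearning/Week3Assignment/utils.py above keeps only the first two iris features and drops the third class, giving a small binary-classification set. A short usage sketch follows; the LogisticRegression model is an illustrative assumption, not part of the assignment:

from sklearn import datasets
from sklearn.linear_model import LogisticRegression

def load_data():
    # Same logic as Week3Assignment/utils.py: two features, two classes.
    iris = datasets.load_iris()
    X = iris.data[:, :2]
    y = iris.target
    X = X[y != 2]
    y = y[y != 2]
    return X, y

X, y = load_data()                    # X has shape (100, 2), y contains 0s and 1s
clf = LogisticRegression().fit(X, y)  # illustrative classifier, assumption only
print("training accuracy:", clf.score(X, y))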
-------------------------------------------------------------------------------- /DeepLearning/Week4/images/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/5.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/6.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/7.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/8.png -------------------------------------------------------------------------------- /DeepLearning/Week4/images/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4/images/9.png -------------------------------------------------------------------------------- /DeepLearning/Week4/public_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def compute_entropy_test(target): 4 | y = np.array([1] * 10) 5 | result = target(y) 6 | 7 | assert result == 0, "Entropy must be 0 with array of ones" 8 | 9 | y = np.array([0] * 10) 10 | result = target(y) 11 | 12 | assert result == 0, "Entropy must be 0 with array of zeros" 13 | 14 | y = np.array([0] * 12 + [1] * 12) 15 | result = target(y) 16 | 17 | assert result == 1, "Entropy must be 1 with same ammount of ones and zeros" 18 | 19 | y = np.array([1, 0, 1, 0, 1, 1, 1, 0, 1]) 20 | assert np.isclose(target(y), 0.918295, atol=1e-6), "Wrong value. Something between 0 and 1" 21 | assert np.isclose(target(-y + 1), target(y), atol=1e-6), "Wrong value" 22 | 23 | print("\033[92m All tests passed. ") 24 | 25 | def split_dataset_test(target): 26 | 27 | # Case 1 28 | X = np.array([[1, 0], 29 | [1, 0], 30 | [1, 1], 31 | [0, 0], 32 | [0, 1]]) 33 | X_t = np.array([[0, 1, 0, 1, 0]]) 34 | X = np.concatenate((X, X_t.T), axis=1) 35 | 36 | left, right = target(X, list(range(5)), 2) 37 | expected = {'left': np.array([1, 3]), 38 | 'right': np.array([0, 2, 4])} 39 | 40 | assert type(left) == list, f"Wrong type for left. Expected: list got: {type(left)}" 41 | assert type(right) == list, f"Wrong type for right. Expected: list got: {type(right)}" 42 | 43 | assert type(left[0]) == int, f"Wrong type for elements in the left list. Expected: int got: {type(left[0])}" 44 | assert type(right[0]) == int, f"Wrong type for elements in the right list. 
Expected: number got: {type(right[0])}" 45 | 46 | assert len(left) == 2, f"left must have 2 elements but got: {len(left)}" 47 | assert len(right) == 3, f"right must have 3 elements but got: {len(right)}" 48 | 49 | assert np.allclose(right, expected['right']), f"Wrong value for right. Expected: { expected['right']} \ngot: {right}" 50 | assert np.allclose(left, expected['left']), f"Wrong value for left. Expected: { expected['left']} \ngot: {left}" 51 | 52 | 53 | # Case 2 54 | X = np.array([[0, 1], 55 | [1, 1], 56 | [1, 1], 57 | [0, 0], 58 | [1, 0]]) 59 | X_t = np.array([[0, 1, 0, 1, 0]]) 60 | X = np.concatenate((X_t.T, X), axis=1) 61 | 62 | left, right = target(X, list(range(5)), 0) 63 | expected = {'left': np.array([1, 3]), 64 | 'right': np.array([0, 2, 4])} 65 | 66 | 67 | assert len(left) == 2, f"left must have 2 elements but got: {len(left)}" 68 | assert len(right) == 3, f"right must have 3 elements but got: {len(right)}" 69 | assert np.allclose(right, expected['right']) and np.allclose(left, expected['left']), f"Wrong value when target is at index 0." 70 | 71 | 72 | # Case 3 73 | X = (np.random.rand(11, 3) > 0.5) * 1 # Just random binary numbers 74 | X_t = np.array([[0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0]]) 75 | X = np.concatenate((X, X_t.T), axis=1) 76 | 77 | left, right = target(X, [1, 2, 3, 6, 7, 9, 10], 3) 78 | expected = {'left': np.array([1, 3, 6]), 79 | 'right': np.array([2, 7, 9, 10])} 80 | 81 | assert np.allclose(right, expected['right']) and np.allclose(left, expected['left']), f"Wrong value when target is at index 0. \nExpected: {expected} \ngot: \{left:{left}, 'right': {right}\}" 82 | 83 | 84 | print("\033[92m All tests passed.") 85 | 86 | def compute_information_gain_test(target): 87 | X = np.array([[1, 0], 88 | [1, 0], 89 | [1, 0], 90 | [0, 0], 91 | [0, 1]]) 92 | 93 | y = np.array([[0, 0, 0, 0, 0]]).T 94 | node_indexes = list(range(5)) 95 | 96 | result1 = target(X, y, node_indexes, 0) 97 | result2 = target(X, y, node_indexes, 0) 98 | 99 | assert result1 == 0 and result2 == 0, f"Information gain must be 0 when target variable is pure. Got {result1} and {result2}" 100 | 101 | y = np.array([[0, 1, 0, 1, 0]]).T 102 | node_indexes = list(range(5)) 103 | 104 | result = target(X, y, node_indexes, 0) 105 | assert np.isclose(result, 0.019973, atol=1e-6), f"Wrong information gain. Expected {0.019973} got: {result}" 106 | 107 | result = target(X, y, node_indexes, 1) 108 | assert np.isclose(result, 0.170951, atol=1e-6), f"Wrong information gain. Expected {0.170951} got: {result}" 109 | 110 | node_indexes = list(range(4)) 111 | result = target(X, y, node_indexes, 0) 112 | assert np.isclose(result, 0.311278, atol=1e-6), f"Wrong information gain. Expected {0.311278} got: {result}" 113 | 114 | result = target(X, y, node_indexes, 1) 115 | assert np.isclose(result, 0, atol=1e-6), f"Wrong information gain. Expected {0.0} got: {result}" 116 | 117 | print("\033[92m All tests passed.") 118 | 119 | def get_best_split_test(target): 120 | X = np.array([[1, 0], 121 | [1, 0], 122 | [1, 0], 123 | [0, 0], 124 | [0, 1]]) 125 | 126 | y = np.array([[0, 0, 0, 0, 0]]).T 127 | node_indexes = list(range(5)) 128 | 129 | result = target(X, y, node_indexes) 130 | 131 | assert result == -1, f"When the target variable is pure, there is no best split to do. Expected -1, got {result}" 132 | 133 | y = X[:,0] 134 | result = target(X, y, node_indexes) 135 | assert result == 0, f"If the target is fully correlated with other feature, that feature must be the best split. 
Expected 0, got {result}" 136 | y = X[:,1] 137 | result = target(X, y, node_indexes) 138 | assert result == 1, f"If the target is fully correlated with other feature, that feature must be the best split. Expected 1, got {result}" 139 | 140 | y = 1 - X[:,0] 141 | result = target(X, y, node_indexes) 142 | assert result == 0, f"If the target is fully correlated with other feature, that feature must be the best split. Expected 0, got {result}" 143 | 144 | y = np.array([[0, 1, 0, 1, 0]]).T 145 | result = target(X, y, node_indexes) 146 | assert result == 1, f"Wrong result. Expected 1, got {result}" 147 | 148 | y = np.array([[0, 1, 0, 1, 0]]).T 149 | node_indexes = [2, 3, 4] 150 | result = target(X, y, node_indexes) 151 | assert result == 0, f"Wrong result. Expected 0, got {result}" 152 | 153 | n_samples = 100 154 | X0 = np.array([[1] * n_samples]) 155 | X1 = np.array([[0] * n_samples]) 156 | X2 = (np.random.rand(1, 100) > 0.5) * 1 157 | X3 = np.array([[1] * int(n_samples / 2) + [0] * int(n_samples / 2)]) 158 | 159 | y = X2.T 160 | node_indexes = list(range(20, 80)) 161 | X = np.array([X0, X1, X2, X3]).T.reshape(n_samples, 4) 162 | result = target(X, y, node_indexes) 163 | 164 | assert result == 2, f"Wrong result. Expected 2, got {result}" 165 | 166 | y = X0.T 167 | result = target(X, y, node_indexes) 168 | assert result == -1, f"When the target variable is pure, there is no best split to do. Expected -1, got {result}" 169 | print("\033[92m All tests passed.") 170 | -------------------------------------------------------------------------------- /DeepLearning/Week4/utils.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import networkx as nx 3 | import matplotlib.pyplot as plt 4 | from networkx.drawing.nx_pydot import graphviz_layout 5 | 6 | def generate_node_image(node_indices): 7 | image_paths = ["images/%d.png" % idx for idx in node_indices] 8 | images = [Image.open(x) for x in image_paths] 9 | widths, heights = zip(*(i.size for i in images)) 10 | 11 | total_width = sum(widths) 12 | max_height = max(heights) 13 | 14 | new_im = Image.new('RGB', (total_width, max_height)) 15 | 16 | x_offset = 0 17 | for im in images: 18 | new_im.paste(im, (x_offset,0)) 19 | x_offset += im.size[0] 20 | 21 | new_im = new_im.resize((int(total_width*len(node_indices)/10), int(max_height*len(node_indices)/10))) 22 | 23 | return new_im 24 | 25 | 26 | def generate_split_viz(node_indices, left_indices, right_indices, feature): 27 | 28 | G=nx.DiGraph() 29 | 30 | indices_list = [node_indices, left_indices, right_indices] 31 | for idx, indices in enumerate(indices_list): 32 | G.add_node(idx,image= generate_node_image(indices)) 33 | 34 | G.add_edge(0,1) 35 | G.add_edge(0,2) 36 | 37 | pos = graphviz_layout(G, prog="dot") 38 | 39 | fig=plt.figure() 40 | ax=plt.subplot(111) 41 | ax.set_aspect('equal') 42 | nx.draw_networkx_edges(G,pos,ax=ax, arrows=True, arrowsize=40) 43 | 44 | trans=ax.transData.transform 45 | trans2=fig.transFigure.inverted().transform 46 | 47 | feature_name = ["Brown Cap", "Tapering Stalk Shape", "Solitary"][feature] 48 | ax_name = ["Splitting on %s" % feature_name , "Left: %s = 1" % feature_name, "Right: %s = 0" % feature_name] 49 | for idx, n in enumerate(G): 50 | xx,yy=trans(pos[n]) # figure coordinates 51 | xa,ya=trans2((xx,yy)) # axes coordinates 52 | piesize = len(indices_list[idx])/9 53 | p2=piesize/2.0 54 | a = plt.axes([xa-p2,ya-p2, piesize, piesize]) 55 | a.set_aspect('equal') 56 | a.imshow(G.nodes[n]['image']) 57 | a.axis('off') 58 
| a.set_title(ax_name[idx]) 59 | ax.axis('off') 60 | plt.show() 61 | 62 | 63 | def generate_tree_viz(root_indices, y, tree): 64 | 65 | G=nx.DiGraph() 66 | 67 | 68 | G.add_node(0,image= generate_node_image(root_indices)) 69 | idx = 1 70 | root = 0 71 | 72 | num_images = [len(root_indices)] 73 | 74 | feature_name = ["Brown Cap", "Tapering Stalk Shape", "Solitary"] 75 | y_name = ["Poisonous","Edible"] 76 | 77 | decision_names = [] 78 | leaf_names = [] 79 | 80 | for i, level in enumerate(tree): 81 | indices_list = level[:2] 82 | for indices in indices_list: 83 | G.add_node(idx,image= generate_node_image(indices)) 84 | G.add_edge(root, idx) 85 | 86 | # For visualization 87 | num_images.append(len(indices)) 88 | idx += 1 89 | if i > 0: 90 | leaf_names.append("Leaf node: %s" % y_name[max(y[indices])]) 91 | 92 | decision_names.append("Split on: %s" % feature_name[level[2]]) 93 | root += 1 94 | 95 | 96 | node_names = decision_names + leaf_names 97 | pos = graphviz_layout(G, prog="dot") 98 | 99 | fig=plt.figure(figsize=(14, 10)) 100 | ax=plt.subplot(111) 101 | ax.set_aspect('equal') 102 | nx.draw_networkx_edges(G,pos,ax=ax, arrows=True, arrowsize=40) 103 | 104 | trans=ax.transData.transform 105 | trans2=fig.transFigure.inverted().transform 106 | 107 | for idx, n in enumerate(G): 108 | xx,yy=trans(pos[n]) # figure coordinates 109 | xa,ya=trans2((xx,yy)) # axes coordinates 110 | piesize = num_images[idx]/25 111 | p2=piesize/2.0 112 | a = plt.axes([xa-p2,ya-p2, piesize, piesize]) 113 | a.set_aspect('equal') 114 | a.imshow(G.nodes[n]['image']) 115 | a.axis('off') 116 | a.set_title(node_names[idx], y=-0.8, fontsize=13, loc="left") 117 | ax.axis('off') 118 | plt.show() -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## 
*************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. 
Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/0.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/1.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/2.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/3.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/4.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/5.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/6.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/7.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/8.png -------------------------------------------------------------------------------- /DeepLearning/Week4Assignment/images/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/DeepLearning/Week4Assignment/images/9.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Supervised-Machine-Learning-Regression-and-Classification 2 | All optional labs are covered here, courtesy of Andrew Ng. 3 | 4 | # Stay connected: next we will bring a series of Kaggle lectures
5 | `https://www.kaggle.com/learn` 6 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/README.md: -------------------------------------------------------------------------------- 1 | Contains:- 2 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week1/CostFunction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week1/CostFunction.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week1/Gradient Descent soltuion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week1/Gradient Descent soltuion.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week1/Python and Jupyter Notebooks.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "8fdca02a", 6 | "metadata": {}, 7 | "source": [ 8 | "# Optional Lab: Brief Introduction to Python and Jupyter Notebooks\n", 9 | "Welcome to the first optional lab! \n", 10 | "Optional labs are available to:\n", 11 | "- provide information - like this notebook\n", 12 | "- reinforce lecture material with hands-on examples\n", 13 | "- provide working examples of routines used in the graded labs" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "id": "3ae3d378", 19 | "metadata": {}, 20 | "source": [ 21 | "## Goals\n", 22 | "In this lab, you will:\n", 23 | "- Get a brief introduction to Jupyter notebooks\n", 24 | "- Take a tour of Jupyter notebooks\n", 25 | "- Learn the difference between markdown cells and code cells\n", 26 | "- Practice some basic python\n" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "id": "245bc782", 32 | "metadata": {}, 33 | "source": [ 34 | "The easiest way to become familiar with Jupyter notebooks is to take the tour available above in the Help menu:" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "17521231", 40 | "metadata": {}, 41 | "source": [ 42 | "
\n", 43 | "
missing
\n", 44 | "
" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "id": "557313f9", 50 | "metadata": {}, 51 | "source": [ 52 | "Jupyter notebooks have two types of cells that are used in this course. Cells such as this which contain documentation called `Markdown Cells`. The name is derived from the simple formatting language used in the cells. You will not be required to produce markdown cells. Its useful to understand the `cell pulldown` shown in graphic below. Occasionally, a cell will end up in the wrong mode and you may need to restore it to the right state:" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "id": "e1c82249", 58 | "metadata": {}, 59 | "source": [ 60 | "
\n", 61 | " missing\n", 62 | "
" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "id": "a7f870f6", 68 | "metadata": {}, 69 | "source": [ 70 | "The other type of cell is the `code cell` where you will write your code:" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 1, 76 | "id": "2017b458", 77 | "metadata": {}, 78 | "outputs": [ 79 | { 80 | "name": "stdout", 81 | "output_type": "stream", 82 | "text": [ 83 | "This is code cell\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "#This is a 'Code' Cell\n", 89 | "print(\"This is code cell\")" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "id": "832f3f2c", 95 | "metadata": {}, 96 | "source": [ 97 | "## Python\n", 98 | "You can write your code in the code cells. \n", 99 | "To run the code, select the cell and either\n", 100 | "- hold the shift-key down and hit 'enter' or 'return'\n", 101 | "- click the 'run' arrow above\n", 102 | "
\n", 103 | " \n", 104 | "
\n", 105 | "\n", 106 | " " 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "id": "2d6125aa", 112 | "metadata": {}, 113 | "source": [ 114 | "### Print statement\n", 115 | "Print statements will generally use the python f-string style. \n", 116 | "Try creating your own print in the following cell. \n", 117 | "Try both methods of running the cell." 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 2, 123 | "id": "55b8eaca", 124 | "metadata": {}, 125 | "outputs": [ 126 | { 127 | "name": "stdout", 128 | "output_type": "stream", 129 | "text": [ 130 | "f strings allow you to embed variables right in the strings!\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "# print statements\n", 136 | "variable = \"right in the strings!\"\n", 137 | "print(f\"f strings allow you to embed variables {variable}\")" 138 | ] 139 | }, 140 | { 141 | "cell_type": "markdown", 142 | "id": "1d8cc3cc", 143 | "metadata": {}, 144 | "source": [ 145 | "# Congratulations!\n", 146 | "You now know how to find your way around a Jupyter Notebook." 147 | ] 148 | } 149 | ], 150 | "metadata": { 151 | "kernelspec": { 152 | "display_name": "Python 3.10.4 64-bit", 153 | "language": "python", 154 | "name": "python3" 155 | }, 156 | "language_info": { 157 | "codemirror_mode": { 158 | "name": "ipython", 159 | "version": 3 160 | }, 161 | "file_extension": ".py", 162 | "mimetype": "text/x-python", 163 | "name": "python", 164 | "nbconvert_exporter": "python", 165 | "pygments_lexer": "ipython3", 166 | "version": "3.10.4" 167 | }, 168 | "vscode": { 169 | "interpreter": { 170 | "hash": "2d3283b7ede15a3ba02d29b57611833951a6474bfc2330b92af0513ee46fd488" 171 | } 172 | } 173 | }, 174 | "nbformat": 4, 175 | "nbformat_minor": 5 176 | } 177 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week2/MachineLearning_Steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week2/MachineLearning_Steps.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week2/Trying_2nd_Lab.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 4, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import numpy as np\n", 10 | "import copy, math\n", 11 | "import matplotlib.pyplot as plt\n", 12 | "np.set_printoptions(precision=2)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "# Trying " 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 8, 34 | "metadata": {}, 35 | "outputs": [ 36 | { 37 | "name": "stdout", 38 | "output_type": "stream", 39 | "text": [ 40 | "The xtrainig example are :- \n", 41 | "[[2104 5 1 45]\n", 42 | " [1416 3 2 40]\n", 43 | " [ 852 2 1 35]]\n", 44 | "The youtputs are :- \n", 45 | "[460 232 178]\n" 46 | ] 47 | } 48 | ], 49 | "source": [ 50 | "X_train = np.array([[2104,5,1,45],[1416,3,2,40],[852,2,1,35]])\n", 51 | "y_train = np.array([460,232,178])\n", 52 | "print(f\"The xtrainig example are 
:- \\n{X_train}\")\n", 53 | "print(f\"The youtputs are :- \\n{y_train}\")" 54 | ] 55 | } 56 | ], 57 | "metadata": { 58 | "kernelspec": { 59 | "display_name": "Python 3.10.6 64-bit", 60 | "language": "python", 61 | "name": "python3" 62 | }, 63 | "language_info": { 64 | "codemirror_mode": { 65 | "name": "ipython", 66 | "version": 3 67 | }, 68 | "file_extension": ".py", 69 | "mimetype": "text/x-python", 70 | "name": "python", 71 | "nbconvert_exporter": "python", 72 | "pygments_lexer": "ipython3", 73 | "version": "3.10.0 (tags/v3.10.0:b494f59, Oct 4 2021, 19:00:18) [MSC v.1929 64 bit (AMD64)]" 74 | }, 75 | "orig_nbformat": 4, 76 | "vscode": { 77 | "interpreter": { 78 | "hash": "2d3283b7ede15a3ba02d29b57611833951a6474bfc2330b92af0513ee46fd488" 79 | } 80 | } 81 | }, 82 | "nbformat": 4, 83 | "nbformat_minor": 2 84 | } 85 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week2/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # Data For Science style 2 | # Author: Bruno Goncalves 3 | # Modified from the matplotlib FiveThirtyEight style by 4 | # Author: Cameron Davidson-Pilon, replicated styles from FiveThirtyEight.com 5 | # See https://www.dataorigami.net/blogs/fivethirtyeight-mpl 6 | 7 | lines.linewidth: 4 8 | lines.solid_capstyle: butt 9 | 10 | legend.fancybox: true 11 | 12 | axes.prop_cycle: cycler('color', ['51a7f9', 'cf51f9', '70bf41', 'f39019', 'f9e351', 'f9517b', '6d904f', '8b8b8b','810f7c']) 13 | 14 | axes.labelsize: large 15 | axes.axisbelow: true 16 | axes.grid: true 17 | axes.edgecolor: f0f0f0 18 | axes.linewidth: 3.0 19 | axes.titlesize: x-large 20 | 21 | patch.edgecolor: f0f0f0 22 | patch.linewidth: 0.5 23 | 24 | svg.fonttype: path 25 | 26 | grid.linestyle: - 27 | grid.linewidth: 1.0 28 | 29 | xtick.major.size: 0 30 | xtick.minor.size: 0 31 | ytick.major.size: 0 32 | ytick.minor.size: 0 33 | 34 | font.size: 30.0 35 | 36 | savefig.edgecolor: f0f0f0 37 | savefig.facecolor: f0f0f0 38 | 39 | figure.subplot.left: 0.08 40 | figure.subplot.right: 0.95 41 | figure.subplot.bottom: 0.07 42 | figure.figsize: 12.8, 8.8 43 | figure.autolayout: True 44 | figure.dpi: 300 -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week2/lab_utils_common.py: -------------------------------------------------------------------------------- 1 | """ 2 | lab_utils_common.py 3 | functions common to all optional labs, Course 1, Week 2 4 | """ 5 | 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | 9 | plt.style.use('./deeplearning.mplstyle') 10 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'; 11 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple] 12 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0') 13 | 14 | 15 | ########################################################## 16 | # Regression Routines 17 | ########################################################## 18 | 19 | #Function to calculate the cost 20 | def compute_cost_matrix(X, y, w, b, verbose=False): 21 | """ 22 | Computes the gradient for linear regression 23 | Args: 24 | X (ndarray (m,n)): Data, m examples with n features 25 | y (ndarray (m,)) : target values 26 | w (ndarray (n,)) : model parameters 27 | b (scalar) : model parameter 28 | verbose : (Boolean) If true, print out 
intermediate value f_wb 29 | Returns 30 | cost: (scalar) 31 | """ 32 | m = X.shape[0] 33 | 34 | # calculate f_wb for all examples. 35 | f_wb = X @ w + b 36 | # calculate cost 37 | total_cost = (1/(2*m)) * np.sum((f_wb-y)**2) 38 | 39 | if verbose: print("f_wb:") 40 | if verbose: print(f_wb) 41 | 42 | return total_cost 43 | 44 | def compute_gradient_matrix(X, y, w, b): 45 | """ 46 | Computes the gradient for linear regression 47 | 48 | Args: 49 | X (ndarray (m,n)): Data, m examples with n features 50 | y (ndarray (m,)) : target values 51 | w (ndarray (n,)) : model parameters 52 | b (scalar) : model parameter 53 | Returns 54 | dj_dw (ndarray (n,1)): The gradient of the cost w.r.t. the parameters w. 55 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b. 56 | 57 | """ 58 | m,n = X.shape 59 | f_wb = X @ w + b 60 | e = f_wb - y 61 | dj_dw = (1/m) * (X.T @ e) 62 | dj_db = (1/m) * np.sum(e) 63 | 64 | return dj_db,dj_dw 65 | 66 | 67 | # Loop version of multi-variable compute_cost 68 | def compute_cost(X, y, w, b): 69 | """ 70 | compute cost 71 | Args: 72 | X (ndarray (m,n)): Data, m examples with n features 73 | y (ndarray (m,)) : target values 74 | w (ndarray (n,)) : model parameters 75 | b (scalar) : model parameter 76 | Returns 77 | cost (scalar) : cost 78 | """ 79 | m = X.shape[0] 80 | cost = 0.0 81 | for i in range(m): 82 | f_wb_i = np.dot(X[i],w) + b #(n,)(n,)=scalar 83 | cost = cost + (f_wb_i - y[i])**2 84 | cost = cost/(2*m) 85 | return cost 86 | 87 | def compute_gradient(X, y, w, b): 88 | """ 89 | Computes the gradient for linear regression 90 | Args: 91 | X (ndarray (m,n)): Data, m examples with n features 92 | y (ndarray (m,)) : target values 93 | w (ndarray (n,)) : model parameters 94 | b (scalar) : model parameter 95 | Returns 96 | dj_dw (ndarray Shape (n,)): The gradient of the cost w.r.t. the parameters w. 97 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b. 98 | """ 99 | m,n = X.shape #(number of examples, number of features) 100 | dj_dw = np.zeros((n,)) 101 | dj_db = 0. 102 | 103 | for i in range(m): 104 | err = (np.dot(X[i], w) + b) - y[i] 105 | for j in range(n): 106 | dj_dw[j] = dj_dw[j] + err * X[i,j] 107 | dj_db = dj_db + err 108 | dj_dw = dj_dw/m 109 | dj_db = dj_db/m 110 | 111 | return dj_db,dj_dw 112 | 113 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3/C1_W3_Lab07_Scikit_Learn_Soln.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Ungraded Lab: Logistic Regression using Scikit-Learn\n", 8 | "\n", 9 | "\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## Goals\n", 17 | "In this lab you will:\n", 18 | "- Train a logistic regression model using scikit-learn.\n" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "## Dataset \n", 26 | "Let's start with the same dataset as before." 
27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 1, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "import numpy as np\n", 36 | "\n", 37 | "X = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])\n", 38 | "y = np.array([0, 0, 0, 1, 1, 1])" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Fit the model\n", 46 | "\n", 47 | "The code below imports the [logistic regression model](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression) from scikit-learn. You can fit this model on the training data by calling `fit` function." 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 2, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "data": { 57 | "text/plain": [ 58 | "LogisticRegression()" 59 | ] 60 | }, 61 | "execution_count": 2, 62 | "metadata": {}, 63 | "output_type": "execute_result" 64 | } 65 | ], 66 | "source": [ 67 | "from sklearn.linear_model import LogisticRegression\n", 68 | "\n", 69 | "lr_model = LogisticRegression()\n", 70 | "lr_model.fit(X, y)" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "## Make Predictions\n", 78 | "\n", 79 | "You can see the predictions made by this model by calling the `predict` function." 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 3, 85 | "metadata": {}, 86 | "outputs": [ 87 | { 88 | "name": "stdout", 89 | "output_type": "stream", 90 | "text": [ 91 | "Prediction on training set: [0 0 0 1 1 1]\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "y_pred = lr_model.predict(X)\n", 97 | "\n", 98 | "print(\"Prediction on training set:\", y_pred)" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "## Calculate accuracy\n", 106 | "\n", 107 | "You can calculate this accuracy of this model by calling the `score` function." 
108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 4, 113 | "metadata": {}, 114 | "outputs": [ 115 | { 116 | "name": "stdout", 117 | "output_type": "stream", 118 | "text": [ 119 | "Accuracy on training set: 1.0\n" 120 | ] 121 | } 122 | ], 123 | "source": [ 124 | "print(\"Accuracy on training set:\", lr_model.score(X, y))" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 5, 130 | "id": "13128e84", 131 | "metadata": {}, 132 | "outputs": [ 133 | { 134 | "name": "stdout", 135 | "output_type": "stream", 136 | "text": [ 137 | "HFD\n" 138 | ] 139 | } 140 | ], 141 | "source": [ 142 | "print(\"HFD\")" 143 | ] 144 | } 145 | ], 146 | "metadata": { 147 | "kernelspec": { 148 | "display_name": "base", 149 | "language": "python", 150 | "name": "python3" 151 | }, 152 | "language_info": { 153 | "codemirror_mode": { 154 | "name": "ipython", 155 | "version": 3 156 | }, 157 | "file_extension": ".py", 158 | "mimetype": "text/x-python", 159 | "name": "python", 160 | "nbconvert_exporter": "python", 161 | "pygments_lexer": "ipython3", 162 | "version": "3.9.13" 163 | }, 164 | "vscode": { 165 | "interpreter": { 166 | "hash": "88279d2366fe020547cde40dd65aa0e3aa662a6ec1f3ca12d88834876c85e1a6" 167 | } 168 | } 169 | }, 170 | "nbformat": 4, 171 | "nbformat_minor": 5 172 | } 173 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3/ImportingSklearn.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "Creating a numpy array by importing it." 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 3, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "\n", 18 | "X = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])\n", 19 | "y = np.array([0, 0, 0, 1, 1, 1])" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 16, 25 | "metadata": {}, 26 | "outputs": [ 27 | { 28 | "name": "stdout", 29 | "output_type": "stream", 30 | "text": [ 31 | "LogisticRegression()\n" 32 | ] 33 | } 34 | ], 35 | "source": [ 36 | "from sklearn.linear_model import LogisticRegression\n", 37 | "\n", 38 | "LogoisticR = LogisticRegression()\n", 39 | "LogoisticR.fit(X,y)\n" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 25, 45 | "metadata": {}, 46 | "outputs": [ 47 | { 48 | "name": "stdout", 49 | "output_type": "stream", 50 | "text": [ 51 | "Prediction on training set: [1]\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "PredictedValue = LogoisticR.predict(X1)\n", 57 | "\n", 58 | "print(\"Prediction on training set:\", PredictedValue)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 26, 64 | "metadata": {}, 65 | "outputs": [ 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "Accuracy on training set: 1.0\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "print(\"Accuracy on training set:\", LogoisticR.score(X, y))" 76 | ] 77 | } 78 | ], 79 | "metadata": { 80 | "kernelspec": { 81 | "display_name": "Python 3", 82 | "language": "python", 83 | "name": "python3" 84 | }, 85 | "language_info": { 86 | "codemirror_mode": { 87 | "name": "ipython", 88 | "version": 3 89 | }, 90 | "file_extension": ".py", 91 | "mimetype": "text/x-python", 92 | "name": "python", 93 | "nbconvert_exporter": "python", 94 | "pygments_lexer": "ipython3", 
95 | "version": "3.10.0 (tags/v3.10.0:b494f59, Oct 4 2021, 19:00:18) [MSC v.1929 64 bit (AMD64)]" 96 | }, 97 | "orig_nbformat": 4, 98 | "vscode": { 99 | "interpreter": { 100 | "hash": "2d3283b7ede15a3ba02d29b57611833951a6474bfc2330b92af0513ee46fd488" 101 | } 102 | } 103 | }, 104 | "nbformat": 4, 105 | "nbformat_minor": 2 106 | } 107 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3/deeplearning.mplstyle: -------------------------------------------------------------------------------- 1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html 2 | lines.linewidth: 4 3 | lines.solid_capstyle: butt 4 | 5 | legend.fancybox: true 6 | 7 | # Verdana" for non-math text, 8 | # Cambria Math 9 | 10 | #Blue (Crayon-Aqua) 0096FF 11 | #Dark Red C00000 12 | #Orange (Apple Orange) FF9300 13 | #Black 000000 14 | #Magenta FF40FF 15 | #Purple 7030A0 16 | 17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000']) 18 | #axes.facecolor: f0f0f0 # grey 19 | axes.facecolor: ffffff # white 20 | axes.labelsize: large 21 | axes.axisbelow: true 22 | axes.grid: False 23 | axes.edgecolor: f0f0f0 24 | axes.linewidth: 3.0 25 | axes.titlesize: x-large 26 | 27 | patch.edgecolor: f0f0f0 28 | patch.linewidth: 0.5 29 | 30 | svg.fonttype: path 31 | 32 | grid.linestyle: - 33 | grid.linewidth: 1.0 34 | grid.color: cbcbcb 35 | 36 | xtick.major.size: 0 37 | xtick.minor.size: 0 38 | ytick.major.size: 0 39 | ytick.minor.size: 0 40 | 41 | savefig.edgecolor: f0f0f0 42 | savefig.facecolor: f0f0f0 43 | 44 | #figure.subplot.left: 0.08 45 | #figure.subplot.right: 0.95 46 | #figure.subplot.bottom: 0.07 47 | 48 | #figure.facecolor: f0f0f0 # grey 49 | figure.facecolor: ffffff # white 50 | 51 | ## *************************************************************************** 52 | ## * FONT * 53 | ## *************************************************************************** 54 | ## The font properties used by `text.Text`. 55 | ## See https://matplotlib.org/api/font_manager_api.html for more information 56 | ## on font properties. The 6 font properties used for font matching are 57 | ## given below with their default values. 58 | ## 59 | ## The font.family property can take either a concrete font name (not supported 60 | ## when rendering text with usetex), or one of the following five generic 61 | ## values: 62 | ## - 'serif' (e.g., Times), 63 | ## - 'sans-serif' (e.g., Helvetica), 64 | ## - 'cursive' (e.g., Zapf-Chancery), 65 | ## - 'fantasy' (e.g., Western), and 66 | ## - 'monospace' (e.g., Courier). 67 | ## Each of these values has a corresponding default list of font names 68 | ## (font.serif, etc.); the first available font in the list is used. Note that 69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of 70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with 71 | ## Matplotlib and is thus guaranteed to be available; the other entries are 72 | ## left as examples of other possible values. 73 | ## 74 | ## The font.style property has three values: normal (or roman), italic 75 | ## or oblique. The oblique style will be used for italic, if it is not 76 | ## present. 77 | ## 78 | ## The font.variant property has two values: normal or small-caps. For 79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent 80 | ## to using a font size of 'smaller', or about 83%% of the current font 81 | ## size. 
82 | ## 83 | ## The font.weight property has effectively 13 values: normal, bold, 84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as 85 | ## 400, and bold is 700. bolder and lighter are relative values with 86 | ## respect to the current weight. 87 | ## 88 | ## The font.stretch property has 11 values: ultra-condensed, 89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded, 90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This 91 | ## property is not currently implemented. 92 | ## 93 | ## The font.size property is the default font size for text, given in points. 94 | ## 10 pt is the standard value. 95 | ## 96 | ## Note that font.size controls default text sizes. To configure 97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc 98 | ## settings for axes and ticks. Special text sizes can be defined 99 | ## relative to font.size, using the following values: xx-small, x-small, 100 | ## small, medium, large, x-large, xx-large, larger, or smaller 101 | 102 | 103 | font.family: sans-serif 104 | font.style: normal 105 | font.variant: normal 106 | font.weight: normal 107 | font.stretch: normal 108 | font.size: 8.0 109 | 110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif 111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif 112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive 113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy 114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace 115 | 116 | 117 | ## *************************************************************************** 118 | ## * TEXT * 119 | ## *************************************************************************** 120 | ## The text properties used by `text.Text`. 
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text 122 | ## for more information on text properties 123 | #text.color: black 124 | 125 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3/test.ipynb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3/test.ipynb -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3/tryingLogisiticRegression.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Optical Logistic Regression\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import numpy as np\n", 19 | "%matplotlib widget\n", 20 | "import matplotlib.pyplot as plt\n", 21 | "from plt_logistic_loss import plt_logistic_cost, plt_two_logistic_loss_curves, plt_simple_example\n", 22 | "from plt_logistic_loss import soup_bowl, plt_logistic_squared_error\n", 23 | "plt.style.use('./deeplearning.mplstyle')" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "def CreateBowe():\n", 33 | " fig = plt.figure(figsize=(4,4))\n", 34 | " fig.canvas.toolbar_visible = False\n", 35 | " " 36 | ] 37 | } 38 | ], 39 | "metadata": { 40 | "kernelspec": { 41 | "display_name": "Python 3.10.6 64-bit", 42 | "language": "python", 43 | "name": "python3" 44 | }, 45 | "language_info": { 46 | "codemirror_mode": { 47 | "name": "ipython", 48 | "version": 3 49 | }, 50 | "file_extension": ".py", 51 | "mimetype": "text/x-python", 52 | "name": "python", 53 | "nbconvert_exporter": "python", 54 | "pygments_lexer": "ipython3", 55 | "version": "3.10.6" 56 | }, 57 | "orig_nbformat": 4, 58 | "vscode": { 59 | "interpreter": { 60 | "hash": "2d3283b7ede15a3ba02d29b57611833951a6474bfc2330b92af0513ee46fd488" 61 | } 62 | } 63 | }, 64 | "nbformat": 4, 65 | "nbformat_minor": 2 66 | } 67 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/data/ex2data1.txt: -------------------------------------------------------------------------------- 1 | 34.62365962451697,78.0246928153624,0 2 | 30.28671076822607,43.89499752400101,0 3 | 35.84740876993872,72.90219802708364,0 4 | 60.18259938620976,86.30855209546826,1 5 | 79.0327360507101,75.3443764369103,1 6 | 45.08327747668339,56.3163717815305,0 7 | 61.10666453684766,96.51142588489624,1 8 | 75.02474556738889,46.55401354116538,1 9 | 76.09878670226257,87.42056971926803,1 10 | 84.43281996120035,43.53339331072109,1 11 | 95.86155507093572,38.22527805795094,0 12 | 75.01365838958247,30.60326323428011,0 13 | 82.30705337399482,76.48196330235604,1 14 | 69.36458875970939,97.71869196188608,1 15 | 39.53833914367223,76.03681085115882,0 16 | 53.9710521485623,89.20735013750205,1 17 | 69.07014406283025,52.74046973016765,1 18 | 67.94685547711617,46.67857410673128,0 19 | 70.66150955499435,92.92713789364831,1 20 | 76.97878372747498,47.57596364975532,1 21 | 
67.37202754570876,42.83843832029179,0 22 | 89.67677575072079,65.79936592745237,1 23 | 50.534788289883,48.85581152764205,0 24 | 34.21206097786789,44.20952859866288,0 25 | 77.9240914545704,68.9723599933059,1 26 | 62.27101367004632,69.95445795447587,1 27 | 80.1901807509566,44.82162893218353,1 28 | 93.114388797442,38.80067033713209,0 29 | 61.83020602312595,50.25610789244621,0 30 | 38.78580379679423,64.99568095539578,0 31 | 61.379289447425,72.80788731317097,1 32 | 85.40451939411645,57.05198397627122,1 33 | 52.10797973193984,63.12762376881715,0 34 | 52.04540476831827,69.43286012045222,1 35 | 40.23689373545111,71.16774802184875,0 36 | 54.63510555424817,52.21388588061123,0 37 | 33.91550010906887,98.86943574220611,0 38 | 64.17698887494485,80.90806058670817,1 39 | 74.78925295941542,41.57341522824434,0 40 | 34.1836400264419,75.2377203360134,0 41 | 83.90239366249155,56.30804621605327,1 42 | 51.54772026906181,46.85629026349976,0 43 | 94.44336776917852,65.56892160559052,1 44 | 82.36875375713919,40.61825515970618,0 45 | 51.04775177128865,45.82270145776001,0 46 | 62.22267576120188,52.06099194836679,0 47 | 77.19303492601364,70.45820000180959,1 48 | 97.77159928000232,86.7278223300282,1 49 | 62.07306379667647,96.76882412413983,1 50 | 91.56497449807442,88.69629254546599,1 51 | 79.94481794066932,74.16311935043758,1 52 | 99.2725269292572,60.99903099844988,1 53 | 90.54671411399852,43.39060180650027,1 54 | 34.52451385320009,60.39634245837173,0 55 | 50.2864961189907,49.80453881323059,0 56 | 49.58667721632031,59.80895099453265,0 57 | 97.64563396007767,68.86157272420604,1 58 | 32.57720016809309,95.59854761387875,0 59 | 74.24869136721598,69.82457122657193,1 60 | 71.79646205863379,78.45356224515052,1 61 | 75.3956114656803,85.75993667331619,1 62 | 35.28611281526193,47.02051394723416,0 63 | 56.25381749711624,39.26147251058019,0 64 | 30.05882244669796,49.59297386723685,0 65 | 44.66826172480893,66.45008614558913,0 66 | 66.56089447242954,41.09209807936973,0 67 | 40.45755098375164,97.53518548909936,1 68 | 49.07256321908844,51.88321182073966,0 69 | 80.27957401466998,92.11606081344084,1 70 | 66.74671856944039,60.99139402740988,1 71 | 32.72283304060323,43.30717306430063,0 72 | 64.0393204150601,78.03168802018232,1 73 | 72.34649422579923,96.22759296761404,1 74 | 60.45788573918959,73.09499809758037,1 75 | 58.84095621726802,75.85844831279042,1 76 | 99.82785779692128,72.36925193383885,1 77 | 47.26426910848174,88.47586499559782,1 78 | 50.45815980285988,75.80985952982456,1 79 | 60.45555629271532,42.50840943572217,0 80 | 82.22666157785568,42.71987853716458,0 81 | 88.9138964166533,69.80378889835472,1 82 | 94.83450672430196,45.69430680250754,1 83 | 67.31925746917527,66.58935317747915,1 84 | 57.23870631569862,59.51428198012956,1 85 | 80.36675600171273,90.96014789746954,1 86 | 68.46852178591112,85.59430710452014,1 87 | 42.0754545384731,78.84478600148043,0 88 | 75.47770200533905,90.42453899753964,1 89 | 78.63542434898018,96.64742716885644,1 90 | 52.34800398794107,60.76950525602592,0 91 | 94.09433112516793,77.15910509073893,1 92 | 90.44855097096364,87.50879176484702,1 93 | 55.48216114069585,35.57070347228866,0 94 | 74.49269241843041,84.84513684930135,1 95 | 89.84580670720979,45.35828361091658,1 96 | 83.48916274498238,48.38028579728175,1 97 | 42.2617008099817,87.10385094025457,1 98 | 99.31500880510394,68.77540947206617,1 99 | 55.34001756003703,64.9319380069486,1 100 | 74.77589300092767,89.52981289513276,1 101 | -------------------------------------------------------------------------------- 
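Each row of the ex2data*.txt files in Week3Assignment/data is comma-separated: two features followed by a 0/1 label (100 rows in ex2data1.txt, 118 in ex2data2.txt). A minimal sketch of how `load_data` in Week3Assignment/utils.py (shown near the end of this dump) turns one of these files into arrays; the variable names here are illustrative only:

import numpy as np

# comma-separated rows: feature_1, feature_2, label
data = np.loadtxt("data/ex2data1.txt", delimiter=",")
X_train = data[:, :2]   # (100, 2) feature matrix
y_train = data[:, 2]    # (100,) binary labels
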
/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/data/ex2data2.txt: -------------------------------------------------------------------------------- 1 | 0.051267,0.69956,1 2 | -0.092742,0.68494,1 3 | -0.21371,0.69225,1 4 | -0.375,0.50219,1 5 | -0.51325,0.46564,1 6 | -0.52477,0.2098,1 7 | -0.39804,0.034357,1 8 | -0.30588,-0.19225,1 9 | 0.016705,-0.40424,1 10 | 0.13191,-0.51389,1 11 | 0.38537,-0.56506,1 12 | 0.52938,-0.5212,1 13 | 0.63882,-0.24342,1 14 | 0.73675,-0.18494,1 15 | 0.54666,0.48757,1 16 | 0.322,0.5826,1 17 | 0.16647,0.53874,1 18 | -0.046659,0.81652,1 19 | -0.17339,0.69956,1 20 | -0.47869,0.63377,1 21 | -0.60541,0.59722,1 22 | -0.62846,0.33406,1 23 | -0.59389,0.005117,1 24 | -0.42108,-0.27266,1 25 | -0.11578,-0.39693,1 26 | 0.20104,-0.60161,1 27 | 0.46601,-0.53582,1 28 | 0.67339,-0.53582,1 29 | -0.13882,0.54605,1 30 | -0.29435,0.77997,1 31 | -0.26555,0.96272,1 32 | -0.16187,0.8019,1 33 | -0.17339,0.64839,1 34 | -0.28283,0.47295,1 35 | -0.36348,0.31213,1 36 | -0.30012,0.027047,1 37 | -0.23675,-0.21418,1 38 | -0.06394,-0.18494,1 39 | 0.062788,-0.16301,1 40 | 0.22984,-0.41155,1 41 | 0.2932,-0.2288,1 42 | 0.48329,-0.18494,1 43 | 0.64459,-0.14108,1 44 | 0.46025,0.012427,1 45 | 0.6273,0.15863,1 46 | 0.57546,0.26827,1 47 | 0.72523,0.44371,1 48 | 0.22408,0.52412,1 49 | 0.44297,0.67032,1 50 | 0.322,0.69225,1 51 | 0.13767,0.57529,1 52 | -0.0063364,0.39985,1 53 | -0.092742,0.55336,1 54 | -0.20795,0.35599,1 55 | -0.20795,0.17325,1 56 | -0.43836,0.21711,1 57 | -0.21947,-0.016813,1 58 | -0.13882,-0.27266,1 59 | 0.18376,0.93348,0 60 | 0.22408,0.77997,0 61 | 0.29896,0.61915,0 62 | 0.50634,0.75804,0 63 | 0.61578,0.7288,0 64 | 0.60426,0.59722,0 65 | 0.76555,0.50219,0 66 | 0.92684,0.3633,0 67 | 0.82316,0.27558,0 68 | 0.96141,0.085526,0 69 | 0.93836,0.012427,0 70 | 0.86348,-0.082602,0 71 | 0.89804,-0.20687,0 72 | 0.85196,-0.36769,0 73 | 0.82892,-0.5212,0 74 | 0.79435,-0.55775,0 75 | 0.59274,-0.7405,0 76 | 0.51786,-0.5943,0 77 | 0.46601,-0.41886,0 78 | 0.35081,-0.57968,0 79 | 0.28744,-0.76974,0 80 | 0.085829,-0.75512,0 81 | 0.14919,-0.57968,0 82 | -0.13306,-0.4481,0 83 | -0.40956,-0.41155,0 84 | -0.39228,-0.25804,0 85 | -0.74366,-0.25804,0 86 | -0.69758,0.041667,0 87 | -0.75518,0.2902,0 88 | -0.69758,0.68494,0 89 | -0.4038,0.70687,0 90 | -0.38076,0.91886,0 91 | -0.50749,0.90424,0 92 | -0.54781,0.70687,0 93 | 0.10311,0.77997,0 94 | 0.057028,0.91886,0 95 | -0.10426,0.99196,0 96 | -0.081221,1.1089,0 97 | 0.28744,1.087,0 98 | 0.39689,0.82383,0 99 | 0.63882,0.88962,0 100 | 0.82316,0.66301,0 101 | 0.67339,0.64108,0 102 | 1.0709,0.10015,0 103 | -0.046659,-0.57968,0 104 | -0.23675,-0.63816,0 105 | -0.15035,-0.36769,0 106 | -0.49021,-0.3019,0 107 | -0.46717,-0.13377,0 108 | -0.28859,-0.060673,0 109 | -0.61118,-0.067982,0 110 | -0.66302,-0.21418,0 111 | -0.59965,-0.41886,0 112 | -0.72638,-0.082602,0 113 | -0.83007,0.31213,0 114 | -0.72062,0.53874,0 115 | -0.59389,0.49488,0 116 | -0.48445,0.99927,0 117 | -0.0063364,0.99927,0 118 | 0.63265,-0.030612,0 119 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 1.png -------------------------------------------------------------------------------- 
/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 2.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 3.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 4.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 5.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Kd-Here/Machine-Learning/4cf370ec0a2747efbdc8ae2e22f4b6692a474c41/Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/images/figure 6.png -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/public_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | 4 | def sigmoid_test(target): 5 | assert np.isclose(target(3.0), 0.9525741268224334), "Failed for scalar input" 6 | assert np.allclose(target(np.array([2.5, 0])), [0.92414182, 0.5]), "Failed for 1D array" 7 | assert np.allclose(target(np.array([[2.5, -2.5], [0, 1]])), 8 | [[0.92414182, 0.07585818], [0.5, 0.73105858]]), "Failed for 2D array" 9 | print('\033[92mAll tests passed!') 10 | 11 | def compute_cost_test(target): 12 | X = np.array([[0, 0, 0, 0]]).T 13 | y = np.array([0, 0, 0, 0]) 14 | w = np.array([0]) 15 | b = 1 16 | result = target(X, y, w, b) 17 | if math.isinf(result): 18 | raise ValueError("Did you get the sigmoid of z_wb?") 19 | 20 | np.random.seed(17) 21 | X = np.random.randn(5, 2) 22 | y = np.array([1, 0, 0, 1, 1]) 23 | w = np.random.randn(2) 24 | b = 0 25 | result = target(X, y, w, b) 26 | assert np.isclose(result, 2.15510667), f"Wrong output. 
Expected: {2.15510667} got: {result}" 27 | 28 | X = np.random.randn(4, 3) 29 | y = np.array([1, 1, 0, 0]) 30 | w = np.random.randn(3) 31 | b = 0 32 | 33 | result = target(X, y, w, b) 34 | assert np.isclose(result, 0.80709376), f"Wrong output. Expected: {0.80709376} got: {result}" 35 | 36 | X = np.random.randn(4, 3) 37 | y = np.array([1, 0,1, 0]) 38 | w = np.random.randn(3) 39 | b = 3 40 | result = target(X, y, w, b) 41 | assert np.isclose(result, 0.4529660647), f"Wrong output. Expected: {0.4529660647} got: {result}. Did you inizialized z_wb = b?" 42 | 43 | print('\033[92mAll tests passed!') 44 | 45 | def compute_gradient_test(target): 46 | np.random.seed(1) 47 | X = np.random.randn(7, 3) 48 | y = np.array([1, 0, 1, 0, 1, 1, 0]) 49 | test_w = np.array([1, 0.5, -0.35]) 50 | test_b = 1.7 51 | dj_db, dj_dw = target(X, y, test_w, test_b) 52 | 53 | assert np.isclose(dj_db, 0.28936094), f"Wrong value for dj_db. Expected: {0.28936094} got: {dj_db}" 54 | assert dj_dw.shape == test_w.shape, f"Wrong shape for dj_dw. Expected: {test_w.shape} got: {dj_dw.shape}" 55 | assert np.allclose(dj_dw, [-0.11999166, 0.41498775, -0.71968405]), f"Wrong values for dj_dw. Got: {dj_dw}" 56 | 57 | print('\033[92mAll tests passed!') 58 | 59 | def predict_test(target): 60 | np.random.seed(5) 61 | b = 0.5 62 | w = np.random.randn(3) 63 | X = np.random.randn(8, 3) 64 | 65 | result = target(X, w, b) 66 | wrong_1 = [1., 1., 0., 0., 1., 0., 0., 1.] 67 | expected_1 = [1., 1., 1., 0., 1., 0., 0., 1.] 68 | if np.allclose(result, wrong_1): 69 | raise ValueError("Did you apply the sigmoid before applying the threshold?") 70 | assert result.shape == (len(X),), f"Wrong length. Expected : {(len(X),)} got: {result.shape}" 71 | assert np.allclose(result, expected_1), f"Wrong output: Expected : {expected_1} got: {result}" 72 | 73 | b = -1.7 74 | w = np.random.randn(4) + 0.6 75 | X = np.random.randn(6, 4) 76 | 77 | result = target(X, w, b) 78 | expected_2 = [0., 0., 0., 1., 1., 0.] 79 | assert result.shape == (len(X),), f"Wrong length. Expected : {(len(X),)} got: {result.shape}" 80 | assert np.allclose(result,expected_2), f"Wrong output: Expected : {expected_2} got: {result}" 81 | 82 | print('\033[92mAll tests passed!') 83 | 84 | def compute_cost_reg_test(target): 85 | np.random.seed(1) 86 | w = np.random.randn(3) 87 | b = 0.4 88 | X = np.random.randn(6, 3) 89 | y = np.array([0, 1, 1, 0, 1, 1]) 90 | lambda_ = 0.1 91 | expected_output = target(X, y, w, b, lambda_) 92 | 93 | assert np.isclose(expected_output, 0.5469746792761936), f"Wrong output. Expected: {0.5469746792761936} got:{expected_output}" 94 | 95 | w = np.random.randn(5) 96 | b = -0.6 97 | X = np.random.randn(8, 5) 98 | y = np.array([1, 0, 1, 0, 0, 1, 0, 1]) 99 | lambda_ = 0.01 100 | output = target(X, y, w, b, lambda_) 101 | assert np.isclose(output, 1.2608591964119995), f"Wrong output. Expected: {1.2608591964119995} got:{output}" 102 | 103 | w = np.array([2, 2, 2, 2, 2]) 104 | b = 0 105 | X = np.zeros((8, 5)) 106 | y = np.array([0.5] * 8) 107 | lambda_ = 3 108 | output = target(X, y, w, b, lambda_) 109 | expected = -np.log(0.5) + 3. / (2. * 8.) * 20. 110 | assert np.isclose(output, expected), f"Wrong output. 
Expected: {expected} got:{output}" 111 | 112 | print('\033[92mAll tests passed!') 113 | 114 | def compute_gradient_reg_test(target): 115 | np.random.seed(1) 116 | w = np.random.randn(5) 117 | b = 0.2 118 | X = np.random.randn(7, 5) 119 | y = np.array([0, 1, 1, 0, 1, 1, 0]) 120 | lambda_ = 0.1 121 | expected1 = (-0.1506447567869257, np.array([ 0.19530838, -0.00632206, 0.19687367, 0.15741161, 0.02791437])) 122 | dj_db, dj_dw = target(X, y, w, b, lambda_) 123 | 124 | assert np.isclose(dj_db, expected1[0]), f"Wrong dj_db. Expected: {expected1[0]} got: {dj_db}" 125 | assert np.allclose(dj_dw, expected1[1]), f"Wrong dj_dw. Expected: {expected1[1]} got: {dj_dw}" 126 | 127 | 128 | w = np.random.randn(7) 129 | b = 0 130 | X = np.random.randn(7, 7) 131 | y = np.array([1, 0, 0, 0, 1, 1, 0]) 132 | lambda_ = 0 133 | expected2 = (0.02660329857573818, np.array([ 0.23567643, -0.06921029, -0.19705212, -0.0002884 , 0.06490588, 134 | 0.26948175, 0.10777992])) 135 | dj_db, dj_dw = target(X, y, w, b, lambda_) 136 | assert np.isclose(dj_db, expected2[0]), f"Wrong dj_db. Expected: {expected2[0]} got: {dj_db}" 137 | assert np.allclose(dj_dw, expected2[1]), f"Wrong dj_dw. Expected: {expected2[1]} got: {dj_dw}" 138 | 139 | print('\033[92mAll tests passed!') 140 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/test_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from copy import deepcopy 3 | 4 | 5 | def datatype_check(expected_output, target_output, error): 6 | success = 0 7 | if isinstance(target_output, dict): 8 | for key in target_output.keys(): 9 | try: 10 | success += datatype_check(expected_output[key], 11 | target_output[key], error) 12 | except: 13 | print("Error: {} in variable {}. 
Got {} but expected type {}".format(error, 14 | key, 15 | type( 16 | target_output[key]), 17 | type(expected_output[key]))) 18 | if success == len(target_output.keys()): 19 | return 1 20 | else: 21 | return 0 22 | elif isinstance(target_output, tuple) or isinstance(target_output, list): 23 | for i in range(len(target_output)): 24 | try: 25 | success += datatype_check(expected_output[i], 26 | target_output[i], error) 27 | except: 28 | print("Error: {} in variable {}, expected type: {} but expected type {}".format(error, 29 | i, 30 | type( 31 | target_output[i]), 32 | type(expected_output[i] 33 | ))) 34 | if success == len(target_output): 35 | return 1 36 | else: 37 | return 0 38 | 39 | else: 40 | assert isinstance(target_output, type(expected_output)) 41 | return 1 42 | 43 | 44 | def equation_output_check(expected_output, target_output, error): 45 | success = 0 46 | if isinstance(target_output, dict): 47 | for key in target_output.keys(): 48 | try: 49 | success += equation_output_check(expected_output[key], 50 | target_output[key], error) 51 | except: 52 | print("Error: {} for variable {}.".format(error, 53 | key)) 54 | if success == len(target_output.keys()): 55 | return 1 56 | else: 57 | return 0 58 | elif isinstance(target_output, tuple) or isinstance(target_output, list): 59 | for i in range(len(target_output)): 60 | try: 61 | success += equation_output_check(expected_output[i], 62 | target_output[i], error) 63 | except: 64 | print("Error: {} for variable in position {}.".format(error, i)) 65 | if success == len(target_output): 66 | return 1 67 | else: 68 | return 0 69 | 70 | else: 71 | if hasattr(target_output, 'shape'): 72 | np.testing.assert_array_almost_equal( 73 | target_output, expected_output) 74 | else: 75 | assert target_output == expected_output 76 | return 1 77 | 78 | 79 | def shape_check(expected_output, target_output, error): 80 | success = 0 81 | if isinstance(target_output, dict): 82 | for key in target_output.keys(): 83 | try: 84 | success += shape_check(expected_output[key], 85 | target_output[key], error) 86 | except: 87 | print("Error: {} for variable {}.".format(error, key)) 88 | if success == len(target_output.keys()): 89 | return 1 90 | else: 91 | return 0 92 | elif isinstance(target_output, tuple) or isinstance(target_output, list): 93 | for i in range(len(target_output)): 94 | try: 95 | success += shape_check(expected_output[i], 96 | target_output[i], error) 97 | except: 98 | print("Error: {} for variable {}.".format(error, i)) 99 | if success == len(target_output): 100 | return 1 101 | else: 102 | return 0 103 | 104 | else: 105 | if hasattr(target_output, 'shape'): 106 | assert target_output.shape == expected_output.shape 107 | return 1 108 | 109 | 110 | def single_test(test_cases, target): 111 | success = 0 112 | for test_case in test_cases: 113 | try: 114 | if test_case['name'] == "datatype_check": 115 | assert isinstance(target(*test_case['input']), 116 | type(test_case["expected"])) 117 | success += 1 118 | if test_case['name'] == "equation_output_check": 119 | assert np.allclose(test_case["expected"], 120 | target(*test_case['input'])) 121 | success += 1 122 | if test_case['name'] == "shape_check": 123 | assert test_case['expected'].shape == target( 124 | *test_case['input']).shape 125 | success += 1 126 | except: 127 | print("Error: " + test_case['error']) 128 | 129 | if success == len(test_cases): 130 | print("\033[92m All tests passed.") 131 | else: 132 | print('\033[92m', success, " Tests passed") 133 | print('\033[91m', len(test_cases) - success, " 
Tests failed") 134 | raise AssertionError( 135 | "Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__)) 136 | 137 | 138 | def multiple_test(test_cases, target): 139 | success = 0 140 | for test_case in test_cases: 141 | try: 142 | test_input = deepcopy(test_case['input']) 143 | target_answer = target(*test_input) 144 | if test_case['name'] == "datatype_check": 145 | success += datatype_check(test_case['expected'], 146 | target_answer, test_case['error']) 147 | if test_case['name'] == "equation_output_check": 148 | success += equation_output_check( 149 | test_case['expected'], target_answer, test_case['error']) 150 | if test_case['name'] == "shape_check": 151 | success += shape_check(test_case['expected'], 152 | target_answer, test_case['error']) 153 | except: 154 | print('\33[30m', "Error: " + test_case['error']) 155 | 156 | if success == len(test_cases): 157 | print("\033[92m All tests passed.") 158 | else: 159 | print('\033[92m', success, " Tests passed") 160 | print('\033[91m', len(test_cases) - success, " Tests failed") 161 | raise AssertionError( 162 | "Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__)) 163 | 164 | -------------------------------------------------------------------------------- /Supervised-Machine-Learning-Regression-and-Classification/Week3Assignment/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | def load_data(filename): 5 | data = np.loadtxt(filename, delimiter=',') 6 | X = data[:,:2] 7 | y = data[:,2] 8 | return X, y 9 | 10 | def sig(z): 11 | 12 | return 1/(1+np.exp(-z)) 13 | 14 | def map_feature(X1, X2): 15 | """ 16 | Feature mapping function to polynomial features 17 | """ 18 | X1 = np.atleast_1d(X1) 19 | X2 = np.atleast_1d(X2) 20 | degree = 6 21 | out = [] 22 | for i in range(1, degree+1): 23 | for j in range(i + 1): 24 | out.append((X1**(i-j) * (X2**j))) 25 | return np.stack(out, axis=1) 26 | 27 | 28 | def plot_data(X, y, pos_label="y=1", neg_label="y=0"): 29 | positive = y == 1 30 | negative = y == 0 31 | 32 | #This will only print True value means where y = 1 33 | # This is x-axis #this is y-axis 34 | plt.plot(X[positive, 0], X[positive, 1], '*', label=pos_label) 35 | plt.plot(X[negative, 0], X[negative, 1], 'yo', label=neg_label) 36 | 37 | 38 | def plot_decision_boundary(w, b, X, y): 39 | # Credit to dibgerge on Github for this plotting code 40 | 41 | plot_data(X[:, 0:2], y) 42 | 43 | if X.shape[1] <= 2: 44 | plot_x = np.array([min(X[:, 0]), max(X[:, 0])]) 45 | plot_y = (-1. / w[1]) * (w[0] * plot_x + b) 46 | 47 | plt.plot(plot_x, plot_y, c="b") 48 | 49 | else: 50 | u = np.linspace(-1, 1.5, 50) 51 | v = np.linspace(-1, 1.5, 50) 52 | 53 | z = np.zeros((len(u), len(v))) 54 | 55 | # Evaluate z = theta*x over the grid 56 | for i in range(len(u)): 57 | for j in range(len(v)): 58 | z[i,j] = sig(np.dot(map_feature(u[i], v[j]), w) + b) 59 | 60 | # important to transpose z before calling contour 61 | z = z.T 62 | 63 | # Plot z = 0.5 64 | plt.contour(u,v,z, levels = [0.5], colors="g") 65 | 66 | --------------------------------------------------------------------------------