├── .idea
│   ├── .gitignore
│   ├── Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers.iml
│   ├── dbnavigator.xml
│   ├── modules.xml
│   └── vcs.xml
├── README.md
├── Week1
│   ├── Optional Labs
│   │   ├── C1_W1_Lab01_Python_Jupyter_Soln.ipynb
│   │   ├── C1_W1_Lab03_Model_Representation_Soln.ipynb
│   │   ├── C1_W1_Lab04_Cost_function_Soln.ipynb
│   │   ├── C1_W1_Lab05_Gradient_Descent_Soln.ipynb
│   │   ├── data.txt
│   │   ├── deeplearning.mplstyle
│   │   ├── images
│   │   │   ├── C1W1L1_Markdown.PNG
│   │   │   ├── C1W1L1_Run.PNG
│   │   │   ├── C1W1L1_Tour.PNG
│   │   │   ├── C1_W1_L3_S1_Lecture_b.png
│   │   │   ├── C1_W1_L3_S1_model.png
│   │   │   ├── C1_W1_L3_S1_trainingdata.png
│   │   │   ├── C1_W1_L3_S2_Lecture_b.png
│   │   │   ├── C1_W1_L4_S1_Lecture_GD.png
│   │   │   ├── C1_W1_Lab02_GoalOfRegression.PNG
│   │   │   ├── C1_W1_Lab03_alpha_too_big.PNG
│   │   │   ├── C1_W1_Lab03_lecture_learningrate.PNG
│   │   │   └── C1_W1_Lab03_lecture_slopes.PNG
│   │   ├── lab_utils_common.py
│   │   └── lab_utils_uni.py
│   ├── Practice Quiz - Regression
│   │   └── Regression Quiz.png
│   ├── Practice Quiz - Train the model with gradient descent
│   │   └── Practice Quiz - Train the model with gradient descent.png
│   └── Practice Quiz - Supervised vs Unsupervised Learning
│       └── Practice Quiz - Supervised vs Unsupervised Learning.png
├── Week2
│   ├── C1W2A1
│   │   ├── C1_W2_Linear_Regression.ipynb
│   │   ├── data
│   │   │   ├── ex1data1.txt
│   │   │   └── ex1data2.txt
│   │   ├── public_tests.py
│   │   └── utils.py
│   ├── Optional Labs
│   │   ├── C1_W2_Lab01_Python_Numpy_Vectorization_Soln.ipynb
│   │   ├── C1_W2_Lab02_Multiple_Variable_Soln.ipynb
│   │   ├── C1_W2_Lab03_Feature_Scaling_and_Learning_Rate_Soln.ipynb
│   │   ├── C1_W2_Lab04_FeatEng_PolyReg_Soln.ipynb
│   │   ├── C1_W2_Lab05_Sklearn_GD_Soln.ipynb
│   │   ├── C1_W2_Lab06_Sklearn_Normal_Soln.ipynb
│   │   ├── data
│   │   │   └── houses.txt
│   │   ├── deeplearning.mplstyle
│   │   ├── images
│   │   │   ├── C1_W2_L1_S1_Lecture_b.png
│   │   │   ├── C1_W2_L1_S1_model.png
│   │   │   ├── C1_W2_L1_S1_trainingdata.png
│   │   │   ├── C1_W2_L1_S2_Lectureb.png
│   │   │   ├── C1_W2_L2_S1_Lecture_GD.png
│   │   │   ├── C1_W2_Lab02_GoalOfRegression.PNG
│   │   │   ├── C1_W2_Lab03_alpha_to_big.PNG
│   │   │   ├── C1_W2_Lab03_lecture_learningrate.PNG
│   │   │   ├── C1_W2_Lab03_lecture_slopes.PNG
│   │   │   ├── C1_W2_Lab04_Figures And animations.pptx
│   │   │   ├── C1_W2_Lab04_Matrices.PNG
│   │   │   ├── C1_W2_Lab04_Vectors.PNG
│   │   │   ├── C1_W2_Lab04_dot_notrans.gif
│   │   │   ├── C1_W2_Lab06_LongRun.PNG
│   │   │   ├── C1_W2_Lab06_ShortRun.PNG
│   │   │   ├── C1_W2_Lab06_contours.PNG
│   │   │   ├── C1_W2_Lab06_featurescalingheader.PNG
│   │   │   ├── C1_W2_Lab06_learningrate.PNG
│   │   │   ├── C1_W2_Lab06_scale.PNG
│   │   │   └── C1_W2_Lab07_FeatureEngLecture.PNG
│   │   ├── lab_utils_common.py
│   │   └── lab_utils_multi.py
│   ├── Practice Quiz - Gradient descent in practice
│   │   ├── Practice Quiz - Gradient descent in practice 1.png
│   │   └── Practice Quiz - Gradient descent in practice 2.png
│   └── Practice Quiz - Multiple linear regression
│       └── Practice Quiz - Multiple linear regression.png
├── images
│   └── course.png
└── week3
    ├── C1W3A1
    │   ├── C1_W3_Logistic_Regression.ipynb
    │   ├── archive
    │   │   ├── .ipynb_checkpoints
    │   │   │   └── C1_W3_Logistic_Regression-Copy1-checkpoint.ipynb
    │   │   └── C1_W3_Logistic_Regression-Copy1.ipynb
    │   ├── data
    │   │   ├── ex2data1.txt
    │   │   └── ex2data2.txt
    │   ├── images
    │   │   ├── figure 1.png
    │   │   ├── figure 2.png
    │   │   ├── figure 3.png
    │   │   ├── figure 4.png
    │   │   ├── figure 5.png
    │   │   └── figure 6.png
    │   ├── public_tests.py
    │   ├── test_utils.py
    │   └── utils.py
    ├── Optional Labs
    │   ├── C1_W3_Lab01_Classification_Soln.ipynb
    │   ├── C1_W3_Lab02_Sigmoid_function_Soln.ipynb
    │   ├── C1_W3_Lab03_Decision_Boundary_Soln.ipynb
    │   ├── C1_W3_Lab04_LogisticLoss_Soln.ipynb
    │   ├── C1_W3_Lab05_Cost_Function_Soln.ipynb
    │   ├── C1_W3_Lab06_Gradient_Descent_Soln.ipynb
    │   ├── C1_W3_Lab07_Scikit_Learn_Soln.ipynb
    │   ├── C1_W3_Lab08_Overfitting_Soln.ipynb
    │   ├── C1_W3_Lab09_Regularization_Soln.ipynb
    │   ├── archive
    │   │   ├── .ipynb_checkpoints
    │   │   │   ├── C1_W3_Lab05_Cost_Function_Soln-Copy1-checkpoint.ipynb
    │   │   │   ├── C1_W3_Lab05_Cost_Function_Soln-Copy2-checkpoint.ipynb
    │   │   │   └── C1_W3_Lab09_Regularization_Soln-Copy1-checkpoint.ipynb
    │   │   ├── C1_W3_Lab05_Cost_Function_Soln-Copy1.ipynb
    │   │   ├── C1_W3_Lab05_Cost_Function_Soln-Copy2.ipynb
    │   │   └── C1_W3_Lab09_Regularization_Soln-Copy1.ipynb
    │   ├── deeplearning.mplstyle
    │   ├── images
    │   │   ├── C1W3_XW.PNG
    │   │   ├── C1W3_boundary.PNG
    │   │   ├── C1W3_example2.PNG
    │   │   ├── C1W3_mcpredict.PNG
    │   │   ├── C1W3_trainvpredict.PNG
    │   │   ├── C1_W3_Classification.png
    │   │   ├── C1_W3_Lab07_overfitting.PNG
    │   │   ├── C1_W3_LinearCostRegularized.png
    │   │   ├── C1_W3_LinearGradientRegularized.png
    │   │   ├── C1_W3_LogisticCostRegularized.png
    │   │   ├── C1_W3_LogisticGradientRegularized.png
    │   │   ├── C1_W3_LogisticLoss_a.png
    │   │   ├── C1_W3_LogisticLoss_b.png
    │   │   ├── C1_W3_LogisticLoss_c.png
    │   │   ├── C1_W3_LogisticRegression.png
    │   │   ├── C1_W3_LogisticRegression_left.png
    │   │   ├── C1_W3_LogisticRegression_right.png
    │   │   ├── C1_W3_Logistic_gradient_descent.png
    │   │   ├── C1_W3_Overfitting_a.png
    │   │   ├── C1_W3_Overfitting_b.png
    │   │   ├── C1_W3_Overfitting_c.png
    │   │   └── C1_W3_SqErrorVsLogistic.png
    │   ├── lab_utils_common.py
    │   ├── plt_logistic_loss.py
    │   ├── plt_one_addpt_onclick.py
    │   ├── plt_overfit.py
    │   └── plt_quad_logistic.py
    ├── Practice quiz_ Cost function for logistic regression
    │   ├── Readme.md
    │   └── ss1.png
    ├── Practice quiz_ Gradient descent for logistic regression
    │   ├── Readme.md
    │   └── ss1.png
    └── Readme.md
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 |
--------------------------------------------------------------------------------
/.idea/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers
2 | ## Machine Learning Specialization Coursera
3 |
4 |
5 | 
6 |
7 | Contains Solutions and Notes for the [Machine Learning Specialization](https://www.coursera.org/specializations/machine-learning-introduction/?utm_medium=coursera&utm_source=home-page&utm_campaign=mlslaunch2022IN) by Andrew Ng on Coursera
8 |
--------------------------------------------------------------------------------
/Week1/Optional Labs/C1_W1_Lab01_Python_Jupyter_Soln.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "pycharm": {
7 | "name": "#%% md\n"
8 | }
9 | },
10 | "source": [
11 | "# Optional Lab: Brief Introduction to Python and Jupyter Notebooks\n",
12 | "Welcome to the first optional lab! \n",
13 | "Optional labs are available to:\n",
14 | "- provide information - like this notebook\n",
15 | "- reinforce lecture material with hands-on examples\n",
16 | "- provide working examples of routines used in the graded labs"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {
22 | "pycharm": {
23 | "name": "#%% md\n"
24 | }
25 | },
26 | "source": [
27 | "## Goals\n",
28 | "In this lab, you will:\n",
29 | "- Get a brief introduction to Jupyter notebooks\n",
30 | "- Take a tour of Jupyter notebooks\n",
31 | "- Learn the difference between markdown cells and code cells\n",
32 | "- Practice some basic python\n"
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {
38 | "pycharm": {
39 | "name": "#%% md\n"
40 | }
41 | },
42 | "source": [
43 | "The easiest way to become familiar with Jupyter notebooks is to take the tour available above in the Help menu:"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {
49 | "pycharm": {
50 | "name": "#%% md\n"
51 | }
52 | },
53 | "source": [
54 | "\n",
55 | "
\n",
56 | ""
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {
62 | "pycharm": {
63 | "name": "#%% md\n"
64 | }
65 | },
66 | "source": [
67 | "Jupyter notebooks have two types of cells that are used in this course. Cells such as this which contain documentation called `Markdown Cells`. The name is derived from the simple formatting language used in the cells. You will not be required to produce markdown cells. Its useful to understand the `cell pulldown` shown in graphic below. Occasionally, a cell will end up in the wrong mode and you may need to restore it to the right state:"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {
73 | "pycharm": {
74 | "name": "#%% md\n"
75 | }
76 | },
77 | "source": [
78 | "\n",
79 | "
\n",
80 | ""
81 | ]
82 | },
83 | {
84 | "cell_type": "markdown",
85 | "metadata": {
86 | "pycharm": {
87 | "name": "#%% md\n"
88 | }
89 | },
90 | "source": [
91 | "The other type of cell is the `code cell` where you will write your code:"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 1,
97 | "metadata": {
98 | "pycharm": {
99 | "name": "#%%\n"
100 | }
101 | },
102 | "outputs": [
103 | {
104 | "name": "stdout",
105 | "output_type": "stream",
106 | "text": [
107 | "This is code cell\n"
108 | ]
109 | }
110 | ],
111 | "source": [
112 | "#This is a 'Code' Cell\n",
113 | "print(\"This is code cell\")"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {
119 | "pycharm": {
120 | "name": "#%% md\n"
121 | }
122 | },
123 | "source": [
124 | "## Python\n",
125 | "You can write your code in the code cells. \n",
126 | "To run the code, select the cell and either\n",
127 | "- hold the shift-key down and hit 'enter' or 'return'\n",
128 | "- click the 'run' arrow above\n",
129 | "\n",
130 | "
\n",
131 | "\n",
132 | "\n",
133 | " "
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {
139 | "pycharm": {
140 | "name": "#%% md\n"
141 | }
142 | },
143 | "source": [
144 | "### Print statement\n",
145 | "Print statements will generally use the python f-string style. \n",
146 | "Try creating your own print in the following cell. \n",
147 | "Try both methods of running the cell."
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": 2,
153 | "metadata": {
154 | "pycharm": {
155 | "name": "#%%\n"
156 | }
157 | },
158 | "outputs": [
159 | {
160 | "name": "stdout",
161 | "output_type": "stream",
162 | "text": [
163 | "f strings allow you to embed variables right in the strings!\n"
164 | ]
165 | }
166 | ],
167 | "source": [
168 | "# print statements\n",
169 | "variable = \"right in the strings!\"\n",
170 | "print(f\"f strings allow you to embed variables {variable}\")"
171 | ]
172 | },
173 | {
174 | "cell_type": "markdown",
175 | "metadata": {
176 | "pycharm": {
177 | "name": "#%% md\n"
178 | }
179 | },
180 | "source": [
181 | "# Congratulations!\n",
182 | "You now know how to find your way around a Jupyter Notebook."
183 | ]
184 | }
185 | ],
186 | "metadata": {
187 | "kernelspec": {
188 | "display_name": "Python 3",
189 | "language": "python",
190 | "name": "python3"
191 | },
192 | "language_info": {
193 | "codemirror_mode": {
194 | "name": "ipython",
195 | "version": 3
196 | },
197 | "file_extension": ".py",
198 | "mimetype": "text/x-python",
199 | "name": "python",
200 | "nbconvert_exporter": "python",
201 | "pygments_lexer": "ipython3",
202 | "version": "3.7.6"
203 | }
204 | },
205 | "nbformat": 4,
206 | "nbformat_minor": 5
207 | }
--------------------------------------------------------------------------------
/Week1/Optional Labs/data.txt:
--------------------------------------------------------------------------------
1 | 2104,3,399900
2 | 1600,3,329900
3 | 2400,3,369000
4 | 1416,2,232000
5 | 3000,4,539900
6 | 1985,4,299900
7 | 1534,3,314900
8 | 1427,3,198999
9 | 1380,3,212000
10 | 1494,3,242500
11 | 1940,4,239999
12 | 2000,3,347000
13 | 1890,3,329999
14 | 4478,5,699900
15 | 1268,3,259900
16 | 2300,4,449900
17 | 1320,2,299900
18 | 1236,3,199900
19 | 2609,4,499998
20 | 3031,4,599000
21 | 1767,3,252900
22 | 1888,2,255000
23 | 1604,3,242900
24 | 1962,4,259900
25 | 3890,3,573900
26 | 1100,3,249900
27 | 1458,3,464500
28 | 2526,3,469000
29 | 2200,3,475000
30 | 2637,3,299900
31 | 1839,2,349900
32 | 1000,1,169900
33 | 2040,4,314900
34 | 3137,3,579900
35 | 1811,4,285900
36 | 1437,3,249900
37 | 1239,3,229900
38 | 2132,4,345000
39 | 4215,4,549000
40 | 2162,4,287000
41 | 1664,2,368500
42 | 2238,3,329900
43 | 2567,4,314000
44 | 1200,3,299000
45 | 852,2,179900
46 | 1852,4,299900
47 | 1203,3,239500
--------------------------------------------------------------------------------
/Week1/Optional Labs/deeplearning.mplstyle:
--------------------------------------------------------------------------------
1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html
2 | lines.linewidth: 4
3 | lines.solid_capstyle: butt
4 |
5 | legend.fancybox: true
6 |
7 | # Verdana" for non-math text,
8 | # Cambria Math
9 |
10 | #Blue (Crayon-Aqua) 0096FF
11 | #Dark Red C00000
12 | #Orange (Apple Orange) FF9300
13 | #Black 000000
14 | #Magenta FF40FF
15 | #Purple 7030A0
16 |
17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000'])
18 | #axes.facecolor: f0f0f0 # grey
19 | axes.facecolor: ffffff # white
20 | axes.labelsize: large
21 | axes.axisbelow: true
22 | axes.grid: False
23 | axes.edgecolor: f0f0f0
24 | axes.linewidth: 3.0
25 | axes.titlesize: x-large
26 |
27 | patch.edgecolor: f0f0f0
28 | patch.linewidth: 0.5
29 |
30 | svg.fonttype: path
31 |
32 | grid.linestyle: -
33 | grid.linewidth: 1.0
34 | grid.color: cbcbcb
35 |
36 | xtick.major.size: 0
37 | xtick.minor.size: 0
38 | ytick.major.size: 0
39 | ytick.minor.size: 0
40 |
41 | savefig.edgecolor: f0f0f0
42 | savefig.facecolor: f0f0f0
43 |
44 | #figure.subplot.left: 0.08
45 | #figure.subplot.right: 0.95
46 | #figure.subplot.bottom: 0.07
47 |
48 | #figure.facecolor: f0f0f0 # grey
49 | figure.facecolor: ffffff # white
50 |
51 | ## ***************************************************************************
52 | ## * FONT *
53 | ## ***************************************************************************
54 | ## The font properties used by `text.Text`.
55 | ## See https://matplotlib.org/api/font_manager_api.html for more information
56 | ## on font properties. The 6 font properties used for font matching are
57 | ## given below with their default values.
58 | ##
59 | ## The font.family property can take either a concrete font name (not supported
60 | ## when rendering text with usetex), or one of the following five generic
61 | ## values:
62 | ## - 'serif' (e.g., Times),
63 | ## - 'sans-serif' (e.g., Helvetica),
64 | ## - 'cursive' (e.g., Zapf-Chancery),
65 | ## - 'fantasy' (e.g., Western), and
66 | ## - 'monospace' (e.g., Courier).
67 | ## Each of these values has a corresponding default list of font names
68 | ## (font.serif, etc.); the first available font in the list is used. Note that
69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of
70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with
71 | ## Matplotlib and is thus guaranteed to be available; the other entries are
72 | ## left as examples of other possible values.
73 | ##
74 | ## The font.style property has three values: normal (or roman), italic
75 | ## or oblique. The oblique style will be used for italic, if it is not
76 | ## present.
77 | ##
78 | ## The font.variant property has two values: normal or small-caps. For
79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent
80 | ## to using a font size of 'smaller', or about 83%% of the current font
81 | ## size.
82 | ##
83 | ## The font.weight property has effectively 13 values: normal, bold,
84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as
85 | ## 400, and bold is 700. bolder and lighter are relative values with
86 | ## respect to the current weight.
87 | ##
88 | ## The font.stretch property has 11 values: ultra-condensed,
89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded,
90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This
91 | ## property is not currently implemented.
92 | ##
93 | ## The font.size property is the default font size for text, given in points.
94 | ## 10 pt is the standard value.
95 | ##
96 | ## Note that font.size controls default text sizes. To configure
97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc
98 | ## settings for axes and ticks. Special text sizes can be defined
99 | ## relative to font.size, using the following values: xx-small, x-small,
100 | ## small, medium, large, x-large, xx-large, larger, or smaller
101 |
102 |
103 | font.family: sans-serif
104 | font.style: normal
105 | font.variant: normal
106 | font.weight: normal
107 | font.stretch: normal
108 | font.size: 8.0
109 |
110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif
111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif
112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive
113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy
114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace
115 |
116 |
117 | ## ***************************************************************************
118 | ## * TEXT *
119 | ## ***************************************************************************
120 | ## The text properties used by `text.Text`.
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text
122 | ## for more information on text properties
123 | #text.color: black
124 |
125 |
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1W1L1_Markdown.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1W1L1_Markdown.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1W1L1_Run.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1W1L1_Run.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1W1L1_Tour.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1W1L1_Tour.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_L3_S1_Lecture_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_L3_S1_Lecture_b.png
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_L3_S1_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_L3_S1_model.png
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_L3_S1_trainingdata.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_L3_S1_trainingdata.png
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_L3_S2_Lecture_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_L3_S2_Lecture_b.png
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_L4_S1_Lecture_GD.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_L4_S1_Lecture_GD.png
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_Lab02_GoalOfRegression.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_Lab02_GoalOfRegression.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_Lab03_alpha_too_big.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_Lab03_alpha_too_big.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_Lab03_lecture_learningrate.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_Lab03_lecture_learningrate.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/images/C1_W1_Lab03_lecture_slopes.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Optional Labs/images/C1_W1_Lab03_lecture_slopes.PNG
--------------------------------------------------------------------------------
/Week1/Optional Labs/lab_utils_common.py:
--------------------------------------------------------------------------------
1 | """
2 | lab_utils_common.py
3 | functions common to all optional labs, Course 1, Week 2
4 | """
5 |
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 |
9 | plt.style.use('./deeplearning.mplstyle')
10 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0';
11 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple]
12 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0')
13 |
14 |
15 | ##########################################################
16 | # Regression Routines
17 | ##########################################################
18 |
19 | #Function to calculate the cost
20 | def compute_cost_matrix(X, y, w, b, verbose=False):
21 | """
22 | Computes the cost for linear regression using matrix operations
23 | Args:
24 | X (ndarray (m,n)): Data, m examples with n features
25 | y (ndarray (m,)) : target values
26 | w (ndarray (n,)) : model parameters
27 | b (scalar) : model parameter
28 | verbose : (Boolean) If true, print out intermediate value f_wb
29 | Returns
30 | cost: (scalar)
31 | """
32 | m = X.shape[0]
33 |
34 | # calculate f_wb for all examples.
35 | f_wb = X @ w + b
36 | # calculate cost
37 | total_cost = (1/(2*m)) * np.sum((f_wb-y)**2)
38 |
39 | if verbose: print("f_wb:")
40 | if verbose: print(f_wb)
41 |
42 | return total_cost
43 |
44 | def compute_gradient_matrix(X, y, w, b):
45 | """
46 | Computes the gradient for linear regression
47 |
48 | Args:
49 | X (ndarray (m,n)): Data, m examples with n features
50 | y (ndarray (m,)) : target values
51 | w (ndarray (n,)) : model parameters
52 | b (scalar) : model parameter
53 | Returns
54 | dj_dw (ndarray (n,)): The gradient of the cost w.r.t. the parameters w.
55 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
56 |
57 | """
58 | m,n = X.shape
59 | f_wb = X @ w + b
60 | e = f_wb - y
61 | dj_dw = (1/m) * (X.T @ e)
62 | dj_db = (1/m) * np.sum(e)
63 |
64 | return dj_db,dj_dw
65 |
66 |
67 | # Loop version of multi-variable compute_cost
68 | def compute_cost(X, y, w, b):
69 | """
70 | compute cost
71 | Args:
72 | X (ndarray (m,n)): Data, m examples with n features
73 | y (ndarray (m,)) : target values
74 | w (ndarray (n,)) : model parameters
75 | b (scalar) : model parameter
76 | Returns
77 | cost (scalar) : cost
78 | """
79 | m = X.shape[0]
80 | cost = 0.0
81 | for i in range(m):
82 | f_wb_i = np.dot(X[i],w) + b #(n,)(n,)=scalar
83 | cost = cost + (f_wb_i - y[i])**2
84 | cost = cost/(2*m)
85 | return cost
86 |
87 | def compute_gradient(X, y, w, b):
88 | """
89 | Computes the gradient for linear regression
90 | Args:
91 | X (ndarray (m,n)): Data, m examples with n features
92 | y (ndarray (m,)) : target values
93 | w (ndarray (n,)) : model parameters
94 | b (scalar) : model parameter
95 | Returns
96 | dj_dw (ndarray Shape (n,)): The gradient of the cost w.r.t. the parameters w.
97 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
98 | """
99 | m,n = X.shape #(number of examples, number of features)
100 | dj_dw = np.zeros((n,))
101 | dj_db = 0.
102 |
103 | for i in range(m):
104 | err = (np.dot(X[i], w) + b) - y[i]
105 | for j in range(n):
106 | dj_dw[j] = dj_dw[j] + err * X[i,j]
107 | dj_db = dj_db + err
108 | dj_dw = dj_dw/m
109 | dj_db = dj_db/m
110 |
111 | return dj_db,dj_dw
112 |
113 |
--------------------------------------------------------------------------------
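
A minimal usage sketch for the helpers above (not part of the repository): it builds a tiny multi-feature data set with arbitrary values and checks that the loop-based and matrix-based implementations agree. It assumes deeplearning.mplstyle is in the working directory, since lab_utils_common.py loads it at import time.

import numpy as np
from lab_utils_common import (compute_cost, compute_gradient,
                              compute_cost_matrix, compute_gradient_matrix)

# Tiny illustrative data set: 3 examples, 2 features (values are arbitrary).
X = np.array([[1.0, 2.0],
              [2.0, 0.5],
              [3.0, 1.5]])
y = np.array([5.0, 4.0, 8.0])
w = np.array([1.0, 2.0])
b = 0.5

# The loop and vectorized versions should return the same cost and gradients.
assert np.isclose(compute_cost(X, y, w, b), compute_cost_matrix(X, y, w, b))
dj_db, dj_dw = compute_gradient(X, y, w, b)
dj_db_m, dj_dw_m = compute_gradient_matrix(X, y, w, b)
assert np.isclose(dj_db, dj_db_m) and np.allclose(dj_dw, dj_dw_m)
print(f"cost = {compute_cost(X, y, w, b):0.3f}, dj_db = {dj_db:0.3f}, dj_dw = {dj_dw}")
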
/Week1/Optional Labs/lab_utils_uni.py:
--------------------------------------------------------------------------------
1 | """
2 | lab_utils_uni.py
3 | routines used in Course 1, Week2, labs1-3 dealing with single variables (univariate)
4 | """
5 | import numpy as np
6 | import matplotlib.pyplot as plt
7 | from matplotlib.ticker import MaxNLocator
8 | from matplotlib.gridspec import GridSpec
9 | from matplotlib.colors import LinearSegmentedColormap
10 | from ipywidgets import interact
11 | from lab_utils_common import compute_cost
12 | from lab_utils_common import dlblue, dlorange, dldarkred, dlmagenta, dlpurple, dlcolors
13 |
14 | plt.style.use('./deeplearning.mplstyle')
15 | n_bin = 5
16 | dlcm = LinearSegmentedColormap.from_list(
17 | 'dl_map', dlcolors, N=n_bin)
18 |
19 | ##########################################################
20 | # Plotting Routines
21 | ##########################################################
22 |
23 | def plt_house_x(X, y,f_wb=None, ax=None):
24 | ''' plot house with aXis '''
25 | if not ax:
26 | fig, ax = plt.subplots(1,1)
27 | ax.scatter(X, y, marker='x', c='r', label="Actual Value")
28 |
29 | ax.set_title("Housing Prices")
30 | ax.set_ylabel('Price (in 1000s of dollars)')
31 | ax.set_xlabel(f'Size (1000 sqft)')
32 | if f_wb is not None:
33 | ax.plot(X, f_wb, c=dlblue, label="Our Prediction")
34 | ax.legend()
35 |
36 |
37 | def mk_cost_lines(x,y,w,b, ax):
38 | ''' makes vertical cost lines'''
39 | cstr = "cost = (1/m)*("
40 | ctot = 0
41 | label = 'cost for point'
42 | addedbreak = False
43 | for p in zip(x,y):
44 | f_wb_p = w*p[0]+b
45 | c_p = ((f_wb_p - p[1])**2)/2
46 | c_p_txt = c_p
47 | ax.vlines(p[0], p[1],f_wb_p, lw=3, color=dlpurple, ls='dotted', label=label)
48 | label='' #just one
49 | cxy = [p[0], p[1] + (f_wb_p-p[1])/2]
50 | ax.annotate(f'{c_p_txt:0.0f}', xy=cxy, xycoords='data',color=dlpurple,
51 | xytext=(5, 0), textcoords='offset points')
52 | cstr += f"{c_p_txt:0.0f} +"
53 | if len(cstr) > 38 and addedbreak is False:
54 | cstr += "\n"
55 | addedbreak = True
56 | ctot += c_p
57 | ctot = ctot/(len(x))
58 | cstr = cstr[:-1] + f") = {ctot:0.0f}"
59 | ax.text(0.15,0.02,cstr, transform=ax.transAxes, color=dlpurple)
60 |
61 | ##########
62 | # Cost lab
63 | ##########
64 |
65 |
66 | def plt_intuition(x_train, y_train):
67 |
68 | w_range = np.array([200-200,200+200])
69 | tmp_b = 100
70 |
71 | w_array = np.arange(*w_range, 5)
72 | cost = np.zeros_like(w_array)
73 | for i in range(len(w_array)):
74 | tmp_w = w_array[i]
75 | cost[i] = compute_cost(x_train, y_train, tmp_w, tmp_b)
76 |
77 | @interact(w=(*w_range,10),continuous_update=False)
78 | def func( w=150):
79 | f_wb = np.dot(x_train, w) + tmp_b
80 |
81 | fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(8,4))
82 | fig.canvas.toolbar_position = 'bottom'
83 |
84 | mk_cost_lines(x_train, y_train, w, tmp_b, ax[0])
85 | plt_house_x(x_train, y_train, f_wb=f_wb, ax=ax[0])
86 |
87 | ax[1].plot(w_array, cost)
88 | cur_cost = compute_cost(x_train, y_train, w, tmp_b)
89 | ax[1].scatter(w,cur_cost, s=100, color=dldarkred, zorder= 10, label= f"cost at w={w}")
90 | ax[1].hlines(cur_cost, ax[1].get_xlim()[0],w, lw=4, color=dlpurple, ls='dotted')
91 | ax[1].vlines(w, ax[1].get_ylim()[0],cur_cost, lw=4, color=dlpurple, ls='dotted')
92 | ax[1].set_title("Cost vs. w, (b fixed at 100)")
93 | ax[1].set_ylabel('Cost')
94 | ax[1].set_xlabel('w')
95 | ax[1].legend(loc='upper center')
96 | fig.suptitle(f"Minimize Cost: Current Cost = {cur_cost:0.0f}", fontsize=12)
97 | plt.show()
98 |
99 | # cost figure: model plot, cost contour, and 3D cost surface; markers are updated by plt_update_onclick below
100 | def plt_stationary(x_train, y_train):
101 | # setup figure
102 | fig = plt.figure( figsize=(9,8))
103 | #fig = plt.figure(constrained_layout=True, figsize=(12,10))
104 | fig.set_facecolor('#ffffff') #white
105 | fig.canvas.toolbar_position = 'top'
106 | #gs = GridSpec(2, 2, figure=fig, wspace = 0.01)
107 | gs = GridSpec(2, 2, figure=fig)
108 | ax0 = fig.add_subplot(gs[0, 0])
109 | ax1 = fig.add_subplot(gs[0, 1])
110 | ax2 = fig.add_subplot(gs[1, :], projection='3d')
111 | ax = np.array([ax0,ax1,ax2])
112 |
113 | #setup useful ranges and common linspaces
114 | w_range = np.array([200-300.,200+300])
115 | b_range = np.array([50-300., 50+300])
116 | b_space = np.linspace(*b_range, 100)
117 | w_space = np.linspace(*w_range, 100)
118 |
119 | # get cost for w,b ranges for contour and 3D
120 | tmp_b,tmp_w = np.meshgrid(b_space,w_space)
121 | z=np.zeros_like(tmp_b)
122 | for i in range(tmp_w.shape[0]):
123 | for j in range(tmp_w.shape[1]):
124 | z[i,j] = compute_cost(x_train, y_train, tmp_w[i][j], tmp_b[i][j] )
125 | if z[i,j] == 0: z[i,j] = 1e-6
126 |
127 | w0=200;b=-100 #initial point
128 | ### plot model w cost ###
129 | f_wb = np.dot(x_train,w0) + b
130 | mk_cost_lines(x_train,y_train,w0,b,ax[0])
131 | plt_house_x(x_train, y_train, f_wb=f_wb, ax=ax[0])
132 |
133 | ### plot contour ###
134 | CS = ax[1].contour(tmp_w, tmp_b, np.log(z),levels=12, linewidths=2, alpha=0.7,colors=dlcolors)
135 | ax[1].set_title('Cost(w,b)')
136 | ax[1].set_xlabel('w', fontsize=10)
137 | ax[1].set_ylabel('b', fontsize=10)
138 | ax[1].set_xlim(w_range) ; ax[1].set_ylim(b_range)
139 | cscat = ax[1].scatter(w0,b, s=100, color=dlblue, zorder= 10, label="cost with \ncurrent w,b")
140 | chline = ax[1].hlines(b, ax[1].get_xlim()[0],w0, lw=4, color=dlpurple, ls='dotted')
141 | cvline = ax[1].vlines(w0, ax[1].get_ylim()[0],b, lw=4, color=dlpurple, ls='dotted')
142 | ax[1].text(0.5,0.95,"Click to choose w,b", bbox=dict(facecolor='white', ec = 'black'), fontsize = 10,
143 | transform=ax[1].transAxes, verticalalignment = 'center', horizontalalignment= 'center')
144 |
145 | #Surface plot of the cost function J(w,b)
146 | ax[2].plot_surface(tmp_w, tmp_b, z, cmap = dlcm, alpha=0.3, antialiased=True)
147 | ax[2].plot_wireframe(tmp_w, tmp_b, z, color='k', alpha=0.1)
148 | plt.xlabel("$w$")
149 | plt.ylabel("$b$")
150 | ax[2].zaxis.set_rotate_label(False)
151 | ax[2].xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
152 | ax[2].yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
153 | ax[2].zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
154 | ax[2].set_zlabel("J(w, b)\n\n", rotation=90)
155 | plt.title("Cost(w,b) \n [You can rotate this figure]", size=12)
156 | ax[2].view_init(30, -120)
157 |
158 | return fig,ax, [cscat, chline, cvline]
159 |
160 |
161 | #https://matplotlib.org/stable/users/event_handling.html
162 | class plt_update_onclick:
163 | def __init__(self, fig, ax, x_train,y_train, dyn_items):
164 | self.fig = fig
165 | self.ax = ax
166 | self.x_train = x_train
167 | self.y_train = y_train
168 | self.dyn_items = dyn_items
169 | self.cid = fig.canvas.mpl_connect('button_press_event', self)
170 |
171 | def __call__(self, event):
172 | if event.inaxes == self.ax[1]:
173 | ws = event.xdata
174 | bs = event.ydata
175 | cst = compute_cost(self.x_train, self.y_train, ws, bs)
176 |
177 | # clear and redraw line plot
178 | self.ax[0].clear()
179 | f_wb = np.dot(self.x_train,ws) + bs
180 | mk_cost_lines(self.x_train,self.y_train,ws,bs,self.ax[0])
181 | plt_house_x(self.x_train, self.y_train, f_wb=f_wb, ax=self.ax[0])
182 |
183 | # remove lines and re-add on countour plot and 3d plot
184 | for artist in self.dyn_items:
185 | artist.remove()
186 |
187 | a = self.ax[1].scatter(ws,bs, s=100, color=dlblue, zorder= 10, label="cost with \ncurrent w,b")
188 | b = self.ax[1].hlines(bs, self.ax[1].get_xlim()[0],ws, lw=4, color=dlpurple, ls='dotted')
189 | c = self.ax[1].vlines(ws, self.ax[1].get_ylim()[0],bs, lw=4, color=dlpurple, ls='dotted')
190 | d = self.ax[1].annotate(f"Cost: {cst:.0f}", xy= (ws, bs), xytext = (4,4), textcoords = 'offset points',
191 | bbox=dict(facecolor='white'), size = 10)
192 |
193 | #Add point in 3D surface plot
194 | e = self.ax[2].scatter3D(ws, bs,cst , marker='X', s=100)
195 |
196 | self.dyn_items = [a,b,c,d,e]
197 | self.fig.canvas.draw()
198 |
199 |
200 | def soup_bowl():
201 | """ Create figure and plot with a 3D projection"""
202 | fig = plt.figure(figsize=(8,8))
203 |
204 | #Plot configuration
205 | ax = fig.add_subplot(111, projection='3d')
206 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
207 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
208 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
209 | ax.zaxis.set_rotate_label(False)
210 | ax.view_init(45, -120)
211 |
212 | #Useful linearspaces to give values to the parameters w and b
213 | w = np.linspace(-20, 20, 100)
214 | b = np.linspace(-20, 20, 100)
215 |
216 | #Get the z value for a bowl-shaped cost function
217 | z=np.zeros((len(w), len(b)))
218 | j=0
219 | for x in w:
220 | i=0
221 | for y in b:
222 | z[i,j] = x**2 + y**2
223 | i+=1
224 | j+=1
225 |
226 | #Meshgrid used for plotting 3D functions
227 | W, B = np.meshgrid(w, b)
228 |
229 | #Create the 3D surface plot of the bowl-shaped cost function
230 | ax.plot_surface(W, B, z, cmap = "Spectral_r", alpha=0.7, antialiased=False)
231 | ax.plot_wireframe(W, B, z, color='k', alpha=0.1)
232 | ax.set_xlabel("$w$")
233 | ax.set_ylabel("$b$")
234 | ax.set_zlabel("$J(w,b)$", rotation=90)
235 | ax.set_title("$J(w,b)$\n [You can rotate this figure]", size=15)
236 |
237 | plt.show()
238 |
239 | def inbounds(a,b,xlim,ylim):
240 | xlow,xhigh = xlim
241 | ylow,yhigh = ylim
242 | ax, ay = a
243 | bx, by = b
244 | if (ax > xlow and ax < xhigh) and (bx > xlow and bx < xhigh) \
245 | and (ay > ylow and ay < yhigh) and (by > ylow and by < yhigh):
246 | return True
247 | return False
248 |
249 | def plt_contour_wgrad(x, y, hist, ax, w_range=[-100, 500, 5], b_range=[-500, 500, 5],
250 | contours = [0.1,50,1000,5000,10000,25000,50000],
251 | resolution=5, w_final=200, b_final=100,step=10 ):
252 | b0,w0 = np.meshgrid(np.arange(*b_range),np.arange(*w_range))
253 | z=np.zeros_like(b0)
254 | for i in range(w0.shape[0]):
255 | for j in range(w0.shape[1]):
256 | z[i][j] = compute_cost(x, y, w0[i][j], b0[i][j] )
257 |
258 | CS = ax.contour(w0, b0, z, contours, linewidths=2,
259 | colors=[dlblue, dlorange, dldarkred, dlmagenta, dlpurple])
260 | ax.clabel(CS, inline=1, fmt='%1.0f', fontsize=10)
261 | ax.set_xlabel("w"); ax.set_ylabel("b")
262 | ax.set_title('Contour plot of cost J(w,b), vs b,w with path of gradient descent')
263 | w = w_final; b=b_final
264 | ax.hlines(b, ax.get_xlim()[0],w, lw=2, color=dlpurple, ls='dotted')
265 | ax.vlines(w, ax.get_ylim()[0],b, lw=2, color=dlpurple, ls='dotted')
266 |
267 | base = hist[0]
268 | for point in hist[0::step]:
269 | edist = np.sqrt((base[0] - point[0])**2 + (base[1] - point[1])**2)
270 | if(edist > resolution or point==hist[-1]):
271 | if inbounds(point,base, ax.get_xlim(),ax.get_ylim()):
272 | plt.annotate('', xy=point, xytext=base,xycoords='data',
273 | arrowprops={'arrowstyle': '->', 'color': 'r', 'lw': 3},
274 | va='center', ha='center')
275 | base=point
276 | return
277 |
278 |
279 | def plt_divergence(p_hist, J_hist, x_train,y_train):
280 |
281 | x=np.zeros(len(p_hist))
282 | y=np.zeros(len(p_hist))
283 | v=np.zeros(len(p_hist))
284 | for i in range(len(p_hist)):
285 | x[i] = p_hist[i][0]
286 | y[i] = p_hist[i][1]
287 | v[i] = J_hist[i]
288 |
289 | fig = plt.figure(figsize=(12,5))
290 | plt.subplots_adjust( wspace=0 )
291 | gs = fig.add_gridspec(1, 5)
292 | fig.suptitle(f"Cost escalates when learning rate is too large")
293 | #===============
294 | # First subplot
295 | #===============
296 | ax = fig.add_subplot(gs[:2], )
297 |
298 | # Print w vs cost to see minimum
299 | fix_b = 100
300 | w_array = np.arange(-70000, 70000, 1000)
301 | cost = np.zeros_like(w_array)
302 |
303 | for i in range(len(w_array)):
304 | tmp_w = w_array[i]
305 | cost[i] = compute_cost(x_train, y_train, tmp_w, fix_b)
306 |
307 | ax.plot(w_array, cost)
308 | ax.plot(x,v, c=dlmagenta)
309 | ax.set_title("Cost vs w, b set to 100")
310 | ax.set_ylabel('Cost')
311 | ax.set_xlabel('w')
312 | ax.xaxis.set_major_locator(MaxNLocator(2))
313 |
314 | #===============
315 | # Second Subplot
316 | #===============
317 |
318 | tmp_b,tmp_w = np.meshgrid(np.arange(-35000, 35000, 500),np.arange(-70000, 70000, 500))
319 | z=np.zeros_like(tmp_b)
320 | for i in range(tmp_w.shape[0]):
321 | for j in range(tmp_w.shape[1]):
322 | z[i][j] = compute_cost(x_train, y_train, tmp_w[i][j], tmp_b[i][j] )
323 |
324 | ax = fig.add_subplot(gs[2:], projection='3d')
325 | ax.plot_surface(tmp_w, tmp_b, z, alpha=0.3, color=dlblue)
326 | ax.xaxis.set_major_locator(MaxNLocator(2))
327 | ax.yaxis.set_major_locator(MaxNLocator(2))
328 |
329 | ax.set_xlabel('w', fontsize=16)
330 | ax.set_ylabel('b', fontsize=16)
331 | ax.set_zlabel('\ncost', fontsize=16)
332 | plt.title('Cost vs (b, w)')
333 | # Customize the view angle
334 | ax.view_init(elev=20., azim=-65)
335 | ax.plot(x, y, v,c=dlmagenta)
336 |
337 | return
338 |
339 | # draw derivative line
340 | # y = m*(x - x1) + y1
341 | def add_line(dj_dx, x1, y1, d, ax):
342 | x = np.linspace(x1-d, x1+d,50)
343 | y = dj_dx*(x - x1) + y1
344 | ax.scatter(x1, y1, color=dlblue, s=50)
345 | ax.plot(x, y, '--', c=dldarkred,zorder=10, linewidth = 1)
346 | xoff = 30 if x1 == 200 else 10
347 | ax.annotate(r"$\frac{\partial J}{\partial w}$ =%d" % dj_dx, fontsize=14,
348 | xy=(x1, y1), xycoords='data',
349 | xytext=(xoff, 10), textcoords='offset points',
350 | arrowprops=dict(arrowstyle="->"),
351 | horizontalalignment='left', verticalalignment='top')
352 |
353 | def plt_gradients(x_train,y_train, f_compute_cost, f_compute_gradient):
354 | #===============
355 | # First subplot
356 | #===============
357 | fig,ax = plt.subplots(1,2,figsize=(12,4))
358 |
359 | # Print w vs cost to see minimum
360 | fix_b = 100
361 | w_array = np.linspace(-100, 500, 50)
362 | w_array = np.linspace(0, 400, 50)
363 | cost = np.zeros_like(w_array)
364 |
365 | for i in range(len(w_array)):
366 | tmp_w = w_array[i]
367 | cost[i] = f_compute_cost(x_train, y_train, tmp_w, fix_b)
368 | ax[0].plot(w_array, cost,linewidth=1)
369 | ax[0].set_title("Cost vs w, with gradient; b set to 100")
370 | ax[0].set_ylabel('Cost')
371 | ax[0].set_xlabel('w')
372 |
373 | # plot lines for fixed b=100
374 | for tmp_w in [100,200,300]:
375 | fix_b = 100
376 | dj_dw,dj_db = f_compute_gradient(x_train, y_train, tmp_w, fix_b )
377 | j = f_compute_cost(x_train, y_train, tmp_w, fix_b)
378 | add_line(dj_dw, tmp_w, j, 30, ax[0])
379 |
380 | #===============
381 | # Second Subplot
382 | #===============
383 |
384 | tmp_b,tmp_w = np.meshgrid(np.linspace(-200, 200, 10), np.linspace(-100, 600, 10))
385 | U = np.zeros_like(tmp_w)
386 | V = np.zeros_like(tmp_b)
387 | for i in range(tmp_w.shape[0]):
388 | for j in range(tmp_w.shape[1]):
389 | U[i][j], V[i][j] = f_compute_gradient(x_train, y_train, tmp_w[i][j], tmp_b[i][j] )
390 | X = tmp_w
391 | Y = tmp_b
392 | n=-2
393 | color_array = np.sqrt(((V-n)/2)**2 + ((U-n)/2)**2)
394 |
395 | ax[1].set_title('Gradient shown in quiver plot')
396 | Q = ax[1].quiver(X, Y, U, V, color_array, units='width', )
397 | ax[1].quiverkey(Q, 0.9, 0.9, 2, r'$2 \frac{m}{s}$', labelpos='E',coordinates='figure')
398 | ax[1].set_xlabel("w"); ax[1].set_ylabel("b")
399 |
--------------------------------------------------------------------------------
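
A sketch of how these plotting helpers are typically driven from the Week 1 cost and gradient notebooks (assumed usage; requires a Jupyter session with ipywidgets for the interactive parts). The two-point housing set below is the one the labs use: a 1000 sqft house sold for $300k and a 2000 sqft house sold for $500k.

import numpy as np
from lab_utils_uni import plt_intuition, plt_stationary, plt_update_onclick, soup_bowl

# Two-point training set used throughout the Week 1 labs.
x_train = np.array([1.0, 2.0])      # size in 1000 sqft
y_train = np.array([300.0, 500.0])  # price in 1000s of dollars

plt_intuition(x_train, y_train)     # interactive cost-vs-w slider (b fixed at 100)

# Model plot, cost contour, and 3D surface; clicking on the contour updates the markers.
fig, ax, dyn_items = plt_stationary(x_train, y_train)
updater = plt_update_onclick(fig, ax, x_train, y_train, dyn_items)

soup_bowl()                         # idealized bowl-shaped cost surface
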
/Week1/Practice Quiz - Regression/Regression Quiz.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Practice Quiz - Regression/Regression Quiz.png
--------------------------------------------------------------------------------
/Week1/Practice Quiz - Train the model with gradient descent/Practice Quiz - Train the model with gradient descent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Practice Quiz - Train the model with gradient descent/Practice Quiz - Train the model with gradient descent.png
--------------------------------------------------------------------------------
/Week1/Practice Quiz - Supervised vs Unsupervised Learning/Practice Quiz - Supervised vs Unsupervised Learning.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week1/Practice Quiz - Supervised vs Unsupervised Learning/Practice Quiz - Supervised vs Unsupervised Learning.png
--------------------------------------------------------------------------------
/Week2/C1W2A1/data/ex1data1.txt:
--------------------------------------------------------------------------------
1 | 6.1101,17.592
2 | 5.5277,9.1302
3 | 8.5186,13.662
4 | 7.0032,11.854
5 | 5.8598,6.8233
6 | 8.3829,11.886
7 | 7.4764,4.3483
8 | 8.5781,12
9 | 6.4862,6.5987
10 | 5.0546,3.8166
11 | 5.7107,3.2522
12 | 14.164,15.505
13 | 5.734,3.1551
14 | 8.4084,7.2258
15 | 5.6407,0.71618
16 | 5.3794,3.5129
17 | 6.3654,5.3048
18 | 5.1301,0.56077
19 | 6.4296,3.6518
20 | 7.0708,5.3893
21 | 6.1891,3.1386
22 | 20.27,21.767
23 | 5.4901,4.263
24 | 6.3261,5.1875
25 | 5.5649,3.0825
26 | 18.945,22.638
27 | 12.828,13.501
28 | 10.957,7.0467
29 | 13.176,14.692
30 | 22.203,24.147
31 | 5.2524,-1.22
32 | 6.5894,5.9966
33 | 9.2482,12.134
34 | 5.8918,1.8495
35 | 8.2111,6.5426
36 | 7.9334,4.5623
37 | 8.0959,4.1164
38 | 5.6063,3.3928
39 | 12.836,10.117
40 | 6.3534,5.4974
41 | 5.4069,0.55657
42 | 6.8825,3.9115
43 | 11.708,5.3854
44 | 5.7737,2.4406
45 | 7.8247,6.7318
46 | 7.0931,1.0463
47 | 5.0702,5.1337
48 | 5.8014,1.844
49 | 11.7,8.0043
50 | 5.5416,1.0179
51 | 7.5402,6.7504
52 | 5.3077,1.8396
53 | 7.4239,4.2885
54 | 7.6031,4.9981
55 | 6.3328,1.4233
56 | 6.3589,-1.4211
57 | 6.2742,2.4756
58 | 5.6397,4.6042
59 | 9.3102,3.9624
60 | 9.4536,5.4141
61 | 8.8254,5.1694
62 | 5.1793,-0.74279
63 | 21.279,17.929
64 | 14.908,12.054
65 | 18.959,17.054
66 | 7.2182,4.8852
67 | 8.2951,5.7442
68 | 10.236,7.7754
69 | 5.4994,1.0173
70 | 20.341,20.992
71 | 10.136,6.6799
72 | 7.3345,4.0259
73 | 6.0062,1.2784
74 | 7.2259,3.3411
75 | 5.0269,-2.6807
76 | 6.5479,0.29678
77 | 7.5386,3.8845
78 | 5.0365,5.7014
79 | 10.274,6.7526
80 | 5.1077,2.0576
81 | 5.7292,0.47953
82 | 5.1884,0.20421
83 | 6.3557,0.67861
84 | 9.7687,7.5435
85 | 6.5159,5.3436
86 | 8.5172,4.2415
87 | 9.1802,6.7981
88 | 6.002,0.92695
89 | 5.5204,0.152
90 | 5.0594,2.8214
91 | 5.7077,1.8451
92 | 7.6366,4.2959
93 | 5.8707,7.2029
94 | 5.3054,1.9869
95 | 8.2934,0.14454
96 | 13.394,9.0551
97 | 5.4369,0.61705
98 |
--------------------------------------------------------------------------------
/Week2/C1W2A1/data/ex1data2.txt:
--------------------------------------------------------------------------------
1 | 2104,3,399900
2 | 1600,3,329900
3 | 2400,3,369000
4 | 1416,2,232000
5 | 3000,4,539900
6 | 1985,4,299900
7 | 1534,3,314900
8 | 1427,3,198999
9 | 1380,3,212000
10 | 1494,3,242500
11 | 1940,4,239999
12 | 2000,3,347000
13 | 1890,3,329999
14 | 4478,5,699900
15 | 1268,3,259900
16 | 2300,4,449900
17 | 1320,2,299900
18 | 1236,3,199900
19 | 2609,4,499998
20 | 3031,4,599000
21 | 1767,3,252900
22 | 1888,2,255000
23 | 1604,3,242900
24 | 1962,4,259900
25 | 3890,3,573900
26 | 1100,3,249900
27 | 1458,3,464500
28 | 2526,3,469000
29 | 2200,3,475000
30 | 2637,3,299900
31 | 1839,2,349900
32 | 1000,1,169900
33 | 2040,4,314900
34 | 3137,3,579900
35 | 1811,4,285900
36 | 1437,3,249900
37 | 1239,3,229900
38 | 2132,4,345000
39 | 4215,4,549000
40 | 2162,4,287000
41 | 1664,2,368500
42 | 2238,3,329900
43 | 2567,4,314000
44 | 1200,3,299000
45 | 852,2,179900
46 | 1852,4,299900
47 | 1203,3,239500
48 |
--------------------------------------------------------------------------------
/Week2/C1W2A1/public_tests.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def compute_cost_test(target):
4 | # print("Using X with shape (4, 1)")
5 | # Case 1
6 | x = np.array([2, 4, 6, 8]).T
7 | y = np.array([7, 11, 15, 19]).T
8 | initial_w = 2
9 | initial_b = 3.0
10 | cost = target(x, y, initial_w, initial_b)
11 | assert cost == 0, f"Case 1: Cost must be 0 for a perfect prediction but got {cost}"
12 |
13 | # Case 2
14 | x = np.array([2, 4, 6, 8]).T
15 | y = np.array([7, 11, 15, 19]).T
16 | initial_w = 2.0
17 | initial_b = 1.0
18 | cost = target(x, y, initial_w, initial_b)
19 | assert cost == 2, f"Case 2: Cost must be 2 but got {cost}"
20 |
21 | # print("Using X with shape (5, 1)")
22 | # Case 3
23 | x = np.array([1.5, 2.5, 3.5, 4.5, 1.5]).T
24 | y = np.array([4, 7, 10, 13, 5]).T
25 | initial_w = 1
26 | initial_b = 0.0
27 | cost = target(x, y, initial_w, initial_b)
28 | assert np.isclose(cost, 15.325), f"Case 3: Cost must be 15.325 but got {cost}"
29 |
30 | # Case 4
31 | initial_b = 1.0
32 | cost = target(x, y, initial_w, initial_b)
33 | assert np.isclose(cost, 10.725), f"Case 4: Cost must be 10.725 but got {cost}"
34 |
35 | # Case 5
36 | y = y - 2
37 | initial_b = 1.0
38 | cost = target(x, y, initial_w, initial_b)
39 | assert np.isclose(cost, 4.525), f"Case 5: Cost must be 4.525 but got {cost}"
40 |
41 | print("\033[92mAll tests passed!")
42 |
43 | def compute_gradient_test(target):
44 | print("Using X with shape (4, 1)")
45 | # Case 1
46 | x = np.array([2, 4, 6, 8]).T
47 | y = np.array([4.5, 8.5, 12.5, 16.5]).T
48 | initial_w = 2.
49 | initial_b = 0.5
50 | dj_dw, dj_db = target(x, y, initial_w, initial_b)
51 | #assert dj_dw.shape == initial_w.shape, f"Wrong shape for dj_dw. {dj_dw} != {initial_w.shape}"
52 | assert dj_db == 0.0, f"Case 1: dj_db is wrong: {dj_db} != 0.0"
53 | assert np.allclose(dj_dw, 0), f"Case 1: dj_dw is wrong: {dj_dw} != [[0.0]]"
54 |
55 | # Case 2
56 | x = np.array([2, 4, 6, 8]).T
57 | y = np.array([4, 7, 10, 13]).T + 2
58 | initial_w = 1.5
59 | initial_b = 1
60 | dj_dw, dj_db = target(x, y, initial_w, initial_b)
61 | #assert dj_dw.shape == initial_w.shape, f"Wrong shape for dj_dw. {dj_dw} != {initial_w.shape}"
62 | assert dj_db == -2, f"Case 2: dj_db is wrong: {dj_db} != -2"
63 | assert np.allclose(dj_dw, -10.0), f"Case 2: dj_dw is wrong: {dj_dw} != -10.0"
64 |
65 | print("\033[92mAll tests passed!")
66 |
67 |
68 |
--------------------------------------------------------------------------------
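
A sketch of how these graders are meant to be invoked from the assignment notebook, using hypothetical reference implementations of the single-variable cost and gradient (the graded notebook supplies its own versions):

import numpy as np
from public_tests import compute_cost_test, compute_gradient_test

# Hypothetical reference implementations matching the formulas the assignment asks for.
def compute_cost(x, y, w, b):
    m = x.shape[0]
    return np.sum((w * x + b - y) ** 2) / (2 * m)

def compute_gradient(x, y, w, b):
    m = x.shape[0]
    err = w * x + b - y
    dj_dw = np.dot(err, x) / m   # gradient w.r.t. w
    dj_db = np.sum(err) / m      # gradient w.r.t. b
    return dj_dw, dj_db

compute_cost_test(compute_cost)          # prints "All tests passed!" when correct
compute_gradient_test(compute_gradient)
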
/Week2/C1W2A1/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def load_data():
4 | data = np.loadtxt("data/ex1data1.txt", delimiter=',')
5 | X = data[:,0]
6 | y = data[:,1]
7 | return X, y
8 |
9 | def load_data_multi():
10 | data = np.loadtxt("data/ex1data2.txt", delimiter=',')
11 | X = data[:,:2]
12 | y = data[:,2]
13 | return X, y
14 |
--------------------------------------------------------------------------------
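
A quick sketch of loading the assignment data with these helpers (run from the C1W2A1 directory; the shapes follow from the data files above):

from utils import load_data, load_data_multi

X, y = load_data()                     # ex1data1.txt: 97 examples, single feature
print(X.shape, y.shape)                # (97,) (97,)

X_multi, y_multi = load_data_multi()   # ex1data2.txt: 47 examples, 2 features
print(X_multi.shape, y_multi.shape)    # (47, 2) (47,)
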
/Week2/Optional Labs/C1_W2_Lab06_Sklearn_Normal_Soln.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "pycharm": {
7 | "name": "#%% md\n"
8 | }
9 | },
10 | "source": [
11 | "# Optional Lab: Linear Regression using Scikit-Learn"
12 | ]
13 | },
14 | {
15 | "cell_type": "markdown",
16 | "metadata": {
17 | "pycharm": {
18 | "name": "#%% md\n"
19 | }
20 | },
21 | "source": [
22 | "There is an open-source, commercially usable machine learning toolkit called [scikit-learn](https://scikit-learn.org/stable/index.html). This toolkit contains implementations of many of the algorithms that you will work with in this course.\n",
23 | "\n"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {
29 | "pycharm": {
30 | "name": "#%% md\n"
31 | }
32 | },
33 | "source": [
34 | "## Goals\n",
35 | "In this lab you will:\n",
36 | "- Utilize scikit-learn to implement linear regression using a close form solution based on the normal equation"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "metadata": {
42 | "pycharm": {
43 | "name": "#%% md\n"
44 | }
45 | },
46 | "source": [
47 | "## Tools\n",
48 | "You will utilize functions from scikit-learn as well as matplotlib and NumPy. "
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 1,
54 | "metadata": {
55 | "pycharm": {
56 | "name": "#%%\n"
57 | }
58 | },
59 | "outputs": [],
60 | "source": [
61 | "import numpy as np\n",
62 | "import matplotlib.pyplot as plt\n",
63 | "from sklearn.linear_model import LinearRegression\n",
64 | "from lab_utils_multi import load_house_data\n",
65 | "plt.style.use('./deeplearning.mplstyle')\n",
66 | "np.set_printoptions(precision=2)"
67 | ]
68 | },
69 | {
70 | "cell_type": "markdown",
71 | "metadata": {
72 | "pycharm": {
73 | "name": "#%% md\n"
74 | }
75 | },
76 | "source": [
77 | "\n",
78 | "# Linear Regression, closed-form solution\n",
79 | "Scikit-learn has the [linear regression model](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression) which implements a closed-form linear regression.\n",
80 | "\n",
81 | "Let's use the data from the early labs - a house with 1000 square feet sold for \\\\$300,000 and a house with 2000 square feet sold for \\\\$500,000.\n",
82 | "\n",
83 | "| Size (1000 sqft) | Price (1000s of dollars) |\n",
84 | "| ----------------| ------------------------ |\n",
85 | "| 1 | 300 |\n",
86 | "| 2 | 500 |\n"
87 | ]
88 | },
89 | {
90 | "cell_type": "markdown",
91 | "metadata": {
92 | "pycharm": {
93 | "name": "#%% md\n"
94 | }
95 | },
96 | "source": [
97 | "### Load the data set"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": 2,
103 | "metadata": {
104 | "pycharm": {
105 | "name": "#%%\n"
106 | }
107 | },
108 | "outputs": [],
109 | "source": [
110 | "X_train = np.array([1.0, 2.0]) #features\n",
111 | "y_train = np.array([300, 500]) #target value"
112 | ]
113 | },
114 | {
115 | "cell_type": "markdown",
116 | "metadata": {
117 | "pycharm": {
118 | "name": "#%% md\n"
119 | }
120 | },
121 | "source": [
122 | "### Create and fit the model\n",
123 | "The code below performs regression using scikit-learn. \n",
124 | "The first step creates a regression object. \n",
125 | "The second step utilizes one of the methods associated with the object, `fit`. This performs regression, fitting the parameters to the input data. The toolkit expects a two-dimensional X matrix."
126 | ]
127 | },
128 | {
129 | "cell_type": "code",
130 | "execution_count": 3,
131 | "metadata": {
132 | "pycharm": {
133 | "name": "#%%\n"
134 | }
135 | },
136 | "outputs": [
137 | {
138 | "data": {
139 | "text/plain": "LinearRegression()"
140 | },
141 | "execution_count": 3,
142 | "metadata": {},
143 | "output_type": "execute_result"
144 | }
145 | ],
146 | "source": [
147 | "linear_model = LinearRegression()\n",
148 | "#X must be a 2-D Matrix\n",
149 | "linear_model.fit(X_train.reshape(-1, 1), y_train) "
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {
155 | "pycharm": {
156 | "name": "#%% md\n"
157 | }
158 | },
159 | "source": [
160 | "### View Parameters \n",
161 | "The $\\mathbf{w}$ and $\\mathbf{b}$ parameters are referred to as 'coefficients' and 'intercept' in scikit-learn."
162 | ]
163 | },
164 | {
165 | "cell_type": "code",
166 | "execution_count": 4,
167 | "metadata": {
168 | "pycharm": {
169 | "name": "#%%\n"
170 | }
171 | },
172 | "outputs": [
173 | {
174 | "name": "stdout",
175 | "output_type": "stream",
176 | "text": [
177 | "w = [200.], b = 100.00\n",
178 | "'manual' prediction: f_wb = wx+b : [240100.]\n"
179 | ]
180 | }
181 | ],
182 | "source": [
183 | "b = linear_model.intercept_\n",
184 | "w = linear_model.coef_\n",
185 | "print(f\"w = {w:}, b = {b:0.2f}\")\n",
186 | "print(f\"'manual' prediction: f_wb = wx+b : {1200*w + b}\")"
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "metadata": {
192 | "pycharm": {
193 | "name": "#%% md\n"
194 | }
195 | },
196 | "source": [
197 | "### Make Predictions\n",
198 | "\n",
199 | "Calling the `predict` function generates predictions."
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": 5,
205 | "metadata": {
206 | "pycharm": {
207 | "name": "#%%\n"
208 | }
209 | },
210 | "outputs": [
211 | {
212 | "name": "stdout",
213 | "output_type": "stream",
214 | "text": [
215 | "Prediction on training set: [300. 500.]\n",
216 | "Prediction for 1200 sqft house: $240100.00\n"
217 | ]
218 | }
219 | ],
220 | "source": [
221 | "y_pred = linear_model.predict(X_train.reshape(-1, 1))\n",
222 | "\n",
223 | "print(\"Prediction on training set:\", y_pred)\n",
224 | "\n",
225 | "X_test = np.array([[1200]])\n",
226 | "print(f\"Prediction for 1200 sqft house: ${linear_model.predict(X_test)[0]:0.2f}\")"
227 | ]
228 | },
229 | {
230 | "cell_type": "markdown",
231 | "metadata": {
232 | "pycharm": {
233 | "name": "#%% md\n"
234 | }
235 | },
236 | "source": [
237 | "## Second Example\n",
238 | "The second example is from an earlier lab with multiple features. The final parameter values and predictions are very close to the results from the un-normalized 'long-run' from that lab. That un-normalized run took hours to produce results, while this is nearly instantaneous. The closed-form solution work well on smaller data sets such as these but can be computationally demanding on larger data sets. \n",
239 | ">The closed-form solution does not require normalization."
240 | ]
241 | },
242 | {
243 | "cell_type": "code",
244 | "execution_count": 6,
245 | "metadata": {
246 | "pycharm": {
247 | "name": "#%%\n"
248 | }
249 | },
250 | "outputs": [],
251 | "source": [
252 | "# load the dataset\n",
253 | "X_train, y_train = load_house_data()\n",
254 | "X_features = ['size(sqft)','bedrooms','floors','age']"
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "execution_count": 7,
260 | "metadata": {
261 | "pycharm": {
262 | "name": "#%%\n"
263 | }
264 | },
265 | "outputs": [
266 | {
267 | "data": {
268 | "text/plain": "LinearRegression()"
269 | },
270 | "execution_count": 7,
271 | "metadata": {},
272 | "output_type": "execute_result"
273 | }
274 | ],
275 | "source": [
276 | "linear_model = LinearRegression()\n",
277 | "linear_model.fit(X_train, y_train) "
278 | ]
279 | },
280 | {
281 | "cell_type": "code",
282 | "execution_count": 8,
283 | "metadata": {
284 | "pycharm": {
285 | "name": "#%%\n"
286 | }
287 | },
288 | "outputs": [
289 | {
290 | "name": "stdout",
291 | "output_type": "stream",
292 | "text": [
293 | "w = [ 0.27 -32.62 -67.25 -1.47], b = 220.42\n"
294 | ]
295 | }
296 | ],
297 | "source": [
298 | "b = linear_model.intercept_\n",
299 | "w = linear_model.coef_\n",
300 | "print(f\"w = {w:}, b = {b:0.2f}\")"
301 | ]
302 | },
303 | {
304 | "cell_type": "code",
305 | "execution_count": 9,
306 | "metadata": {
307 | "pycharm": {
308 | "name": "#%%\n"
309 | }
310 | },
311 | "outputs": [
312 | {
313 | "name": "stdout",
314 | "output_type": "stream",
315 | "text": [
316 | "Prediction on training set:\n",
317 | " [295.18 485.98 389.52 492.15]\n",
318 | "prediction using w,b:\n",
319 | " [295.18 485.98 389.52 492.15]\n",
320 | "Target values \n",
321 | " [300. 509.8 394. 540. ]\n",
322 | " predicted price of a house with 1200 sqft, 3 bedrooms, 1 floor, 40 years old = $318709.09\n"
323 | ]
324 | }
325 | ],
326 | "source": [
327 | "print(f\"Prediction on training set:\\n {linear_model.predict(X_train)[:4]}\" )\n",
328 | "print(f\"prediction using w,b:\\n {(X_train @ w + b)[:4]}\")\n",
329 | "print(f\"Target values \\n {y_train[:4]}\")\n",
330 | "\n",
331 | "x_house = np.array([1200, 3,1, 40]).reshape(-1,4)\n",
332 | "x_house_predict = linear_model.predict(x_house)[0]\n",
333 | "print(f\" predicted price of a house with 1200 sqft, 3 bedrooms, 1 floor, 40 years old = ${x_house_predict*1000:0.2f}\")"
334 | ]
335 | },
336 | {
337 | "cell_type": "markdown",
338 | "metadata": {
339 | "pycharm": {
340 | "name": "#%% md\n"
341 | }
342 | },
343 | "source": [
344 | "## Congratulations!\n",
345 | "In this lab you:\n",
346 | "- utilized an open-source machine learning toolkit, scikit-learn\n",
347 | "- implemented linear regression using a close-form solution from that toolkit"
348 | ]
349 | },
350 | {
351 | "cell_type": "code",
352 | "execution_count": 9,
353 | "metadata": {
354 | "pycharm": {
355 | "name": "#%%\n"
356 | }
357 | },
358 | "outputs": [],
359 | "source": []
360 | }
361 | ],
362 | "metadata": {
363 | "kernelspec": {
364 | "display_name": "Python 3",
365 | "language": "python",
366 | "name": "python3"
367 | },
368 | "language_info": {
369 | "codemirror_mode": {
370 | "name": "ipython",
371 | "version": 3
372 | },
373 | "file_extension": ".py",
374 | "mimetype": "text/x-python",
375 | "name": "python",
376 | "nbconvert_exporter": "python",
377 | "pygments_lexer": "ipython3",
378 | "version": "3.8.10"
379 | }
380 | },
381 | "nbformat": 4,
382 | "nbformat_minor": 5
383 | }
--------------------------------------------------------------------------------
/Week2/Optional Labs/data/houses.txt:
--------------------------------------------------------------------------------
1 | 9.520000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,6.500000000000000000e+01,2.715000000000000000e+02
2 | 1.244000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.400000000000000000e+01,3.000000000000000000e+02
3 | 1.947000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.700000000000000000e+01,5.098000000000000114e+02
4 | 1.725000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,4.200000000000000000e+01,3.940000000000000000e+02
5 | 1.959000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.500000000000000000e+01,5.400000000000000000e+02
6 | 1.314000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.400000000000000000e+01,4.150000000000000000e+02
7 | 8.640000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,6.600000000000000000e+01,2.300000000000000000e+02
8 | 1.836000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.700000000000000000e+01,5.600000000000000000e+02
9 | 1.026000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,4.300000000000000000e+01,2.940000000000000000e+02
10 | 3.194000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,8.700000000000000000e+01,7.182000000000000455e+02
11 | 7.880000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,8.000000000000000000e+01,2.000000000000000000e+02
12 | 1.200000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,1.700000000000000000e+01,3.020000000000000000e+02
13 | 1.557000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.800000000000000000e+01,4.680000000000000000e+02
14 | 1.430000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.000000000000000000e+01,3.741999999999999886e+02
15 | 1.220000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.500000000000000000e+01,3.880000000000000000e+02
16 | 1.092000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,6.400000000000000000e+01,2.820000000000000000e+02
17 | 8.480000000000000000e+02,1.000000000000000000e+00,1.000000000000000000e+00,1.700000000000000000e+01,3.118000000000000114e+02
18 | 1.682000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.300000000000000000e+01,4.010000000000000000e+02
19 | 1.768000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.800000000000000000e+01,4.498000000000000114e+02
20 | 1.040000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,4.400000000000000000e+01,3.010000000000000000e+02
21 | 1.652000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,2.100000000000000000e+01,5.020000000000000000e+02
22 | 1.088000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,3.500000000000000000e+01,3.400000000000000000e+02
23 | 1.316000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.400000000000000000e+01,4.002819999999999823e+02
24 | 1.593000000000000000e+03,0.000000000000000000e+00,1.000000000000000000e+00,2.000000000000000000e+01,5.720000000000000000e+02
25 | 9.720000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,7.300000000000000000e+01,2.640000000000000000e+02
26 | 1.097000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,3.700000000000000000e+01,3.040000000000000000e+02
27 | 1.004000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,5.100000000000000000e+01,2.980000000000000000e+02
28 | 9.040000000000000000e+02,3.000000000000000000e+00,1.000000000000000000e+00,5.500000000000000000e+01,2.198000000000000114e+02
29 | 1.694000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.300000000000000000e+01,4.906999999999999886e+02
30 | 1.073000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.000000000000000000e+02,2.169600000000000080e+02
31 | 1.419000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.900000000000000000e+01,3.681999999999999886e+02
32 | 1.164000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,5.200000000000000000e+01,2.800000000000000000e+02
33 | 1.935000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.200000000000000000e+01,5.268700000000000045e+02
34 | 1.216000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,7.400000000000000000e+01,2.370000000000000000e+02
35 | 2.482000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,5.624260000000000446e+02
36 | 1.200000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.800000000000000000e+01,3.698000000000000114e+02
37 | 1.840000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.000000000000000000e+01,4.600000000000000000e+02
38 | 1.851000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,5.700000000000000000e+01,3.740000000000000000e+02
39 | 1.660000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.900000000000000000e+01,3.900000000000000000e+02
40 | 1.096000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,9.700000000000000000e+01,1.580000000000000000e+02
41 | 1.775000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.800000000000000000e+01,4.260000000000000000e+02
42 | 2.030000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,4.500000000000000000e+01,3.900000000000000000e+02
43 | 1.784000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,1.070000000000000000e+02,2.777740000000000009e+02
44 | 1.073000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.000000000000000000e+02,2.169600000000000080e+02
45 | 1.552000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.600000000000000000e+01,4.258000000000000114e+02
46 | 1.953000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,5.040000000000000000e+02
47 | 1.224000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,1.200000000000000000e+01,3.290000000000000000e+02
48 | 1.616000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.600000000000000000e+01,4.640000000000000000e+02
49 | 8.160000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,5.800000000000000000e+01,2.200000000000000000e+02
50 | 1.349000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.100000000000000000e+01,3.580000000000000000e+02
51 | 1.571000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.400000000000000000e+01,4.780000000000000000e+02
52 | 1.486000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,5.700000000000000000e+01,3.340000000000000000e+02
53 | 1.506000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.600000000000000000e+01,4.269800000000000182e+02
54 | 1.097000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.700000000000000000e+01,2.900000000000000000e+02
55 | 1.764000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.400000000000000000e+01,4.630000000000000000e+02
56 | 1.208000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.400000000000000000e+01,3.908000000000000114e+02
57 | 1.470000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.400000000000000000e+01,3.540000000000000000e+02
58 | 1.768000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,8.400000000000000000e+01,3.500000000000000000e+02
59 | 1.654000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.900000000000000000e+01,4.600000000000000000e+02
60 | 1.029000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.000000000000000000e+01,2.370000000000000000e+02
61 | 1.120000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,2.883039999999999736e+02
62 | 1.150000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.200000000000000000e+01,2.820000000000000000e+02
63 | 8.160000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,3.900000000000000000e+01,2.490000000000000000e+02
64 | 1.040000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.500000000000000000e+01,3.040000000000000000e+02
65 | 1.392000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.400000000000000000e+01,3.320000000000000000e+02
66 | 1.603000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.900000000000000000e+01,3.518000000000000114e+02
67 | 1.215000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.300000000000000000e+01,3.100000000000000000e+02
68 | 1.073000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.000000000000000000e+02,2.169600000000000080e+02
69 | 2.599000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,2.200000000000000000e+01,6.663360000000000127e+02
70 | 1.431000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,5.900000000000000000e+01,3.300000000000000000e+02
71 | 2.090000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.600000000000000000e+01,4.800000000000000000e+02
72 | 1.790000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,4.900000000000000000e+01,3.303000000000000114e+02
73 | 1.484000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,3.480000000000000000e+02
74 | 1.040000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.500000000000000000e+01,3.040000000000000000e+02
75 | 1.431000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.200000000000000000e+01,3.840000000000000000e+02
76 | 1.159000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,5.300000000000000000e+01,3.160000000000000000e+02
77 | 1.547000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.200000000000000000e+01,4.303999999999999773e+02
78 | 1.983000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.200000000000000000e+01,4.500000000000000000e+02
79 | 1.056000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,5.300000000000000000e+01,2.840000000000000000e+02
80 | 1.180000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,9.900000000000000000e+01,2.750000000000000000e+02
81 | 1.358000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.700000000000000000e+01,4.140000000000000000e+02
82 | 9.600000000000000000e+02,3.000000000000000000e+00,1.000000000000000000e+00,5.100000000000000000e+01,2.580000000000000000e+02
83 | 1.456000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,3.780000000000000000e+02
84 | 1.446000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.500000000000000000e+01,3.500000000000000000e+02
85 | 1.208000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,1.500000000000000000e+01,4.120000000000000000e+02
86 | 1.553000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.600000000000000000e+01,3.730000000000000000e+02
87 | 8.820000000000000000e+02,3.000000000000000000e+00,1.000000000000000000e+00,4.900000000000000000e+01,2.250000000000000000e+02
88 | 2.030000000000000000e+03,4.000000000000000000e+00,2.000000000000000000e+00,4.500000000000000000e+01,3.900000000000000000e+02
89 | 1.040000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.200000000000000000e+01,2.673999999999999773e+02
90 | 1.616000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.600000000000000000e+01,4.640000000000000000e+02
91 | 8.030000000000000000e+02,2.000000000000000000e+00,1.000000000000000000e+00,8.000000000000000000e+01,1.740000000000000000e+02
92 | 1.430000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,2.100000000000000000e+01,3.400000000000000000e+02
93 | 1.656000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,6.100000000000000000e+01,4.300000000000000000e+02
94 | 1.541000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,1.600000000000000000e+01,4.400000000000000000e+02
95 | 9.480000000000000000e+02,3.000000000000000000e+00,1.000000000000000000e+00,5.300000000000000000e+01,2.160000000000000000e+02
96 | 1.224000000000000000e+03,2.000000000000000000e+00,2.000000000000000000e+00,1.200000000000000000e+01,3.290000000000000000e+02
97 | 1.432000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,4.300000000000000000e+01,3.880000000000000000e+02
98 | 1.660000000000000000e+03,3.000000000000000000e+00,2.000000000000000000e+00,1.900000000000000000e+01,3.900000000000000000e+02
99 | 1.212000000000000000e+03,3.000000000000000000e+00,1.000000000000000000e+00,2.000000000000000000e+01,3.560000000000000000e+02
100 | 1.050000000000000000e+03,2.000000000000000000e+00,1.000000000000000000e+00,6.500000000000000000e+01,2.578000000000000114e+02
101 |
--------------------------------------------------------------------------------
/Week2/Optional Labs/deeplearning.mplstyle:
--------------------------------------------------------------------------------
1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html
2 | lines.linewidth: 4
3 | lines.solid_capstyle: butt
4 |
5 | legend.fancybox: true
6 |
7 | # Verdana" for non-math text,
8 | # Cambria Math
9 |
10 | #Blue (Crayon-Aqua) 0096FF
11 | #Dark Red C00000
12 | #Orange (Apple Orange) FF9300
13 | #Black 000000
14 | #Magenta FF40FF
15 | #Purple 7030A0
16 |
17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000'])
18 | #axes.facecolor: f0f0f0 # grey
19 | axes.facecolor: ffffff # white
20 | axes.labelsize: large
21 | axes.axisbelow: true
22 | axes.grid: False
23 | axes.edgecolor: f0f0f0
24 | axes.linewidth: 3.0
25 | axes.titlesize: x-large
26 |
27 | patch.edgecolor: f0f0f0
28 | patch.linewidth: 0.5
29 |
30 | svg.fonttype: path
31 |
32 | grid.linestyle: -
33 | grid.linewidth: 1.0
34 | grid.color: cbcbcb
35 |
36 | xtick.major.size: 0
37 | xtick.minor.size: 0
38 | ytick.major.size: 0
39 | ytick.minor.size: 0
40 |
41 | savefig.edgecolor: f0f0f0
42 | savefig.facecolor: f0f0f0
43 |
44 | #figure.subplot.left: 0.08
45 | #figure.subplot.right: 0.95
46 | #figure.subplot.bottom: 0.07
47 |
48 | #figure.facecolor: f0f0f0 # grey
49 | figure.facecolor: ffffff # white
50 |
51 | ## ***************************************************************************
52 | ## * FONT *
53 | ## ***************************************************************************
54 | ## The font properties used by `text.Text`.
55 | ## See https://matplotlib.org/api/font_manager_api.html for more information
56 | ## on font properties. The 6 font properties used for font matching are
57 | ## given below with their default values.
58 | ##
59 | ## The font.family property can take either a concrete font name (not supported
60 | ## when rendering text with usetex), or one of the following five generic
61 | ## values:
62 | ## - 'serif' (e.g., Times),
63 | ## - 'sans-serif' (e.g., Helvetica),
64 | ## - 'cursive' (e.g., Zapf-Chancery),
65 | ## - 'fantasy' (e.g., Western), and
66 | ## - 'monospace' (e.g., Courier).
67 | ## Each of these values has a corresponding default list of font names
68 | ## (font.serif, etc.); the first available font in the list is used. Note that
69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of
70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with
71 | ## Matplotlib and is thus guaranteed to be available; the other entries are
72 | ## left as examples of other possible values.
73 | ##
74 | ## The font.style property has three values: normal (or roman), italic
75 | ## or oblique. The oblique style will be used for italic, if it is not
76 | ## present.
77 | ##
78 | ## The font.variant property has two values: normal or small-caps. For
79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent
80 | ## to using a font size of 'smaller', or about 83%% of the current font
81 | ## size.
82 | ##
83 | ## The font.weight property has effectively 13 values: normal, bold,
84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as
85 | ## 400, and bold is 700. bolder and lighter are relative values with
86 | ## respect to the current weight.
87 | ##
88 | ## The font.stretch property has 11 values: ultra-condensed,
89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded,
90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This
91 | ## property is not currently implemented.
92 | ##
93 | ## The font.size property is the default font size for text, given in points.
94 | ## 10 pt is the standard value.
95 | ##
96 | ## Note that font.size controls default text sizes. To configure
97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc
98 | ## settings for axes and ticks. Special text sizes can be defined
99 | ## relative to font.size, using the following values: xx-small, x-small,
100 | ## small, medium, large, x-large, xx-large, larger, or smaller
101 |
102 |
103 | font.family: sans-serif
104 | font.style: normal
105 | font.variant: normal
106 | font.weight: normal
107 | font.stretch: normal
108 | font.size: 12.0
109 |
110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif
111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif
112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive
113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy
114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace
115 |
116 |
117 | ## ***************************************************************************
118 | ## * TEXT *
119 | ## ***************************************************************************
120 | ## The text properties used by `text.Text`.
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text
122 | ## for more information on text properties
123 | #text.color: black
124 |
125 |
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_L1_S1_Lecture_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_L1_S1_Lecture_b.png
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_L1_S1_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_L1_S1_model.png
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_L1_S1_trainingdata.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_L1_S1_trainingdata.png
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_L1_S2_Lectureb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_L1_S2_Lectureb.png
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_L2_S1_Lecture_GD.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_L2_S1_Lecture_GD.png
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab02_GoalOfRegression.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab02_GoalOfRegression.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab03_alpha_to_big.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab03_alpha_to_big.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab03_lecture_learningrate.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab03_lecture_learningrate.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab03_lecture_slopes.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab03_lecture_slopes.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab04_Figures And animations.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab04_Figures And animations.pptx
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab04_Matrices.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab04_Matrices.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab04_Vectors.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab04_Vectors.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab04_dot_notrans.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab04_dot_notrans.gif
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_LongRun.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_LongRun.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_ShortRun.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_ShortRun.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_contours.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_contours.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_featurescalingheader.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_featurescalingheader.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_learningrate.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_learningrate.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab06_scale.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab06_scale.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/images/C1_W2_Lab07_FeatureEngLecture.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Optional Labs/images/C1_W2_Lab07_FeatureEngLecture.PNG
--------------------------------------------------------------------------------
/Week2/Optional Labs/lab_utils_common.py:
--------------------------------------------------------------------------------
1 | """
2 | lab_utils_common.py
3 | functions common to all optional labs, Course 1, Week 2
4 | """
5 |
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 |
9 | plt.style.use('./deeplearning.mplstyle')
10 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0';
11 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple]
12 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0')
13 |
14 |
15 | ##########################################################
16 | # Regression Routines
17 | ##########################################################
18 |
19 | #Function to calculate the cost
20 | def compute_cost_matrix(X, y, w, b, verbose=False):
21 | """
22 |     Computes the cost for linear regression
23 | Args:
24 | X (ndarray (m,n)): Data, m examples with n features
25 | y (ndarray (m,)) : target values
26 | w (ndarray (n,)) : model parameters
27 | b (scalar) : model parameter
28 | verbose : (Boolean) If true, print out intermediate value f_wb
29 | Returns
30 | cost: (scalar)
31 | """
32 | m = X.shape[0]
33 |
34 | # calculate f_wb for all examples.
35 | f_wb = X @ w + b
36 | # calculate cost
37 | total_cost = (1/(2*m)) * np.sum((f_wb-y)**2)
38 |
39 | if verbose: print("f_wb:")
40 | if verbose: print(f_wb)
41 |
42 | return total_cost
43 |
44 | def compute_gradient_matrix(X, y, w, b):
45 | """
46 | Computes the gradient for linear regression
47 |
48 | Args:
49 | X (ndarray (m,n)): Data, m examples with n features
50 | y (ndarray (m,)) : target values
51 | w (ndarray (n,)) : model parameters
52 | b (scalar) : model parameter
53 | Returns
54 |       dj_dw (ndarray (n,)): The gradient of the cost w.r.t. the parameters w.
55 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
56 |
57 | """
58 | m,n = X.shape
59 | f_wb = X @ w + b
60 | e = f_wb - y
61 | dj_dw = (1/m) * (X.T @ e)
62 | dj_db = (1/m) * np.sum(e)
63 |
64 | return dj_db,dj_dw
65 |
66 |
67 | # Loop version of multi-variable compute_cost
68 | def compute_cost(X, y, w, b):
69 | """
70 | compute cost
71 | Args:
72 | X (ndarray (m,n)): Data, m examples with n features
73 | y (ndarray (m,)) : target values
74 | w (ndarray (n,)) : model parameters
75 | b (scalar) : model parameter
76 | Returns
77 | cost (scalar) : cost
78 | """
79 | m = X.shape[0]
80 | cost = 0.0
81 | for i in range(m):
82 | f_wb_i = np.dot(X[i],w) + b #(n,)(n,)=scalar
83 | cost = cost + (f_wb_i - y[i])**2
84 | cost = cost/(2*m)
85 | return cost
86 |
87 | def compute_gradient(X, y, w, b):
88 | """
89 | Computes the gradient for linear regression
90 | Args:
91 | X (ndarray (m,n)): Data, m examples with n features
92 | y (ndarray (m,)) : target values
93 | w (ndarray (n,)) : model parameters
94 | b (scalar) : model parameter
95 | Returns
96 | dj_dw (ndarray Shape (n,)): The gradient of the cost w.r.t. the parameters w.
97 | dj_db (scalar): The gradient of the cost w.r.t. the parameter b.
98 | """
99 | m,n = X.shape #(number of examples, number of features)
100 | dj_dw = np.zeros((n,))
101 | dj_db = 0.
102 |
103 | for i in range(m):
104 | err = (np.dot(X[i], w) + b) - y[i]
105 | for j in range(n):
106 | dj_dw[j] = dj_dw[j] + err * X[i,j]
107 | dj_db = dj_db + err
108 | dj_dw = dj_dw/m
109 | dj_db = dj_db/m
110 |
111 | return dj_db,dj_dw
112 |
113 |
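
if __name__ == "__main__":
    # Illustrative usage sketch (an addition for demonstration, not part of the
    # course labs): combine the helpers above in a small batch gradient-descent
    # loop on toy data. X_demo, y_demo, alpha and the iteration count are
    # arbitrary demo values chosen only for this example.
    X_demo = np.array([[1.0, 2.0], [2.0, 0.5], [3.0, 1.5]])
    y_demo = np.array([3.0, 4.0, 6.5])
    w, b, alpha = np.zeros(X_demo.shape[1]), 0.0, 0.01
    for _ in range(5000):
        # gradient of the squared-error cost w.r.t. b and w
        dj_db, dj_dw = compute_gradient_matrix(X_demo, y_demo, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
    print(f"w = {w}, b = {b:0.2f}, cost = {compute_cost_matrix(X_demo, y_demo, w, b):0.4f}")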
--------------------------------------------------------------------------------
/Week2/Practice Quiz - Gradient descent in practice/Practice Quiz - Gradient descent in practice 1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Practice Quiz - Gradient descent in practice/Practice Quiz - Gradient descent in practice 1.png
--------------------------------------------------------------------------------
/Week2/Practice Quiz - Gradient descent in practice/Practice Quiz - Gradient descent in practice 2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Practice Quiz - Gradient descent in practice/Practice Quiz - Gradient descent in practice 2.png
--------------------------------------------------------------------------------
/Week2/Practice Quiz - Multiple linear regression/Practice Quiz - Multiple linear regression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/Week2/Practice Quiz - Multiple linear regression/Practice Quiz - Multiple linear regression.png
--------------------------------------------------------------------------------
/images/course.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/images/course.png
--------------------------------------------------------------------------------
/week3/C1W3A1/data/ex2data1.txt:
--------------------------------------------------------------------------------
1 | 34.62365962451697,78.0246928153624,0
2 | 30.28671076822607,43.89499752400101,0
3 | 35.84740876993872,72.90219802708364,0
4 | 60.18259938620976,86.30855209546826,1
5 | 79.0327360507101,75.3443764369103,1
6 | 45.08327747668339,56.3163717815305,0
7 | 61.10666453684766,96.51142588489624,1
8 | 75.02474556738889,46.55401354116538,1
9 | 76.09878670226257,87.42056971926803,1
10 | 84.43281996120035,43.53339331072109,1
11 | 95.86155507093572,38.22527805795094,0
12 | 75.01365838958247,30.60326323428011,0
13 | 82.30705337399482,76.48196330235604,1
14 | 69.36458875970939,97.71869196188608,1
15 | 39.53833914367223,76.03681085115882,0
16 | 53.9710521485623,89.20735013750205,1
17 | 69.07014406283025,52.74046973016765,1
18 | 67.94685547711617,46.67857410673128,0
19 | 70.66150955499435,92.92713789364831,1
20 | 76.97878372747498,47.57596364975532,1
21 | 67.37202754570876,42.83843832029179,0
22 | 89.67677575072079,65.79936592745237,1
23 | 50.534788289883,48.85581152764205,0
24 | 34.21206097786789,44.20952859866288,0
25 | 77.9240914545704,68.9723599933059,1
26 | 62.27101367004632,69.95445795447587,1
27 | 80.1901807509566,44.82162893218353,1
28 | 93.114388797442,38.80067033713209,0
29 | 61.83020602312595,50.25610789244621,0
30 | 38.78580379679423,64.99568095539578,0
31 | 61.379289447425,72.80788731317097,1
32 | 85.40451939411645,57.05198397627122,1
33 | 52.10797973193984,63.12762376881715,0
34 | 52.04540476831827,69.43286012045222,1
35 | 40.23689373545111,71.16774802184875,0
36 | 54.63510555424817,52.21388588061123,0
37 | 33.91550010906887,98.86943574220611,0
38 | 64.17698887494485,80.90806058670817,1
39 | 74.78925295941542,41.57341522824434,0
40 | 34.1836400264419,75.2377203360134,0
41 | 83.90239366249155,56.30804621605327,1
42 | 51.54772026906181,46.85629026349976,0
43 | 94.44336776917852,65.56892160559052,1
44 | 82.36875375713919,40.61825515970618,0
45 | 51.04775177128865,45.82270145776001,0
46 | 62.22267576120188,52.06099194836679,0
47 | 77.19303492601364,70.45820000180959,1
48 | 97.77159928000232,86.7278223300282,1
49 | 62.07306379667647,96.76882412413983,1
50 | 91.56497449807442,88.69629254546599,1
51 | 79.94481794066932,74.16311935043758,1
52 | 99.2725269292572,60.99903099844988,1
53 | 90.54671411399852,43.39060180650027,1
54 | 34.52451385320009,60.39634245837173,0
55 | 50.2864961189907,49.80453881323059,0
56 | 49.58667721632031,59.80895099453265,0
57 | 97.64563396007767,68.86157272420604,1
58 | 32.57720016809309,95.59854761387875,0
59 | 74.24869136721598,69.82457122657193,1
60 | 71.79646205863379,78.45356224515052,1
61 | 75.3956114656803,85.75993667331619,1
62 | 35.28611281526193,47.02051394723416,0
63 | 56.25381749711624,39.26147251058019,0
64 | 30.05882244669796,49.59297386723685,0
65 | 44.66826172480893,66.45008614558913,0
66 | 66.56089447242954,41.09209807936973,0
67 | 40.45755098375164,97.53518548909936,1
68 | 49.07256321908844,51.88321182073966,0
69 | 80.27957401466998,92.11606081344084,1
70 | 66.74671856944039,60.99139402740988,1
71 | 32.72283304060323,43.30717306430063,0
72 | 64.0393204150601,78.03168802018232,1
73 | 72.34649422579923,96.22759296761404,1
74 | 60.45788573918959,73.09499809758037,1
75 | 58.84095621726802,75.85844831279042,1
76 | 99.82785779692128,72.36925193383885,1
77 | 47.26426910848174,88.47586499559782,1
78 | 50.45815980285988,75.80985952982456,1
79 | 60.45555629271532,42.50840943572217,0
80 | 82.22666157785568,42.71987853716458,0
81 | 88.9138964166533,69.80378889835472,1
82 | 94.83450672430196,45.69430680250754,1
83 | 67.31925746917527,66.58935317747915,1
84 | 57.23870631569862,59.51428198012956,1
85 | 80.36675600171273,90.96014789746954,1
86 | 68.46852178591112,85.59430710452014,1
87 | 42.0754545384731,78.84478600148043,0
88 | 75.47770200533905,90.42453899753964,1
89 | 78.63542434898018,96.64742716885644,1
90 | 52.34800398794107,60.76950525602592,0
91 | 94.09433112516793,77.15910509073893,1
92 | 90.44855097096364,87.50879176484702,1
93 | 55.48216114069585,35.57070347228866,0
94 | 74.49269241843041,84.84513684930135,1
95 | 89.84580670720979,45.35828361091658,1
96 | 83.48916274498238,48.38028579728175,1
97 | 42.2617008099817,87.10385094025457,1
98 | 99.31500880510394,68.77540947206617,1
99 | 55.34001756003703,64.9319380069486,1
100 | 74.77589300092767,89.52981289513276,1
101 |
--------------------------------------------------------------------------------
/week3/C1W3A1/data/ex2data2.txt:
--------------------------------------------------------------------------------
1 | 0.051267,0.69956,1
2 | -0.092742,0.68494,1
3 | -0.21371,0.69225,1
4 | -0.375,0.50219,1
5 | -0.51325,0.46564,1
6 | -0.52477,0.2098,1
7 | -0.39804,0.034357,1
8 | -0.30588,-0.19225,1
9 | 0.016705,-0.40424,1
10 | 0.13191,-0.51389,1
11 | 0.38537,-0.56506,1
12 | 0.52938,-0.5212,1
13 | 0.63882,-0.24342,1
14 | 0.73675,-0.18494,1
15 | 0.54666,0.48757,1
16 | 0.322,0.5826,1
17 | 0.16647,0.53874,1
18 | -0.046659,0.81652,1
19 | -0.17339,0.69956,1
20 | -0.47869,0.63377,1
21 | -0.60541,0.59722,1
22 | -0.62846,0.33406,1
23 | -0.59389,0.005117,1
24 | -0.42108,-0.27266,1
25 | -0.11578,-0.39693,1
26 | 0.20104,-0.60161,1
27 | 0.46601,-0.53582,1
28 | 0.67339,-0.53582,1
29 | -0.13882,0.54605,1
30 | -0.29435,0.77997,1
31 | -0.26555,0.96272,1
32 | -0.16187,0.8019,1
33 | -0.17339,0.64839,1
34 | -0.28283,0.47295,1
35 | -0.36348,0.31213,1
36 | -0.30012,0.027047,1
37 | -0.23675,-0.21418,1
38 | -0.06394,-0.18494,1
39 | 0.062788,-0.16301,1
40 | 0.22984,-0.41155,1
41 | 0.2932,-0.2288,1
42 | 0.48329,-0.18494,1
43 | 0.64459,-0.14108,1
44 | 0.46025,0.012427,1
45 | 0.6273,0.15863,1
46 | 0.57546,0.26827,1
47 | 0.72523,0.44371,1
48 | 0.22408,0.52412,1
49 | 0.44297,0.67032,1
50 | 0.322,0.69225,1
51 | 0.13767,0.57529,1
52 | -0.0063364,0.39985,1
53 | -0.092742,0.55336,1
54 | -0.20795,0.35599,1
55 | -0.20795,0.17325,1
56 | -0.43836,0.21711,1
57 | -0.21947,-0.016813,1
58 | -0.13882,-0.27266,1
59 | 0.18376,0.93348,0
60 | 0.22408,0.77997,0
61 | 0.29896,0.61915,0
62 | 0.50634,0.75804,0
63 | 0.61578,0.7288,0
64 | 0.60426,0.59722,0
65 | 0.76555,0.50219,0
66 | 0.92684,0.3633,0
67 | 0.82316,0.27558,0
68 | 0.96141,0.085526,0
69 | 0.93836,0.012427,0
70 | 0.86348,-0.082602,0
71 | 0.89804,-0.20687,0
72 | 0.85196,-0.36769,0
73 | 0.82892,-0.5212,0
74 | 0.79435,-0.55775,0
75 | 0.59274,-0.7405,0
76 | 0.51786,-0.5943,0
77 | 0.46601,-0.41886,0
78 | 0.35081,-0.57968,0
79 | 0.28744,-0.76974,0
80 | 0.085829,-0.75512,0
81 | 0.14919,-0.57968,0
82 | -0.13306,-0.4481,0
83 | -0.40956,-0.41155,0
84 | -0.39228,-0.25804,0
85 | -0.74366,-0.25804,0
86 | -0.69758,0.041667,0
87 | -0.75518,0.2902,0
88 | -0.69758,0.68494,0
89 | -0.4038,0.70687,0
90 | -0.38076,0.91886,0
91 | -0.50749,0.90424,0
92 | -0.54781,0.70687,0
93 | 0.10311,0.77997,0
94 | 0.057028,0.91886,0
95 | -0.10426,0.99196,0
96 | -0.081221,1.1089,0
97 | 0.28744,1.087,0
98 | 0.39689,0.82383,0
99 | 0.63882,0.88962,0
100 | 0.82316,0.66301,0
101 | 0.67339,0.64108,0
102 | 1.0709,0.10015,0
103 | -0.046659,-0.57968,0
104 | -0.23675,-0.63816,0
105 | -0.15035,-0.36769,0
106 | -0.49021,-0.3019,0
107 | -0.46717,-0.13377,0
108 | -0.28859,-0.060673,0
109 | -0.61118,-0.067982,0
110 | -0.66302,-0.21418,0
111 | -0.59965,-0.41886,0
112 | -0.72638,-0.082602,0
113 | -0.83007,0.31213,0
114 | -0.72062,0.53874,0
115 | -0.59389,0.49488,0
116 | -0.48445,0.99927,0
117 | -0.0063364,0.99927,0
118 | 0.63265,-0.030612,0
119 |
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 1.png
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 2.png
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 3.png
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 4.png
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 5.png
--------------------------------------------------------------------------------
/week3/C1W3A1/images/figure 6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/C1W3A1/images/figure 6.png
--------------------------------------------------------------------------------
/week3/C1W3A1/public_tests.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 |
4 | def sigmoid_test(target):
5 | assert np.isclose(target(3.0), 0.9525741268224334), "Failed for scalar input"
6 | assert np.allclose(target(np.array([2.5, 0])), [0.92414182, 0.5]), "Failed for 1D array"
7 | assert np.allclose(target(np.array([[2.5, -2.5], [0, 1]])),
8 | [[0.92414182, 0.07585818], [0.5, 0.73105858]]), "Failed for 2D array"
9 | print('\033[92mAll tests passed!')
10 |
11 | def compute_cost_test(target):
12 | X = np.array([[0, 0, 0, 0]]).T
13 | y = np.array([0, 0, 0, 0])
14 | w = np.array([0])
15 | b = 1
16 | result = target(X, y, w, b)
17 | if math.isinf(result):
18 | raise ValueError("Did you get the sigmoid of z_wb?")
19 |
20 | np.random.seed(17)
21 | X = np.random.randn(5, 2)
22 | y = np.array([1, 0, 0, 1, 1])
23 | w = np.random.randn(2)
24 | b = 0
25 | result = target(X, y, w, b)
26 | assert np.isclose(result, 2.15510667), f"Wrong output. Expected: {2.15510667} got: {result}"
27 |
28 | X = np.random.randn(4, 3)
29 | y = np.array([1, 1, 0, 0])
30 | w = np.random.randn(3)
31 | b = 0
32 |
33 | result = target(X, y, w, b)
34 | assert np.isclose(result, 0.80709376), f"Wrong output. Expected: {0.80709376} got: {result}"
35 |
36 | X = np.random.randn(4, 3)
37 | y = np.array([1, 0,1, 0])
38 | w = np.random.randn(3)
39 | b = 3
40 | result = target(X, y, w, b)
41 |     assert np.isclose(result, 0.4529660647), f"Wrong output. Expected: {0.4529660647} got: {result}. Did you initialize z_wb = b?"
42 |
43 | print('\033[92mAll tests passed!')
44 |
45 | def compute_gradient_test(target):
46 | np.random.seed(1)
47 | X = np.random.randn(7, 3)
48 | y = np.array([1, 0, 1, 0, 1, 1, 0])
49 | test_w = np.array([1, 0.5, -0.35])
50 | test_b = 1.7
51 | dj_db, dj_dw = target(X, y, test_w, test_b)
52 |
53 | assert np.isclose(dj_db, 0.28936094), f"Wrong value for dj_db. Expected: {0.28936094} got: {dj_db}"
54 | assert dj_dw.shape == test_w.shape, f"Wrong shape for dj_dw. Expected: {test_w.shape} got: {dj_dw.shape}"
55 | assert np.allclose(dj_dw, [-0.11999166, 0.41498775, -0.71968405]), f"Wrong values for dj_dw. Got: {dj_dw}"
56 |
57 | print('\033[92mAll tests passed!')
58 |
59 | def predict_test(target):
60 | np.random.seed(5)
61 | b = 0.5
62 | w = np.random.randn(3)
63 | X = np.random.randn(8, 3)
64 |
65 | result = target(X, w, b)
66 | wrong_1 = [1., 1., 0., 0., 1., 0., 0., 1.]
67 | expected_1 = [1., 1., 1., 0., 1., 0., 0., 1.]
68 | if np.allclose(result, wrong_1):
69 | raise ValueError("Did you apply the sigmoid before applying the threshold?")
70 | assert result.shape == (len(X),), f"Wrong length. Expected : {(len(X),)} got: {result.shape}"
71 | assert np.allclose(result, expected_1), f"Wrong output: Expected : {expected_1} got: {result}"
72 |
73 | b = -1.7
74 | w = np.random.randn(4) + 0.6
75 | X = np.random.randn(6, 4)
76 |
77 | result = target(X, w, b)
78 | expected_2 = [0., 0., 0., 1., 1., 0.]
79 | assert result.shape == (len(X),), f"Wrong length. Expected : {(len(X),)} got: {result.shape}"
80 | assert np.allclose(result,expected_2), f"Wrong output: Expected : {expected_2} got: {result}"
81 |
82 | print('\033[92mAll tests passed!')
83 |
84 | def compute_cost_reg_test(target):
85 | np.random.seed(1)
86 | w = np.random.randn(3)
87 | b = 0.4
88 | X = np.random.randn(6, 3)
89 | y = np.array([0, 1, 1, 0, 1, 1])
90 | lambda_ = 0.1
91 | expected_output = target(X, y, w, b, lambda_)
92 |
93 | assert np.isclose(expected_output, 0.5469746792761936), f"Wrong output. Expected: {0.5469746792761936} got:{expected_output}"
94 |
95 | w = np.random.randn(5)
96 | b = -0.6
97 | X = np.random.randn(8, 5)
98 | y = np.array([1, 0, 1, 0, 0, 1, 0, 1])
99 | lambda_ = 0.01
100 | output = target(X, y, w, b, lambda_)
101 | assert np.isclose(output, 1.2608591964119995), f"Wrong output. Expected: {1.2608591964119995} got:{output}"
102 |
103 | w = np.array([2, 2, 2, 2, 2])
104 | b = 0
105 | X = np.zeros((8, 5))
106 | y = np.array([0.5] * 8)
107 | lambda_ = 3
108 | output = target(X, y, w, b, lambda_)
109 | expected = -np.log(0.5) + 3. / (2. * 8.) * 20.
110 | assert np.isclose(output, expected), f"Wrong output. Expected: {expected} got:{output}"
111 |
112 | print('\033[92mAll tests passed!')
113 |
114 | def compute_gradient_reg_test(target):
115 | np.random.seed(1)
116 | w = np.random.randn(5)
117 | b = 0.2
118 | X = np.random.randn(7, 5)
119 | y = np.array([0, 1, 1, 0, 1, 1, 0])
120 | lambda_ = 0.1
121 | expected1 = (-0.1506447567869257, np.array([ 0.19530838, -0.00632206, 0.19687367, 0.15741161, 0.02791437]))
122 | dj_db, dj_dw = target(X, y, w, b, lambda_)
123 |
124 | assert np.isclose(dj_db, expected1[0]), f"Wrong dj_db. Expected: {expected1[0]} got: {dj_db}"
125 | assert np.allclose(dj_dw, expected1[1]), f"Wrong dj_dw. Expected: {expected1[1]} got: {dj_dw}"
126 |
127 |
128 | w = np.random.randn(7)
129 | b = 0
130 | X = np.random.randn(7, 7)
131 | y = np.array([1, 0, 0, 0, 1, 1, 0])
132 | lambda_ = 0
133 | expected2 = (0.02660329857573818, np.array([ 0.23567643, -0.06921029, -0.19705212, -0.0002884 , 0.06490588,
134 | 0.26948175, 0.10777992]))
135 | dj_db, dj_dw = target(X, y, w, b, lambda_)
136 | assert np.isclose(dj_db, expected2[0]), f"Wrong dj_db. Expected: {expected2[0]} got: {dj_db}"
137 | assert np.allclose(dj_dw, expected2[1]), f"Wrong dj_dw. Expected: {expected2[1]} got: {dj_dw}"
138 |
139 | print('\033[92mAll tests passed!')
140 |
--------------------------------------------------------------------------------
/week3/C1W3A1/test_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from copy import deepcopy
3 |
4 |
5 | def datatype_check(expected_output, target_output, error):
6 | success = 0
7 | if isinstance(target_output, dict):
8 | for key in target_output.keys():
9 | try:
10 | success += datatype_check(expected_output[key],
11 | target_output[key], error)
12 | except:
13 | print("Error: {} in variable {}. Got {} but expected type {}".format(error,
14 | key,
15 | type(
16 | target_output[key]),
17 | type(expected_output[key])))
18 | if success == len(target_output.keys()):
19 | return 1
20 | else:
21 | return 0
22 | elif isinstance(target_output, tuple) or isinstance(target_output, list):
23 | for i in range(len(target_output)):
24 | try:
25 | success += datatype_check(expected_output[i],
26 | target_output[i], error)
27 | except:
28 |                 print("Error: {} in variable {}. Got type {} but expected type {}".format(error,
29 | i,
30 | type(
31 | target_output[i]),
32 | type(expected_output[i]
33 | )))
34 | if success == len(target_output):
35 | return 1
36 | else:
37 | return 0
38 |
39 | else:
40 | assert isinstance(target_output, type(expected_output))
41 | return 1
42 |
43 |
44 | def equation_output_check(expected_output, target_output, error):
45 | success = 0
46 | if isinstance(target_output, dict):
47 | for key in target_output.keys():
48 | try:
49 | success += equation_output_check(expected_output[key],
50 | target_output[key], error)
51 | except:
52 | print("Error: {} for variable {}.".format(error,
53 | key))
54 | if success == len(target_output.keys()):
55 | return 1
56 | else:
57 | return 0
58 | elif isinstance(target_output, tuple) or isinstance(target_output, list):
59 | for i in range(len(target_output)):
60 | try:
61 | success += equation_output_check(expected_output[i],
62 | target_output[i], error)
63 | except:
64 | print("Error: {} for variable in position {}.".format(error, i))
65 | if success == len(target_output):
66 | return 1
67 | else:
68 | return 0
69 |
70 | else:
71 | if hasattr(target_output, 'shape'):
72 | np.testing.assert_array_almost_equal(
73 | target_output, expected_output)
74 | else:
75 | assert target_output == expected_output
76 | return 1
77 |
78 |
79 | def shape_check(expected_output, target_output, error):
80 | success = 0
81 | if isinstance(target_output, dict):
82 | for key in target_output.keys():
83 | try:
84 | success += shape_check(expected_output[key],
85 | target_output[key], error)
86 | except:
87 | print("Error: {} for variable {}.".format(error, key))
88 | if success == len(target_output.keys()):
89 | return 1
90 | else:
91 | return 0
92 | elif isinstance(target_output, tuple) or isinstance(target_output, list):
93 | for i in range(len(target_output)):
94 | try:
95 | success += shape_check(expected_output[i],
96 | target_output[i], error)
97 | except:
98 | print("Error: {} for variable {}.".format(error, i))
99 | if success == len(target_output):
100 | return 1
101 | else:
102 | return 0
103 |
104 | else:
105 | if hasattr(target_output, 'shape'):
106 | assert target_output.shape == expected_output.shape
107 | return 1
108 |
109 |
110 | def single_test(test_cases, target):
111 | success = 0
112 | for test_case in test_cases:
113 | try:
114 | if test_case['name'] == "datatype_check":
115 | assert isinstance(target(*test_case['input']),
116 | type(test_case["expected"]))
117 | success += 1
118 | if test_case['name'] == "equation_output_check":
119 | assert np.allclose(test_case["expected"],
120 | target(*test_case['input']))
121 | success += 1
122 | if test_case['name'] == "shape_check":
123 | assert test_case['expected'].shape == target(
124 | *test_case['input']).shape
125 | success += 1
126 | except:
127 | print("Error: " + test_case['error'])
128 |
129 | if success == len(test_cases):
130 | print("\033[92m All tests passed.")
131 | else:
132 | print('\033[92m', success, " Tests passed")
133 | print('\033[91m', len(test_cases) - success, " Tests failed")
134 | raise AssertionError(
135 | "Not all tests were passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__))
136 |
137 |
138 | def multiple_test(test_cases, target):
139 | success = 0
140 | for test_case in test_cases:
141 | try:
142 | test_input = deepcopy(test_case['input'])
143 | target_answer = target(*test_input)
144 | if test_case['name'] == "datatype_check":
145 | success += datatype_check(test_case['expected'],
146 | target_answer, test_case['error'])
147 | if test_case['name'] == "equation_output_check":
148 | success += equation_output_check(
149 | test_case['expected'], target_answer, test_case['error'])
150 | if test_case['name'] == "shape_check":
151 | success += shape_check(test_case['expected'],
152 | target_answer, test_case['error'])
153 | except:
154 | print('\33[30m', "Error: " + test_case['error'])
155 |
156 | if success == len(test_cases):
157 | print("\033[92m All tests passed.")
158 | else:
159 | print('\033[92m', success, " Tests passed")
160 | print('\033[91m', len(test_cases) - success, " Tests failed")
161 | raise AssertionError(
162 | "Not all tests passed for {}. Check your equations and avoid using global variables inside the function.".format(target.__name__))
163 |
164 |
--------------------------------------------------------------------------------
/week3/C1W3A1/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 |
4 | def load_data(filename):
5 | data = np.loadtxt(filename, delimiter=',')
6 | X = data[:,:2]
7 | y = data[:,2]
8 | return X, y
9 |
10 | def sig(z):
11 |
12 | return 1/(1+np.exp(-z))
13 |
14 | def map_feature(X1, X2):
15 | """
16 | Feature mapping function to polynomial features
17 | """
18 | X1 = np.atleast_1d(X1)
19 | X2 = np.atleast_1d(X2)
20 | degree = 6
21 | out = []
22 | for i in range(1, degree+1):
23 | for j in range(i + 1):
24 | out.append((X1**(i-j) * (X2**j)))
25 | return np.stack(out, axis=1)
26 |
27 |
28 | def plot_data(X, y, pos_label="y=1", neg_label="y=0"):
29 | positive = y == 1
30 | negative = y == 0
31 |
32 | # Plot examples
33 | plt.plot(X[positive, 0], X[positive, 1], 'k+', label=pos_label)
34 | plt.plot(X[negative, 0], X[negative, 1], 'yo', label=neg_label)
35 |
36 |
37 | def plot_decision_boundary(w, b, X, y):
38 | # Credit to dibgerge on Github for this plotting code
39 |
40 | plot_data(X[:, 0:2], y)
41 |
42 | if X.shape[1] <= 2:
43 | plot_x = np.array([min(X[:, 0]), max(X[:, 0])])
44 | plot_y = (-1. / w[1]) * (w[0] * plot_x + b)
45 |
46 | plt.plot(plot_x, plot_y, c="b")
47 |
48 | else:
49 | u = np.linspace(-1, 1.5, 50)
50 | v = np.linspace(-1, 1.5, 50)
51 |
52 | z = np.zeros((len(u), len(v)))
53 |
54 | # Evaluate sigmoid(w . map_feature(u, v) + b) over the grid
55 | for i in range(len(u)):
56 | for j in range(len(v)):
57 | z[i,j] = sig(np.dot(map_feature(u[i], v[j]), w) + b)
58 |
59 | # important to transpose z before calling contour
60 | z = z.T
61 |
62 | # Plot the decision boundary (the sigmoid = 0.5 contour, i.e. z = 0)
63 | plt.contour(u,v,z, levels = [0.5], colors="g")
64 |
65 |
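
For orientation, a short usage sketch of the helpers above: `map_feature` expands the two raw features into 27 polynomial terms (degree 6), and `plot_decision_boundary` then contours the fitted sigmoid at 0.5 over that mapped space. The data path and the parameter values below are placeholders, not results from the assignment.

import numpy as np
import matplotlib.pyplot as plt

X, y = load_data('data/ex2data2.txt')      # assumed path: two feature columns plus a 0/1 label
X_mapped = map_feature(X[:, 0], X[:, 1])   # (m, 27): all terms x1^(i-j) * x2^j for i = 1..6

rng = np.random.default_rng(0)
w_example = rng.standard_normal(X_mapped.shape[1]) * 0.01   # placeholder weights
b_example = 0.0                                             # placeholder bias

plot_decision_boundary(w_example, b_example, X_mapped, y)
plt.show()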
--------------------------------------------------------------------------------
/week3/Optional Labs/C1_W3_Lab07_Scikit_Learn_Soln.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "pycharm": {
7 | "name": "#%% md\n"
8 | }
9 | },
10 | "source": [
11 | "# Ungraded Lab: Logistic Regression using Scikit-Learn\n",
12 | "\n",
13 | "\n"
14 | ]
15 | },
16 | {
17 | "cell_type": "markdown",
18 | "metadata": {
19 | "pycharm": {
20 | "name": "#%% md\n"
21 | }
22 | },
23 | "source": [
24 | "## Goals\n",
25 | "In this lab you will:\n",
26 | "- Train a logistic regression model using scikit-learn.\n"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "metadata": {
32 | "pycharm": {
33 | "name": "#%% md\n"
34 | }
35 | },
36 | "source": [
37 | "## Dataset \n",
38 | "Let's start with the same dataset as before."
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 1,
44 | "metadata": {
45 | "pycharm": {
46 | "name": "#%%\n"
47 | }
48 | },
49 | "outputs": [],
50 | "source": [
51 | "import numpy as np\n",
52 | "\n",
53 | "X = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])\n",
54 | "y = np.array([0, 0, 0, 1, 1, 1])"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {
60 | "pycharm": {
61 | "name": "#%% md\n"
62 | }
63 | },
64 | "source": [
65 | "## Fit the model\n",
66 | "\n",
67 | "The code below imports the [logistic regression model](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html#sklearn.linear_model.LogisticRegression) from scikit-learn. You can fit this model on the training data by calling the `fit` function."
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": 2,
73 | "metadata": {
74 | "pycharm": {
75 | "name": "#%%\n"
76 | }
77 | },
78 | "outputs": [
79 | {
80 | "data": {
81 | "text/plain": "LogisticRegression()"
82 | },
83 | "execution_count": 2,
84 | "metadata": {},
85 | "output_type": "execute_result"
86 | }
87 | ],
88 | "source": [
89 | "from sklearn.linear_model import LogisticRegression\n",
90 | "\n",
91 | "lr_model = LogisticRegression()\n",
92 | "lr_model.fit(X, y)"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {
98 | "pycharm": {
99 | "name": "#%% md\n"
100 | }
101 | },
102 | "source": [
103 | "## Make Predictions\n",
104 | "\n",
105 | "You can see the predictions made by this model by calling the `predict` function."
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 3,
111 | "metadata": {
112 | "pycharm": {
113 | "name": "#%%\n"
114 | }
115 | },
116 | "outputs": [
117 | {
118 | "name": "stdout",
119 | "output_type": "stream",
120 | "text": [
121 | "Prediction on training set: [0 0 0 1 1 1]\n"
122 | ]
123 | }
124 | ],
125 | "source": [
126 | "y_pred = lr_model.predict(X)\n",
127 | "\n",
128 | "print(\"Prediction on training set:\", y_pred)"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "metadata": {
134 | "pycharm": {
135 | "name": "#%% md\n"
136 | }
137 | },
138 | "source": [
139 | "## Calculate accuracy\n",
140 | "\n",
141 | "You can calculate the accuracy of this model by calling the `score` function."
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": 4,
147 | "metadata": {
148 | "pycharm": {
149 | "name": "#%%\n"
150 | }
151 | },
152 | "outputs": [
153 | {
154 | "name": "stdout",
155 | "output_type": "stream",
156 | "text": [
157 | "Accuracy on training set: 1.0\n"
158 | ]
159 | }
160 | ],
161 | "source": [
162 | "print(\"Accuracy on training set:\", lr_model.score(X, y))"
163 | ]
164 | }
165 | ],
166 | "metadata": {
167 | "kernelspec": {
168 | "display_name": "Python 3",
169 | "language": "python",
170 | "name": "python3"
171 | },
172 | "language_info": {
173 | "codemirror_mode": {
174 | "name": "ipython",
175 | "version": 3
176 | },
177 | "file_extension": ".py",
178 | "mimetype": "text/x-python",
179 | "name": "python",
180 | "nbconvert_exporter": "python",
181 | "pygments_lexer": "ipython3",
182 | "version": "3.8.10"
183 | }
184 | },
185 | "nbformat": 4,
186 | "nbformat_minor": 5
187 | }
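
As a small follow-on to the notebook above (not part of the lab itself), the fitted parameters and class probabilities can be read back with scikit-learn's standard `coef_`, `intercept_` and `predict_proba` attributes; a sketch on the same toy dataset:

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.5, 1.5], [1, 1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])
y = np.array([0, 0, 0, 1, 1, 1])

lr_model = LogisticRegression().fit(X, y)

print("w:", lr_model.coef_)        # learned weights, shape (1, 2) for this binary problem
print("b:", lr_model.intercept_)   # learned bias, shape (1,)
print("P(y=1):", lr_model.predict_proba(X)[:, 1])   # predicted probability of the positive class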
--------------------------------------------------------------------------------
/week3/Optional Labs/archive/.ipynb_checkpoints/C1_W3_Lab05_Cost_Function_Soln-Copy1-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Optional Lab: Cost Function for Logistic Regression\n",
8 | "\n",
9 | "## Goals\n",
10 | "In this lab, you will:\n",
11 | "- examine the implementation and utilize the cost function for logistic regression."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import numpy as np\n",
21 | "%matplotlib widget\n",
22 | "import matplotlib.pyplot as plt\n",
23 | "from lab_utils_common import plot_data, sigmoid, dlc\n",
24 | "plt.style.use('./deeplearning.mplstyle')"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "## Dataset \n",
32 | "Let's start with the same dataset as was used in the decision boundary lab."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "tags": []
40 | },
41 | "outputs": [],
42 | "source": [
43 | "X_train = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]]) #(m,n)\n",
44 | "y_train = np.array([0, 0, 0, 1, 1, 1]) #(m,)"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "We will use a helper function to plot this data. The data points with label $y=1$ are shown as red crosses, while the data points with label $y=0$ are shown as blue circles."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "fig,ax = plt.subplots(1,1,figsize=(4,4))\n",
61 | "plot_data(X_train, y_train, ax)\n",
62 | "\n",
63 | "# Set both axes to be from 0-4\n",
64 | "ax.axis([0, 4, 0, 3.5])\n",
65 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
66 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
67 | "plt.show()"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "## Cost function\n",
75 | "\n",
76 | "In a previous lab, you developed the *logistic loss* function. Recall, loss is defined to apply to one example. Here you combine the losses to form the **cost**, which includes all the examples.\n",
77 | "\n",
78 | "\n",
79 | "Recall that for logistic regression, the cost function is of the form \n",
80 | "\n",
81 | "$$ J(\\mathbf{w},b) = \\frac{1}{m} \\sum_{i=0}^{m-1} \\left[ loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) \\right] \\tag{1}$$\n",
82 | "\n",
83 | "where\n",
84 | "* $loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)})$ is the cost for a single data point, which is:\n",
85 | "\n",
86 | " $$loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) = -y^{(i)} \\log\\left(f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) - \\left( 1 - y^{(i)}\\right) \\log \\left( 1 - f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) \\tag{2}$$\n",
87 | " \n",
88 | "* where m is the number of training examples in the data set and:\n",
89 | "$$\n",
90 | "\\begin{align}\n",
91 | " f_{\\mathbf{w},b}(\\mathbf{x^{(i)}}) &= g(z^{(i)})\\tag{3} \\\\\n",
92 | " z^{(i)} &= \\mathbf{w} \\cdot \\mathbf{x}^{(i)}+ b\\tag{4} \\\\\n",
93 | " g(z^{(i)}) &= \\frac{1}{1+e^{-z^{(i)}}}\\tag{5} \n",
94 | "\\end{align}\n",
95 | "$$\n",
96 | " "
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "\n",
104 | "#### Code Description\n",
105 | "\n",
106 | "The algorithm for `compute_cost_logistic` loops over all the examples, calculating the loss for each example and summing the results.\n",
107 | "\n",
108 | "Note that the variables X and y are not scalar values but matrices of shape ($m, n$) and ($𝑚$,) respectively, where $𝑛$ is the number of features and $𝑚$ is the number of training examples.\n"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "def compute_cost_logistic(X, y, w, b):\n",
118 | " \"\"\"\n",
119 | " Computes cost\n",
120 | "\n",
121 | " Args:\n",
122 | " X (ndarray (m,n)): Data, m examples with n features\n",
123 | " y (ndarray (m,)) : target values\n",
124 | " w (ndarray (n,)) : model parameters \n",
125 | " b (scalar) : model parameter\n",
126 | " \n",
127 | " Returns:\n",
128 | " cost (scalar): cost\n",
129 | " \"\"\"\n",
130 | "\n",
131 | " m = X.shape[0]\n",
132 | " cost = 0.0\n",
133 | " for i in range(m):\n",
134 | " z_i = np.dot(X[i],w) + b\n",
135 | " f_wb_i = sigmoid(z_i)\n",
136 | " cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)\n",
137 | " \n",
138 | " cost = cost / m\n",
139 | " return cost\n"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "Check the implementation of the cost function using the cell below."
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "w_tmp = np.array([1,1])\n",
156 | "b_tmp = -3\n",
157 | "print(compute_cost_logistic(X_train, y_train, w_tmp, b_tmp))"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "**Expected output**: 0.3668667864055175"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "## Example\n",
172 | "Now, let's see what the cost function output is for a different value of $w$. \n",
173 | "\n",
174 | "* In a previous lab, you plotted the decision boundary for $b = -3, w_0 = 1, w_1 = 1$. That is, you had `w = np.array([-3,1,1])`.\n",
175 | "\n",
176 | "* Let's say you want to see if $b = -4, w_0 = 1, w_1 = 1$, or `w = np.array([-4,1,1])` provides a better model.\n",
177 | "\n",
178 | "Let's first plot the decision boundary for these two different $b$ values to see which one fits the data better.\n",
179 | "\n",
180 | "* For $b = -3, w_0 = 1, w_1 = 1$, we'll plot $-3 + x_0+x_1 = 0$ (shown in blue)\n",
181 | "* For $b = -4, w_0 = 1, w_1 = 1$, we'll plot $-4 + x_0+x_1 = 0$ (shown in magenta)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "import matplotlib.pyplot as plt\n",
191 | "\n",
192 | "# Choose values between 0 and 6\n",
193 | "x0 = np.arange(0,6)\n",
194 | "\n",
195 | "# Plot the two decision boundaries\n",
196 | "x1 = 3 - x0\n",
197 | "x1_other = 4 - x0\n",
198 | "\n",
199 | "fig,ax = plt.subplots(1, 1, figsize=(4,4))\n",
200 | "# Plot the decision boundary\n",
201 | "ax.plot(x0,x1, c=dlc[\"dlblue\"], label=\"$b$=-3\")\n",
202 | "ax.plot(x0,x1_other, c=dlc[\"dlmagenta\"], label=\"$b$=-4\")\n",
203 | "ax.axis([0, 4, 0, 4])\n",
204 | "\n",
205 | "# Plot the original data\n",
206 | "plot_data(X_train,y_train,ax)\n",
207 | "ax.axis([0, 4, 0, 4])\n",
208 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
209 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
210 | "plt.legend(loc=\"upper right\")\n",
211 | "plt.title(\"Decision Boundary\")\n",
212 | "plt.show()"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "You can see from this plot that `w = np.array([-4,1,1])` is a worse model for the training data. Let's see if the cost function implementation reflects this."
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "w_array1 = np.array([1,1])\n",
229 | "b_1 = -3\n",
230 | "w_array2 = np.array([1,1])\n",
231 | "b_2 = -4\n",
232 | "\n",
233 | "print(\"Cost for b = -3 : \", compute_cost_logistic(X_train, y_train, w_array1, b_1))\n",
234 | "print(\"Cost for b = -4 : \", compute_cost_logistic(X_train, y_train, w_array2, b_2))"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "**Expected output**\n",
242 | "\n",
243 | "Cost for b = -3 : 0.3668667864055175\n",
244 | "\n",
245 | "Cost for b = -4 : 0.5036808636748461\n",
246 | "\n",
247 | "\n",
248 | "You can see the cost function behaves as expected and the cost for `w = np.array([-4,1,1])` is indeed higher than the cost for `w = np.array([-3,1,1])`"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "## Congratulations!\n",
256 | "In this lab you examined and utilized the cost function for logistic regression."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": []
265 | }
266 | ],
267 | "metadata": {
268 | "kernelspec": {
269 | "display_name": "Python 3",
270 | "language": "python",
271 | "name": "python3"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 3
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython3",
283 | "version": "3.8.10"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 5
288 | }
289 |
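
The per-example loop in `compute_cost_logistic` above can also be written in vectorized form. The sketch below is an editorial illustration rather than part of the lab files; it defines its own `sigmoid` so it is self-contained, and it reproduces the lab's expected cost of 0.3668667864055175 for `w = [1, 1]`, `b = -3`:

import numpy as np

def sigmoid(z):
    # local stand-in for lab_utils_common.sigmoid so the snippet runs on its own
    return 1 / (1 + np.exp(-z))

def compute_cost_logistic_vec(X, y, w, b):
    f_wb = sigmoid(X @ w + b)                              # (m,) predictions
    loss = -y * np.log(f_wb) - (1 - y) * np.log(1 - f_wb)  # per-example logistic loss
    return loss.mean()

X_train = np.array([[0.5, 1.5], [1, 1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])
y_train = np.array([0, 0, 0, 1, 1, 1])
print(compute_cost_logistic_vec(X_train, y_train, np.array([1, 1]), -3))  # ~0.36687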
--------------------------------------------------------------------------------
/week3/Optional Labs/archive/.ipynb_checkpoints/C1_W3_Lab05_Cost_Function_Soln-Copy2-checkpoint.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Optional Lab: Cost Function for Logistic Regression\n",
8 | "\n",
9 | "## Goals\n",
10 | "In this lab, you will:\n",
11 | "- examine the implementation and utilize the cost function for logistic regression."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import numpy as np\n",
21 | "%matplotlib widget\n",
22 | "import matplotlib.pyplot as plt\n",
23 | "from lab_utils_common import plot_data, sigmoid, dlc\n",
24 | "plt.style.use('./deeplearning.mplstyle')"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "## Dataset \n",
32 | "Let's start with the same dataset as was used in the decision boundary lab."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "tags": []
40 | },
41 | "outputs": [],
42 | "source": [
43 | "X_train = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]]) #(m,n)\n",
44 | "y_train = np.array([0, 0, 0, 1, 1, 1]) #(m,)"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "We will use a helper function to plot this data. The data points with label $y=1$ are shown as red crosses, while the data points with label $y=0$ are shown as blue circles."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "fig,ax = plt.subplots(1,1,figsize=(4,4))\n",
61 | "plot_data(X_train, y_train, ax)\n",
62 | "\n",
63 | "# Set both axes to be from 0-4\n",
64 | "ax.axis([0, 4, 0, 3.5])\n",
65 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
66 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
67 | "plt.show()"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "## Cost function\n",
75 | "\n",
76 | "In a previous lab, you developed the *logistic loss* function. Recall, loss is defined to apply to one example. Here you combine the losses to form the **cost**, which includes all the examples.\n",
77 | "\n",
78 | "\n",
79 | "Recall that for logistic regression, the cost function is of the form \n",
80 | "\n",
81 | "$$ J(\\mathbf{w},b) = \\frac{1}{m} \\sum_{i=0}^{m-1} \\left[ loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) \\right] \\tag{1}$$\n",
82 | "\n",
83 | "where\n",
84 | "* $loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)})$ is the cost for a single data point, which is:\n",
85 | "\n",
86 | " $$loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) = -y^{(i)} \\log\\left(f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) - \\left( 1 - y^{(i)}\\right) \\log \\left( 1 - f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) \\tag{2}$$\n",
87 | " \n",
88 | "* where m is the number of training examples in the data set and:\n",
89 | "$$\n",
90 | "\\begin{align}\n",
91 | " f_{\\mathbf{w},b}(\\mathbf{x^{(i)}}) &= g(z^{(i)})\\tag{3} \\\\\n",
92 | " z^{(i)} &= \\mathbf{w} \\cdot \\mathbf{x}^{(i)}+ b\\tag{4} \\\\\n",
93 | " g(z^{(i)}) &= \\frac{1}{1+e^{-z^{(i)}}}\\tag{5} \n",
94 | "\\end{align}\n",
95 | "$$\n",
96 | " "
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "\n",
104 | "#### Code Description\n",
105 | "\n",
106 | "The algorithm for `compute_cost_logistic` loops over all the examples calculating the loss for each example and accumulating the total.\n",
107 | "\n",
108 | "Note that the variables X and y are not scalar values but matrices of shape ($m, n$) and ($𝑚$,) respectively, where $𝑛$ is the number of features and $𝑚$ is the number of training examples.\n"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "def compute_cost_logistic(X, y, w, b):\n",
118 | " \"\"\"\n",
119 | " Computes cost\n",
120 | "\n",
121 | " Args:\n",
122 | " X (ndarray (m,n)): Data, m examples with n features\n",
123 | " y (ndarray (m,)) : target values\n",
124 | " w (ndarray (n,)) : model parameters \n",
125 | " b (scalar) : model parameter\n",
126 | " \n",
127 | " Returns:\n",
128 | " cost (scalar): cost\n",
129 | " \"\"\"\n",
130 | "\n",
131 | " m = X.shape[0]\n",
132 | " cost = 0.0\n",
133 | " for i in range(m):\n",
134 | " z_i = np.dot(X[i],w) + b\n",
135 | " f_wb_i = sigmoid(z_i)\n",
136 | " cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)\n",
137 | " \n",
138 | " cost = cost / m\n",
139 | " return cost\n"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "Check the implementation of the cost function using the cell below."
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "w_tmp = np.array([1,1])\n",
156 | "b_tmp = -3\n",
157 | "print(compute_cost_logistic(X_train, y_train, w_tmp, b_tmp))"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "**Expected output**: 0.3668667864055175"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "## Example\n",
172 | "Now, let's see what the cost function output is for a different value of $w$. \n",
173 | "\n",
174 | "* In a previous lab, you plotted the decision boundary for $b = -3, w_0 = 1, w_1 = 1$. That is, you had `w = np.array([-3,1,1])`.\n",
175 | "\n",
176 | "* Let's say you want to see if $b = -4, w_0 = 1, w_1 = 1$, or `w = np.array([-4,1,1])` provides a better model.\n",
177 | "\n",
178 | "Let's first plot the decision boundary for these two different $b$ values to see which one fits the data better.\n",
179 | "\n",
180 | "* For $b = -3, w_0 = 1, w_1 = 1$, we'll plot $-3 + x_0+x_1 = 0$ (shown in blue)\n",
181 | "* For $b = -4, w_0 = 1, w_1 = 1$, we'll plot $-4 + x_0+x_1 = 0$ (shown in magenta)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "import matplotlib.pyplot as plt\n",
191 | "\n",
192 | "# Choose values between 0 and 6\n",
193 | "x0 = np.arange(0,6)\n",
194 | "\n",
195 | "# Plot the two decision boundaries\n",
196 | "x1 = 3 - x0\n",
197 | "x1_other = 4 - x0\n",
198 | "\n",
199 | "fig,ax = plt.subplots(1, 1, figsize=(4,4))\n",
200 | "# Plot the decision boundary\n",
201 | "ax.plot(x0,x1, c=dlc[\"dlblue\"], label=\"$b$=-3\")\n",
202 | "ax.plot(x0,x1_other, c=dlc[\"dlmagenta\"], label=\"$b$=-4\")\n",
203 | "ax.axis([0, 4, 0, 4])\n",
204 | "\n",
205 | "# Plot the original data\n",
206 | "plot_data(X_train,y_train,ax)\n",
207 | "ax.axis([0, 4, 0, 4])\n",
208 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
209 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
210 | "plt.legend(loc=\"upper right\")\n",
211 | "plt.title(\"Decision Boundary\")\n",
212 | "plt.show()"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "You can see from this plot that `w = np.array([-4,1,1])` is a worse model for the training data. Let's see if the cost function implementation reflects this."
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "w_array1 = np.array([1,1])\n",
229 | "b_1 = -3\n",
230 | "w_array2 = np.array([1,1])\n",
231 | "b_2 = -4\n",
232 | "\n",
233 | "print(\"Cost for b = -3 : \", compute_cost_logistic(X_train, y_train, w_array1, b_1))\n",
234 | "print(\"Cost for b = -4 : \", compute_cost_logistic(X_train, y_train, w_array2, b_2))"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "**Expected output**\n",
242 | "\n",
243 | "Cost for b = -3 : 0.3668667864055175\n",
244 | "\n",
245 | "Cost for b = -4 : 0.5036808636748461\n",
246 | "\n",
247 | "\n",
248 | "You can see the cost function behaves as expected and the cost for `w = np.array([-4,1,1])` is indeed higher than the cost for `w = np.array([-3,1,1])`"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "## Congratulations!\n",
256 | "In this lab you examined and utilized the cost function for logistic regression."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": []
265 | }
266 | ],
267 | "metadata": {
268 | "kernelspec": {
269 | "display_name": "Python 3",
270 | "language": "python",
271 | "name": "python3"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 3
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython3",
283 | "version": "3.7.6"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 5
288 | }
289 |
--------------------------------------------------------------------------------
/week3/Optional Labs/archive/C1_W3_Lab05_Cost_Function_Soln-Copy1.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Optional Lab: Cost Function for Logistic Regression\n",
8 | "\n",
9 | "## Goals\n",
10 | "In this lab, you will:\n",
11 | "- examine the implementation and utilize the cost function for logistic regression."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import numpy as np\n",
21 | "%matplotlib widget\n",
22 | "import matplotlib.pyplot as plt\n",
23 | "from lab_utils_common import plot_data, sigmoid, dlc\n",
24 | "plt.style.use('./deeplearning.mplstyle')"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "## Dataset \n",
32 | "Let's start with the same dataset as was used in the decision boundary lab."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "tags": []
40 | },
41 | "outputs": [],
42 | "source": [
43 | "X_train = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]]) #(m,n)\n",
44 | "y_train = np.array([0, 0, 0, 1, 1, 1]) #(m,)"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "We will use a helper function to plot this data. The data points with label $y=1$ are shown as red crosses, while the data points with label $y=0$ are shown as blue circles."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "fig,ax = plt.subplots(1,1,figsize=(4,4))\n",
61 | "plot_data(X_train, y_train, ax)\n",
62 | "\n",
63 | "# Set both axes to be from 0-4\n",
64 | "ax.axis([0, 4, 0, 3.5])\n",
65 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
66 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
67 | "plt.show()"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "## Cost function\n",
75 | "\n",
76 | "In a previous lab, you developed the *logistic loss* function. Recall, loss is defined to apply to one example. Here you combine the losses to form the **cost**, which includes all the examples.\n",
77 | "\n",
78 | "\n",
79 | "Recall that for logistic regression, the cost function is of the form \n",
80 | "\n",
81 | "$$ J(\\mathbf{w},b) = \\frac{1}{m} \\sum_{i=0}^{m-1} \\left[ loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) \\right] \\tag{1}$$\n",
82 | "\n",
83 | "where\n",
84 | "* $loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)})$ is the cost for a single data point, which is:\n",
85 | "\n",
86 | " $$loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) = -y^{(i)} \\log\\left(f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) - \\left( 1 - y^{(i)}\\right) \\log \\left( 1 - f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) \\tag{2}$$\n",
87 | " \n",
88 | "* where m is the number of training examples in the data set and:\n",
89 | "$$\n",
90 | "\\begin{align}\n",
91 | " f_{\\mathbf{w},b}(\\mathbf{x^{(i)}}) &= g(z^{(i)})\\tag{3} \\\\\n",
92 | " z^{(i)} &= \\mathbf{w} \\cdot \\mathbf{x}^{(i)}+ b\\tag{4} \\\\\n",
93 | " g(z^{(i)}) &= \\frac{1}{1+e^{-z^{(i)}}}\\tag{5} \n",
94 | "\\end{align}\n",
95 | "$$\n",
96 | " "
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "\n",
104 | "#### Code Description\n",
105 | "\n",
106 | "The algorithm for `compute_cost_logistic` loops over all the examples, calculating the loss for each example and summing the results.\n",
107 | "\n",
108 | "Note that the variables X and y are not scalar values but matrices of shape ($m, n$) and ($𝑚$,) respectively, where $𝑛$ is the number of features and $𝑚$ is the number of training examples.\n"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "def compute_cost_logistic(X, y, w, b):\n",
118 | " \"\"\"\n",
119 | " Computes cost\n",
120 | "\n",
121 | " Args:\n",
122 | " X (ndarray (m,n)): Data, m examples with n features\n",
123 | " y (ndarray (m,)) : target values\n",
124 | " w (ndarray (n,)) : model parameters \n",
125 | " b (scalar) : model parameter\n",
126 | " \n",
127 | " Returns:\n",
128 | " cost (scalar): cost\n",
129 | " \"\"\"\n",
130 | "\n",
131 | " m = X.shape[0]\n",
132 | " cost = 0.0\n",
133 | " for i in range(m):\n",
134 | " z_i = np.dot(X[i],w) + b\n",
135 | " f_wb_i = sigmoid(z_i)\n",
136 | " cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)\n",
137 | " \n",
138 | " cost = cost / m\n",
139 | " return cost\n"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "Check the implementation of the cost function using the cell below."
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "w_tmp = np.array([1,1])\n",
156 | "b_tmp = -3\n",
157 | "print(compute_cost_logistic(X_train, y_train, w_tmp, b_tmp))"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "**Expected output**: 0.3668667864055175"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "## Example\n",
172 | "Now, let's see what the cost function output is for a different value of $w$. \n",
173 | "\n",
174 | "* In a previous lab, you plotted the decision boundary for $b = -3, w_0 = 1, w_1 = 1$. That is, you had `w = np.array([-3,1,1])`.\n",
175 | "\n",
176 | "* Let's say you want to see if $b = -4, w_0 = 1, w_1 = 1$, or `w = np.array([-4,1,1])` provides a better model.\n",
177 | "\n",
178 | "Let's first plot the decision boundary for these two different $b$ values to see which one fits the data better.\n",
179 | "\n",
180 | "* For $b = -3, w_0 = 1, w_1 = 1$, we'll plot $-3 + x_0+x_1 = 0$ (shown in blue)\n",
181 | "* For $b = -4, w_0 = 1, w_1 = 1$, we'll plot $-4 + x_0+x_1 = 0$ (shown in magenta)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "import matplotlib.pyplot as plt\n",
191 | "\n",
192 | "# Choose values between 0 and 6\n",
193 | "x0 = np.arange(0,6)\n",
194 | "\n",
195 | "# Plot the two decision boundaries\n",
196 | "x1 = 3 - x0\n",
197 | "x1_other = 4 - x0\n",
198 | "\n",
199 | "fig,ax = plt.subplots(1, 1, figsize=(4,4))\n",
200 | "# Plot the decision boundary\n",
201 | "ax.plot(x0,x1, c=dlc[\"dlblue\"], label=\"$b$=-3\")\n",
202 | "ax.plot(x0,x1_other, c=dlc[\"dlmagenta\"], label=\"$b$=-4\")\n",
203 | "ax.axis([0, 4, 0, 4])\n",
204 | "\n",
205 | "# Plot the original data\n",
206 | "plot_data(X_train,y_train,ax)\n",
207 | "ax.axis([0, 4, 0, 4])\n",
208 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
209 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
210 | "plt.legend(loc=\"upper right\")\n",
211 | "plt.title(\"Decision Boundary\")\n",
212 | "plt.show()"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "You can see from this plot that `w = np.array([-4,1,1])` is a worse model for the training data. Let's see if the cost function implementation reflects this."
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "w_array1 = np.array([1,1])\n",
229 | "b_1 = -3\n",
230 | "w_array2 = np.array([1,1])\n",
231 | "b_2 = -4\n",
232 | "\n",
233 | "print(\"Cost for b = -3 : \", compute_cost_logistic(X_train, y_train, w_array1, b_1))\n",
234 | "print(\"Cost for b = -4 : \", compute_cost_logistic(X_train, y_train, w_array2, b_2))"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "**Expected output**\n",
242 | "\n",
243 | "Cost for b = -3 : 0.3668667864055175\n",
244 | "\n",
245 | "Cost for b = -4 : 0.5036808636748461\n",
246 | "\n",
247 | "\n",
248 | "You can see the cost function behaves as expected and the cost for `w = np.array([-4,1,1])` is indeed higher than the cost for `w = np.array([-3,1,1])`"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "## Congratulations!\n",
256 | "In this lab you examined and utilized the cost function for logistic regression."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": []
265 | }
266 | ],
267 | "metadata": {
268 | "kernelspec": {
269 | "display_name": "Python 3",
270 | "language": "python",
271 | "name": "python3"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 3
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython3",
283 | "version": "3.8.10"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 5
288 | }
289 |
--------------------------------------------------------------------------------
/week3/Optional Labs/archive/C1_W3_Lab05_Cost_Function_Soln-Copy2.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Optional Lab: Cost Function for Logistic Regression\n",
8 | "\n",
9 | "## Goals\n",
10 | "In this lab, you will:\n",
11 | "- examine the implementation and utilize the cost function for logistic regression."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": null,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import numpy as np\n",
21 | "%matplotlib widget\n",
22 | "import matplotlib.pyplot as plt\n",
23 | "from lab_utils_common import plot_data, sigmoid, dlc\n",
24 | "plt.style.use('./deeplearning.mplstyle')"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "## Dataset \n",
32 | "Let's start with the same dataset as was used in the decision boundary lab."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": null,
38 | "metadata": {
39 | "tags": []
40 | },
41 | "outputs": [],
42 | "source": [
43 | "X_train = np.array([[0.5, 1.5], [1,1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]]) #(m,n)\n",
44 | "y_train = np.array([0, 0, 0, 1, 1, 1]) #(m,)"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "We will use a helper function to plot this data. The data points with label $y=1$ are shown as red crosses, while the data points with label $y=0$ are shown as blue circles."
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": null,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "fig,ax = plt.subplots(1,1,figsize=(4,4))\n",
61 | "plot_data(X_train, y_train, ax)\n",
62 | "\n",
63 | "# Set both axes to be from 0-4\n",
64 | "ax.axis([0, 4, 0, 3.5])\n",
65 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
66 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
67 | "plt.show()"
68 | ]
69 | },
70 | {
71 | "cell_type": "markdown",
72 | "metadata": {},
73 | "source": [
74 | "## Cost function\n",
75 | "\n",
76 | "In a previous lab, you developed the *logistic loss* function. Recall, loss is defined to apply to one example. Here you combine the losses to form the **cost**, which includes all the examples.\n",
77 | "\n",
78 | "\n",
79 | "Recall that for logistic regression, the cost function is of the form \n",
80 | "\n",
81 | "$$ J(\\mathbf{w},b) = \\frac{1}{m} \\sum_{i=0}^{m-1} \\left[ loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) \\right] \\tag{1}$$\n",
82 | "\n",
83 | "where\n",
84 | "* $loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)})$ is the cost for a single data point, which is:\n",
85 | "\n",
86 | " $$loss(f_{\\mathbf{w},b}(\\mathbf{x}^{(i)}), y^{(i)}) = -y^{(i)} \\log\\left(f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) - \\left( 1 - y^{(i)}\\right) \\log \\left( 1 - f_{\\mathbf{w},b}\\left( \\mathbf{x}^{(i)} \\right) \\right) \\tag{2}$$\n",
87 | " \n",
88 | "* where m is the number of training examples in the data set and:\n",
89 | "$$\n",
90 | "\\begin{align}\n",
91 | " f_{\\mathbf{w},b}(\\mathbf{x^{(i)}}) &= g(z^{(i)})\\tag{3} \\\\\n",
92 | " z^{(i)} &= \\mathbf{w} \\cdot \\mathbf{x}^{(i)}+ b\\tag{4} \\\\\n",
93 | " g(z^{(i)}) &= \\frac{1}{1+e^{-z^{(i)}}}\\tag{5} \n",
94 | "\\end{align}\n",
95 | "$$\n",
96 | " "
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "\n",
104 | "#### Code Description\n",
105 | "\n",
106 | "The algorithm for `compute_cost_logistic` loops over all the examples calculating the loss for each example and accumulating the total.\n",
107 | "\n",
108 | "Note that the variables X and y are not scalar values but matrices of shape ($m, n$) and ($𝑚$,) respectively, where $𝑛$ is the number of features and $𝑚$ is the number of training examples.\n"
109 | ]
110 | },
111 | {
112 | "cell_type": "code",
113 | "execution_count": null,
114 | "metadata": {},
115 | "outputs": [],
116 | "source": [
117 | "def compute_cost_logistic(X, y, w, b):\n",
118 | " \"\"\"\n",
119 | " Computes cost\n",
120 | "\n",
121 | " Args:\n",
122 | " X (ndarray (m,n)): Data, m examples with n features\n",
123 | " y (ndarray (m,)) : target values\n",
124 | " w (ndarray (n,)) : model parameters \n",
125 | " b (scalar) : model parameter\n",
126 | " \n",
127 | " Returns:\n",
128 | " cost (scalar): cost\n",
129 | " \"\"\"\n",
130 | "\n",
131 | " m = X.shape[0]\n",
132 | " cost = 0.0\n",
133 | " for i in range(m):\n",
134 | " z_i = np.dot(X[i],w) + b\n",
135 | " f_wb_i = sigmoid(z_i)\n",
136 | " cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)\n",
137 | " \n",
138 | " cost = cost / m\n",
139 | " return cost\n"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "Check the implementation of the cost function using the cell below."
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "w_tmp = np.array([1,1])\n",
156 | "b_tmp = -3\n",
157 | "print(compute_cost_logistic(X_train, y_train, w_tmp, b_tmp))"
158 | ]
159 | },
160 | {
161 | "cell_type": "markdown",
162 | "metadata": {},
163 | "source": [
164 | "**Expected output**: 0.3668667864055175"
165 | ]
166 | },
167 | {
168 | "cell_type": "markdown",
169 | "metadata": {},
170 | "source": [
171 | "## Example\n",
172 | "Now, let's see what the cost function output is for a different value of $w$. \n",
173 | "\n",
174 | "* In a previous lab, you plotted the decision boundary for $b = -3, w_0 = 1, w_1 = 1$. That is, you had `w = np.array([-3,1,1])`.\n",
175 | "\n",
176 | "* Let's say you want to see if $b = -4, w_0 = 1, w_1 = 1$, or `w = np.array([-4,1,1])` provides a better model.\n",
177 | "\n",
178 | "Let's first plot the decision boundary for these two different $b$ values to see which one fits the data better.\n",
179 | "\n",
180 | "* For $b = -3, w_0 = 1, w_1 = 1$, we'll plot $-3 + x_0+x_1 = 0$ (shown in blue)\n",
181 | "* For $b = -4, w_0 = 1, w_1 = 1$, we'll plot $-4 + x_0+x_1 = 0$ (shown in magenta)"
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {},
188 | "outputs": [],
189 | "source": [
190 | "import matplotlib.pyplot as plt\n",
191 | "\n",
192 | "# Choose values between 0 and 6\n",
193 | "x0 = np.arange(0,6)\n",
194 | "\n",
195 | "# Plot the two decision boundaries\n",
196 | "x1 = 3 - x0\n",
197 | "x1_other = 4 - x0\n",
198 | "\n",
199 | "fig,ax = plt.subplots(1, 1, figsize=(4,4))\n",
200 | "# Plot the decision boundary\n",
201 | "ax.plot(x0,x1, c=dlc[\"dlblue\"], label=\"$b$=-3\")\n",
202 | "ax.plot(x0,x1_other, c=dlc[\"dlmagenta\"], label=\"$b$=-4\")\n",
203 | "ax.axis([0, 4, 0, 4])\n",
204 | "\n",
205 | "# Plot the original data\n",
206 | "plot_data(X_train,y_train,ax)\n",
207 | "ax.axis([0, 4, 0, 4])\n",
208 | "ax.set_ylabel('$x_1$', fontsize=12)\n",
209 | "ax.set_xlabel('$x_0$', fontsize=12)\n",
210 | "plt.legend(loc=\"upper right\")\n",
211 | "plt.title(\"Decision Boundary\")\n",
212 | "plt.show()"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "You can see from this plot that `w = np.array([-4,1,1])` is a worse model for the training data. Let's see if the cost function implementation reflects this."
220 | ]
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {},
226 | "outputs": [],
227 | "source": [
228 | "w_array1 = np.array([1,1])\n",
229 | "b_1 = -3\n",
230 | "w_array2 = np.array([1,1])\n",
231 | "b_2 = -4\n",
232 | "\n",
233 | "print(\"Cost for b = -3 : \", compute_cost_logistic(X_train, y_train, w_array1, b_1))\n",
234 | "print(\"Cost for b = -4 : \", compute_cost_logistic(X_train, y_train, w_array2, b_2))"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "**Expected output**\n",
242 | "\n",
243 | "Cost for b = -3 : 0.3668667864055175\n",
244 | "\n",
245 | "Cost for b = -4 : 0.5036808636748461\n",
246 | "\n",
247 | "\n",
248 | "You can see the cost function behaves as expected and the cost for `w = np.array([-4,1,1])` is indeed higher than the cost for `w = np.array([-3,1,1])`"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "## Congratulations!\n",
256 | "In this lab you examined and utilized the cost function for logistic regression."
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": []
265 | }
266 | ],
267 | "metadata": {
268 | "kernelspec": {
269 | "display_name": "Python 3",
270 | "language": "python",
271 | "name": "python3"
272 | },
273 | "language_info": {
274 | "codemirror_mode": {
275 | "name": "ipython",
276 | "version": 3
277 | },
278 | "file_extension": ".py",
279 | "mimetype": "text/x-python",
280 | "name": "python",
281 | "nbconvert_exporter": "python",
282 | "pygments_lexer": "ipython3",
283 | "version": "3.7.6"
284 | }
285 | },
286 | "nbformat": 4,
287 | "nbformat_minor": 5
288 | }
289 |
--------------------------------------------------------------------------------
/week3/Optional Labs/deeplearning.mplstyle:
--------------------------------------------------------------------------------
1 | # see https://matplotlib.org/stable/tutorials/introductory/customizing.html
2 | lines.linewidth: 4
3 | lines.solid_capstyle: butt
4 |
5 | legend.fancybox: true
6 |
7 | # Verdana for non-math text,
8 | # Cambria Math
9 |
10 | #Blue (Crayon-Aqua) 0096FF
11 | #Dark Red C00000
12 | #Orange (Apple Orange) FF9300
13 | #Black 000000
14 | #Magenta FF40FF
15 | #Purple 7030A0
16 |
17 | axes.prop_cycle: cycler('color', ['0096FF', 'FF9300', 'FF40FF', '7030A0', 'C00000'])
18 | #axes.facecolor: f0f0f0 # grey
19 | axes.facecolor: ffffff # white
20 | axes.labelsize: large
21 | axes.axisbelow: true
22 | axes.grid: False
23 | axes.edgecolor: f0f0f0
24 | axes.linewidth: 3.0
25 | axes.titlesize: x-large
26 |
27 | patch.edgecolor: f0f0f0
28 | patch.linewidth: 0.5
29 |
30 | svg.fonttype: path
31 |
32 | grid.linestyle: -
33 | grid.linewidth: 1.0
34 | grid.color: cbcbcb
35 |
36 | xtick.major.size: 0
37 | xtick.minor.size: 0
38 | ytick.major.size: 0
39 | ytick.minor.size: 0
40 |
41 | savefig.edgecolor: f0f0f0
42 | savefig.facecolor: f0f0f0
43 |
44 | #figure.subplot.left: 0.08
45 | #figure.subplot.right: 0.95
46 | #figure.subplot.bottom: 0.07
47 |
48 | #figure.facecolor: f0f0f0 # grey
49 | figure.facecolor: ffffff # white
50 |
51 | ## ***************************************************************************
52 | ## * FONT *
53 | ## ***************************************************************************
54 | ## The font properties used by `text.Text`.
55 | ## See https://matplotlib.org/api/font_manager_api.html for more information
56 | ## on font properties. The 6 font properties used for font matching are
57 | ## given below with their default values.
58 | ##
59 | ## The font.family property can take either a concrete font name (not supported
60 | ## when rendering text with usetex), or one of the following five generic
61 | ## values:
62 | ## - 'serif' (e.g., Times),
63 | ## - 'sans-serif' (e.g., Helvetica),
64 | ## - 'cursive' (e.g., Zapf-Chancery),
65 | ## - 'fantasy' (e.g., Western), and
66 | ## - 'monospace' (e.g., Courier).
67 | ## Each of these values has a corresponding default list of font names
68 | ## (font.serif, etc.); the first available font in the list is used. Note that
69 | ## for font.serif, font.sans-serif, and font.monospace, the first element of
70 | ## the list (a DejaVu font) will always be used because DejaVu is shipped with
71 | ## Matplotlib and is thus guaranteed to be available; the other entries are
72 | ## left as examples of other possible values.
73 | ##
74 | ## The font.style property has three values: normal (or roman), italic
75 | ## or oblique. The oblique style will be used for italic, if it is not
76 | ## present.
77 | ##
78 | ## The font.variant property has two values: normal or small-caps. For
79 | ## TrueType fonts, which are scalable fonts, small-caps is equivalent
80 | ## to using a font size of 'smaller', or about 83%% of the current font
81 | ## size.
82 | ##
83 | ## The font.weight property has effectively 13 values: normal, bold,
84 | ## bolder, lighter, 100, 200, 300, ..., 900. Normal is the same as
85 | ## 400, and bold is 700. bolder and lighter are relative values with
86 | ## respect to the current weight.
87 | ##
88 | ## The font.stretch property has 11 values: ultra-condensed,
89 | ## extra-condensed, condensed, semi-condensed, normal, semi-expanded,
90 | ## expanded, extra-expanded, ultra-expanded, wider, and narrower. This
91 | ## property is not currently implemented.
92 | ##
93 | ## The font.size property is the default font size for text, given in points.
94 | ## 10 pt is the standard value.
95 | ##
96 | ## Note that font.size controls default text sizes. To configure
97 | ## special text sizes tick labels, axes, labels, title, etc., see the rc
98 | ## settings for axes and ticks. Special text sizes can be defined
99 | ## relative to font.size, using the following values: xx-small, x-small,
100 | ## small, medium, large, x-large, xx-large, larger, or smaller
101 |
102 |
103 | font.family: sans-serif
104 | font.style: normal
105 | font.variant: normal
106 | font.weight: normal
107 | font.stretch: normal
108 | font.size: 8.0
109 |
110 | font.serif: DejaVu Serif, Bitstream Vera Serif, Computer Modern Roman, New Century Schoolbook, Century Schoolbook L, Utopia, ITC Bookman, Bookman, Nimbus Roman No9 L, Times New Roman, Times, Palatino, Charter, serif
111 | font.sans-serif: Verdana, DejaVu Sans, Bitstream Vera Sans, Computer Modern Sans Serif, Lucida Grande, Geneva, Lucid, Arial, Helvetica, Avant Garde, sans-serif
112 | font.cursive: Apple Chancery, Textile, Zapf Chancery, Sand, Script MT, Felipa, Comic Neue, Comic Sans MS, cursive
113 | font.fantasy: Chicago, Charcoal, Impact, Western, Humor Sans, xkcd, fantasy
114 | font.monospace: DejaVu Sans Mono, Bitstream Vera Sans Mono, Computer Modern Typewriter, Andale Mono, Nimbus Mono L, Courier New, Courier, Fixed, Terminal, monospace
115 |
116 |
117 | ## ***************************************************************************
118 | ## * TEXT *
119 | ## ***************************************************************************
120 | ## The text properties used by `text.Text`.
121 | ## See https://matplotlib.org/api/artist_api.html#module-matplotlib.text
122 | ## for more information on text properties
123 | #text.color: black
124 |
125 |
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1W3_XW.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1W3_XW.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1W3_boundary.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1W3_boundary.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1W3_example2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1W3_example2.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1W3_mcpredict.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1W3_mcpredict.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1W3_trainvpredict.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1W3_trainvpredict.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Classification.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Classification.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Lab07_overfitting.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Lab07_overfitting.PNG
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LinearCostRegularized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LinearCostRegularized.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LinearGradientRegularized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LinearGradientRegularized.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticCostRegularized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticCostRegularized.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticGradientRegularized.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticGradientRegularized.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticLoss_a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticLoss_a.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticLoss_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticLoss_b.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticLoss_c.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticLoss_c.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticRegression.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticRegression.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticRegression_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticRegression_left.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_LogisticRegression_right.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_LogisticRegression_right.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Logistic_gradient_descent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Logistic_gradient_descent.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Overfitting_a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Overfitting_a.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Overfitting_b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Overfitting_b.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_Overfitting_c.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_Overfitting_c.png
--------------------------------------------------------------------------------
/week3/Optional Labs/images/C1_W3_SqErrorVsLogistic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Optional Labs/images/C1_W3_SqErrorVsLogistic.png
--------------------------------------------------------------------------------
/week3/Optional Labs/lab_utils_common.py:
--------------------------------------------------------------------------------
1 | """
2 | lab_utils_common
3 | contains common routines and variable definitions
4 | used by all the labs in this week.
5 | by contrast, specific, large plotting routines will be in separate files
6 | and are generally imported into the week where they are used.
7 | those files will import this file
8 | """
9 | import copy
10 | import math
11 | import numpy as np
12 | import matplotlib.pyplot as plt
13 | from matplotlib.patches import FancyArrowPatch
14 | from ipywidgets import Output
15 |
16 | np.set_printoptions(precision=2)
17 |
18 | dlc = dict(dlblue = '#0096ff', dlorange = '#FF9300', dldarkred='#C00000', dlmagenta='#FF40FF', dlpurple='#7030A0')
19 | dlblue = '#0096ff'; dlorange = '#FF9300'; dldarkred='#C00000'; dlmagenta='#FF40FF'; dlpurple='#7030A0'
20 | dlcolors = [dlblue, dlorange, dldarkred, dlmagenta, dlpurple]
21 | plt.style.use('./deeplearning.mplstyle')
22 |
23 | def sigmoid(z):
24 | """
25 | Compute the sigmoid of z
26 |
27 | Parameters
28 | ----------
29 | z : array_like
30 | A scalar or numpy array of any size.
31 |
32 | Returns
33 | -------
34 | g : array_like
35 | sigmoid(z)
36 | """
37 | z = np.clip( z, -500, 500 ) # protect against overflow
38 | g = 1.0/(1.0+np.exp(-z))
39 |
40 | return g
41 |
42 | ##########################################################
43 | # Regression Routines
44 | ##########################################################
45 |
46 | def predict_logistic(X, w, b):
47 | """ performs prediction """
48 | return sigmoid(X @ w + b)
49 |
50 | def predict_linear(X, w, b):
51 | """ performs prediction """
52 | return X @ w + b
53 |
54 | def compute_cost_logistic(X, y, w, b, lambda_=0, safe=False):
55 | """
56 | Computes cost using logistic loss, non-matrix version
57 |
58 | Args:
59 | X (ndarray): Shape (m,n) matrix of examples with n features
60 | y (ndarray): Shape (m,) target values
61 | w (ndarray): Shape (n,) parameters for prediction
62 | b (scalar): parameter for prediction
63 | lambda_ : (scalar, float) Controls amount of regularization, 0 = no regularization
64 | safe : (boolean) True selects the under/overflow-safe algorithm
65 | Returns:
66 | cost (scalar): cost
67 | """
68 |
69 | m,n = X.shape
70 | cost = 0.0
71 | for i in range(m):
72 | z_i = np.dot(X[i],w) + b #(n,)(n,) or (n,) ()
73 | if safe: #avoids overflows
74 | cost += -(y[i] * z_i ) + log_1pexp(z_i)
75 | else:
76 | f_wb_i = sigmoid(z_i)                                          # scalar
77 | cost += -y[i] * np.log(f_wb_i) - (1 - y[i]) * np.log(1 - f_wb_i) # scalar
78 | cost = cost/m
79 |
80 | reg_cost = 0
81 | if lambda_ != 0:
82 | for j in range(n):
83 | reg_cost += (w[j]**2) # scalar
84 | reg_cost = (lambda_/(2*m))*reg_cost
85 |
86 | return cost + reg_cost
87 |
88 |
89 | def log_1pexp(x, maximum=20):
90 | ''' approximate log(1+exp(x))
91 | https://stats.stackexchange.com/questions/475589/numerical-computation-of-cross-entropy-in-practice
92 | Args:
93 | x : (ndarray) Shape (n,1) or (n,), input
94 | out : (ndarray) Shape matches x, output ~= np.log(1+exp(x))
95 | '''
96 |
97 | out = np.zeros_like(x,dtype=float)
98 | i = x <= maximum
99 | ni = np.logical_not(i)
100 |
101 | out[i] = np.log(1 + np.exp(x[i]))
102 | out[ni] = x[ni]
103 | return out
104 |
105 |
106 | def compute_cost_matrix(X, y, w, b, logistic=False, lambda_=0, safe=True):
107 | """
108 | Computes the cost using matrices
109 | Args:
110 | X : (ndarray, Shape (m,n)) matrix of examples
111 | y : (ndarray Shape (m,) or (m,1)) target value of each example
112 | w : (ndarray Shape (n,) or (n,1)) Values of parameter(s) of the model
113 | b : (scalar ) Values of parameter of the model
114 | logistic : (boolean) linear if false, logistic if true; lambda_ : (float) regularization strength; safe : (boolean) use overflow-safe computation
115 | Returns:
116 | total_cost: (scalar) cost
117 | """
118 | m = X.shape[0]
119 | y = y.reshape(-1,1) # ensure 2D
120 | w = w.reshape(-1,1) # ensure 2D
121 | if logistic:
122 | if safe: #safe from overflow
123 | z = X @ w + b #(m,n)(n,1)=(m,1)
124 | cost = -(y * z) + log_1pexp(z)
125 | cost = np.sum(cost)/m # (scalar)
126 | else:
127 | f = sigmoid(X @ w + b) # (m,n)(n,1) = (m,1)
128 | cost = (1/m)*(np.dot(-y.T, np.log(f)) - np.dot((1-y).T, np.log(1-f))) # (1,m)(m,1) = (1,1)
129 | cost = cost[0,0] # scalar
130 | else:
131 | f = X @ w + b # (m,n)(n,1) = (m,1)
132 | cost = (1/(2*m)) * np.sum((f - y)**2) # scalar
133 |
134 | reg_cost = (lambda_/(2*m)) * np.sum(w**2) # scalar
135 |
136 | total_cost = cost + reg_cost # scalar
137 |
138 | return total_cost # scalar
139 |
140 | def compute_gradient_matrix(X, y, w, b, logistic=False, lambda_=0):
141 | """
142 | Computes the gradient using matrices
143 |
144 | Args:
145 | X : (ndarray, Shape (m,n)) matrix of examples
146 | y : (ndarray Shape (m,) or (m,1)) target value of each example
147 | w : (ndarray Shape (n,) or (n,1)) Values of parameters of the model
148 | b : (scalar ) Values of parameter of the model
149 | logistic: (boolean) linear if false, logistic if true
150 | lambda_: (float) applies regularization if non-zero
151 | Returns
152 | dj_dw: (array_like Shape (n,1)) The gradient of the cost w.r.t. the parameters w
153 | dj_db: (scalar) The gradient of the cost w.r.t. the parameter b
154 | """
155 | m = X.shape[0]
156 | y = y.reshape(-1,1) # ensure 2D
157 | w = w.reshape(-1,1) # ensure 2D
158 |
159 | f_wb = sigmoid( X @ w + b ) if logistic else X @ w + b # (m,n)(n,1) = (m,1)
160 | err = f_wb - y # (m,1)
161 | dj_dw = (1/m) * (X.T @ err) # (n,m)(m,1) = (n,1)
162 | dj_db = (1/m) * np.sum(err) # scalar
163 |
164 | dj_dw += (lambda_/m) * w # regularize # (n,1)
165 |
166 | return dj_db, dj_dw # scalar, (n,1)
167 |
168 | def gradient_descent(X, y, w_in, b_in, alpha, num_iters, logistic=False, lambda_=0, verbose=True):
169 | """
170 | Performs batch gradient descent to learn w and b. Updates w and b by taking
171 | num_iters gradient steps with learning rate alpha
172 |
173 | Args:
174 | X (ndarray): Shape (m,n) matrix of examples
175 | y (ndarray): Shape (m,) or (m,1) target value of each example
176 | w_in (ndarray): Shape (n,) or (n,1) Initial values of parameters of the model
177 | b_in (scalar): Initial value of parameter of the model
178 | logistic: (boolean) linear if false, logistic if true
179 | lambda_: (float) applies regularization if non-zero
180 | alpha (float): Learning rate
181 | num_iters (int): number of iterations to run gradient descent
182 |
183 | Returns:
184 | w (ndarray): Shape (n,) or (n,1) Updated values of parameters; matches incoming shape
185 | b (scalar): Updated value of parameter
186 | """
187 | # A list to store cost J at each iteration, primarily for graphing later
188 | J_history = []
189 | w = copy.deepcopy(w_in) #avoid modifying global w within function
190 | b = b_in
191 | w = w.reshape(-1,1) #prep for matrix operations
192 | y = y.reshape(-1,1)
193 |
194 | for i in range(num_iters):
195 |
196 | # Calculate the gradient and update the parameters
197 | dj_db,dj_dw = compute_gradient_matrix(X, y, w, b, logistic, lambda_)
198 |
199 | # Update Parameters using w, b, alpha and gradient
200 | w = w - alpha * dj_dw
201 | b = b - alpha * dj_db
202 |
203 | # Save cost J at each iteration
204 | if i<100000: # prevent resource exhaustion
205 | J_history.append( compute_cost_matrix(X, y, w, b, logistic, lambda_) )
206 |
207 | # Print cost at intervals, 10 times over the run (or every iteration if num_iters < 10)
208 | if i% math.ceil(num_iters / 10) == 0:
209 | if verbose: print(f"Iteration {i:4d}: Cost {J_history[-1]} ")
210 |
211 | return w.reshape(w_in.shape), b, J_history #return final w,b and J history for graphing
212 |
213 | def zscore_normalize_features(X):
214 | """
215 | computes X, z-score normalized by column
216 |
217 | Args:
218 | X (ndarray): Shape (m,n) input data, m examples, n features
219 |
220 | Returns:
221 | X_norm (ndarray): Shape (m,n) input normalized by column
222 | mu (ndarray): Shape (n,) mean of each feature
223 | sigma (ndarray): Shape (n,) standard deviation of each feature
224 | """
225 | # find the mean of each column/feature
226 | mu = np.mean(X, axis=0) # mu will have shape (n,)
227 | # find the standard deviation of each column/feature
228 | sigma = np.std(X, axis=0) # sigma will have shape (n,)
229 | # element-wise, subtract mu for that column from each example, divide by std for that column
230 | X_norm = (X - mu) / sigma
231 |
232 | return X_norm, mu, sigma
233 |
234 | #check our work
235 | #from sklearn.preprocessing import scale
236 | #scale(X_orig, axis=0, with_mean=True, with_std=True, copy=True)
237 |
238 | ######################################################
239 | # Common Plotting Routines
240 | ######################################################
241 |
242 |
243 | def plot_data(X, y, ax, pos_label="y=1", neg_label="y=0", s=80, loc='best' ):
244 | """ plots logistic data with two axis """
245 | # Find Indices of Positive and Negative Examples
246 | pos = y == 1
247 | neg = y == 0
248 | pos = pos.reshape(-1,)  #work with 1D or 2D y vectors
249 | neg = neg.reshape(-1,)
250 |
251 | # Plot examples
252 | ax.scatter(X[pos, 0], X[pos, 1], marker='x', s=s, c = 'red', label=pos_label)
253 | ax.scatter(X[neg, 0], X[neg, 1], marker='o', s=s, label=neg_label, facecolors='none', edgecolors=dlblue, lw=3)
254 | ax.legend(loc=loc)
255 |
256 | ax.figure.canvas.toolbar_visible = False
257 | ax.figure.canvas.header_visible = False
258 | ax.figure.canvas.footer_visible = False
259 |
260 | def plt_tumor_data(x, y, ax):
261 | """ plots tumor data on one axis """
262 | pos = y == 1
263 | neg = y == 0
264 |
265 | ax.scatter(x[pos], y[pos], marker='x', s=80, c = 'red', label="malignant")
266 | ax.scatter(x[neg], y[neg], marker='o', s=100, label="benign", facecolors='none', edgecolors=dlblue,lw=3)
267 | ax.set_ylim(-0.175,1.1)
268 | ax.set_ylabel('y')
269 | ax.set_xlabel('Tumor Size')
270 | ax.set_title("Logistic Regression on Categorical Data")
271 |
272 | ax.figure.canvas.toolbar_visible = False
273 | ax.figure.canvas.header_visible = False
274 | ax.figure.canvas.footer_visible = False
275 |
276 | # Draws a threshold at 0.5
277 | def draw_vthresh(ax,x):
278 | """ draws a threshold """
279 | ylim = ax.get_ylim()
280 | xlim = ax.get_xlim()
281 | ax.fill_between([xlim[0], x], [ylim[1], ylim[1]], alpha=0.2, color=dlblue)
282 | ax.fill_between([x, xlim[1]], [ylim[1], ylim[1]], alpha=0.2, color=dldarkred)
283 | ax.annotate("z >= 0", xy= [x,0.5], xycoords='data',
284 | xytext=[30,5],textcoords='offset points')
285 | d = FancyArrowPatch(
286 | posA=(x, 0.5), posB=(x+3, 0.5), color=dldarkred,
287 | arrowstyle='simple, head_width=5, head_length=10, tail_width=0.0',
288 | )
289 | ax.add_artist(d)
290 | ax.annotate("z < 0", xy= [x,0.5], xycoords='data',
291 | xytext=[-50,5],textcoords='offset points', ha='left')
292 | f = FancyArrowPatch(
293 | posA=(x, 0.5), posB=(x-3, 0.5), color=dlblue,
294 | arrowstyle='simple, head_width=5, head_length=10, tail_width=0.0',
295 | )
296 | ax.add_artist(f)
297 |
--------------------------------------------------------------------------------
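Note: the utilities in lab_utils_common.py above expect the lab environment (deeplearning.mplstyle, ipywidgets) to be present at import time. For quick reference, here is a minimal, self-contained sketch of the same regularized logistic cost and gradient math using only NumPy; the tiny dataset, learning rate, and iteration count are illustrative placeholders, not values from the labs.

import numpy as np

def sigmoid(z):
    z = np.clip(z, -500, 500)            # guard against overflow, as in the lab file
    return 1.0 / (1.0 + np.exp(-z))

def cost_and_gradient(X, y, w, b, lambda_=0.0):
    """Regularized logistic cost and gradients, mirroring the math in
    compute_cost_logistic (safe=False) and compute_gradient_matrix above."""
    m = X.shape[0]
    f = sigmoid(X @ w + b)                                      # (m,) predictions
    cost = -np.mean(y * np.log(f) + (1 - y) * np.log(1 - f))    # logistic loss
    cost += (lambda_ / (2 * m)) * np.sum(w ** 2)                # regularization term
    err = f - y
    dj_dw = X.T @ err / m + (lambda_ / m) * w
    dj_db = np.mean(err)
    return cost, dj_dw, dj_db

# Tiny illustrative dataset: two features, four examples
X = np.array([[0.5, 1.5], [1.0, 1.0], [1.5, 0.5], [3.0, 0.5]])
y = np.array([0., 0., 1., 1.])
w, b = np.zeros(2), 0.0
for _ in range(1000):                     # plain batch gradient descent
    cost, dj_dw, dj_db = cost_and_gradient(X, y, w, b)
    w -= 0.1 * dj_dw
    b -= 0.1 * dj_db
print(f"final cost {cost:.3f}, w {w}, b {b:.3f}")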
/week3/Optional Labs/plt_logistic_loss.py:
--------------------------------------------------------------------------------
1 | """----------------------------------------------------------------
2 | logistic_loss plotting routines and support
3 | """
4 |
5 | from matplotlib import cm
6 | from lab_utils_common import sigmoid, dlblue, dlorange, np, plt, compute_cost_matrix
7 |
8 | def compute_cost_logistic_sq_err(X, y, w, b):
9 | """
10 | compute squared-error cost on logistic data (shown only as a counter-example; not used in practice)
11 | Args:
12 | X (ndarray): Shape (m,n) matrix of examples with multiple features; y (ndarray): Shape (m,) target values
13 | w (ndarray): Shape (n) parameters for prediction
14 | b (scalar): parameter for prediction
15 | Returns:
16 | cost (scalar): cost
17 | """
18 | m = X.shape[0]
19 | cost = 0.0
20 | for i in range(m):
21 | z_i = np.dot(X[i],w) + b
22 | f_wb_i = sigmoid(z_i) #add sigmoid to normal sq error cost for linear regression
23 | cost = cost + (f_wb_i - y[i])**2
24 | cost = cost / (2 * m)
25 | return np.squeeze(cost)
26 |
27 | def plt_logistic_squared_error(X,y):
28 | """ plots logistic squared error for demonstration """
29 | wx, by = np.meshgrid(np.linspace(-6,12,50),
30 | np.linspace(10, -20, 40))
31 | points = np.c_[wx.ravel(), by.ravel()]
32 | cost = np.zeros(points.shape[0])
33 |
34 | for i in range(points.shape[0]):
35 | w,b = points[i]
36 | cost[i] = compute_cost_logistic_sq_err(X.reshape(-1,1), y, w, b)
37 | cost = cost.reshape(wx.shape)
38 |
39 | fig = plt.figure()
40 | fig.canvas.toolbar_visible = False
41 | fig.canvas.header_visible = False
42 | fig.canvas.footer_visible = False
43 | ax = fig.add_subplot(1, 1, 1, projection='3d')
44 | ax.plot_surface(wx, by, cost, alpha=0.6,cmap=cm.jet,)
45 |
46 | ax.set_xlabel('w', fontsize=16)
47 | ax.set_ylabel('b', fontsize=16)
48 | ax.set_zlabel("Cost", rotation=90, fontsize=16)
49 | ax.set_title('"Logistic" Squared Error Cost vs (w, b)')
50 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
51 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
52 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
53 |
54 |
55 | def plt_logistic_cost(X,y):
56 | """ plots logistic cost """
57 | wx, by = np.meshgrid(np.linspace(-6,12,50),
58 | np.linspace(0, -20, 40))
59 | points = np.c_[wx.ravel(), by.ravel()]
60 | cost = np.zeros(points.shape[0],dtype=np.longdouble)
61 |
62 | for i in range(points.shape[0]):
63 | w,b = points[i]
64 | cost[i] = compute_cost_matrix(X.reshape(-1,1), y, w, b, logistic=True, safe=True)
65 | cost = cost.reshape(wx.shape)
66 |
67 | fig = plt.figure(figsize=(9,5))
68 | fig.canvas.toolbar_visible = False
69 | fig.canvas.header_visible = False
70 | fig.canvas.footer_visible = False
71 | ax = fig.add_subplot(1, 2, 1, projection='3d')
72 | ax.plot_surface(wx, by, cost, alpha=0.6,cmap=cm.jet,)
73 |
74 | ax.set_xlabel('w', fontsize=16)
75 | ax.set_ylabel('b', fontsize=16)
76 | ax.set_zlabel("Cost", rotation=90, fontsize=16)
77 | ax.set_title('Logistic Cost vs (w, b)')
78 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
79 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
80 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
81 |
82 | ax = fig.add_subplot(1, 2, 2, projection='3d')
83 |
84 | ax.plot_surface(wx, by, np.log(cost), alpha=0.6,cmap=cm.jet,)
85 |
86 | ax.set_xlabel('w', fontsize=16)
87 | ax.set_ylabel('b', fontsize=16)
88 | ax.set_zlabel('\nlog(Cost)', fontsize=16)
89 | ax.set_title('log(Logistic Cost) vs (w, b)')
90 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
91 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
92 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
93 |
94 | plt.show()
95 | return cost
96 |
97 |
98 | def soup_bowl():
99 | """ creates 3D quadratic error surface """
100 | #Create figure and plot with a 3D projection
101 | fig = plt.figure(figsize=(4,4))
102 | fig.canvas.toolbar_visible = False
103 | fig.canvas.header_visible = False
104 | fig.canvas.footer_visible = False
105 |
106 | #Plot configuration
107 | ax = fig.add_subplot(111, projection='3d')
108 | ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
109 | ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
110 | ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
111 | ax.zaxis.set_rotate_label(False)
112 | ax.view_init(15, -120)
113 |
114 | #Useful linspaces to give values to the parameters w and b
115 | w = np.linspace(-20, 20, 100)
116 | b = np.linspace(-20, 20, 100)
117 |
118 | #Get the z value for a bowl-shaped cost function
119 | z=np.zeros((len(w), len(b)))
120 | j=0
121 | for x in w:
122 | i=0
123 | for y in b:
124 | z[i,j] = x**2 + y**2
125 | i+=1
126 | j+=1
127 |
128 | #Meshgrid used for plotting 3D functions
129 | W, B = np.meshgrid(w, b)
130 |
131 | #Create the 3D surface plot of the bowl-shaped cost function
132 | ax.plot_surface(W, B, z, cmap = "Spectral_r", alpha=0.7, antialiased=False)
133 | ax.plot_wireframe(W, B, z, color='k', alpha=0.1)
134 | ax.set_xlabel("$w$")
135 | ax.set_ylabel("$b$")
136 | ax.set_zlabel("Cost", rotation=90)
137 | ax.set_title("Squared Error Cost used in Linear Regression")
138 |
139 | plt.show()
140 |
141 |
142 | def plt_simple_example(x, y):
143 | """ plots tumor data """
144 | pos = y == 1
145 | neg = y == 0
146 |
147 | fig,ax = plt.subplots(1,1,figsize=(5,3))
148 | fig.canvas.toolbar_visible = False
149 | fig.canvas.header_visible = False
150 | fig.canvas.footer_visible = False
151 |
152 | ax.scatter(x[pos], y[pos], marker='x', s=80, c = 'red', label="malignant")
153 | ax.scatter(x[neg], y[neg], marker='o', s=100, label="benign", facecolors='none', edgecolors=dlblue,lw=3)
154 | ax.set_ylim(-0.075,1.1)
155 | ax.set_ylabel('y')
156 | ax.set_xlabel('Tumor Size')
157 | ax.legend(loc='lower right')
158 | ax.set_title("Example of Logistic Regression on Categorical Data")
159 |
160 |
161 | def plt_two_logistic_loss_curves():
162 | """ plots the logistic loss """
163 | fig,ax = plt.subplots(1,2,figsize=(6,3),sharey=True)
164 | fig.canvas.toolbar_visible = False
165 | fig.canvas.header_visible = False
166 | fig.canvas.footer_visible = False
167 | x = np.linspace(0.01,1-0.01,20)
168 | ax[0].plot(x,-np.log(x))
169 | #ax[0].set_title("y = 1")
170 | ax[0].text(0.5, 4.0, "y = 1", fontsize=12)
171 | ax[0].set_ylabel("loss")
172 | ax[0].set_xlabel(r"$f_{w,b}(x)$")
173 | ax[1].plot(x,-np.log(1-x))
174 | #ax[1].set_title("y = 0")
175 | ax[1].text(0.5, 4.0, "y = 0", fontsize=12)
176 | ax[1].set_xlabel(r"$f_{w,b}(x)$")
177 | ax[0].annotate("prediction \nmatches \ntarget ", xy= [1,0], xycoords='data',
178 | xytext=[-10,30],textcoords='offset points', ha="right", va="center",
179 | arrowprops={'arrowstyle': '->', 'color': dlorange, 'lw': 3},)
180 | ax[0].annotate("loss increases as prediction\n differs from target", xy= [0.1,-np.log(0.1)], xycoords='data',
181 | xytext=[10,30],textcoords='offset points', ha="left", va="center",
182 | arrowprops={'arrowstyle': '->', 'color': dlorange, 'lw': 3},)
183 | ax[1].annotate("prediction \nmatches \ntarget ", xy= [0,0], xycoords='data',
184 | xytext=[10,30],textcoords='offset points', ha="left", va="center",
185 | arrowprops={'arrowstyle': '->', 'color': dlorange, 'lw': 3},)
186 | ax[1].annotate("loss increases as prediction\n differs from target", xy= [0.9,-np.log(1-0.9)], xycoords='data',
187 | xytext=[-10,30],textcoords='offset points', ha="right", va="center",
188 | arrowprops={'arrowstyle': '->', 'color': dlorange, 'lw': 3},)
189 | plt.suptitle("Loss Curves for Two Categorical Target Values", fontsize=12)
190 | plt.tight_layout()
191 | plt.show()
192 |
--------------------------------------------------------------------------------
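The surface plots produced by plt_logistic_squared_error() and plt_logistic_cost() above illustrate why squared error is not used with a sigmoid model: applying squared error to sigmoid outputs gives a non-convex cost surface, while the logistic (cross-entropy) loss gives a convex one. Below is a stand-alone numeric sketch of the two cost computations on a one-feature toy dataset; the data and the (w, b) pairs are illustrative only, and nothing beyond NumPy is assumed.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500)))

x = np.array([0., 1., 2., 3., 4., 5.])
y = np.array([0., 0., 0., 1., 1., 1.])

def sq_err_cost(w, b):
    # squared error applied to the sigmoid output, as in compute_cost_logistic_sq_err
    f = sigmoid(w * x + b)
    return np.mean((f - y) ** 2) / 2

def logistic_cost(w, b):
    # the logistic (cross-entropy) loss used in practice
    f = sigmoid(w * x + b)
    return -np.mean(y * np.log(f) + (1 - y) * np.log(1 - f))

for w, b in [(1.0, -2.5), (5.0, -12.5)]:
    print(f"w={w}, b={b}: sq_err={sq_err_cost(w, b):.4f}, logistic={logistic_cost(w, b):.4f}")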
/week3/Optional Labs/plt_one_addpt_onclick.py:
--------------------------------------------------------------------------------
1 | import time
2 | import copy
3 | from ipywidgets import Output
4 | from matplotlib.widgets import Button, CheckButtons
5 | from matplotlib.patches import FancyArrowPatch
6 | from lab_utils_common import np, plt, dlblue, dlorange, sigmoid, dldarkred, gradient_descent
7 |
8 | # for debug
9 | #output = Output() # sends hidden error messages to display when using widgets
10 | #display(output)
11 |
12 | class plt_one_addpt_onclick:
13 | """ class to run one interactive plot """
14 | def __init__(self, x, y, w, b, logistic=True):
15 | self.logistic=logistic
16 | pos = y == 1
17 | neg = y == 0
18 |
19 | fig,ax = plt.subplots(1,1,figsize=(8,4))
20 | fig.canvas.toolbar_visible = False
21 | fig.canvas.header_visible = False
22 | fig.canvas.footer_visible = False
23 |
24 | plt.subplots_adjust(bottom=0.25)
25 | ax.scatter(x[pos], y[pos], marker='x', s=80, c = 'red', label="malignant")
26 | ax.scatter(x[neg], y[neg], marker='o', s=100, label="benign", facecolors='none', edgecolors=dlblue,lw=3)
27 | ax.set_ylim(-0.05,1.1)
28 | xlim = ax.get_xlim()
29 | ax.set_xlim(xlim[0],xlim[1]*2)
30 | ax.set_ylabel('y')
31 | ax.set_xlabel('Tumor Size')
32 | self.alegend = ax.legend(loc='lower right')
33 | if self.logistic:
34 | ax.set_title("Example of Logistic Regression on Categorical Data")
35 | else:
36 | ax.set_title("Example of Linear Regression on Categorical Data")
37 |
38 | ax.text(0.65,0.8,"[Click to add data points]", size=10, transform=ax.transAxes)
39 |
40 | axcalc = plt.axes([0.1, 0.05, 0.38, 0.075]) #l,b,w,h
41 | axthresh = plt.axes([0.5, 0.05, 0.38, 0.075]) #l,b,w,h
42 | self.tlist = []
43 |
44 | self.fig = fig
45 | self.ax = [ax,axcalc,axthresh]
46 | self.x = x
47 | self.y = y
48 | self.w = copy.deepcopy(w)
49 | self.b = b
50 | f_wb = np.matmul(self.x.reshape(-1,1), self.w) + self.b
51 | if self.logistic:
52 | self.aline = self.ax[0].plot(self.x, sigmoid(f_wb), color=dlblue)
53 | self.bline = self.ax[0].plot(self.x, f_wb, color=dlorange,lw=1)
54 | else:
55 | self.aline = self.ax[0].plot(self.x, f_wb, color=dlblue)   # linear case: plot the linear model directly
56 |
57 | self.cid = fig.canvas.mpl_connect('button_press_event', self.add_data)
58 | if self.logistic:
59 | self.bcalc = Button(axcalc, 'Run Logistic Regression (click)', color=dlblue)
60 | self.bcalc.on_clicked(self.calc_logistic)
61 | else:
62 | self.bcalc = Button(axcalc, 'Run Linear Regression (click)', color=dlblue)
63 | self.bcalc.on_clicked(self.calc_linear)
64 | self.bthresh = CheckButtons(axthresh, ('Toggle 0.5 threshold (after regression)',))
65 | self.bthresh.on_clicked(self.thresh)
66 | self.resize_sq(self.bthresh)
67 |
68 | # @output.capture() # debug
69 | def add_data(self, event):
70 | #self.ax[0].text(0.1,0.1, f"in onclick")
71 | if event.inaxes == self.ax[0]:
72 | x_coord = event.xdata
73 | y_coord = event.ydata
74 |
75 | if y_coord > 0.5:
76 | self.ax[0].scatter(x_coord, 1, marker='x', s=80, c = 'red' )
77 | self.y = np.append(self.y,1)
78 | else:
79 | self.ax[0].scatter(x_coord, 0, marker='o', s=100, facecolors='none', edgecolors=dlblue,lw=3)
80 | self.y = np.append(self.y,0)
81 | self.x = np.append(self.x,x_coord)
82 | self.fig.canvas.draw()
83 |
84 | # @output.capture() # debug
85 | def calc_linear(self, event):
86 | if self.bthresh.get_status()[0]:
87 | self.remove_thresh()
88 | for it in [1,1,1,1,1,2,4,8,16,32,64,128,256]:
89 | self.w, self.b, _ = gradient_descent(self.x.reshape(-1,1), self.y.reshape(-1,1),
90 | self.w.reshape(-1,1), self.b, 0.01, it,
91 | logistic=False, lambda_=0, verbose=False)
92 | self.aline[0].remove()
93 | self.alegend.remove()
94 | y_hat = np.matmul(self.x.reshape(-1,1), self.w) + self.b
95 | self.aline = self.ax[0].plot(self.x, y_hat, color=dlblue,
96 | label=f"y = {np.squeeze(self.w):0.2f}x+({self.b:0.2f})")
97 | self.alegend = self.ax[0].legend(loc='lower right')
98 | time.sleep(0.3)
99 | self.fig.canvas.draw()
100 | if self.bthresh.get_status()[0]:
101 | self.draw_thresh()
102 | self.fig.canvas.draw()
103 |
104 | def calc_logistic(self, event):
105 | if self.bthresh.get_status()[0]:
106 | self.remove_thresh()
107 | for it in [1, 8,16,32,64,128,256,512,1024,2048,4096]:
108 | self.w, self.b, _ = gradient_descent(self.x.reshape(-1,1), self.y.reshape(-1,1),
109 | self.w.reshape(-1,1), self.b, 0.1, it,
110 | logistic=True, lambda_=0, verbose=False)
111 | self.aline[0].remove()
112 | self.bline[0].remove()
113 | self.alegend.remove()
114 | xlim = self.ax[0].get_xlim()
115 | x_hat = np.linspace(*xlim, 30)
116 | y_hat = sigmoid(np.matmul(x_hat.reshape(-1,1), self.w) + self.b)
117 | self.aline = self.ax[0].plot(x_hat, y_hat, color=dlblue,
118 | label="y = sigmoid(z)")
119 | f_wb = np.matmul(x_hat.reshape(-1,1), self.w) + self.b
120 | self.bline = self.ax[0].plot(x_hat, f_wb, color=dlorange, lw=1,
121 | label=f"z = {np.squeeze(self.w):0.2f}x+({self.b:0.2f})")
122 | self.alegend = self.ax[0].legend(loc='lower right')
123 | time.sleep(0.3)
124 | self.fig.canvas.draw()
125 | if self.bthresh.get_status()[0]:
126 | self.draw_thresh()
127 | self.fig.canvas.draw()
128 |
129 |
130 | def thresh(self, event):
131 | if self.bthresh.get_status()[0]:
132 | #plt.figtext(0,0, f"in thresh {self.bthresh.get_status()}")
133 | self.draw_thresh()
134 | else:
135 | #plt.figtext(0,0.3, f"in thresh {self.bthresh.get_status()}")
136 | self.remove_thresh()
137 |
138 | def draw_thresh(self):
139 | ws = np.squeeze(self.w)
140 | xp5 = -self.b/ws if self.logistic else (0.5 - self.b) / ws
141 | ylim = self.ax[0].get_ylim()
142 | xlim = self.ax[0].get_xlim()
143 | a = self.ax[0].fill_between([xlim[0], xp5], [ylim[1], ylim[1]], alpha=0.2, color=dlblue)
144 | b = self.ax[0].fill_between([xp5, xlim[1]], [ylim[1], ylim[1]], alpha=0.2, color=dldarkred)
145 | c = self.ax[0].annotate("Malignant", xy= [xp5,0.5], xycoords='data',
146 | xytext=[30,5],textcoords='offset points')
147 | d = FancyArrowPatch(
148 | posA=(xp5, 0.5), posB=(xp5+1.5, 0.5), color=dldarkred,
149 | arrowstyle='simple, head_width=5, head_length=10, tail_width=0.0',
150 | )
151 | self.ax[0].add_artist(d)
152 |
153 | e = self.ax[0].annotate("Benign", xy= [xp5,0.5], xycoords='data',
154 | xytext=[-70,5],textcoords='offset points', ha='left')
155 | f = FancyArrowPatch(
156 | posA=(xp5, 0.5), posB=(xp5-1.5, 0.5), color=dlblue,
157 | arrowstyle='simple, head_width=5, head_length=10, tail_width=0.0',
158 | )
159 | self.ax[0].add_artist(f)
160 | self.tlist = [a,b,c,d,e,f]
161 |
162 | self.fig.canvas.draw()
163 |
164 | def remove_thresh(self):
165 | #plt.figtext(0.5,0.0, f"rem thresh {self.bthresh.get_status()}")
166 | for artist in self.tlist:
167 | artist.remove()
168 | self.fig.canvas.draw()
169 |
170 | def resize_sq(self, bcid):
171 | """ resizes the check box """
172 | #future reference
173 | #print(f"width : {bcid.rectangles[0].get_width()}")
174 | #print(f"height : {bcid.rectangles[0].get_height()}")
175 | #print(f"xy : {bcid.rectangles[0].get_xy()}")
176 | #print(f"bb : {bcid.rectangles[0].get_bbox()}")
177 | #print(f"points : {bcid.rectangles[0].get_bbox().get_points()}") #[[xmin,ymin],[xmax,ymax]]
178 |
179 | h = bcid.rectangles[0].get_height()
180 | bcid.rectangles[0].set_height(3*h)
181 |
182 | ymax = bcid.rectangles[0].get_bbox().y1
183 | ymin = bcid.rectangles[0].get_bbox().y0
184 |
185 | bcid.lines[0][0].set_ydata([ymax,ymin])
186 | bcid.lines[0][1].set_ydata([ymin,ymax])
187 |
--------------------------------------------------------------------------------
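For reference, draw_thresh() above places the 0.5 threshold at x = -b/w for the logistic model (the point where z = w*x + b = 0, so sigmoid(z) = 0.5) and at x = (0.5 - b)/w for the plain linear model (where w*x + b = 0.5). A tiny worked check with arbitrary placeholder values for w and b:

# placeholder parameters, for illustration only
w, b = 0.8, -2.0
x_logistic = -b / w            # sigmoid(w*x + b) crosses 0.5 here
x_linear = (0.5 - b) / w       # w*x + b crosses 0.5 here
print(f"logistic 0.5-threshold at x = {x_logistic:.2f}, linear at x = {x_linear:.2f}")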
/week3/Optional Labs/plt_quad_logistic.py:
--------------------------------------------------------------------------------
1 | """
2 | plt_quad_logistic.py
3 | interactive plot and supporting routines showing logistic regression
4 | """
5 |
6 | import time
7 | from matplotlib import cm
8 | import matplotlib.colors as colors
9 | from matplotlib.gridspec import GridSpec
10 | from matplotlib.widgets import Button
11 | from matplotlib.patches import FancyArrowPatch
12 | from ipywidgets import Output
13 | from lab_utils_common import np, plt, dlc, dlcolors, sigmoid, compute_cost_matrix, gradient_descent
14 |
15 | # for debug
16 | #output = Output() # sends hidden error messages to display when using widgets
17 | #display(output)
18 |
19 | class plt_quad_logistic:
20 | ''' plots a quad plot showing logistic regression '''
21 | # pylint: disable=too-many-instance-attributes
22 | # pylint: disable=too-many-locals
23 | # pylint: disable=missing-function-docstring
24 | # pylint: disable=attribute-defined-outside-init
25 | def __init__(self, x_train,y_train, w_range, b_range):
26 | # setup figure
27 | fig = plt.figure( figsize=(10,6))
28 | fig.canvas.toolbar_visible = False
29 | fig.canvas.header_visible = False
30 | fig.canvas.footer_visible = False
31 | fig.set_facecolor('#ffffff') #white
32 | gs = GridSpec(2, 2, figure=fig)
33 | ax0 = fig.add_subplot(gs[0, 0])
34 | ax1 = fig.add_subplot(gs[0, 1])
35 | ax2 = fig.add_subplot(gs[1, 0], projection='3d')
36 | ax3 = fig.add_subplot(gs[1,1])
37 | pos = ax3.get_position().get_points() ##[[lb_x,lb_y], [rt_x, rt_y]]
38 | h = 0.05
39 | width = 0.2
40 | axcalc = plt.axes([pos[1,0]-width, pos[1,1]-h, width, h]) #lx,by,w,h
41 | ax = np.array([ax0, ax1, ax2, ax3, axcalc])
42 | self.fig = fig
43 | self.ax = ax
44 | self.x_train = x_train
45 | self.y_train = y_train
46 |
47 | self.w = 0. #initial point, non-array
48 | self.b = 0.
49 |
50 | # initialize subplots
51 | self.dplot = data_plot(ax[0], x_train, y_train, self.w, self.b)
52 | self.con_plot = contour_and_surface_plot(ax[1], ax[2], x_train, y_train, w_range, b_range, self.w, self.b)
53 | self.cplot = cost_plot(ax[3])
54 |
55 | # setup events
56 | self.cid = fig.canvas.mpl_connect('button_press_event', self.click_contour)
57 | self.bcalc = Button(axcalc, 'Run Gradient Descent \nfrom current w,b (click)', color=dlc["dlorange"])
58 | self.bcalc.on_clicked(self.calc_logistic)
59 |
60 | # @output.capture() # debug
61 | def click_contour(self, event):
62 | ''' called when click in contour '''
63 | if event.inaxes == self.ax[1]: #contour plot
64 | self.w = event.xdata
65 | self.b = event.ydata
66 |
67 | self.cplot.re_init()
68 | self.dplot.update(self.w, self.b)
69 | self.con_plot.update_contour_wb_lines(self.w, self.b)
70 | self.con_plot.path.re_init(self.w, self.b)
71 |
72 | self.fig.canvas.draw()
73 |
74 | # @output.capture() # debug
75 | def calc_logistic(self, event):
76 | ''' called on run gradient event '''
77 | for it in [1, 8,16,32,64,128,256,512,1024,2048,4096]:
78 | w, self.b, J_hist = gradient_descent(self.x_train.reshape(-1,1), self.y_train.reshape(-1,1),
79 | np.array(self.w).reshape(-1,1), self.b, 0.1, it,
80 | logistic=True, lambda_=0, verbose=False)
81 | self.w = w[0,0]
82 | self.dplot.update(self.w, self.b)
83 | self.con_plot.update_contour_wb_lines(self.w, self.b)
84 | self.con_plot.path.add_path_item(self.w,self.b)
85 | self.cplot.add_cost(J_hist)
86 |
87 | time.sleep(0.3)
88 | self.fig.canvas.draw()
89 |
90 |
91 | class data_plot:
92 | ''' handles data plot '''
93 | # pylint: disable=missing-function-docstring
94 | # pylint: disable=attribute-defined-outside-init
95 | def __init__(self, ax, x_train, y_train, w, b):
96 | self.ax = ax
97 | self.x_train = x_train
98 | self.y_train = y_train
99 | self.m = x_train.shape[0]
100 | self.w = w
101 | self.b = b
102 |
103 | self.plt_tumor_data()
104 | self.draw_logistic_lines(firsttime=True)
105 | self.mk_cost_lines(firsttime=True)
106 |
107 | self.ax.autoscale(enable=False) # leave plot scales the same after initial setup
108 |
109 | def plt_tumor_data(self):
110 | x = self.x_train
111 | y = self.y_train
112 | pos = y == 1
113 | neg = y == 0
114 | self.ax.scatter(x[pos], y[pos], marker='x', s=80, c = 'red', label="malignant")
115 | self.ax.scatter(x[neg], y[neg], marker='o', s=100, label="benign", facecolors='none',
116 | edgecolors=dlc["dlblue"],lw=3)
117 | self.ax.set_ylim(-0.175,1.1)
118 | self.ax.set_ylabel('y')
119 | self.ax.set_xlabel('Tumor Size')
120 | self.ax.set_title("Logistic Regression on Categorical Data")
121 |
122 | def update(self, w, b):
123 | self.w = w
124 | self.b = b
125 | self.draw_logistic_lines()
126 | self.mk_cost_lines()
127 |
128 | def draw_logistic_lines(self, firsttime=False):
129 | if not firsttime:
130 | self.aline[0].remove()
131 | self.bline[0].remove()
132 | self.alegend.remove()
133 |
134 | xlim = self.ax.get_xlim()
135 | x_hat = np.linspace(*xlim, 30)
136 | y_hat = sigmoid(np.dot(x_hat.reshape(-1,1), self.w) + self.b)
137 | self.aline = self.ax.plot(x_hat, y_hat, color=dlc["dlblue"],
138 | label="y = sigmoid(z)")
139 | f_wb = np.dot(x_hat.reshape(-1,1), self.w) + self.b
140 | self.bline = self.ax.plot(x_hat, f_wb, color=dlc["dlorange"], lw=1,
141 | label=f"z = {np.squeeze(self.w):0.2f}x+({self.b:0.2f})")
142 | self.alegend = self.ax.legend(loc='upper left')
143 |
144 | def mk_cost_lines(self, firsttime=False):
145 | ''' makes vertical cost lines'''
146 | if not firsttime:
147 | for artist in self.cost_items:
148 | artist.remove()
149 | self.cost_items = []
150 | cstr = f"cost = (1/{self.m})*("
151 | ctot = 0
152 | label = 'cost for point'
153 | addedbreak = False
154 | for p in zip(self.x_train,self.y_train):
155 | f_wb_p = sigmoid(self.w*p[0]+self.b)
156 | c_p = compute_cost_matrix(p[0].reshape(-1,1), p[1],np.array(self.w), self.b, logistic=True, lambda_=0, safe=True)
157 | c_p_txt = c_p
158 | a = self.ax.vlines(p[0], p[1],f_wb_p, lw=3, color=dlc["dlpurple"], ls='dotted', label=label)
159 | label='' #just one
160 | cxy = [p[0], p[1] + (f_wb_p-p[1])/2]
161 | b = self.ax.annotate(f'{c_p_txt:0.1f}', xy=cxy, xycoords='data',color=dlc["dlpurple"],
162 | xytext=(5, 0), textcoords='offset points')
163 | cstr += f"{c_p_txt:0.1f} +"
164 | if len(cstr) > 38 and addedbreak is False:
165 | cstr += "\n"
166 | addedbreak = True
167 | ctot += c_p
168 | self.cost_items.extend((a,b))
169 | ctot = ctot/(len(self.x_train))
170 | cstr = cstr[:-1] + f") = {ctot:0.2f}"
171 | ## todo.. figure out how to get this textbox to extend to the width of the subplot
172 | c = self.ax.text(0.05,0.02,cstr, transform=self.ax.transAxes, color=dlc["dlpurple"])
173 | self.cost_items.append(c)
174 |
175 |
176 | class contour_and_surface_plot:
177 | ''' plots combined in class as they have similar operations '''
178 | # pylint: disable=missing-function-docstring
179 | # pylint: disable=attribute-defined-outside-init
180 | def __init__(self, axc, axs, x_train, y_train, w_range, b_range, w, b):
181 |
182 | self.x_train = x_train
183 | self.y_train = y_train
184 | self.axc = axc
185 | self.axs = axs
186 |
187 | #setup useful ranges and common linspaces
188 | b_space = np.linspace(*b_range, 100)
189 | w_space = np.linspace(*w_range, 100)
190 |
191 | # get cost for w,b ranges for contour and 3D
192 | tmp_b,tmp_w = np.meshgrid(b_space,w_space)
193 | z = np.zeros_like(tmp_b)
194 | for i in range(tmp_w.shape[0]):
195 | for j in range(tmp_w.shape[1]):
196 | z[i,j] = compute_cost_matrix(x_train.reshape(-1,1), y_train, tmp_w[i,j], tmp_b[i,j],
197 | logistic=True, lambda_=0, safe=True)
198 | if z[i,j] == 0:
199 | z[i,j] = 1e-9
200 |
201 | ### plot contour ###
202 | CS = axc.contour(tmp_w, tmp_b, np.log(z),levels=12, linewidths=2, alpha=0.7,colors=dlcolors)
203 | axc.set_title('log(Cost(w,b))')
204 | axc.set_xlabel('w', fontsize=10)
205 | axc.set_ylabel('b', fontsize=10)
206 | axc.set_xlim(w_range)
207 | axc.set_ylim(b_range)
208 | self.update_contour_wb_lines(w, b, firsttime=True)
209 | axc.text(0.7,0.05,"Click to choose w,b", bbox=dict(facecolor='white', ec = 'black'), fontsize = 10,
210 | transform=axc.transAxes, verticalalignment = 'center', horizontalalignment= 'center')
211 |
212 | #Surface plot of the cost function J(w,b)
213 | axs.plot_surface(tmp_w, tmp_b, z, cmap = cm.jet, alpha=0.3, antialiased=True)
214 | axs.plot_wireframe(tmp_w, tmp_b, z, color='k', alpha=0.1)
215 | axs.set_xlabel("$w$")
216 | axs.set_ylabel("$b$")
217 | axs.zaxis.set_rotate_label(False)
218 | axs.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
219 | axs.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
220 | axs.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
221 | axs.set_zlabel("J(w, b)", rotation=90)
222 | axs.view_init(30, -120)
223 |
224 | axs.autoscale(enable=False)
225 | axc.autoscale(enable=False)
226 |
227 | self.path = path(self.w,self.b, self.axc)  # initialize an empty path, avoids existence check
228 |
229 | def update_contour_wb_lines(self, w, b, firsttime=False):
230 | self.w = w
231 | self.b = b
232 | cst = compute_cost_matrix(self.x_train.reshape(-1,1), self.y_train, np.array(self.w), self.b,
233 | logistic=True, lambda_=0, safe=True)
234 |
235 | # remove lines and re-add on contour plot and 3d plot
236 | if not firsttime:
237 | for artist in self.dyn_items:
238 | artist.remove()
239 | a = self.axc.scatter(self.w, self.b, s=100, color=dlc["dlblue"], zorder= 10, label="cost with \ncurrent w,b")
240 | b = self.axc.hlines(self.b, self.axc.get_xlim()[0], self.w, lw=4, color=dlc["dlpurple"], ls='dotted')
241 | c = self.axc.vlines(self.w, self.axc.get_ylim()[0] ,self.b, lw=4, color=dlc["dlpurple"], ls='dotted')
242 | d = self.axc.annotate(f"Cost: {cst:0.2f}", xy= (self.w, self.b), xytext = (4,4), textcoords = 'offset points',
243 | bbox=dict(facecolor='white'), size = 10)
244 | #Add point in 3D surface plot
245 | e = self.axs.scatter3D(self.w, self.b, cst , marker='X', s=100)
246 |
247 | self.dyn_items = [a,b,c,d,e]
248 |
249 |
250 | class cost_plot:
251 | """ manages cost plot for plt_quad_logistic """
252 | # pylint: disable=missing-function-docstring
253 | # pylint: disable=attribute-defined-outside-init
254 | def __init__(self,ax):
255 | self.ax = ax
256 | self.ax.set_ylabel("log(cost)")
257 | self.ax.set_xlabel("iteration")
258 | self.costs = []
259 | self.cline = self.ax.plot(0,0, color=dlc["dlblue"])
260 |
261 | def re_init(self):
262 | self.ax.clear()
263 | self.__init__(self.ax)
264 |
265 | def add_cost(self,J_hist):
266 | self.costs.extend(J_hist)
267 | self.cline[0].remove()
268 | self.cline = self.ax.plot(self.costs)
269 |
270 | class path:
271 | ''' tracks paths during gradient descent on contour plot '''
272 | # pylint: disable=missing-function-docstring
273 | # pylint: disable=attribute-defined-outside-init
274 | def __init__(self, w, b, ax):
275 | ''' w, b at start of path '''
276 | self.path_items = []
277 | self.w = w
278 | self.b = b
279 | self.ax = ax
280 |
281 | def re_init(self, w, b):
282 | for artist in self.path_items:
283 | artist.remove()
284 | self.path_items = []
285 | self.w = w
286 | self.b = b
287 |
288 | def add_path_item(self, w, b):
289 | a = FancyArrowPatch(
290 | posA=(self.w, self.b), posB=(w, b), color=dlc["dlblue"],
291 | arrowstyle='simple, head_width=5, head_length=10, tail_width=0.0',
292 | )
293 | self.ax.add_artist(a)
294 | self.path_items.append(a)
295 | self.w = w
296 | self.b = b
297 |
298 | #-----------
299 | # related to the logistic gradient descent lab
300 | #----------
301 |
302 | def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
303 | """ truncates color map """
304 | new_cmap = colors.LinearSegmentedColormap.from_list(
305 | 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
306 | cmap(np.linspace(minval, maxval, n)))
307 | return new_cmap
308 |
309 | def plt_prob(ax, w_out,b_out):
310 | """ plots a decision boundary but include shading to indicate the probability """
311 | #setup useful ranges and common linspaces
312 | x0_space = np.linspace(0, 4 , 100)
313 | x1_space = np.linspace(0, 4 , 100)
314 |
315 | # get probability for x0,x1 ranges
316 | tmp_x0,tmp_x1 = np.meshgrid(x0_space,x1_space)
317 | z = np.zeros_like(tmp_x0)
318 | for i in range(tmp_x0.shape[0]):
319 | for j in range(tmp_x1.shape[1]):
320 | z[i,j] = sigmoid(np.dot(w_out, np.array([tmp_x0[i,j],tmp_x1[i,j]])) + b_out)
321 |
322 |
323 | cmap = plt.get_cmap('Blues')
324 | new_cmap = truncate_colormap(cmap, 0.0, 0.5)
325 | pcm = ax.pcolormesh(tmp_x0, tmp_x1, z,
326 | norm=cm.colors.Normalize(vmin=0, vmax=1),
327 | cmap=new_cmap, shading='nearest', alpha = 0.9)
328 | ax.figure.colorbar(pcm, ax=ax)
329 |
--------------------------------------------------------------------------------
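plt_prob() above shades the feature plane by the model's predicted probability. Here is a minimal, stand-alone sketch of the same grid computation, vectorized instead of the double loop; the w_out and b_out values below are placeholders, not outputs of any lab.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500)))

w_out = np.array([1.0, 1.0])   # hypothetical learned weights
b_out = -3.0                   # hypothetical learned bias

x0_space = np.linspace(0, 4, 100)
x1_space = np.linspace(0, 4, 100)
tmp_x0, tmp_x1 = np.meshgrid(x0_space, x1_space)

# probability at every grid point; equivalent to the i,j loop in plt_prob()
z = sigmoid(w_out[0] * tmp_x0 + w_out[1] * tmp_x1 + b_out)

# the z = 0.5 contour is the decision boundary w*x + b = 0
print(z.shape, z.min(), z.max())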
/week3/Practice quiz_ Cost function for logistic regression/Readme.md:
--------------------------------------------------------------------------------
1 | 
2 |
--------------------------------------------------------------------------------
/week3/Practice quiz_ Cost function for logistic regression/ss1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Practice quiz_ Cost function for logistic regression/ss1.png
--------------------------------------------------------------------------------
/week3/Practice quiz_ Gradient descent for logistic regression/Readme.md:
--------------------------------------------------------------------------------
1 | 
2 |
--------------------------------------------------------------------------------
/week3/Practice quiz_ Gradient descent for logistic regression/ss1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yasithWimukthi/Supervised-Machine-Learning-Regression-and-Classification-Coursera-Lab-Answers/865e047aac926b004d2defeef90673094ad48382/week3/Practice quiz_ Gradient descent for logistic regression/ss1.png
--------------------------------------------------------------------------------
/week3/Readme.md:
--------------------------------------------------------------------------------
1 | ### Week 3 Solutions
2 |
3 |
4 |
5 |
6 | - [Practice quiz: Cost function for logistic regression](/week3/Practice%20quiz_%20Cost%20function%20for%20logistic%20regression/)
7 | - [Practice quiz: Gradient descent for logistic regression](/week3/Practice%20quiz_%20Gradient%20descent%20for%20logistic%20regression/)
8 | - [Optional Labs](/week3/Optional%20Labs/)
9 | - [Classification](/week3/Optional%20Labs/C1_W3_Lab01_Classification_Soln.ipynb)
10 | - [Sigmoid Function](/week3/Optional%20Labs/C1_W3_Lab02_Sigmoid_function_Soln.ipynb)
11 | - [Decision Boundary](/week3/Optional%20Labs/C1_W3_Lab03_Decision_Boundary_Soln.ipynb)
12 | - [Logistic Loss](/week3/Optional%20Labs/C1_W3_Lab04_LogisticLoss_Soln.ipynb)
13 | - [Cost Function](/week3/Optional%20Labs/C1_W3_Lab05_Cost_Function_Soln.ipynb)
14 | - [Gradient Descent](/week3/Optional%20Labs/C1_W3_Lab06_Gradient_Descent_Soln.ipynb)
15 | - [Scikit Learn - Logistic Regression](/week3/Optional%20Labs/C1_W3_Lab07_Scikit_Learn_Soln.ipynb)
16 | - [Overfitting](/week3/Optional%20Labs/C1_W3_Lab08_Overfitting_Soln.ipynb)
17 | - [Regularization](/week3/Optional%20Labs/C1_W3_Lab09_Regularization_Soln.ipynb)
18 | - [Programming Assignment](/week3/C1W3A1/)
19 | - [Logistic Regression](/week3/C1W3A1/C1_W3_Logistic_Regression.ipynb)
--------------------------------------------------------------------------------