├── .gitignore
├── LICENSE
├── README.md
├── Week1
│   ├── C2_W1_Assignment.ipynb
│   ├── C2_W1_Lab_1_differentiation_in_python.ipynb
│   ├── PersonalNotes_Mathematics for ML C2W1_230226_142113943.pdf
│   ├── data
│   │   └── prices.csv
│   └── w1_unittest.py
├── Week2
│   ├── C2_W2_Assignment.ipynb
│   ├── C2_W2_Lab_1_Optimization_Using_Gradient_Descent_in_One_Variable.ipynb
│   ├── C2_W2_Lab_2_Optimization_Using_Gradient_Descent_in_Two_Variables.ipynb
│   ├── PersonalNotes_Mathematics for ML C2W2_230305_191309.pdf
│   ├── data
│   │   └── tvmarketing.csv
│   ├── w2_tools.py
│   └── w2_unittest.py
└── Week3
    ├── C2_W3_Assignment.ipynb
    ├── C2_W3_Lab_1_Regression_with_Perceptron.ipynb
    ├── C2_W3_Lab_2_Classification_with_Perceptron.ipynb
    ├── C2_W3_Lab_3_Optimization_Using_Newtons_Method.ipynb
    ├── data
    │   ├── house_prices_train.csv
    │   └── tvmarketing.csv
    ├── images
    │   ├── nn_model_2_layers.png
    │   ├── nn_model_classification_1_layer.png
    │   ├── nn_model_linear_regression_multiple.png
    │   └── nn_model_linear_regression_simple.png
    └── w3_unittest.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Calculus-for-Machine-Learning-and-Data-Science
2 | My personal notes and solutions from working through Course 2 of the Mathematics for Machine Learning and Data Science specialization on Coursera by DeepLearning.AI.
--------------------------------------------------------------------------------
/Week1/PersonalNotes_Mathematics for ML C2W1_230226_142113943.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week1/PersonalNotes_Mathematics for ML C2W1_230226_142113943.pdf
--------------------------------------------------------------------------------
/Week1/data/prices.csv:
--------------------------------------------------------------------------------
1 | date,price_supplier_a_dollars_per_item,price_supplier_b_dollars_per_item
2 | 1/02/2016,104,76
3 | 1/03/2016,108,76
4 | 1/04/2016,101,84
5 | 1/05/2016,104,79
6 | 1/06/2016,102,81
7 | 1/07/2016,105,84
8 | 1/08/2016,114,90
9 | 1/09/2016,102,93
10 | 1/10/2016,105,93
11 | 1/11/2016,101,99
12 | 1/12/2016,109,98
13 | 1/01/2017,103,96
14 | 1/02/2017,93,94
15 | 1/03/2017,98,104
16 | 1/04/2017,92,101
17 | 1/05/2017,97,102
18 | 1/06/2017,96,104
19 | 1/07/2017,94,106
20 | 1/08/2017,97,105
21 | 1/09/2017,93,103
22 | 1/10/2017,99,106
23 | 1/11/2017,93,104
24 | 1/12/2017,98,113
25 | 1/01/2018,94,115
26 | 1/02/2018,93,114
27 | 1/03/2018,92,124
28 | 1/04/2018,96,119
29 | 1/05/2018,98,115
30 | 1/06/2018,98,112
31 | 1/07/2018,93,111
32 | 1/08/2018,97,106
33 | 1/09/2018,102,107
34 | 1/10/2018,103,108
35 | 1/11/2018,100,108
36 | 1/12/2018,100,102
37 | 1/01/2019,104,104
38 | 1/02/2019,100,101
39 | 1/03/2019,103,101
40 | 1/04/2019,104,100
41 | 1/05/2019,101,103
42 | 1/06/2019,102,106
43 | 1/07/2019,100,100
44 | 1/08/2019,102,97
45 | 1/09/2019,108,98
46 | 1/10/2019,107,90
47 | 1/11/2019,107,92
48 | 1/12/2019,103,92
49 | 1/01/2020,109,99
50 | 1/02/2020,108,94
51 | 1/03/2020,108,91
52 |
--------------------------------------------------------------------------------
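
The Week 1 assignment reads this file with pandas and converts the two price columns into jax.numpy float arrays, which is exactly what test_load_and_convert_data in w1_unittest.py below verifies. A minimal sketch of that loading step, assuming the variable names used by the tests (the assignment notebook itself is not reproduced in this dump):

    import pandas as pd
    import jax.numpy as np  # the tests alias jax.numpy as np

    # Column names come from the CSV header above.
    df = pd.read_csv("data/prices.csv")
    prices_A = np.array(df["price_supplier_a_dollars_per_item"]).astype("float32")
    prices_B = np.array(df["price_supplier_b_dollars_per_item"]).astype("float32")
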
/Week1/w1_unittest.py:
--------------------------------------------------------------------------------
1 | # +
2 | import jax.numpy as np
3 | from math import isclose
4 |
5 | # Variables for the default_check test cases.
6 | prices_A = np.array([
7 | 104., 108., 101., 104., 102., 105., 114., 102., 105., 101., 109., 103., 93., 98., 92., 97., 96.,
8 | 94., 97., 93., 99., 93., 98., 94., 93., 92., 96., 98., 98., 93., 97., 102., 103., 100., 100., 104.,
9 | 100., 103., 104., 101., 102., 100., 102., 108., 107., 107., 103., 109., 108., 108.,
10 | ])
11 | prices_B = np.array([
12 | 76., 76., 84., 79., 81., 84., 90., 93., 93., 99., 98., 96., 94., 104., 101., 102., 104., 106., 105.,
13 | 103., 106., 104., 113., 115., 114., 124., 119., 115., 112., 111., 106., 107., 108., 108., 102., 104.,
14 | 101., 101., 100., 103., 106., 100., 97., 98., 90., 92., 92., 99., 94., 91.
15 | ])
16 |
17 |
18 | # -
19 |
20 | def test_load_and_convert_data(target_A, target_B):
21 | successful_cases = 0
22 | failed_cases = []
23 |
24 | test_cases = [
25 | {
26 | "name": "default_check",
27 | "expected": {"prices_A": prices_A,
28 | "prices_B": prices_B,},
29 | },
30 | ]
31 |
32 | for test_case in test_cases:
33 |
34 | try:
35 | assert type(target_A) == type(test_case["expected"]["prices_A"])
36 | successful_cases += 1
37 | except:
38 | failed_cases.append(
39 | {
40 | "name": test_case["name"],
41 | "expected": type(test_case["expected"]["prices_A"]),
42 | "got": type(target_A),
43 | }
44 | )
45 | print(
46 | f"prices_A has incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
47 | )
48 | break
49 |
50 | try:
51 | assert type(target_B) == type(test_case["expected"]["prices_B"])
52 | successful_cases += 1
53 | except:
54 | failed_cases.append(
55 | {
56 | "name": test_case["name"],
57 | "expected": type(test_case["expected"]["prices_B"]),
58 | "got": type(target_B),
59 | }
60 | )
61 | print(
62 | f"prices_B has incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
63 | )
64 | break
65 |
66 | try:
67 | # Check only one element - no need to check all array.
68 | assert type(target_A[0].item()) == type(test_case["expected"]["prices_A"][0].item())
69 | successful_cases += 1
70 | except:
71 | failed_cases.append(
72 | {
73 | "name": test_case["name"],
74 | "expected": type(test_case["expected"]["prices_A"][0].item()),
75 | "got": type(target_A[0].item()),
76 | }
77 | )
78 | print(
79 | f"Elements of prices_A array have incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
80 | )
81 |
82 | try:
83 | # Check only one element - no need to check all array.
84 | assert type(target_B[0].item()) == type(test_case["expected"]["prices_B"][0].item())
85 | successful_cases += 1
86 | except:
87 | failed_cases.append(
88 | {
89 | "name": test_case["name"],
90 | "expected": type(test_case["expected"]["prices_B"][0].item()),
91 | "got": type(target_B[0].item()),
92 | }
93 | )
94 | print(
95 | f"Elements of prices_B array have incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
96 | )
97 |
98 | try:
99 | assert target_A.shape == test_case["expected"]["prices_A"].shape
100 | successful_cases += 1
101 | except:
102 | failed_cases.append(
103 | {
104 | "name": test_case["name"],
105 | "expected": test_case["expected"]["prices_A"].shape,
106 | "got": target_A.shape,
107 | }
108 | )
109 | print(
110 | f"Wrong shape of prices_A array. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
111 | )
112 | break
113 |
114 | try:
115 | assert target_B.shape == test_case["expected"]["prices_B"].shape
116 | successful_cases += 1
117 | except:
118 | failed_cases.append(
119 | {
120 | "name": test_case["name"],
121 | "expected": test_case["expected"]["prices_B"].shape,
122 | "got": target_B.shape,
123 | }
124 | )
125 | print(
126 | f"Wrong shape of prices_B array. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
127 | )
128 | break
129 |
130 | try:
131 | assert np.allclose(target_A, test_case["expected"]["prices_A"])
132 | successful_cases += 1
133 | except:
134 | failed_cases.append(
135 | {
136 | "name": test_case["name"],
137 | "expected": test_case["expected"]["prices_A"],
138 | "got": target_A,
139 | }
140 | )
141 | print(
142 | f"Wrong array prices_A. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
143 | )
144 |
145 | try:
146 | assert np.allclose(target_B, test_case["expected"]["prices_B"])
147 | successful_cases += 1
148 | except:
149 | failed_cases.append(
150 | {
151 | "name": test_case["name"],
152 | "expected": test_case["expected"]["prices_B"],
153 | "got": target_B,
154 | }
155 | )
156 | print(
157 | f"Wrong array prices_B. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
158 | )
159 |
160 | if len(failed_cases) == 0:
161 | print("\033[92m All tests passed")
162 | else:
163 | print("\033[92m", successful_cases, " Tests passed")
164 | print("\033[91m", len(failed_cases), " Tests failed")
165 |
166 |
167 | def test_f_of_omega(target_f_of_omega):
168 | successful_cases = 0
169 | failed_cases = []
170 |
171 | test_cases = [
172 | {
173 | "name": "default_check",
174 | "input": {"omega": 0,},
175 | "expected": {"f_of_omega": prices_B,},
176 | },
177 | {
178 | "name": "extra_check_1",
179 | "input": {"omega": 0.2,},
180 | "expected": {"f_of_omega": prices_A * 0.2 + prices_B * (1-0.2),},
181 | },
182 | {
183 | "name": "extra_check_2",
184 | "input": {"omega": 0.8,},
185 | "expected": {"f_of_omega": prices_A * 0.8 + prices_B * (1-0.8),},
186 | },
187 | {
188 | "name": "extra_check_3",
189 | "input": {"omega": 1,},
190 | "expected": {"f_of_omega": prices_A,},
191 | },
192 | ]
193 |
194 | for test_case in test_cases:
195 | result = target_f_of_omega(test_case["input"]["omega"])
196 |
197 | try:
198 | assert result.shape == test_case["expected"]["f_of_omega"].shape
199 | successful_cases += 1
200 | except:
201 | failed_cases.append(
202 | {
203 | "name": test_case["name"],
204 | "expected": test_case["expected"]["f_of_omega"].shape,
205 | "got": result.shape,
206 | }
207 | )
208 | print(
209 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of f_of_omega output for omega = {test_case['input']['omega']}. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
210 | )
211 |
212 | try:
213 | assert np.allclose(result, test_case["expected"]["f_of_omega"])
214 | successful_cases += 1
215 |
216 | except:
217 | failed_cases.append(
218 | {
219 | "name": test_case["name"],
220 | "expected": test_case["expected"]["f_of_omega"],
221 | "got": result,
222 | }
223 | )
224 | print(
225 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of f_of_omega for omega = {test_case['input']['omega']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
226 | )
227 |
228 | if len(failed_cases) == 0:
229 | print("\033[92m All tests passed")
230 | else:
231 | print("\033[92m", successful_cases, " Tests passed")
232 | print("\033[91m", len(failed_cases), " Tests failed")
233 |
234 |
235 | def test_L_of_omega_array(target_L_of_omega_array):
236 | successful_cases = 0
237 | failed_cases = []
238 |
239 | # Not all of the values of the output array will be checked - only some of them.
240 | # In graders all of the output array gets checked.
241 | test_cases = [
242 | {
243 | "name": "default_check",
244 | "input": {"omega_array": np.linspace(0, 1, 1001, endpoint=True),},
245 | "expected": {"shape": (1001,),
246 | "L_of_omega_array": [
247 | {"i": 0, "L_of_omega": 110.72,},
248 | {"i": 1000, "L_of_omega": 27.48,},
249 | {"i": 400, "L_of_omega": 28.051199,},
250 | ],}
251 | },
252 | {
253 | "name": "extra_check",
254 | "input": {"omega_array": np.linspace(0, 1, 11, endpoint=True),},
255 | "expected": {"shape": (11,),
256 | "L_of_omega_array": [
257 | {"i": 0, "L_of_omega": 110.72,},
258 | {"i": 11, "L_of_omega": 27.48,},
259 | {"i": 5, "L_of_omega": 17.67,},
260 | ],}
261 | },
262 | ]
263 |
264 | for test_case in test_cases:
265 | result = target_L_of_omega_array(test_case["input"]["omega_array"])
266 |
267 | try:
268 | assert result.shape == test_case["expected"]["shape"]
269 | successful_cases += 1
270 | except:
271 | failed_cases.append(
272 | {
273 | "name": test_case["name"],
274 | "expected": test_case["expected"]["shape"],
275 | "got": result.shape,
276 | }
277 | )
278 | print(
279 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of L_of_omega_array output. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
280 | )
281 |
282 | for test_case_i in test_case["expected"]["L_of_omega_array"]:
283 | i = test_case_i["i"]
284 |
285 | try:
286 | assert isclose(result[i], test_case_i["L_of_omega"], abs_tol=1e-5)
287 | successful_cases += 1
288 |
289 | except:
290 | failed_cases.append(
291 | {
292 | "name": test_case["name"],
293 | "expected": test_case_i["L_of_omega"],
294 | "got": result[i],
295 | }
296 | )
297 | print(
298 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of L_of_omega_array for omega_array = \n{test_case['input']['omega_array']}\nTest for index i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
299 | )
300 |
301 | if len(failed_cases) == 0:
302 | print("\033[92m All tests passed")
303 | else:
304 | print("\033[92m", successful_cases, " Tests passed")
305 | print("\033[91m", len(failed_cases), " Tests failed")
306 |
307 |
308 | def test_dLdOmega_of_omega_array(target_dLdOmega_of_omega_array):
309 | successful_cases = 0
310 | failed_cases = []
311 |
312 | # Not all of the values of the output array will be checked - only some of them.
313 | # In graders all of the output array gets checked.
314 | test_cases = [
315 | {
316 | "name": "default_check",
317 | "input": {"omega_array": np.linspace(0, 1, 1001, endpoint=True),},
318 | "expected": {"shape": (1001,),
319 | "dLdOmega_of_omega_array": [
320 | {"i": 0, "dLdOmega_of_omega": -288.96,},
321 | {"i": 1000, "dLdOmega_of_omega": 122.47999,},
322 | {"i": 400, "dLdOmega_of_omega": -124.38398,},
323 | ],}
324 | },
325 | {
326 | "name": "extra_check",
327 | "input": {"omega_array": np.linspace(0, 1, 11, endpoint=True),},
328 | "expected": {"shape": (11,),
329 | "dLdOmega_of_omega_array": [
330 | {"i": 0, "dLdOmega_of_omega": -288.96,},
331 | {"i": 11, "dLdOmega_of_omega": 122.47999,},
332 | {"i": 5, "dLdOmega_of_omega": -83.240036,},
333 | ],}
334 | },
335 | ]
336 |
337 | for test_case in test_cases:
338 | result = target_dLdOmega_of_omega_array(test_case["input"]["omega_array"])
339 |
340 | try:
341 | assert result.shape == test_case["expected"]["shape"]
342 | successful_cases += 1
343 | except:
344 | failed_cases.append(
345 | {
346 | "name": test_case["name"],
347 | "expected": test_case["expected"]["shape"],
348 | "got": result.shape,
349 | }
350 | )
351 | print(
352 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of dLdOmega_of_omega_array output. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
353 | )
354 |
355 | for test_case_i in test_case["expected"]["dLdOmega_of_omega_array"]:
356 | i = test_case_i["i"]
357 |
358 | try:
359 | assert isclose(result[i], test_case_i["dLdOmega_of_omega"], abs_tol=1e-5)
360 | successful_cases += 1
361 |
362 | except:
363 | failed_cases.append(
364 | {
365 | "name": test_case["name"],
366 | "expected": test_case_i["dLdOmega_of_omega"],
367 | "got": result[i],
368 | }
369 | )
370 | print(
371 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of dLdOmega_of_omega_array for omega_array = \n{test_case['input']['omega_array']}\nTest for index i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
372 | )
373 |
374 | if len(failed_cases) == 0:
375 | print("\033[92m All tests passed")
376 | else:
377 | print("\033[92m", successful_cases, " Tests passed")
378 | print("\033[91m", len(failed_cases), " Tests failed")
379 |
--------------------------------------------------------------------------------
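
The assignment notebook these tests target (C2_W1_Assignment.ipynb) is not reproduced in this dump. From the expected values above, f_of_omega(omega) is the blended price omega * prices_A + (1 - omega) * prices_B, and L(omega) is its variance (for example, L(0) = 110.72 is the variance of prices_B). A minimal sketch consistent with the tests, with derivatives taken by automatic differentiation; the function names follow the tests, and the graded solution may differ in details such as looping instead of vmap:

    import jax.numpy as np
    from jax import grad, vmap

    # prices_A and prices_B are the 50-element arrays defined in w1_unittest.py.

    def f_of_omega(omega):
        # Blended price for a portfolio split omega / (1 - omega) between suppliers.
        return prices_A * omega + prices_B * (1 - omega)

    def L_of_omega(omega):
        # Variance of the blended price, the quantity minimised over omega in [0, 1].
        f = f_of_omega(omega)
        return np.mean((f - np.mean(f)) ** 2)

    def L_of_omega_array(omega_array):
        # Evaluate L at each omega in the array.
        return vmap(L_of_omega)(omega_array)

    def dLdOmega_of_omega_array(omega_array):
        # dL/domega at each omega, computed with jax automatic differentiation.
        return vmap(grad(L_of_omega))(omega_array)
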
/Week2/C2_W2_Assignment.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "EAt-K2qgcIou"
7 | },
8 | "source": [
9 | "# Optimization Using Gradient Descent: Linear Regression"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {
15 | "id": "FZYK-0rin5x7"
16 | },
17 | "source": [
18 | "In this assignment, you will build a simple linear regression model to predict sales based on TV marketing expenses. You will investigate three different approaches to this problem. You will use `NumPy` and `Scikit-Learn` linear regression models, as well as construct and optimize the sum of squares cost function with gradient descent from scratch."
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "# Table of Contents\n",
26 | "\n",
27 | "- [ 1 - Open the Dataset and State the Problem](#1)\n",
28 | " - [ Exercise 1](#ex01)\n",
29 | "- [ 2 - Linear Regression in Python with `NumPy` and `Scikit-Learn`](#2)\n",
30 | " - [ 2.1 - Linear Regression with `NumPy`](#2.1)\n",
31 | " - [ Exercise 2](#ex02)\n",
32 | " - [ 2.2 - Linear Regression with `Scikit-Learn`](#2.2)\n",
33 | " - [ Exercise 3](#ex03)\n",
34 | " - [ Exercise 4](#ex04)\n",
35 | "- [ 3 - Linear Regression using Gradient Descent](#3)\n",
36 | " - [ Exercise 5](#ex05)\n",
37 | " - [ Exercise 6](#ex06)"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "metadata": {},
43 | "source": [
44 | "## Packages\n",
45 | "\n",
46 | "Load the required packages:"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 1,
52 | "metadata": {
53 | "tags": [
54 | "graded"
55 | ]
56 | },
57 | "outputs": [],
58 | "source": [
59 | "import numpy as np\n",
60 | "# A library for programmatic plot generation.\n",
61 | "import matplotlib.pyplot as plt\n",
62 | "# A library for data manipulation and analysis.\n",
63 | "import pandas as pd\n",
64 | "# LinearRegression from sklearn.\n",
65 | "from sklearn.linear_model import LinearRegression"
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "Import the unit tests defined for this notebook."
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 2,
78 | "metadata": {},
79 | "outputs": [],
80 | "source": [
81 | "import w2_unittest"
82 | ]
83 | },
84 | {
85 | "cell_type": "markdown",
86 | "metadata": {},
87 | "source": [
88 | " \n",
89 | "## 1 - Open the Dataset and State the Problem"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "In this lab, you will build a linear regression model for a simple [Kaggle dataset](https://www.kaggle.com/code/devzohaib/simple-linear-regression/notebook), saved in a file `data/tvmarketing.csv`. The dataset has only two fields: TV marketing expenses (`TV`) and sales amount (`Sales`)."
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | " \n",
104 | "### Exercise 1\n",
105 | "\n",
106 | "Use `pandas` function `pd.read_csv` to open the .csv file the from the `path`."
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 3,
112 | "metadata": {
113 | "tags": [
114 | "graded"
115 | ]
116 | },
117 | "outputs": [],
118 | "source": [
119 | "path = \"data/tvmarketing.csv\"\n",
120 | "\n",
121 | "### START CODE HERE ### (~ 1 line of code)\n",
122 | "adv = pd.read_csv(path)\n",
123 | "### END CODE HERE ###"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": 4,
129 | "metadata": {
130 | "tags": [
131 | "graded"
132 | ]
133 | },
134 | "outputs": [
135 | {
136 | "data": {
137 | "text/html": [
138 | "
\n",
139 | "\n",
152 | "
\n",
153 | " \n",
154 | " \n",
155 | " \n",
156 | " TV \n",
157 | " Sales \n",
158 | " \n",
159 | " \n",
160 | " \n",
161 | " \n",
162 | " 0 \n",
163 | " 230.1 \n",
164 | " 22.1 \n",
165 | " \n",
166 | " \n",
167 | " 1 \n",
168 | " 44.5 \n",
169 | " 10.4 \n",
170 | " \n",
171 | " \n",
172 | " 2 \n",
173 | " 17.2 \n",
174 | " 9.3 \n",
175 | " \n",
176 | " \n",
177 | " 3 \n",
178 | " 151.5 \n",
179 | " 18.5 \n",
180 | " \n",
181 | " \n",
182 | " 4 \n",
183 | " 180.8 \n",
184 | " 12.9 \n",
185 | " \n",
186 | " \n",
187 | "
\n",
188 | "
"
189 | ],
190 | "text/plain": [
191 | " TV Sales\n",
192 | "0 230.1 22.1\n",
193 | "1 44.5 10.4\n",
194 | "2 17.2 9.3\n",
195 | "3 151.5 18.5\n",
196 | "4 180.8 12.9"
197 | ]
198 | },
199 | "execution_count": 4,
200 | "metadata": {},
201 | "output_type": "execute_result"
202 | }
203 | ],
204 | "source": [
205 | "# Print some part of the dataset.\n",
206 | "adv.head()"
207 | ]
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "metadata": {},
212 | "source": [
213 | "##### __Expected Output__ \n",
214 | "\n",
215 | "```Python\n",
216 | "\tTV\tSales\n",
217 | "0\t230.1\t22.1\n",
218 | "1\t44.5\t10.4\n",
219 | "2\t17.2\t9.3\n",
220 | "3\t151.5\t18.5\n",
221 | "4\t180.8\t12.9\n",
222 | "```"
223 | ]
224 | },
225 | {
226 | "cell_type": "code",
227 | "execution_count": 5,
228 | "metadata": {},
229 | "outputs": [
230 | {
231 | "name": "stdout",
232 | "output_type": "stream",
233 | "text": [
234 | "\u001b[92m All tests passed\n"
235 | ]
236 | }
237 | ],
238 | "source": [
239 | "w2_unittest.test_load_data(adv)"
240 | ]
241 | },
242 | {
243 | "cell_type": "markdown",
244 | "metadata": {},
245 | "source": [
246 | "`pandas` has a function to make plots from the DataFrame fields. By default, matplotlib is used at the backend. Let's use it here:"
247 | ]
248 | },
249 | {
250 | "cell_type": "code",
251 | "execution_count": 6,
252 | "metadata": {
253 | "tags": [
254 | "graded"
255 | ]
256 | },
257 | "outputs": [
258 | {
259 | "data": {
260 | "text/plain": [
261 | ""
262 | ]
263 | },
264 | "execution_count": 6,
265 | "metadata": {},
266 | "output_type": "execute_result"
267 | },
268 | {
269 | "data": {
270 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEGCAYAAABiq/5QAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/av/WaAAAACXBIWXMAAAsTAAALEwEAmpwYAAAmqElEQVR4nO2dfWwl13nen7Pc+2V+rFfSrbpRtKTduK3VINXuag0VNWQkJtN6hVryNg50UTRufQEtXK+hMImLLQyrKogYlY147QhBrhNQWDnI0k4cxzYQu7TpqlEctL7cXa4+TFWW2pKVVMW8jiVqV+YuudLpH3eGGl7Ox5mZMzNnZp4fMODlzJ2Z98zMfc6Z97znPUJKCUIIIeVhT9YGEEIISRcKPyGElAwKPyGElAwKPyGElAwKPyGElIy9WRugwg033CAnJiayNoMQQnLF+fPnfyylbA6uz4XwT0xM4Ny5c1mbQQghuUIIseq2nq4eQggpGRR+QggpGRR+QggpGRR+QggpGRR+QggpGRR+QgjRQK/Xw+LiInq9XtamBELhJ4SQmMzNzWF8fBxTU1MYHx/H3Nxc1ib5IvKQlvm2226TjOMnhJhIr9fD+Pg4NjY2ttc1Gg2srq6i2dw1dipVhBDnpZS3Da5ni58QQmKwsrKCarW6Y12lUsHKyko2BilA4SeEkBhMTExgc3Nzx7qtrS2YnGaGwk8IITFoNpuYnZ1Fo9HA2NgYGo0GZmdnM3fz+JGLXD2EEGIyrVYLk5OTWFlZwcTERKDo93o95e8mAVv8hBCigWaziaNHjwYKuQkRQIzqIYSQlEg7AohRPYQQkjGmRABR+AkhJCVMiQCi8BNCSEqYEgHEqB5CCEkYZxRP2AigJGCLnxCSa5JOjhb3+G5RPM4IoCySu1H4CSG5JenQyLjH7/V6aLfb2NjYwPr6OjY2NtBut7dFPqvQToZzEkJySdKhkTqOv7i4iKmpKayvr2+vGxsbw8LCAiYmJhIP7Uw9nFMIcbMQ4lEhxLIQ4gdCiPus9Q8IIV4UQly0lmNJ2UAIKS5Jh0bqOL5fFE+WoZ1JunquAfhNKeUtAG4H8FEhxC3WttNSylut5ZsJ2kAIKShJh0bqOL5fFE+WoZ2JCb+U8iUp5QXr8yUATwO4KanzEULMJKnOy6RDI3Udv9VqYXV1FQsLC1hdXUWr1UrFfj9S8fELISYAPAbg5wH8BoB/A+BVAOfQfyt42WWfewHcCwAHDx48srq6mridhBC9zM3Nod1uo1qtYnNzE7Ozs9vCp4ukE57l+fhePv7EhV8IMQLgLwH8tpTyq0KIGwH8GIAEMAPggJTyw37HYOcuIfnD5Jmp0iLrLJyZ5OoRQlQA/BmAP5ZSfhUApJQ/klK+LqV8A8AfAnhXkjYQQrLBlLw0WWFCFk4vkozqEQBmATwtpfysY/0Bx9c+AOCppGwghGSHKXlpVNHZFxEUv581Sbb4/ymAfw3glwZCNz8thHhSCPEEgF8EMJ2gDYSQjDAlL40Kulvnpr/tcAAXISRRsvZzB5FEX4Qp/RvMx08I0Yqqa0R1ZqqsSKJ1ruttJ6lQWAo/IQRAOJExueMyLEn1RXjF76uS6DWWUhq/HDlyRBJCkuPs2bOy0WjIffv2yUajIc+ePev53bW1NdloNCT6IdkSgGw0GnJtbS1Fi/Vil39sbCyw/Gmg6xoDOCddNJUtfkJKTtgIFNM7LqMQt3Wum6SvMSdiIaTk2CLj7Ii0RcbNJ523ME1Vms2mMf0QSV9jtvgJKTlhRSbrMM0sJi5Jm6SvMcM5CSHbOXUqlQq2traUcupkEaaZRu4fk4h7jTPL1aMDCj8hyVPGePui4yX89PETQgCY5eN2I2xfBPGGPn5CiC9hfepJ+eCL2qmcBRR+QognYQcRJTnoKOtO5SJBHz8hJcbPrx/Wp56WD970vgiTYK4eQsgOglrnYQcRuX1/aGhI+8Au03P/5AEKPyElRGW0blifutv3L1++jAsXLijblETfQBni/sNC4SekhKi05sP61JvNJk6fPr1r/fT0dKDoJtU3UKRkcjqhj5+QEhLGHx/Gp764uIj3vve9uHTp0va6sbExLCws4OjRo7FtCQPj/unjJ4Q4CNOaV/Gp2+6UkZERXLt2bce2oJDLqAnJglw4RUwmpwsKPyElRVdGSqc75ciRI2i326FCLqPE56u4cBj374NbrmbTFubjJ8RMvPLGLy8vy263q5Q/fm1tTc7MzMh6va6UDz9MrnrT8uynDTzy8TNlAyEkMl5pFC5fvuzp03fiTLomhMDHP/5xnDhxwvcNIUzqhlarhcnJydzE/ac1RoGuHkIKQFYhi3HcKW4hpZ/61Ke0nzMvcf9pRiBR+AnJOVmGLMZJoxC187WIqRvCzoIWF4ZzEpJjTAlZjOKiiGt7kVI3LC4uYmpqCuvr69vrgsJgVWA4JyEFxJSQxSjulLgtdxNcOLpcbGlHIFH4CckxboKxubmJl19+ORcpCsKGlJqUfkGni81ZCY6MjKBWq+H06dPJVWpuoT6mLQznJMQbZ8hipVKR1WpV7tu3r3Dhi3Y5TShbmJDSMHQ6HVmr1eTo6KiWMsIjnJM+fkIKQK/Xw9LSEu6+++7M/f1JYEpfhk0SPvkkykgfPyEFptlsYv/+/Ub4+5PAlL4MmyR88mmWkcJPSEEocoqCsGWL0xegsm8SIaWp3j83/49pC338hKhhYoqCtbU15fQNfqiWLU5fQNh9dZVt8Py67h/S9vELIW4G8EUAN6Lf+fEHUsrPCyGuA/BlABMAVgD8qpTyZb9j0cdPiDomxbc7UzJsbm5idnY2cjI4ILhscfzkpvQj6Lx/Wfj4rwH4TSnlLQBuB/BRIcQtAE4B+K6U8h0Avmv9TwjRhAnx7UAyo1GDyqbiJ/dy5ZjSj5DG/UtM+KWUL0kpL1ifLwF4GsBNAO4C8Ij1tUcA3J2UDYSQ7AgrpDpi9IP85H6x90XuI9mFm/9H94K+W+f/AhgD8IpjvXD+P7DPvQDOATh38ODBWH4uQkxHt6/YBKKkT9YRo+/lJ1exx8Q+kjjAw8efhuiPADgP4Lj1/ysD218OOgY7d0mRMWlgkk7W1tbkBz/4wR1Ce/LkSdfv6R4M5VaRdrtduW/fvh3nGRsbk91uN3DfvJKJ8AOoAJgH8BuOdc8AOGB9PgDgmaDjUPhJUUlqBGjWnD17Vtbr9R3l8iqbqiDHpajX2g8v4U/Mxy+EEABmATwtpfysY9M3AHzI+vwhAF9PygZCwpJ2LpisOhSTLKfdqXvlypVd29zKlpZvvYjpnCPjVhvoWAC8G/1a9QkAF63lGIDr0Y/meRbAAoDrgo7FFj9JgyxcLlm0QpMup1sLPqhsafrWi+TKCQJZ+fh1LBR+kjRZugH8OiN1C1SUcoa1w+0c9nmC5tL1O08UO8oi8F5Q+AnxIS0/sxeDIpVUqzxsOaPa4azM6vW6nJmZiSXAYe0oaod5WLyEn9k5CYE5ozaTtiXMsU2ZISusHSbdy6xhdk5CfIjb8aezszTJDt8
w5Yxrh64RqGHtMGUErtG4vQaYttDVQ9Iiil9Yt1shjf4GlXIG2ZGWDz3s9Shj2KYXoI+fEP0kJTJZjCB1E3IvO9L2oYe9HkUbgRsVCj8hCZBkp3CaUSl+Qj5oR1Ytakb1hMdL+Nm5S0qHzrS3cdMAm5A+OWwZVKcdNKV8ZYadu4TAPztjFKJ2Cuu2Iw5hO0NVRtqaVD7igttrgGkLXT1EB0m6KMK4FUzrfIxij58P3bTylRmknauHENNIOkxSNXTRtHDDKG8trVYLq6urWFhYwOrq6o5ZtUwrn5O0czGZyt6sDSAkLUyZaMMUO5y0Wi1MTk6G8sk3m03X75lYPkD/NJB5hi1+UhpMyc5oih1udsUZcGW3pgEYV76o00AW9g3Bzf9j2kIfP9GJ7jC/qMdLO9wwyfO5hYOaFE4ZJey2CPl+wDh+QryJKlJZpXLOenTxoD2Dnbn1et0Iwbcp6+hfCj8hHkQVxbzk0k/azm63K0dHR3elYZ6ZmdFyfF2EGc2bdbZWXVD4CXEhjijGFQcdee5VbE1axDqdTqhJV7JE9ZoXvcXPzl1iLGl0rMUJPYwTvRJlgFNUW5OMsun1epiennbdZkoIpxPVDmxTO+C14VYbmLawxV8+0vKdx23ZRUkGFvWccWwNstOtJazSOo4yzWKeMKmDOgqgq4fkhbRfs+NmcgwrDnFcL3Fs9bLTrZJVrXijTrNI0oHCT3JDFh1rabbs4lZsOm31sqVeryvbp3uaRaIPL+HnyF1iHFmM/PQahZrUuWZnZ9Fut1GpVLC1tRXKf6zTVrvfwJmZc8+e3V1/tr/e7bxRRv2SbKHwE+OIK4x5wBSxdKtk33jjjb47wEFQxZtmxZkXTE5LTeEnRmIL49LSEgDg0KFDGVukHxPE0quSBVDoijdpTM8LxIlYiLHo+PGY3OoyCbfr5Hftsr6uWZ/fj6CJbdK03Wsilsw7blUWdu6WDx2RPUXItWIiWV/XTqcja7WaHB0dNfK++gUnpH3twKgekid0jIotwsjLuCSRkC7L6+o2Sti0++p1jZaXl1O/dl7Cz5G7xEjiRvbomgwkj2l5bZu/8IUvaJ/+MIlJVlSvca/Xw3333bdr/dDQkFEjhL1G/V6+fNmcCWrcagO/Bf0c/mNh94uzsMWfT+K2NuMOViqjq8i22S1pmo7Wpe4Wf5hr7JUMrlarGdXitxl8/rN4W0IcVw+AswDGAAwDWAbwAoCPq+yrY6Hw5w9dohml8rD36XQ6mVYcaeM1itZebFdZlhVykL1hUyUDkJ1OJ9L5s0DXtVMlrvBftP7+KwC/A6AC4ImAfR4GsAbgKce6BwC8COCitRxTOT+FP19kKZqDFU6n04kkcnlMy+uXN8e+B3ZlmEWFrGKv6uQoo6Ojslar5Ur0bdIcJR5X+H9gif2fAniPte7xgH3uAHDYRfh/S+WczoXCny+yEk2dFY7bsWq1mlxeXk7AcnWb/ATDq0X8lre8Rdbr9W3RN+UtJk6yujwmTsvCbi/hV+3c/QKAFfRdPY8JIcYBvOq3g5TyMQA/UTw+KRBZTbats+PR2UHXaDQA9FMZHDlyREsnaVhU0jgPdipWKhVUKhUIISCEQK/XM6dz0bL39OnTqNVqGBkZUU59HHdu4LRwdlpHScOdKG61gcoCYK/Cdyawu8W/AuAJ9F1B+1XOxRZ/OuhskaTty5QyGRfT8vKyrNVqmbaSo/jC5+fnYydfS5oiuG28cLoc6/W6rFarmVx3xHT13AhgFsC3rP9vAdBW2G9Q+G8EMIR+ZNBvA3jYZ997AZwDcO7gwYOJX6Cyk8Rk2Vm82uqucLzcVvPz86mVLYrrrNvtugr/zMxM6hWyG3nsPFclqJNd5f7pIq7wfwvAr8Ly66Of4+dJhf12CL/qtsGFLf5kcXtQK5VK7kIZbZJOW1ytVmW9XtdybVT89vPz86Fb6svLy66Cs7y8bISPPI+d56oEdbLnqcW/aP1dcqy7qLDfYIv/gOPzNIAvqZyfwp8sJj2oJjL4FlGpVLRcm6CQV+f2arUqK5WKckvdrcVfr9eNEdaytfjthlTab1pxhf+/AbgewAXr/9sB/GXAPnMAXgKwhX7cfxvAHwF4En0f/zecFYHfQuFPFpNeTaOSdCvWPv78/LyWlmqQ8Hltn5+fVypjHoQ1i36gtHArm0lRParCfxjAXwNYt/7+EMAvqOyrY6HwJ8/gLEppdUbp+DGkOcJWl6AGuTrcto+OjsozZ84onysPwmqC2ykpTChbLOHv74+9AP4RgJ8HUFHdT8dC4U8H54OahmjoEOwsWrY6rk2UFr8t/mHOaYL4kOzwEn7ffPxCiOOeGwFIKb/qt10XzMefDUnmDQ/KWa7K4uIipqamsL6+vr1ubGwMCwsLOHr0qFabnei4NvZ8A87JTpzzDdjb9+7di0uXLu3YN8q1IuXDKx9/0Axc/8JnmwSQivCTbEhyhii3uV795nX1IqvBYjquTdD0i/b2b37zm/jYxz62Q/yjXCtCbHyFX0r5b9MyhKSHCbMX6RLsvM/PG1SBNJtNHDt2DB/5yEd2rE+jciPFRTkfvxDiTiHEvxdC3G8vSRpGksGUoeNeOcujCHar1cLq6ioWFhawurqqZW5Tk/Lw67xWJuF1jU269oXFzfE/uADoAPgigOcB/Ef0QzJnVfbVsbBzVw+6O0J1dBya2Ploah5+E69VVLyusanXPq8gZjjnEwN/RwD8lcq+OhYKvx50jpY07QeqqxLyynFTBLE1BR1TExapEkwSL+FXdfXYPXA/FUL8DIBrAA5EfMkgGaHLr97r9dBut7GxsYH19XVsbGyg3W5n9mquw31lH+P48eM7OpyBbDNYFhGvLKrdblcpe6gp7spc41YbDC4APgngrQCOoz8a9yUAMyr76ljY4teHjhh0tzeH4eFhOT8/n4DF/ujI6R40ctn0Fn9arV9d54nT4s/DiGSTQBRXD4CjAP6u4/9fA/BtAL8L4Dq/fXUuFH696Mi66SaU9Xo9dZdPnFmcbDfVzMyMa66i4eFhI9xYg7gNtEva5ab7PF4NkKCGSVGTuyVVeUcV/gu2wKM/o9b/A/AvAcwA+IrfvjoXCn96qD6A9g8069axjnlb3fLU1+t15bw4Osqg+qMfFGBdCeOC7Euile1Vbr/rkWaLP603qSQr76jC/7jj8+8BeMDx/0W/fXUuFH59+D3MYR/A+fl5OTw8nHnrK4z7yqvFmFWe+jDXPKtkeqa0su1n155C0vR0IiokXZFFFf6nYM20BeB/ArjDuc1vX50LhV8Pfg9zlAcw6Yc2TItL9bt+NqcdKRL2+mWVPtsEv/rgs9vpdBK7V2mWN+lKNarwfwL9bJxfB7AEbOf2+TkAf+23r86Fwh+foIc56gOYVDK3JFtcKjanUQmEveZu99CeFCbpN5WzZ8/Ker0uh4eHlftykuwMTnLi+zTfcIxs8ff3w+0APgBg2LHu7wM4HLSvroXCH5+ghznOA6hbJNNocel0ecWxIWw5s8rzbp9XtcNb5zX0etOp1WqJ3Ju033CSzIQbWfhNWCj88V
F5mE2Z/DpLn3IefvSmu6SSGCHu1beRlGsr7T4fo6J6TFko/HpQEZlOpyNrtVrovO868Yvzdv44kvixZFHpmD4KNew10XkNnR26tVot8c5s55tKvV6XMzMzxt4XFSj8BSdM56YJoXJBDFZSJ0+e3OE6GPxfVwWV9TWw789gJZfWeXU8F6oVdxCD7qLPfOYzu8RfdzBBnHtvYgVO4c8pKg+TLn+q2wTdjUYjs8ExThEMCmOMIixeZDVloX1eu6z256TPr/L8hL0mfhW3SkvaS4STDOWM86ZiWu4qGwp/DlF5mHS2UJeXl11FNanoCS8GKzvVMMZarabth2eCHz2NN44wz0/YaxJUcfvdJz8RTureRP0tZf2W6AeFP2eoPkxRWyluP55ut7vrVbper8du8Yf5obpVdioDl9IUyyTwq9yS7GNII++S25ukSiWThZhGedszZYCbGxT+nKH6MMUJCRxsHXc6He0CGndkqn1+L9fB2NiYrNVqu/YL88MzwTdrUovfrvB1uSu83iSD7lNWLrcobzZs8VP4tRDmYQrzA/HreHMTgDghnWF/ECpjDdyiesLkcR/EJN+sbYudNyhtH39SFU6UFr+NCZWyCllVUkFQ+HNIWEFX+YF4ieuZM2d2rR8dHY31uqpjZKqqv1n1Wjn3M7GlllVUT5J5l9J4qzABEyspCr8hOH/Y8/PzgRkgdT9MXmL3ve99T3uoXBw3lJ+Ae7XSg66VSjpmU3yzaZN0JTh4X/MeH58XKPwGMBiuZy/VajXVlo+Xv9y2q16va3td9RLyoLhx3WMN3Pazy2lSiz9LknZXmNgiLjoU/owJikxJW3D8/ONhEmANuk5U8qvH8atHjaAwLR2zqSwvL8szZ86kHsJLkoHCnzFBsejDw8OJxii7sba2Js+cOSNHR0cjuTucAl6tVmWlUgkU87ijOpOItU7ymueplWtSRzfRA4U/Y1Ra/PaoxDR+eM6EbFHePqK+wbhVgPV6PdTgq6guibCd5Sp9MCrny4OQmtjRTeJD4TeAwXA9p4/fFv00fnheoh0mMVvQG4zXW4PKYCzViidKS1plv7Nnz8pqtbptT6VSCS3aeRNSkwchkeikLvwAHgawBsdMXQCuA/AdAM9af/erHKsowi+ld1RPt9uN7HIJi9uPfGRkRJ45cybWoBVVkXO2vOMOvtKNX+ihV3m8RkHnSUjzVlERNbIQ/jsAHB4Q/k8DOGV9PgXgQZVjFUn4vXAbNVupVFJr8UfxdzsF3Pbxq7pfdAy+SoJut7srnt3ZBzOIX2ipSeVSwdRBSCQ6mbh6AEwMCP8zAA5Ynw8AeEblOEUX/rW1tV3unySFX0r3H3kUn7RKVE8UW7IiTIs/SNxNKpcqeeqMJsGYIvyvOD4L5/9+S9GFP2wr0w8/Ifb7P4kWatD5/b6bJao+fhV3jknlIuXDOOG3/n/ZZ997AZwDcO7gwYPJXRkNqMav++3v1uIPK7x+4ZVBE5fo9kkPvj28//3vl9VqNdOZvcKgEtWTR3cOKRemCH/hXD1e7pGwbpOTJ0/uEJC9e/eGEkeVaBk/gdIpYroid/JAHt05pDyYIvyfwc7O3U+rHCdL4Y+SPiBsh6VXOoEwwqgyWcmgG2kw57qOMEZVW0ZGRoyNcAkL3TnEVLyEfw8SQggxB+C/A/gHQogXhBBtAP8ZwJQQ4lkAk9b/xjI3N4fx8XFMTU1hfHwcc3NzO7YvLS1hz56dl7BSqaDb7aJare5av7Ky4nqelZWVXd+vVque33djYmICm5ubyt9/7bXXcPfdd+8o0+Tk5I7ybG1tod1uo9frKR9X1ZatrS1MTEyEOq6pNJtNHD16FM1mM2tTCFHDrTYwbcmixa8SseHll9fR4o/iCvELrzx58mRgP4JOP79Xjnd7iZPnnxCiBjhyNxxBc366iZqbjz/q5NRRfcV+UT1BOdd1d1aura3JmZkZWalUto83NDRE0SckJbyEX/S3mc1tt90mz507l+o5e70exsfHsbGxsb2u0WhgdXUVKysrmJqawvr6+o5tDz74IO65557tV/5er4eVlRVMTEwouQHCfl9nmezzzc3Nod1uo1KpYGtrC7Ozs5icnIxlV6/Xw9LSEgDg0KFDdIkQkhJCiPNSytt2bXCrDUxbsurctd05w8PDO2YL0pHrRhdhQka73e52TiC/NwvnMcPOmctOTkLMAXT1hMcWveHh4V2i59w2WAGkFaqoKsqD3+t0OtpTIOcpEyUhZYHCHxIV0et0Ojv813Dxm+u0Z3AUrIoox/Hbq3b2ciATIWbiJfyJhXPmHbcQS2dIZq/Xw/T0NLa2tnbtqztU0S2sNMg+1XL44RaW6Va2OOcghKQPhd+DINFzEzsAqNVqmJ2d1daB2ev10G63sbGxgfX1dWxsbKDdbmNzcxNXrlzxtE+1HH40m03Mzs6i0WhgbGwMjUbDtWxxzkEIyQC31wDTlqx9/CMjI7JWq+0IQ3Rzb4SZq1YVN3dLo9HYkce+0Wgo+fijhoqqTl7C1AWEmAXo449Gp9ORtVrNNWInDbFTyXujUuGkEXHDqB5CzMJL+BnH74NK3HvSsffAztj6q1evYs+ePTtsGhsbw8LCAo4ePZrI+Qkh+cQrjp8+fh/c/Ph79uzZHowERM/T0uv1sLi4qJQHp9VqYXV1FQsLCzvObUN/OiEkDBR+H9w6Ld2Sm4UlKPmbG3YF8853vlOpw5UQQrygqyeAubk5fPjDH94VQTPo8lFFxX2kepykXUyEkHzj5erZm4UxpuAUTwCuQtpqtXD99dfj+PHjeO2117bXO+PUnfsFCbLtPnIKv32sMALebDYp+ISQSJRW+O0O02q1ip/+9KcQQqDRaGBzcxOzs7NotVrb3z106BDeeOONHftvbW3hwoULeM973oNqtYrNzU20223Mzs5u/z94HIAx74QQA3AL9TFt0R3OGRQi6ZZuYDCs00525nUMr+NIyZh3Qkg6wCOcs5Qtfjd3i5NB18vc3Bymp6e3W/Kf//zncfjwYd9juB3HptVqxU51TAghUSml8AdNDeh0vThTJthMT0/j/PnzsaYXpI+eEJIVpQznbDabOH36NGq1GkZGRlCpVFCtVl3DI90SjUkpcfny5e2wytHRUdfznD59muJOCDGOUgq/03WztbWFhx56CC+88AIWFhawurq6o0N2ZGRklzvnypUrGBkZ2R5Y9dBDD+0S/5GRERw+fDiV8hBCSBhKJ/xO182lS5dw9epVTE9PA4DrCNznn3/e9Tj2+maziWPHjuHatWs7tr/++uuM1CGEGEnphD+J3PGq6YsJIcQESte5GzaO/tChQ9sTj9tUKhUcOnRox/cYqUMIyQula/FHaZ3ff//9qNVqGB4eRr1exyOPPOL6/agJ2wghJE1K1+IH1FvnztG9e/bswalTp3DixAkKOyEk15Q6SZtfXh1dydQIISQrmI9/AK/UyHae/KWlJU4gTggpJKV09ThDOu0Wfbvdxquvvrod33/16lXXxGwM0SSE5J3SCX+v18MnP/nJXYOy9u7di/vuuw9Xr17d3lapV
NBoNLajehiiSQgpAqUSfruz1i2x2ubmJoQQO9YNDQ3ha1/7Gvbv388QTUJIYcjExy+EWBFCPCmEuCiESGVqLbdka05OnDixa5atK1eu4Oabb2aIJiGkUGTZufuLUspb3Xqck8BtxK5NvV7HnXfeiUajsWN9o9HA5cuX0zCPEEJSozRRPV6pmKvVKj73uc/tGonr3I8QQopEVsIvAXxbCHFeCHGv2xeEEPcKIc4JIc71er3YJxwcsVupVDA0NIRarYbp6WksLCww3w4hpBRkMoBLCHGTlPJFIcTfAfAdAB+TUj7m9X2dA7h6vR6WlpZw11137fDp24OzAPdJ1wkhJG8YNYBLSvmi9XcNwJ8DeFda5242m9i/fz9qtdqO9c5pEtmZSwgpMqkLvxBiWAgxan8G8MsAnkrThrAZOgkhpEhk0eK/EcD3hBCPA+gC+Asp5X9J0wDmzyeElBkmaaM/nxBSULx8/KUauTtIs9mk4BNCSkdp4vgJIYT0ofATQkjJoPATQkjJoPATQkjJKJXw27Nr6UgBQQgheaU0wu811SIhhJSNUsTxc+J0QkgZMSpXT9q45eLnxOmEkLJSCuFnbh5CCHmTUgg/c/MQQsiblCZlQ6vVwuTkJHPzEEJKT2mEH2BuHkIIAUri6iGEEPImFH5CCCkZFH5CCCkZFH5CCCkZFH5CCCkZhRZ+JmUjhJDdFFb4mZSNEELcKWSSNiZlI4SQkiVpY1I2QgjxppDCz6RshBDiTSGFn0nZCCHEm8Lm6mFSNkIIcaewwg8wKRshhLhRSFcPIYQQbyj8hBBSMij8hBBSMij8hBBSMij8hBBSMnKRskEI0QOwGmHXGwD8WLM5WVKk8hSpLECxylOksgDFKk/YsoxLKXeFNuZC+KMihDjnlqcirxSpPEUqC1Cs8hSpLECxyqOrLHT1EEJIyaDwE0JIySi68P9B1gZopkjlKVJZgGKVp0hlAYpVHi1lKbSPnxBCyG6K3uInhBAyAIWfEEJKRmGFXwjxz4UQzwghnhNCnMranrAIIVaEEE8KIS4KIc5Z664TQnxHCPGs9Xd/1nZ6IYR4WAixJoR4yrHO1X7R53ete/WEEOJwdpbvxqMsDwghXrTuz0UhxDHHtv9gleUZIcQ/y8Zqb4QQNwshHhVCLAshfiCEuM9an7v741OWXN4fIURdCNEVQjxulec/WevfJoT4vmX3l4UQVWt9zfr/OWv7hNKJpJSFWwAMAfhfAN4OoArgcQC3ZG1XyDKsALhhYN2nAZyyPp8C8GDWdvrYfweAwwCeCrIfwDEA3wIgANwO4PtZ269QlgcA/JbLd2+xnrcagLdZz+FQ1mUYsPEAgMPW51EAP7Tszt398SlLLu+PdY1HrM8VAN+3rvmfALjHWt8B8BHr878D0LE+3wPgyyrnKWqL/10AnpNS/m8p5SaALwG4K2ObdHAXgEesz48AuDs7U/yRUj4G4CcDq73svwvAF2Wf/wHgrUKIA6kYqoBHWby4C8CXpJRXpZT/B8Bz6D+PxiClfElKecH6fAnA0wBuQg7vj09ZvDD6/ljX+LL1b8VaJIBfAvAVa/3gvbHv2VcAvFcIIYLOU1ThvwnA847/X4D/w2AiEsC3hRDnhRD3WutulFK+ZH3+GwA3ZmNaZLzsz+v9Omm5Ph52uN1yVRbLNXAI/ZZlru/PQFmAnN4fIcSQEOIigDUA30H/reQVKeU16ytOm7fLY21fB3B90DmKKvxF4N1SysMA3gfgo0KIO5wbZf/dLrexuHm3H8DvA/h7AG4F8BKA38nUmggIIUYA/BmAX5dSvurclrf741KW3N4fKeXrUspbAfws+m8j/1D3OYoq/C8CuNnx/89a63KDlPJF6+8agD9H/wH4kf2Kbf1dy87CSHjZn7v7JaX8kfUDfQPAH+JNd0EuyiKEqKAvlH8spfyqtTqX98etLHm/PwAgpXwFwKMA/gn67jV7qlynzdvlsbbvA/C3QccuqvAvAniH1RNeRb/T4xsZ26SMEGJYCDFqfwbwywCeQr8MH7K+9iEAX8/Gwsh42f8NAL9mRY/cDmDd4XIwkgEf9wfQvz9Avyz3WNEWbwPwDgDdtO3zw/IBzwJ4Wkr5Wcem3N0fr7Lk9f4IIZpCiLdanxsAptDvt3gUwK9YXxu8N/Y9+xUA/9V6W/Mn617spBb0IxF+iL5/7BNZ2xPS9rejH3nwOIAf2Paj77v7LoBnASwAuC5rW33KMIf+K/YW+j7Jtpf96Ecy/J51r54EcFvW9iuU5Y8sW5+wfnwHHN//hFWWZwC8L2v7XcrzbvTdOE8AuGgtx/J4f3zKksv7A+AXACxZdj8F4H5r/dvRr6CeA/CnAGrW+rr1/3PW9rernIcpGwghpGQU1dVDCCHEAwo/IYSUDAo/IYSUDAo/IYSUDAo/IYSUDAo/IQoIIa53ZHr8G0fmRzmY4VEI8etCiN/PylZCgqDwE6KAlPJvpZS3yv5Q+g6A09bnE+gPEHRyD/qx/4QYCYWfkHh8BcCdjvzoEwB+BsBfZWkUIX5Q+AmJgZTyJ+iPmHyfteoeAH8iOTKSGAyFn5D4zOFNdw/dPMR4KPyExOfr6E+AcRjAW6SU57M2iBA/KPyExET2Z0x6FMDDYGuf5AAKPyF6mAPwj0HhJzmA2TkJIaRksMVPCCElg8JPCCElg8JPCCElg8JPCCElg8JPCCElg8JPCCElg8JPCCEl4/8DhE5wlJrCYZwAAAAASUVORK5CYII=\n",
271 | "text/plain": [
272 | ""
273 | ]
274 | },
275 | "metadata": {
276 | "needs_background": "light"
277 | },
278 | "output_type": "display_data"
279 | }
280 | ],
281 | "source": [
282 | "adv.plot(x='TV', y='Sales', kind='scatter', c='black')"
283 | ]
284 | },
285 | {
286 | "cell_type": "markdown",
287 | "metadata": {},
288 | "source": [
289 | "You can use this dataset to solve a simple problem with linear regression: given a TV marketing budget, predict sales."
290 | ]
291 | },
292 | {
293 | "cell_type": "markdown",
294 | "metadata": {},
295 | "source": [
296 | " \n",
297 | "## 2 - Linear Regression in Python with `NumPy` and `Scikit-Learn`"
298 | ]
299 | },
300 | {
301 | "cell_type": "markdown",
302 | "metadata": {},
303 | "source": [
304 | "Save the required field of the DataFrame into variables `X` and `Y`:"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "execution_count": 7,
310 | "metadata": {
311 | "tags": [
312 | "graded"
313 | ]
314 | },
315 | "outputs": [],
316 | "source": [
317 | "X = adv['TV']\n",
318 | "Y = adv['Sales']"
319 | ]
320 | },
321 | {
322 | "cell_type": "markdown",
323 | "metadata": {},
324 | "source": [
325 | " \n",
326 | "### 2.1 - Linear Regression with `NumPy`"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "You can use the function `np.polyfit(x, y, deg)` to fit a polynomial of degree `deg` to points $(x, y)$, minimising the sum of squared errors. You can read more in the [documentation](https://numpy.org/doc/stable/reference/generated/numpy.polyfit.html). Taking `deg = 1` you can obtain the slope `m` and the intercept `b` of the linear regression line:"
334 | ]
335 | },
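{
"cell_type": "markdown",
"metadata": {},
"source": [
"As background (a standard least-squares identity, stated here without derivation), the `deg = 1` fit computed by `np.polyfit` has the closed form\n",
"\n",
"$$m = \\frac{\\sum_i (x_i - \\bar{x})(y_i - \\bar{y})}{\\sum_i (x_i - \\bar{x})^2}, \\qquad b = \\bar{y} - m\\,\\bar{x},$$\n",
"\n",
"so evaluating `np.sum((X - X.mean()) * (Y - Y.mean())) / np.sum((X - X.mean())**2)` should reproduce the slope printed below.\n"
]
},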
336 | {
337 | "cell_type": "code",
338 | "execution_count": 8,
339 | "metadata": {
340 | "tags": [
341 | "graded"
342 | ]
343 | },
344 | "outputs": [
345 | {
346 | "name": "stdout",
347 | "output_type": "stream",
348 | "text": [
349 | "Linear regression with NumPy. Slope: 0.04753664043301978. Intercept: 7.032593549127698\n"
350 | ]
351 | }
352 | ],
353 | "source": [
354 | "m_numpy, b_numpy = np.polyfit(X, Y, 1)\n",
355 | "\n",
356 | "print(f\"Linear regression with NumPy. Slope: {m_numpy}. Intercept: {b_numpy}\")"
357 | ]
358 | },
359 | {
360 | "cell_type": "markdown",
361 | "metadata": {},
362 | "source": [
363 | " \n",
364 | "### Exercise 2\n",
365 | "\n",
366 | "Make predictions substituting the obtained slope and intercept coefficients into the equation $Y = mX + b$, given an array of $X$ values."
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": 9,
372 | "metadata": {
373 | "tags": [
374 | "graded"
375 | ]
376 | },
377 | "outputs": [],
378 | "source": [
379 | "# This is organised as a function only for grading purposes.\n",
380 | "def pred_numpy(m, b, X):\n",
381 | " ### START CODE HERE ### (~ 1 line of code)\n",
382 | " Y = m*X + b\n",
383 | " ### END CODE HERE ###\n",
384 | " \n",
385 | " return Y"
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": 10,
391 | "metadata": {
392 | "tags": [
393 | "graded"
394 | ]
395 | },
396 | "outputs": [
397 | {
398 | "name": "stdout",
399 | "output_type": "stream",
400 | "text": [
401 | "TV marketing expenses:\n",
402 | "[ 50 120 280]\n",
403 | "Predictions of sales using NumPy linear regression:\n",
404 | "[ 9.40942557 12.7369904 20.34285287]\n"
405 | ]
406 | }
407 | ],
408 | "source": [
409 | "X_pred = np.array([50, 120, 280])\n",
410 | "Y_pred_numpy = pred_numpy(m_numpy, b_numpy, X_pred)\n",
411 | "\n",
412 | "print(f\"TV marketing expenses:\\n{X_pred}\")\n",
413 | "print(f\"Predictions of sales using NumPy linear regression:\\n{Y_pred_numpy}\")"
414 | ]
415 | },
416 | {
417 | "cell_type": "markdown",
418 | "metadata": {},
419 | "source": [
420 | "##### __Expected Output__ \n",
421 | "\n",
422 | "```Python\n",
423 | "TV marketing expenses:\n",
424 | "[ 50 120 280]\n",
425 | "Predictions of sales using NumPy linear regression:\n",
426 | "[ 9.40942557 12.7369904 20.34285287]\n",
427 | "```"
428 | ]
429 | },
430 | {
431 | "cell_type": "code",
432 | "execution_count": 11,
433 | "metadata": {},
434 | "outputs": [
435 | {
436 | "name": "stdout",
437 | "output_type": "stream",
438 | "text": [
439 | "\u001b[92m All tests passed\n"
440 | ]
441 | }
442 | ],
443 | "source": [
444 | "w2_unittest.test_pred_numpy(pred_numpy)"
445 | ]
446 | },
447 | {
448 | "cell_type": "markdown",
449 | "metadata": {},
450 | "source": [
451 | " \n",
452 | "### 2.2 - Linear Regression with `Scikit-Learn`"
453 | ]
454 | },
455 | {
456 | "cell_type": "markdown",
457 | "metadata": {},
458 | "source": [
459 | "`Scikit-Learn` is an open-source machine learning library that supports supervised and unsupervised learning. It also provides various tools for model fitting, data preprocessing, model selection, model evaluation, and many other utilities. `Scikit-learn` provides dozens of built-in machine learning algorithms and models, called **estimators**. Each estimator can be fitted to some data using its `fit` method. Full documentation can be found [here](https://scikit-learn.org/stable/)."
460 | ]
461 | },
462 | {
463 | "cell_type": "markdown",
464 | "metadata": {},
465 | "source": [
466 | "Create an estimator object for a linear regression model:"
467 | ]
468 | },
469 | {
470 | "cell_type": "code",
471 | "execution_count": 12,
472 | "metadata": {
473 | "tags": [
474 | "graded"
475 | ]
476 | },
477 | "outputs": [],
478 | "source": [
479 | "lr_sklearn = LinearRegression()"
480 | ]
481 | },
482 | {
483 | "cell_type": "markdown",
484 | "metadata": {},
485 | "source": [
486 | "The estimator can learn from data calling the `fit` function. However, trying to run the following code you will get an error, as the data needs to be reshaped into 2D array:"
487 | ]
488 | },
489 | {
490 | "cell_type": "code",
491 | "execution_count": 13,
492 | "metadata": {
493 | "tags": [
494 | "graded"
495 | ]
496 | },
497 | "outputs": [
498 | {
499 | "name": "stdout",
500 | "output_type": "stream",
501 | "text": [
502 | "Shape of X array: (200,)\n",
503 | "Shape of Y array: (200,)\n",
504 | "Expected 2D array, got 1D array instead:\n",
505 | "array=[230.1 44.5 17.2 151.5 180.8 8.7 57.5 120.2 8.6 199.8 66.1 214.7\n",
506 | " 23.8 97.5 204.1 195.4 67.8 281.4 69.2 147.3 218.4 237.4 13.2 228.3\n",
507 | " 62.3 262.9 142.9 240.1 248.8 70.6 292.9 112.9 97.2 265.6 95.7 290.7\n",
508 | " 266.9 74.7 43.1 228. 202.5 177. 293.6 206.9 25.1 175.1 89.7 239.9\n",
509 | " 227.2 66.9 199.8 100.4 216.4 182.6 262.7 198.9 7.3 136.2 210.8 210.7\n",
510 | " 53.5 261.3 239.3 102.7 131.1 69. 31.5 139.3 237.4 216.8 199.1 109.8\n",
511 | " 26.8 129.4 213.4 16.9 27.5 120.5 5.4 116. 76.4 239.8 75.3 68.4\n",
512 | " 213.5 193.2 76.3 110.7 88.3 109.8 134.3 28.6 217.7 250.9 107.4 163.3\n",
513 | " 197.6 184.9 289.7 135.2 222.4 296.4 280.2 187.9 238.2 137.9 25. 90.4\n",
514 | " 13.1 255.4 225.8 241.7 175.7 209.6 78.2 75.1 139.2 76.4 125.7 19.4\n",
515 | " 141.3 18.8 224. 123.1 229.5 87.2 7.8 80.2 220.3 59.6 0.7 265.2\n",
516 | " 8.4 219.8 36.9 48.3 25.6 273.7 43. 184.9 73.4 193.7 220.5 104.6\n",
517 | " 96.2 140.3 240.1 243.2 38. 44.7 280.7 121. 197.6 171.3 187.8 4.1\n",
518 | " 93.9 149.8 11.7 131.7 172.5 85.7 188.4 163.5 117.2 234.5 17.9 206.8\n",
519 | " 215.4 284.3 50. 164.5 19.6 168.4 222.4 276.9 248.4 170.2 276.7 165.6\n",
520 | " 156.6 218.5 56.2 287.6 253.8 205. 139.5 191.1 286. 18.7 39.5 75.5\n",
521 | " 17.2 166.8 149.7 38.2 94.2 177. 283.6 232.1].\n",
522 | "Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.\n"
523 | ]
524 | }
525 | ],
526 | "source": [
527 | "print(f\"Shape of X array: {X.shape}\")\n",
528 | "print(f\"Shape of Y array: {Y.shape}\")\n",
529 | "\n",
530 | "try:\n",
531 | " lr_sklearn.fit(X, Y)\n",
532 | "except ValueError as err:\n",
533 | " print(err)"
534 | ]
535 | },
536 | {
537 | "cell_type": "markdown",
538 | "metadata": {},
539 | "source": [
540 | "You can increase the dimension of the array by one with the `reshape` function, but there is another way to do it:"
541 | ]
542 | },
543 | {
544 | "cell_type": "code",
545 | "execution_count": 14,
546 | "metadata": {
547 | "tags": [
548 | "graded"
549 | ]
550 | },
551 | "outputs": [
552 | {
553 | "name": "stdout",
554 | "output_type": "stream",
555 | "text": [
556 | "Shape of new X array: (200, 1)\n",
557 | "Shape of new Y array: (200, 1)\n"
558 | ]
559 | }
560 | ],
561 | "source": [
562 | "X_sklearn = X[:, np.newaxis]\n",
563 | "Y_sklearn = Y[:, np.newaxis]\n",
564 | "\n",
565 | "print(f\"Shape of new X array: {X_sklearn.shape}\")\n",
566 | "print(f\"Shape of new Y array: {Y_sklearn.shape}\")"
567 | ]
568 | },
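The `reshape(-1, 1)` approach suggested in the error message above is equivalent to indexing with `np.newaxis`. A minimal sketch of the equivalence (the toy array here is illustrative, not the full dataset):

```Python
import numpy as np

X_toy = np.array([230.1, 44.5, 17.2])  # a small slice of the TV column

# Both produce a column vector of shape (3, 1); -1 lets NumPy infer the length.
X_col_a = X_toy.reshape(-1, 1)
X_col_b = X_toy[:, np.newaxis]

print(np.array_equal(X_col_a, X_col_b))  # True
```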
569 | {
570 | "cell_type": "markdown",
571 | "metadata": {},
572 | "source": [
573 | " \n",
574 | "### Exercise 3\n",
575 | "\n",
576 | "Fit the linear regression model by passing the `X_sklearn` and `Y_sklearn` arrays to the `lr_sklearn.fit` method."
577 | ]
578 | },
579 | {
580 | "cell_type": "code",
581 | "execution_count": 16,
582 | "metadata": {
583 | "tags": [
584 | "graded"
585 | ]
586 | },
587 | "outputs": [
588 | {
589 | "data": {
590 | "text/plain": [
591 | "LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)"
592 | ]
593 | },
594 | "execution_count": 16,
595 | "metadata": {},
596 | "output_type": "execute_result"
597 | }
598 | ],
599 | "source": [
600 | "### START CODE HERE ### (~ 1 line of code)\n",
601 | "lr_sklearn.fit(X_sklearn, Y_sklearn)\n",
602 | "### END CODE HERE ###"
603 | ]
604 | },
605 | {
606 | "cell_type": "code",
607 | "execution_count": 17,
608 | "metadata": {
609 | "tags": [
610 | "graded"
611 | ]
612 | },
613 | "outputs": [
614 | {
615 | "name": "stdout",
616 | "output_type": "stream",
617 | "text": [
618 | "Linear regression using Scikit-Learn. Slope: [[0.04753664]]. Intercept: [7.03259355]\n"
619 | ]
620 | }
621 | ],
622 | "source": [
623 | "m_sklearn = lr_sklearn.coef_\n",
624 | "b_sklearn = lr_sklearn.intercept_\n",
625 | "\n",
626 | "print(f\"Linear regression using Scikit-Learn. Slope: {m_sklearn}. Intercept: {b_sklearn}\")"
627 | ]
628 | },
629 | {
630 | "cell_type": "markdown",
631 | "metadata": {},
632 | "source": [
633 | "##### __Expected Output__ \n",
634 | "\n",
635 | "```Python\n",
636 | "Linear regression using Scikit-Learn. Slope: [[0.04753664]]. Intercept: [7.03259355]\n",
637 | "```"
638 | ]
639 | },
640 | {
641 | "cell_type": "code",
642 | "execution_count": 18,
643 | "metadata": {},
644 | "outputs": [
645 | {
646 | "name": "stdout",
647 | "output_type": "stream",
648 | "text": [
649 | "\u001b[92m All tests passed\n"
650 | ]
651 | }
652 | ],
653 | "source": [
654 | "w2_unittest.test_sklearn_fit(lr_sklearn)"
655 | ]
656 | },
657 | {
658 | "cell_type": "markdown",
659 | "metadata": {},
660 | "source": [
661 | "Note that you have obtained the same result as with the `NumPy` function `polyfit`. Now, to make predictions, it is convenient to use the `Scikit-Learn` method `predict`. "
662 | ]
663 | },
664 | {
665 | "cell_type": "markdown",
666 | "metadata": {},
667 | "source": [
668 | " \n",
669 | "### Exercise 4\n",
670 | "\n",
671 | "\n",
672 | "Increase the dimension of the $X$ array using `np.newaxis` (see the example above) and pass the result to the `lr_sklearn.predict` method to make predictions."
673 | ]
674 | },
675 | {
676 | "cell_type": "code",
677 | "execution_count": 19,
678 | "metadata": {
679 | "tags": [
680 | "graded"
681 | ]
682 | },
683 | "outputs": [],
684 | "source": [
685 | "# This is organised as a function only for grading purposes.\n",
686 | "def pred_sklearn(X, lr_sklearn):\n",
687 | " ### START CODE HERE ### (~ 2 lines of code)\n",
688 | " X_2D = X[:, np.newaxis]\n",
689 | " Y = lr_sklearn.predict(X_2D)\n",
690 | " ### END CODE HERE ###\n",
691 | " \n",
692 | " return Y"
693 | ]
694 | },
695 | {
696 | "cell_type": "code",
697 | "execution_count": 20,
698 | "metadata": {
699 | "tags": [
700 | "graded"
701 | ]
702 | },
703 | "outputs": [
704 | {
705 | "name": "stdout",
706 | "output_type": "stream",
707 | "text": [
708 | "TV marketing expenses:\n",
709 | "[ 50 120 280]\n",
710 | "Predictions of sales using Scikit-Learn linear regression:\n",
711 | "[[ 9.40942557 12.7369904 20.34285287]]\n"
712 | ]
713 | }
714 | ],
715 | "source": [
716 | "Y_pred_sklearn = pred_sklearn(X_pred, lr_sklearn)\n",
717 | "\n",
718 | "print(f\"TV marketing expenses:\\n{X_pred}\")\n",
719 | "print(f\"Predictions of sales using Scikit-Learn linear regression:\\n{Y_pred_sklearn.T}\")"
720 | ]
721 | },
722 | {
723 | "cell_type": "markdown",
724 | "metadata": {},
725 | "source": [
726 | "##### __Expected Output__ \n",
727 | "\n",
728 | "```Python\n",
729 | "TV marketing expenses:\n",
730 | "[ 50 120 280]\n",
731 | "Predictions of sales using Scikit-Learn linear regression:\n",
732 | "[[ 9.40942557 12.7369904 20.34285287]]\n",
733 | "```"
734 | ]
735 | },
736 | {
737 | "cell_type": "code",
738 | "execution_count": 21,
739 | "metadata": {},
740 | "outputs": [
741 | {
742 | "name": "stdout",
743 | "output_type": "stream",
744 | "text": [
745 | "\u001b[92m All tests passed\n"
746 | ]
747 | }
748 | ],
749 | "source": [
750 | "w2_unittest.test_sklearn_predict(pred_sklearn, lr_sklearn)"
751 | ]
752 | },
753 | {
754 | "cell_type": "markdown",
755 | "metadata": {},
756 | "source": [
757 | "You can plot the linear regression line and the predictions by running the following code. The regression line is red and the predicted points are blue."
758 | ]
759 | },
760 | {
761 | "cell_type": "code",
762 | "execution_count": 22,
763 | "metadata": {
764 | "tags": [
765 | "graded"
766 | ]
767 | },
768 | "outputs": [
769 | {
770 | "data": {
771 | "text/plain": [
772 | "[]"
773 | ]
774 | },
775 | "execution_count": 22,
776 | "metadata": {},
777 | "output_type": "execute_result"
778 | },
779 | {
780 | "data": {
781 | "image/png": "<base64-encoded PNG omitted: scatter plot of TV vs Sales with the fitted regression line in red and the predicted points in blue>",
782 | "text/plain": [
783 | ""
784 | ]
785 | },
786 | "metadata": {
787 | "needs_background": "light"
788 | },
789 | "output_type": "display_data"
790 | }
791 | ],
792 | "source": [
793 | "fig, ax = plt.subplots(1,1,figsize=(8,5))\n",
794 | "ax.plot(X, Y, 'o', color='black')\n",
795 | "ax.set_xlabel('TV')\n",
796 | "ax.set_ylabel('Sales')\n",
797 | "\n",
798 | "ax.plot(X, m_sklearn[0][0]*X+b_sklearn[0], color='red')\n",
799 | "ax.plot(X_pred, Y_pred_sklearn, 'o', color='blue')"
800 | ]
801 | },
802 | {
803 | "cell_type": "markdown",
804 | "metadata": {},
805 | "source": [
806 | " \n",
807 | "## 3 - Linear Regression using Gradient Descent"
808 | ]
809 | },
810 | {
811 | "cell_type": "markdown",
812 | "metadata": {},
813 | "source": [
814 | "Functions that fit the models automatically are convenient to use, but for an in-depth understanding of the model and the maths behind it, it is good to implement the algorithm yourself. Let's try to find the linear regression coefficients $m$ and $b$ by minimising the difference between the original values $y^{(i)}$ and the predicted values $\\hat{y}^{(i)}$ with the **loss function** $L\\left(m, b\\right) = \\frac{1}{2}\\left(\\hat{y}^{(i)} - y^{(i)}\\right)^2$ for each of the training examples. The division by $2$ is just for scaling purposes; you will see the reason below when calculating the partial derivatives.\n",
815 | "\n",
816 | "To compare the resulting vector of the predictions $\\hat{Y}$ with the vector $Y$ of original values $y^{(i)}$, you can take an average of the loss function values for each of the training examples:\n",
817 | "\n",
818 | "$$E\\left(m, b\\right) = \\frac{1}{2n}\\sum_{i=1}^{n} \\left(\\hat{y}^{(i)} - y^{(i)}\\right)^2 = \n",
819 | "\\frac{1}{2n}\\sum_{i=1}^{n} \\left(mx^{(i)}+b - y^{(i)}\\right)^2,\\tag{1}$$\n",
820 | "\n",
821 | "where $n$ is the number of data points. This function is called the sum of squares **cost function**. To use the gradient descent algorithm, calculate the partial derivatives as:\n",
822 | "\n",
823 | "\\begin{align}\n",
824 | "\\frac{\\partial E }{ \\partial m } &= \n",
825 | "\\frac{1}{n}\\sum_{i=1}^{n} \\left(mx^{(i)}+b - y^{(i)}\\right)x^{(i)},\\\\\n",
826 | "\\frac{\\partial E }{ \\partial b } &= \n",
827 | "\\frac{1}{n}\\sum_{i=1}^{n} \\left(mx^{(i)}+b - y^{(i)}\\right),\n",
828 | "\\tag{2}\\end{align}\n",
829 | "\n",
830 | "and update the parameters iteratively using the expressions\n",
831 | "\n",
832 | "\\begin{align}\n",
833 | "m &= m - \\alpha \\frac{\\partial E }{ \\partial m },\\\\\n",
834 | "b &= b - \\alpha \\frac{\\partial E }{ \\partial b },\n",
835 | "\\tag{3}\\end{align}\n",
836 | "\n",
837 | "where $\\alpha$ is the learning rate."
838 | ]
839 | },
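To make equations $(2)$ and $(3)$ concrete, here is a minimal sketch of a single gradient step on a toy dataset of three points (all names here are illustrative and not part of the assignment):

```Python
import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([2.0, 4.0, 6.0])  # exactly y = 2x, so the optimum is m = 2, b = 0
m, b, alpha = 0.0, 0.0, 0.1

residual = m * x + b - y                     # (m x_i + b - y_i) for each point
grad_m = np.mean(residual * x)               # first expression in equations (2)
grad_b = np.mean(residual)                   # second expression in equations (2)
m, b = m - alpha * grad_m, b - alpha * grad_b  # update rule, equations (3)

print(m, b)  # after one step: m = 0.9333..., b = 0.4 (heading toward m = 2, b = 0)
```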
840 | {
841 | "cell_type": "markdown",
842 | "metadata": {},
843 | "source": [
844 | "The original arrays `X` and `Y` have different units. To make the gradient descent algorithm efficient, you need to bring them to the same scale. A common approach is **normalization**: subtract the mean value of the array from each of its elements and divide them by the standard deviation (a statistical measure of the amount of dispersion of a set of values). If you are not familiar with the mean and standard deviation, do not worry about this for now - it is covered in the next course of the Specialization.\n",
845 | "\n",
846 | "Normalization is not compulsory - gradient descent would work without it. But because of the different scales of `X` and `Y`, the cost function would be much steeper. You would then need to take a significantly smaller learning rate $\\alpha$, and the algorithm would require thousands of iterations to converge instead of a few dozen. Normalization increases the efficiency of the gradient descent algorithm.\n",
847 | "\n",
848 | "Normalization is implemented in the following code:"
849 | ]
850 | },
851 | {
852 | "cell_type": "code",
853 | "execution_count": 23,
854 | "metadata": {
855 | "tags": [
856 | "graded"
857 | ]
858 | },
859 | "outputs": [],
860 | "source": [
861 | "X_norm = (X - np.mean(X))/np.std(X)\n",
862 | "Y_norm = (Y - np.mean(Y))/np.std(Y)"
863 | ]
864 | },
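A quick sanity check on this step: after normalization both arrays should have mean approximately $0$ and standard deviation approximately $1$ (up to floating-point error). A minimal sketch, assuming the `X_norm` and `Y_norm` arrays from the cell above:

```Python
print(np.mean(X_norm), np.std(X_norm))  # approximately 0.0 and 1.0
print(np.mean(Y_norm), np.std(Y_norm))  # approximately 0.0 and 1.0
```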
865 | {
866 | "cell_type": "markdown",
867 | "metadata": {},
868 | "source": [
869 | "Define the cost function according to equation $(1)$:"
870 | ]
871 | },
872 | {
873 | "cell_type": "code",
874 | "execution_count": 24,
875 | "metadata": {
876 | "tags": [
877 | "graded"
878 | ]
879 | },
880 | "outputs": [],
881 | "source": [
882 | "def E(m, b, X, Y):\n",
883 | " return 1/(2*len(Y))*np.sum((m*X + b - Y)**2)"
884 | ]
885 | },
886 | {
887 | "cell_type": "markdown",
888 | "metadata": {},
889 | "source": [
890 | " \n",
891 | "### Exercise 5\n",
892 | "\n",
893 | "\n",
894 | "Define the functions `dEdm` and `dEdb` to calculate the partial derivatives according to equations $(2)$. This can be done using the vector form of the input data `X` and `Y`."
895 | ]
896 | },
897 | {
898 | "cell_type": "code",
899 | "execution_count": 35,
900 | "metadata": {
901 | "tags": [
902 | "graded"
903 | ]
904 | },
905 | "outputs": [],
906 | "source": [
907 | "def dEdm(m, b, X, Y):\n",
908 | " ### START CODE HERE ### (~ 1 line of code)\n",
909 | "    # Vectorized form of the first expression in equations (2).\n",
910 | " res = 1/len(Y)*np.dot(m*X + b - Y, X)\n",
911 | " ### END CODE HERE ###\n",
912 | " \n",
913 | " return res\n",
914 | " \n",
915 | "\n",
916 | "def dEdb(m, b, X, Y):\n",
917 | " ### START CODE HERE ### (~ 1 line of code)\n",
918 | "    # Vectorized form of the second expression in equations (2).\n",
919 | "    res = 1/len(Y)*np.sum(m*X + b - Y)\n",
920 | " ### END CODE HERE ###\n",
921 | " \n",
922 | " return res\n"
923 | ]
924 | },
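A common way to gain confidence in analytic gradients like these is a central finite-difference check against the cost function `E`. A minimal sketch (the helper name `numeric_grad` is illustrative, not part of the assignment):

```Python
def numeric_grad(f, m, b, X, Y, eps=1e-6):
    # Central differences of the cost f with respect to m and b.
    dm = (f(m + eps, b, X, Y) - f(m - eps, b, X, Y)) / (2 * eps)
    db = (f(m, b + eps, X, Y) - f(m, b - eps, X, Y)) / (2 * eps)
    return dm, db

dm_num, db_num = numeric_grad(E, 1, 5, X_norm, Y_norm)
print(dm_num - dEdm(1, 5, X_norm, Y_norm))  # should be close to 0
print(db_num - dEdb(1, 5, X_norm, Y_norm))  # should be close to 0
```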
925 | {
926 | "cell_type": "code",
927 | "execution_count": 36,
928 | "metadata": {
929 | "tags": [
930 | "graded"
931 | ]
932 | },
933 | "outputs": [
934 | {
935 | "name": "stdout",
936 | "output_type": "stream",
937 | "text": [
938 | "-0.7822244248616067\n",
939 | "5.098005351200641e-16\n",
940 | "0.21777557513839355\n",
941 | "5.000000000000002\n"
942 | ]
943 | }
944 | ],
945 | "source": [
946 | "print(dEdm(0, 0, X_norm, Y_norm))\n",
947 | "print(dEdb(0, 0, X_norm, Y_norm))\n",
948 | "print(dEdm(1, 5, X_norm, Y_norm))\n",
949 | "print(dEdb(1, 5, X_norm, Y_norm))"
950 | ]
951 | },
952 | {
953 | "cell_type": "markdown",
954 | "metadata": {},
955 | "source": [
956 | "##### __Expected Output__ \n",
957 | "\n",
958 | "```Python\n",
959 | "-0.7822244248616067\n",
960 | "5.098005351200641e-16\n",
961 | "0.21777557513839355\n",
962 | "5.000000000000002\n",
963 | "```"
964 | ]
965 | },
966 | {
967 | "cell_type": "code",
968 | "execution_count": 37,
969 | "metadata": {},
970 | "outputs": [
971 | {
972 | "name": "stdout",
973 | "output_type": "stream",
974 | "text": [
975 | "\u001b[92m All tests passed\n"
976 | ]
977 | }
978 | ],
979 | "source": [
980 | "w2_unittest.test_partial_derivatives(dEdm, dEdb, X_norm, Y_norm)"
981 | ]
982 | },
983 | {
984 | "cell_type": "markdown",
985 | "metadata": {},
986 | "source": [
987 | " \n",
988 | "### Exercise 6\n",
989 | "\n",
990 | "\n",
991 | "Implement gradient descent using expressions $(3)$:\n",
992 | "\\begin{align}\n",
993 | "m &= m - \\alpha \\frac{\\partial E }{ \\partial m },\\\\\n",
994 | "b &= b - \\alpha \\frac{\\partial E }{ \\partial b },\n",
995 | "\\end{align}\n",
996 | "\n",
997 | "where $\\alpha$ is the `learning_rate`."
998 | ]
999 | },
1000 | {
1001 | "cell_type": "code",
1002 | "execution_count": 40,
1003 | "metadata": {
1004 | "tags": [
1005 | "graded"
1006 | ]
1007 | },
1008 | "outputs": [],
1009 | "source": [
1010 | "def gradient_descent(dEdm, dEdb, m, b, X, Y, learning_rate = 0.001, num_iterations = 1000, print_cost=False):\n",
1011 | " for iteration in range(num_iterations):\n",
1012 | " ### START CODE HERE ### (~ 2 lines of code)\n",
1013 | " m_new = m - learning_rate * dEdm(m, b, X,Y)\n",
1014 | " b_new = b - learning_rate * dEdb(m, b, X,Y)\n",
1015 | " ### END CODE HERE ###\n",
1016 | " m = m_new\n",
1017 | " b = b_new\n",
1018 | " if print_cost:\n",
1019 | " print (f\"Cost after iteration {iteration}: {E(m, b, X, Y)}\")\n",
1020 | " \n",
1021 | " return m, b"
1022 | ]
1023 | },
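Simple linear regression also has a closed-form least-squares solution, which you can use to cross-check the gradient descent result. A minimal sketch (on normalized data the optimal slope equals the sample correlation of `X` and `Y`, and the optimal intercept is approximately $0$):

```Python
# Closed-form least-squares solution for y = m*x + b on the normalized data.
x_c = X_norm - np.mean(X_norm)
m_closed = np.sum(x_c * (Y_norm - np.mean(Y_norm))) / np.sum(x_c**2)
b_closed = np.mean(Y_norm) - m_closed * np.mean(X_norm)

print(m_closed, b_closed)  # should match the converged result below (~0.7822, ~0)
```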
1024 | {
1025 | "cell_type": "code",
1026 | "execution_count": 41,
1027 | "metadata": {
1028 | "tags": [
1029 | "graded"
1030 | ]
1031 | },
1032 | "outputs": [
1033 | {
1034 | "name": "stdout",
1035 | "output_type": "stream",
1036 | "text": [
1037 | "(0.49460408269589495, -3.4890673683563055e-16)\n",
1038 | "(0.9791767513915026, 4.521910375044022)\n"
1039 | ]
1040 | }
1041 | ],
1042 | "source": [
1043 | "print(gradient_descent(dEdm, dEdb, 0, 0, X_norm, Y_norm))\n",
1044 | "print(gradient_descent(dEdm, dEdb, 1, 5, X_norm, Y_norm, learning_rate = 0.01, num_iterations = 10))"
1045 | ]
1046 | },
1047 | {
1048 | "cell_type": "markdown",
1049 | "metadata": {},
1050 | "source": [
1051 | "##### __Expected Output__ \n",
1052 | "\n",
1053 | "```Python\n",
1054 | "(0.49460408269589495, -3.489285249624889e-16)\n",
1055 | "(0.9791767513915026, 4.521910375044022)\n",
1056 | "```"
1057 | ]
1058 | },
1059 | {
1060 | "cell_type": "code",
1061 | "execution_count": 42,
1062 | "metadata": {},
1063 | "outputs": [
1064 | {
1065 | "name": "stdout",
1066 | "output_type": "stream",
1067 | "text": [
1068 | "\u001b[92m All tests passed\n"
1069 | ]
1070 | }
1071 | ],
1072 | "source": [
1073 | "w2_unittest.test_gradient_descent(gradient_descent, dEdm, dEdb, X_norm, Y_norm)"
1074 | ]
1075 | },
1076 | {
1077 | "cell_type": "markdown",
1078 | "metadata": {},
1079 | "source": [
1080 | "Now run the gradient descent method starting from the initial point $\\left(m_0, b_0\\right)=\\left(0, 0\\right)$."
1081 | ]
1082 | },
1083 | {
1084 | "cell_type": "code",
1085 | "execution_count": 43,
1086 | "metadata": {
1087 | "tags": [
1088 | "graded"
1089 | ]
1090 | },
1091 | "outputs": [
1092 | {
1093 | "name": "stdout",
1094 | "output_type": "stream",
1095 | "text": [
1096 | "Cost after iteration 0: 0.20629997559196597\n",
1097 | "Cost after iteration 1: 0.1945519746156446\n",
1098 | "Cost after iteration 2: 0.19408205457659175\n",
1099 | "Cost after iteration 3: 0.19406325777502967\n",
1100 | "Cost after iteration 4: 0.19406250590296714\n",
1101 | "Cost after iteration 5: 0.1940624758280847\n",
1102 | "Cost after iteration 6: 0.19406247462508938\n",
1103 | "Cost after iteration 7: 0.19406247457696957\n",
1104 | "Cost after iteration 8: 0.19406247457504477\n",
1105 | "Cost after iteration 9: 0.19406247457496775\n",
1106 | "Cost after iteration 10: 0.1940624745749647\n",
1107 | "Cost after iteration 11: 0.1940624745749646\n",
1108 | "Cost after iteration 12: 0.19406247457496456\n",
1109 | "Cost after iteration 13: 0.19406247457496456\n",
1110 | "Cost after iteration 14: 0.19406247457496456\n",
1111 | "Cost after iteration 15: 0.19406247457496456\n",
1112 | "Cost after iteration 16: 0.19406247457496456\n",
1113 | "Cost after iteration 17: 0.19406247457496456\n",
1114 | "Cost after iteration 18: 0.19406247457496456\n",
1115 | "Cost after iteration 19: 0.19406247457496456\n",
1116 | "Cost after iteration 20: 0.19406247457496456\n",
1117 | "Cost after iteration 21: 0.19406247457496456\n",
1118 | "Cost after iteration 22: 0.19406247457496456\n",
1119 | "Cost after iteration 23: 0.19406247457496456\n",
1120 | "Cost after iteration 24: 0.19406247457496456\n",
1121 | "Cost after iteration 25: 0.19406247457496456\n",
1122 | "Cost after iteration 26: 0.19406247457496456\n",
1123 | "Cost after iteration 27: 0.19406247457496456\n",
1124 | "Cost after iteration 28: 0.19406247457496456\n",
1125 | "Cost after iteration 29: 0.19406247457496456\n",
1126 | "Gradient descent result: m_min, b_min = 0.7822244248616068, -6.357414594759804e-16\n"
1127 | ]
1128 | }
1129 | ],
1130 | "source": [
1131 | "m_initial = 0; b_initial = 0; num_iterations = 30; learning_rate = 1.2\n",
1132 | "m_gd, b_gd = gradient_descent(dEdm, dEdb, m_initial, b_initial, \n",
1133 | " X_norm, Y_norm, learning_rate, num_iterations, print_cost=True)\n",
1134 | "\n",
1135 | "print(f\"Gradient descent result: m_min, b_min = {m_gd}, {b_gd}\") "
1136 | ]
1137 | },
1138 | {
1139 | "cell_type": "markdown",
1140 | "metadata": {},
1141 | "source": [
1142 | "Remember that the initial datasets were normalized. To make the predictions, you need to normalize the `X_pred` array, calculate `Y_pred` with the linear regression coefficients `m_gd`, `b_gd`, and then **denormalize** the result (perform the reverse of normalization):"
1143 | ]
1144 | },
1145 | {
1146 | "cell_type": "code",
1147 | "execution_count": 44,
1148 | "metadata": {
1149 | "tags": [
1150 | "graded"
1151 | ]
1152 | },
1153 | "outputs": [
1154 | {
1155 | "name": "stdout",
1156 | "output_type": "stream",
1157 | "text": [
1158 | "TV marketing expenses:\n",
1159 | "[ 50 120 280]\n",
1160 | "Predictions of sales using Scikit-Learn linear regression:\n",
1161 | "[[ 9.40942557 12.7369904 20.34285287]]\n",
1162 | "Predictions of sales using Gradient Descent:\n",
1163 | "[ 9.40942557 12.7369904 20.34285287]\n"
1164 | ]
1165 | }
1166 | ],
1167 | "source": [
1168 | "X_pred = np.array([50, 120, 280])\n",
1169 | "# Use the same mean and standard deviation of the original training array X\n",
1170 | "X_pred_norm = (X_pred - np.mean(X))/np.std(X)\n",
1171 | "Y_pred_gd_norm = m_gd * X_pred_norm + b_gd\n",
1172 | "# Use the same mean and standard deviation of the original training array Y\n",
1173 | "Y_pred_gd = Y_pred_gd_norm * np.std(Y) + np.mean(Y)\n",
1174 | "\n",
1175 | "print(f\"TV marketing expenses:\\n{X_pred}\")\n",
1176 | "print(f\"Predictions of sales using Scikit-Learn linear regression:\\n{Y_pred_sklearn.T}\")\n",
1177 | "print(f\"Predictions of sales using Gradient Descent:\\n{Y_pred_gd}\")"
1178 | ]
1179 | },
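You can also back-transform the coefficients fitted on the normalized data to the original scale, which should reproduce the `Scikit-Learn` slope and intercept. A minimal sketch of this back-transformation (obtained by substituting the normalization formulas into $\hat{y} = mx + b$):

```Python
# Undo the normalization of both X and Y in the fitted line.
m_orig = m_gd * np.std(Y) / np.std(X)
b_orig = np.mean(Y) + np.std(Y) * b_gd - m_orig * np.mean(X)

print(m_orig, b_orig)  # should be close to 0.04753664 and 7.03259355
```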
1180 | {
1181 | "cell_type": "markdown",
1182 | "metadata": {},
1183 | "source": [
1184 | "You should have gotten results similar to those in the previous sections. \n",
1185 | "\n",
1186 | "Well done! Now you know how the gradient descent algorithm can be applied to train a real model. Reproducing the results manually for a simple case should give you extra confidence that you understand what happens under the hood of commonly used functions."
1187 | ]
1188 | },
1189 | {
1190 | "cell_type": "code",
1191 | "execution_count": null,
1192 | "metadata": {
1193 | "tags": [
1194 | "graded"
1195 | ]
1196 | },
1197 | "outputs": [],
1198 | "source": []
1199 | }
1200 | ],
1201 | "metadata": {
1202 | "accelerator": "GPU",
1203 | "colab": {
1204 | "collapsed_sections": [],
1205 | "name": "C1_W1_Assignment_Solution.ipynb",
1206 | "provenance": []
1207 | },
1208 | "coursera": {
1209 | "schema_names": [
1210 | "AI4MC1-1"
1211 | ]
1212 | },
1213 | "grader_version": "1",
1214 | "kernelspec": {
1215 | "display_name": "Python 3",
1216 | "language": "python",
1217 | "name": "python3"
1218 | },
1219 | "language_info": {
1220 | "codemirror_mode": {
1221 | "name": "ipython",
1222 | "version": 3
1223 | },
1224 | "file_extension": ".py",
1225 | "mimetype": "text/x-python",
1226 | "name": "python",
1227 | "nbconvert_exporter": "python",
1228 | "pygments_lexer": "ipython3",
1229 | "version": "3.8.8"
1230 | },
1231 | "toc": {
1232 | "base_numbering": 1,
1233 | "nav_menu": {},
1234 | "number_sections": true,
1235 | "sideBar": true,
1236 | "skip_h1_title": false,
1237 | "title_cell": "Table of Contents",
1238 | "title_sidebar": "Contents",
1239 | "toc_cell": false,
1240 | "toc_position": {},
1241 | "toc_section_display": true,
1242 | "toc_window_display": false
1243 | },
1244 | "vscode": {
1245 | "interpreter": {
1246 | "hash": "478841ab876a4250505273c8a697bbc1b6b194054b009c227dc606f17fb56272"
1247 | }
1248 | }
1249 | },
1250 | "nbformat": 4,
1251 | "nbformat_minor": 1
1252 | }
1253 |
--------------------------------------------------------------------------------
/Week2/PersonalNotes_Mathematics for ML C2W2_230305_191309.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week2/PersonalNotes_Mathematics for ML C2W2_230305_191309.pdf
--------------------------------------------------------------------------------
/Week2/data/tvmarketing.csv:
--------------------------------------------------------------------------------
1 | TV,Sales
2 | 230.1,22.1
3 | 44.5,10.4
4 | 17.2,9.3
5 | 151.5,18.5
6 | 180.8,12.9
7 | 8.7,7.2
8 | 57.5,11.8
9 | 120.2,13.2
10 | 8.6,4.8
11 | 199.8,10.6
12 | 66.1,8.6
13 | 214.7,17.4
14 | 23.8,9.2
15 | 97.5,9.7
16 | 204.1,19
17 | 195.4,22.4
18 | 67.8,12.5
19 | 281.4,24.4
20 | 69.2,11.3
21 | 147.3,14.6
22 | 218.4,18
23 | 237.4,12.5
24 | 13.2,5.6
25 | 228.3,15.5
26 | 62.3,9.7
27 | 262.9,12
28 | 142.9,15
29 | 240.1,15.9
30 | 248.8,18.9
31 | 70.6,10.5
32 | 292.9,21.4
33 | 112.9,11.9
34 | 97.2,9.6
35 | 265.6,17.4
36 | 95.7,9.5
37 | 290.7,12.8
38 | 266.9,25.4
39 | 74.7,14.7
40 | 43.1,10.1
41 | 228,21.5
42 | 202.5,16.6
43 | 177,17.1
44 | 293.6,20.7
45 | 206.9,12.9
46 | 25.1,8.5
47 | 175.1,14.9
48 | 89.7,10.6
49 | 239.9,23.2
50 | 227.2,14.8
51 | 66.9,9.7
52 | 199.8,11.4
53 | 100.4,10.7
54 | 216.4,22.6
55 | 182.6,21.2
56 | 262.7,20.2
57 | 198.9,23.7
58 | 7.3,5.5
59 | 136.2,13.2
60 | 210.8,23.8
61 | 210.7,18.4
62 | 53.5,8.1
63 | 261.3,24.2
64 | 239.3,15.7
65 | 102.7,14
66 | 131.1,18
67 | 69,9.3
68 | 31.5,9.5
69 | 139.3,13.4
70 | 237.4,18.9
71 | 216.8,22.3
72 | 199.1,18.3
73 | 109.8,12.4
74 | 26.8,8.8
75 | 129.4,11
76 | 213.4,17
77 | 16.9,8.7
78 | 27.5,6.9
79 | 120.5,14.2
80 | 5.4,5.3
81 | 116,11
82 | 76.4,11.8
83 | 239.8,12.3
84 | 75.3,11.3
85 | 68.4,13.6
86 | 213.5,21.7
87 | 193.2,15.2
88 | 76.3,12
89 | 110.7,16
90 | 88.3,12.9
91 | 109.8,16.7
92 | 134.3,11.2
93 | 28.6,7.3
94 | 217.7,19.4
95 | 250.9,22.2
96 | 107.4,11.5
97 | 163.3,16.9
98 | 197.6,11.7
99 | 184.9,15.5
100 | 289.7,25.4
101 | 135.2,17.2
102 | 222.4,11.7
103 | 296.4,23.8
104 | 280.2,14.8
105 | 187.9,14.7
106 | 238.2,20.7
107 | 137.9,19.2
108 | 25,7.2
109 | 90.4,8.7
110 | 13.1,5.3
111 | 255.4,19.8
112 | 225.8,13.4
113 | 241.7,21.8
114 | 175.7,14.1
115 | 209.6,15.9
116 | 78.2,14.6
117 | 75.1,12.6
118 | 139.2,12.2
119 | 76.4,9.4
120 | 125.7,15.9
121 | 19.4,6.6
122 | 141.3,15.5
123 | 18.8,7
124 | 224,11.6
125 | 123.1,15.2
126 | 229.5,19.7
127 | 87.2,10.6
128 | 7.8,6.6
129 | 80.2,8.8
130 | 220.3,24.7
131 | 59.6,9.7
132 | 0.7,1.6
133 | 265.2,12.7
134 | 8.4,5.7
135 | 219.8,19.6
136 | 36.9,10.8
137 | 48.3,11.6
138 | 25.6,9.5
139 | 273.7,20.8
140 | 43,9.6
141 | 184.9,20.7
142 | 73.4,10.9
143 | 193.7,19.2
144 | 220.5,20.1
145 | 104.6,10.4
146 | 96.2,11.4
147 | 140.3,10.3
148 | 240.1,13.2
149 | 243.2,25.4
150 | 38,10.9
151 | 44.7,10.1
152 | 280.7,16.1
153 | 121,11.6
154 | 197.6,16.6
155 | 171.3,19
156 | 187.8,15.6
157 | 4.1,3.2
158 | 93.9,15.3
159 | 149.8,10.1
160 | 11.7,7.3
161 | 131.7,12.9
162 | 172.5,14.4
163 | 85.7,13.3
164 | 188.4,14.9
165 | 163.5,18
166 | 117.2,11.9
167 | 234.5,11.9
168 | 17.9,8
169 | 206.8,12.2
170 | 215.4,17.1
171 | 284.3,15
172 | 50,8.4
173 | 164.5,14.5
174 | 19.6,7.6
175 | 168.4,11.7
176 | 222.4,11.5
177 | 276.9,27
178 | 248.4,20.2
179 | 170.2,11.7
180 | 276.7,11.8
181 | 165.6,12.6
182 | 156.6,10.5
183 | 218.5,12.2
184 | 56.2,8.7
185 | 287.6,26.2
186 | 253.8,17.6
187 | 205,22.6
188 | 139.5,10.3
189 | 191.1,17.3
190 | 286,15.9
191 | 18.7,6.7
192 | 39.5,10.8
193 | 75.5,9.9
194 | 17.2,5.9
195 | 166.8,19.6
196 | 149.7,17.3
197 | 38.2,7.6
198 | 94.2,9.7
199 | 177,12.8
200 | 283.6,25.5
201 | 232.1,13.4
202 |
--------------------------------------------------------------------------------
/Week2/w2_tools.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from matplotlib.widgets import Button
5 | from matplotlib.patches import FancyArrowPatch
6 | from matplotlib.gridspec import GridSpec
7 | from IPython.display import display, clear_output
8 |
9 |
10 | def plot_f(x_range, y_range, f, ox_position):
11 | x = np.linspace(*x_range, 100)
12 | fig, ax = plt.subplots(1,1,figsize=(8,4))
13 |
14 | fig.canvas.toolbar_visible = False
15 | fig.canvas.header_visible = False
16 | fig.canvas.footer_visible = False
17 |
18 | ax.set_ylim(*y_range)
19 | ax.set_xlim(*x_range)
20 | ax.set_ylabel('$f$')
21 | ax.set_xlabel('$x$')
22 | ax.spines['left'].set_position('zero')
23 | ax.spines['bottom'].set_position(('data', ox_position))
24 | ax.spines['right'].set_color('none')
25 | ax.spines['top'].set_color('none')
26 | ax.xaxis.set_ticks_position('bottom')
27 | ax.yaxis.set_ticks_position('left')
28 | ax.autoscale(enable=False)
29 |
30 | pf = ax.plot(x, f(x), 'k')
31 |
32 | return fig, ax
33 |
34 |
35 | class gradient_descent_one_variable:
36 | """ class to run one interactive plot """
37 | def __init__(self, x_range, y_range, f, dfdx, gd, n_it, lr, x_0, ox_position, t_position):
38 | x = np.linspace(*x_range, 100)
39 | fig, ax = plot_f(x_range, y_range, f, ox_position)
40 |
41 | # Initialize plot.
42 | self.fig = fig
43 | self.ax = ax
44 | self.x = x
45 | self.f = f
46 | self.dfdx = dfdx
47 | self.gd = gd
48 | self.n_it = n_it
49 | self.lr = lr
50 | self.x_0 = x_0
51 | self.x_range = x_range
52 | self.i = 0
53 | self.ox_position = ox_position
54 | self.t_position = t_position
55 |
56 | self.update_plot_point(firsttime=True)
57 | self.path = path(self.x_0, self.ax, self.ox_position) # initialize an empty path, avoids existence check
58 |
59 | time.sleep(0.2)
60 | clear_output(wait=True)
61 | display(self.fig)
62 |
63 | self.run_gd()
64 | self.cpoint = self.fig.canvas.mpl_connect('button_press_event', self.click_plot)
65 |
66 | def click_plot(self, event):
67 | ''' Called when click in plot '''
68 | if (event.xdata <= max(self.x) and event.xdata >= min(self.x)):
69 | self.x_0 = event.xdata
70 | self.i = 0
71 | self.path.re_init(self.x_0)
72 | self.update_plot_point()
73 | time.sleep(0.2)
74 | self.run_gd()
75 |
76 | def update_plot_point(self, firsttime=False):
77 |
78 | # Remove items and re-add them on plot.
79 | if not firsttime:
80 | for artist in self.p_items:
81 | artist.remove()
82 |
83 | a = self.ax.scatter(self.x_0, self.f(self.x_0), marker='o', s=100, color='r', zorder=10)
84 | b = self.ax.scatter(self.x_0, self.ox_position, marker='o', s=100, color='k', zorder=10)
85 | c = self.ax.hlines(self.f(self.x_0), 0, self.x_0, lw=2, ls='dotted', color='k')
86 | d = self.ax.vlines(self.x_0, self.ox_position, self.f(self.x_0), lw=2, ls='dotted', color='k')
87 | t_it = self.ax.annotate(f"Iteration #${self.i}$", xy=(self.t_position[0], self.t_position[1]),
88 | xytext=(4,4), textcoords='offset points', size=10)
89 | t_x_0 = self.ax.annotate(f"$x_0 = {self.x_0:0.4f}$", xy=(self.t_position[0], self.t_position[1]-1),
90 | xytext=(4,4), textcoords='offset points', size=10)
91 | t_f = self.ax.annotate(f"$f\\,\\left(x_0\\right) = {self.f(self.x_0):0.2f}$",
92 | xy=(self.t_position[0], self.t_position[1]-2), xytext=(4,4),
93 | textcoords='offset points', size=10)
94 | t_dfdx = self.ax.annotate(f"$f\\,'\\left(x_0\\right) = {self.dfdx(self.x_0):0.4f}$",
95 | xy=(self.t_position[0], self.t_position[1]-3),
96 | xytext=(4,4), textcoords='offset points', size=10)
97 |
98 | self.p_items = [a, b, c, d, t_it, t_x_0, t_f, t_dfdx]
99 | self.fig.canvas.draw()
100 |
101 | def run_gd(self):
102 | self.i = 1
103 | x_0_new = self.gd(self.dfdx, self.x_0, self.lr, 1)
104 | while (self.i <= self.n_it and abs(self.dfdx(x_0_new)) >= 0.00001 and x_0_new >= self.x_range[0] and x_0_new <= self.x_range[1]):
105 | x_0_new = self.gd(self.dfdx, self.x_0, self.lr, 1)
106 | self.path.add_path_item(x_0_new, self.f)
107 | self.x_0 = x_0_new
108 | time.sleep(0.05)
109 | self.update_plot_point()
110 | clear_output(wait=True)
111 | display(self.fig)
112 | self.i += 1
113 |
114 | if abs(self.dfdx(self.x_0)) >= 0.00001 or self.x_0 < self.x_range[0] or self.x_0 > self.x_range[1]:
115 | t_res = self.ax.annotate("Has Not Converged", xy=(self.t_position[0], self.t_position[1]-4),
116 | xytext=(4,4), textcoords='offset points', size=10)
117 | else:
118 | t_res = self.ax.annotate("Converged", xy=(self.t_position[0], self.t_position[1]-4),
119 | xytext=(4,4), textcoords='offset points', size=10)
120 | t_instruction = self.ax.text(0.3,0.95,"[Click on the plot to choose initial point]",
121 | size=10, color="r", transform=self.ax.transAxes)
122 | self.p_items.append(t_res)
123 | self.p_items.append(t_instruction)
124 | # Clear last time at the end, so there is no duplicate with the cell output.
125 | clear_output(wait=True)
126 | # plt.close()
127 |
128 |
129 | class path:
130 | ''' tracks paths during gradient descent on the plot '''
131 | def __init__(self, x_0, ax, ox_position):
132 | ''' x_0 at start of path '''
133 | self.path_items = []
134 | self.x_0 = x_0
135 | self.ax = ax
136 | self.ox_position = ox_position
137 |
138 | def re_init(self, x_0):
139 | for artist in self.path_items:
140 | artist.remove()
141 | self.path_items = []
142 | self.x_0 = x_0
143 |
144 | def add_path_item(self, x_0, f):
145 | a = FancyArrowPatch(
146 | posA=(self.x_0, self.ox_position), posB=(x_0, self.ox_position), color='r',
147 | arrowstyle='simple, head_width=5, head_length=10, tail_width=1.0',
148 | )
149 | b = self.ax.scatter(self.x_0, f(self.x_0), facecolors='none', edgecolors='r', ls='dotted', s=100, zorder=10)
150 | self.ax.add_artist(a)
151 | self.path_items.append(a)
152 | self.path_items.append(b)
153 | self.x_0 = x_0
154 |
155 |
156 | # +
157 | def f_example_2(x):
158 | return (np.exp(x) - np.log(x))*np.sin(np.pi*x*2)
159 |
160 | def dfdx_example_2(x):
161 | return (np.exp(x) - 1/x)*np.sin(np.pi*x*2) + (np.exp(x) - \
162 | np.log(x))*np.cos(np.pi*x*2)*2*np.pi
163 |
164 |
165 | # +
166 | def f_example_3(x,y):
167 | return (85+ 0.1*(- 1/9*(x-6)*x**2*y**3 + 2/3*(x-6)*x**2*y**2))
168 |
169 | def dfdx_example_3(x,y):
170 | return 0.1/3*x*y**2*(2-y/3)*(3*x-12)
171 |
172 | def dfdy_example_3(x,y):
173 | return 0.1/3*(x-6)*x**2*y*(4-y)
174 |
175 |
176 | # +
177 | def f_example_4(x,y):
178 | return -(10/(3+3*(x-.5)**2+3*(y-.5)**2) + \
179 | 2/(1+2*((x-3)**2)+2*(y-1.5)**2) + \
180 | 3/(1+.5*((x-3.5)**2)+0.5*(y-4)**2))+10
181 |
182 | def dfdx_example_4(x,y):
183 | return -(-2*3*(x-0.5)*10/(3+3*(x-0.5)**2+3*(y-0.5)**2)**2 + \
184 | -2*2*(x-3)*2/(1+2*((x-3)**2)+2*(y-1.5)**2)**2 +\
185 | -2*0.5*(x-3.5)*3/(1+.5*((x-3.5)**2)+0.5*(y-4)**2)**2)
186 |
187 | def dfdy_example_4(x,y):
188 | return -(-2*3*(y-0.5)*10/(3+3*(x-0.5)**2+3*(y-0.5)**2)**2 + \
189 | -2*2*(y-1.5)*2/(1+2*((x-3)**2)+2*(y-1.5)**2)**2 +\
190 | -0.5*2*(y-4)*3/(1+.5*((x-3.5)**2)+0.5*(y-4)**2)**2)
191 |
192 |
193 | # -
194 |
195 | def plot_f_cont_and_surf(x_range, y_range, z_range, f, cmap, view):
196 |
197 | fig = plt.figure( figsize=(10,5))
198 | fig.canvas.toolbar_visible = False
199 | fig.canvas.header_visible = False
200 | fig.canvas.footer_visible = False
201 | fig.set_facecolor('#ffffff') #white
202 | gs = GridSpec(1, 2, figure=fig)
203 | axc = fig.add_subplot(gs[0, 0])
204 | axs = fig.add_subplot(gs[0, 1], projection='3d')
205 |
206 | x = np.linspace(*x_range, 51)
207 | y = np.linspace(*y_range, 51)
208 | X,Y = np.meshgrid(x,y)
209 |
210 | cont = axc.contour(X, Y, f(X, Y), cmap=cmap, levels=18, linewidths=2, alpha=0.7)
211 | axc.set_xlabel('$x$')
212 | axc.set_ylabel('$y$')
213 | axc.set_xlim(*x_range)
214 | axc.set_ylim(*y_range)
215 | axc.set_aspect("equal")
216 | axc.autoscale(enable=False)
217 |
218 | surf = axs.plot_surface(X,Y, f(X,Y), cmap=cmap,
219 | antialiased=True, cstride=1, rstride=1, alpha=0.69)
220 | axs.set_xlabel('$x$')
221 | axs.set_ylabel('$y$')
222 | axs.set_zlabel('$f$')
223 | axs.set_xlim(*x_range)
224 | axs.set_ylim(*y_range)
225 | axs.set_zlim(*z_range)
226 | axs.view_init(elev=view['elev'], azim=view['azim'])
227 | axs.autoscale(enable=False)
228 |
229 | return fig, axc, axs
230 |
231 |
232 | class gradient_descent_two_variables:
233 | """ class to run one interactive plot """
234 | def __init__(self, x_range, y_range, z_range, f, dfdx, dfdy, gd, n_it, lr, x_0, y_0,
235 | t_position, t_space, instr_position, cmap, view):
236 |
237 | x = np.linspace(*x_range, 51)
238 | y = np.linspace(*y_range, 51)
239 | fig, axc, axs = plot_f_cont_and_surf(x_range, y_range, z_range, f, cmap, view)
240 |
241 | # Initialize plot.
242 | self.fig = fig
243 | self.axc = axc
244 | self.axs = axs
245 | self.x = x
246 | self.y = y
247 | self.f = f
248 | self.dfdx = dfdx
249 | self.dfdy = dfdy
250 | self.gd = gd
251 | self.n_it = n_it
252 | self.lr = lr
253 | self.x_0 = x_0
254 | self.y_0 = y_0
255 | self.x_range = x_range
256 | self.y_range = y_range
257 | self.i = 0
258 | self.t_position = t_position
259 | self.t_space = t_space
260 | self.instr_position = instr_position
261 |
262 | self.update_plot_point(firsttime=True)
263 | self.path = path_2(self.x_0, self.y_0, self.axc, self.axs) # initialize an empty path, avoids existence check
264 |
265 | time.sleep(0.2)
266 | clear_output(wait=True)
267 | display(self.fig)
268 |
269 | self.run_gd()
270 | self.cpoint = self.fig.canvas.mpl_connect('button_press_event', self.click_plot)
271 |
272 | def click_plot(self, event):
273 | ''' Called when click in plot '''
274 | if (event.xdata <= max(self.x) and event.xdata >= min(self.x) and
275 | event.ydata <= max(self.y) and event.ydata >= min(self.y)):
276 | self.x_0 = event.xdata
277 | self.y_0 = event.ydata
278 | self.i = 0
279 | self.path.re_init(self.x_0, self.y_0)
280 | self.update_plot_point()
281 | time.sleep(0.2)
282 | self.run_gd()
283 |
284 | def update_plot_point(self, firsttime=False):
285 |
286 | # Remove items and re-add them on plot.
287 | if not firsttime:
288 | for artist in self.p_items:
289 | artist.remove()
290 |
291 | a = self.axc.scatter(self.x_0, self.y_0, marker='o', s=100, color='k', zorder=10)
292 | b = self.axc.hlines(self.y_0, self.axc.get_xlim()[0], self.x_0, lw=2, ls='dotted', color='k')
293 | c = self.axc.vlines(self.x_0, self.axc.get_ylim()[0], self.y_0, lw=2, ls='dotted', color='k')
294 | d = self.axs.scatter3D(self.x_0, self.y_0, self.f(self.x_0, self.y_0), s=100, color='r', zorder=10)
295 | t_it = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2],
296 | f"Iteration #${self.i}$", size=10, zorder=20)
297 | t_x_y = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space,
298 | f"$x_0, y_0 = {self.x_0:0.2f}, {self.y_0:0.2f}$", size=10, zorder=20)
299 | t_f = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space*2,
300 | f"$f\\,\\left(x_0, y_0\\right) = {self.f(self.x_0, self.y_0):0.2f}$", size=10, zorder=20)
301 | t_dfdx = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space*3,
302 | f"$f\\,'_x\\left(x_0, y_0\\right) = {self.dfdx(self.x_0, self.y_0):0.2f}$", size=10, zorder=20)
303 | t_dfdy = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space*4,
304 | f"$f\\,'_y\\left(x_0, y_0\\right) = {self.dfdy(self.x_0, self.y_0):0.2f}$", size=10, zorder=20)
305 | self.p_items = [a, b, c, d, t_it, t_x_y, t_f, t_dfdx, t_dfdy]
306 | self.fig.canvas.draw()
307 |
308 | def run_gd(self):
309 | self.i = 1
310 | x_0_new, y_0_new = self.gd(self.dfdx, self.dfdy, self.x_0, self.y_0, self.lr, 1)
311 |
312 | while (self.i <= self.n_it and
313 | (abs(self.dfdx(x_0_new, y_0_new)) >= 0.001 or abs(self.dfdy(x_0_new, y_0_new)) >= 0.001) and
314 | x_0_new >= self.x_range[0] and x_0_new <= self.x_range[1] and
315 | y_0_new >= self.y_range[0] and y_0_new <= self.y_range[1]):
316 | x_0_new, y_0_new = self.gd(self.dfdx, self.dfdy, self.x_0, self.y_0, self.lr, 1)
317 | self.path.add_path_item(x_0_new, y_0_new, self.f)
318 | self.x_0 = x_0_new
319 | self.y_0 = y_0_new
320 | time.sleep(0.05)
321 | self.update_plot_point()
322 | clear_output(wait=True)
323 | display(self.fig)
324 | self.i += 1
325 |
326 | if abs(self.dfdx(x_0_new, y_0_new)) >= 0.001 or abs(self.dfdy(x_0_new, y_0_new)) >= 0.001 or self.x_0 < self.x_range[0] or self.x_0 > self.x_range[1] or self.y_0 < self.y_range[0] or self.y_0 > self.y_range[1]:
327 | t_res = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space*5,
328 | "Has Not Converged", size=10, zorder=20)
329 | else:
330 | t_res = self.axs.text(self.t_position[0], self.t_position[1], self.t_position[2]-self.t_space*5,
331 | "Converged", size=10, zorder=20)
332 | t_instruction = self.axs.text(*self.instr_position, "[Click on the contour plot to choose initial point]",
333 | size=10, color="r", transform=self.axs.transAxes)
334 | self.p_items.append(t_res)
335 | self.p_items.append(t_instruction)
336 | # Clear last time at the end, so there is no duplicate with the cell output.
337 | clear_output(wait=True)
338 |
339 |
340 | class path_2:
341 | ''' tracks paths during gradient descent on contour and surface plots '''
342 | def __init__(self, x_0, y_0, axc, axs):
343 | ''' x_0, y_0 at start of path '''
344 | self.path_items = []
345 | self.x_0 = x_0
346 | self.y_0 = y_0
347 | self.axc = axc
348 | self.axs = axs
349 |
350 | def re_init(self, x_0, y_0):
351 | for artist in self.path_items:
352 | artist.remove()
353 | self.path_items = []
354 | self.x_0 = x_0
355 | self.y_0 = y_0
356 |
357 | def add_path_item(self, x_0, y_0, f):
358 | a = FancyArrowPatch(
359 | posA=(self.x_0, self.y_0), posB=(x_0, y_0), color='r',
360 | arrowstyle='simple, head_width=5, head_length=10, tail_width=1.0',
361 | )
362 | b = self.axs.scatter3D(self.x_0, self.y_0, f(self.x_0, self.y_0),
363 | facecolors='none', edgecolors='r', ls='dotted', s=100, zorder=10)
364 | self.axc.add_artist(a)
365 | self.path_items.append(a)
366 | self.path_items.append(b)
367 | self.x_0 = x_0
368 | self.y_0 = y_0
369 |
--------------------------------------------------------------------------------
/Week2/w2_unittest.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from sklearn.linear_model import LinearRegression
4 |
5 |
6 | def test_load_data(target_adv):
7 | successful_cases = 0
8 | failed_cases = []
9 |
10 | try:
11 | assert type(target_adv) == pd.DataFrame
12 | successful_cases += 1
13 | except:
14 | failed_cases.append(
15 | {
16 | "name": "default_check",
17 | "expected": pd.DataFrame,
18 | "got": type(target_adv),
19 | }
20 | )
21 | print(
22 | f"Test case \"{failed_cases[-1].get('name')}\". Object adv has incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
23 | )
24 |
25 |     # Only some of the output values are checked here, not all of them.
26 |     # In the graders, the whole output is checked.
27 | test_cases = [{
28 | "name": "default_check",
29 | "expected": {"shape": (200, 2),
30 | "adv": [
31 | {"i": 0, "TV": 230.1, "Sales": 22.1},
32 | {"i": 4, "TV": 180.8, "Sales": 12.9},
33 | {"i": 40, "TV": 202.5, "Sales": 16.6},
34 | {"i": 199, "TV": 232.1, "Sales": 13.4},
35 | ],}
36 | },]
37 |
38 | for test_case in test_cases:
39 | result = target_adv
40 |
41 | try:
42 | assert result.shape == test_case["expected"]["shape"]
43 | successful_cases += 1
44 | except:
45 | failed_cases.append(
46 | {
47 | "name": test_case["name"],
48 | "expected": test_case["expected"]["shape"],
49 | "got": result.shape,
50 | }
51 | )
52 | print(
53 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of adv. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
54 | )
55 |
56 | for test_case_i in test_case["expected"]["adv"]:
57 | i = test_case_i["i"]
58 |
59 | try:
60 | assert float(result.iloc[i]["TV"]) == test_case_i["TV"]
61 | successful_cases += 1
62 | except:
63 | failed_cases.append(
64 | {
65 | "name": test_case["name"],
66 | "expected": test_case_i["TV"],
67 | "got": float(result.iloc[i]["TV"]),
68 | }
69 | )
70 | print(
71 |                 f"Test case \"{failed_cases[-1].get('name')}\". Wrong value of TV in adv. Test for index i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
72 | )
73 |
74 | try:
75 | assert float(result.iloc[i]["Sales"]) == test_case_i["Sales"]
76 | successful_cases += 1
77 | except:
78 | failed_cases.append(
79 | {
80 | "name": test_case["name"],
81 | "expected": test_case_i["Sales"],
82 | "got": float(result.iloc[i]["Sales"]),
83 | }
84 | )
85 | print(
86 |                 f"Test case \"{failed_cases[-1].get('name')}\". Wrong value of Sales in adv. Test for index i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
87 | )
88 |
89 | if len(failed_cases) == 0:
90 | print("\033[92m All tests passed")
91 | else:
92 | print("\033[92m", successful_cases, " Tests passed")
93 | print("\033[91m", len(failed_cases), " Tests failed")
94 |
95 |
96 | def test_pred_numpy(target_pred_numpy):
97 | successful_cases = 0
98 | failed_cases = []
99 |
100 | test_cases = [
101 | {
102 | "name": "default_check",
103 | "input": {
104 | "m": 0.04753664043301975,
105 | "b": 7.0325935491276965,
106 | "X": np.array([50, 120, 280]),
107 | },
108 | "expected": {
109 | "Y": np.array([9.40942557, 12.7369904, 20.34285287]),
110 | }
111 | },
112 | {
113 | "name": "extra_check",
114 | "input": {
115 | "m": 2,
116 | "b": 10,
117 | "X": np.array([-5, 0, 1, 5])
118 | },
119 | "expected": {
120 | "Y": np.array([0, 10, 12, 20]),
121 | }
122 | },
123 | ]
124 |
125 | for test_case in test_cases:
126 | result = target_pred_numpy(test_case["input"]["m"], test_case["input"]["b"], test_case["input"]["X"])
127 |
128 | try:
129 | assert result.shape == test_case["expected"]["Y"].shape
130 | successful_cases += 1
131 | except:
132 | failed_cases.append(
133 | {
134 | "name": test_case["name"],
135 | "expected": test_case["expected"]["Y"].shape,
136 | "got": result.shape,
137 | }
138 | )
139 | print(
140 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of pred_numpy output. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
141 | )
142 |
143 | try:
144 | assert np.allclose(result, test_case["expected"]["Y"])
145 | successful_cases += 1
146 |
147 | except:
148 | failed_cases.append(
149 | {
150 | "name": test_case["name"],
151 | "expected": test_case["expected"]["Y"],
152 | "got": result,
153 | }
154 | )
155 | print(
156 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of pred_numpy for X = {test_case['input']['X']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
157 | )
158 |
159 | if len(failed_cases) == 0:
160 | print("\033[92m All tests passed")
161 | else:
162 | print("\033[92m", successful_cases, " Tests passed")
163 | print("\033[91m", len(failed_cases), " Tests failed")
164 |
165 |
166 | def test_sklearn_fit(target_lr_sklearn):
167 | successful_cases = 0
168 | failed_cases = []
169 |
170 |     # Only some of the output values are checked here, not all of them.
171 |     # In the graders, the whole output is checked.
172 | test_cases = [
173 | {
174 | "name": "default_check",
175 | "expected": {
176 | "coef_": np.array([[0.04753664]]),
177 | "intercept_": np.array([7.03259355]),
178 | }
179 | },
180 | ]
181 |
182 | for test_case in test_cases:
183 | result = target_lr_sklearn
184 |
185 | try:
186 | assert isinstance(result, LinearRegression)
187 | successful_cases += 1
188 | except:
189 | failed_cases.append(
190 | {
191 | "name": test_case["name"],
192 | "expected": LinearRegression,
193 | "got": type(result),
194 | }
195 | )
196 | print(
197 | f"Test case \"{failed_cases[-1].get('name')}\". Object lr_sklearn has incorrect type. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
198 | )
199 |
200 | try:
201 | assert hasattr(result, 'coef_')
202 | successful_cases += 1
203 | except:
204 | failed_cases.append(
205 | {
206 | "name": test_case["name"],
207 | "expected": "coef_ attribute of the lr_sklearn model",
208 | "got": None,
209 | }
210 | )
211 | print(
212 | f"Test case \"{failed_cases[-1].get('name')}\". lr_sklearn has no attribute coef_. Check if you have fitted the linear regression model correctly. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
213 | )
214 |
215 | try:
216 | assert hasattr(result, 'intercept_')
217 | successful_cases += 1
218 | except:
219 | failed_cases.append(
220 | {
221 | "name": test_case["name"],
222 | "expected": "intercept_ attribute of the lr_sklearn model",
223 | "got": None,
224 | }
225 | )
226 | print(
227 | f"Test case \"{failed_cases[-1].get('name')}\". lr_sklearn has no attribute intercept_. Check if you have fitted the linear regression model correctly. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
228 | )
229 |
230 | try:
231 | assert np.allclose(result.coef_, test_case["expected"]["coef_"])
232 | successful_cases += 1
233 |
234 | except:
235 | failed_cases.append(
236 | {
237 | "name": test_case["name"],
238 | "expected": test_case["expected"]["coef_"],
239 | "got": result.coef_,
240 | }
241 | )
242 | print(
243 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong slope. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
244 | )
245 |
246 | try:
247 | assert np.allclose(result.intercept_, test_case["expected"]["intercept_"])
248 | successful_cases += 1
249 |
250 | except:
251 | failed_cases.append(
252 | {
253 | "name": test_case["name"],
254 | "expected": test_case["expected"]["intercept_"],
255 | "got": result.intercept_,
256 | }
257 | )
258 | print(
259 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong intercept. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
260 | )
261 |
262 | if len(failed_cases) == 0:
263 | print("\033[92m All tests passed")
264 | else:
265 | print("\033[92m", successful_cases, " Tests passed")
266 | print("\033[91m", len(failed_cases), " Tests failed")
267 |
268 |
269 | def test_sklearn_predict(target_pred_sklearn, input_lr_sklearn):
270 | successful_cases = 0
271 | failed_cases = []
272 |
273 | test_cases = [
274 | {
275 | "name": "default_check",
276 | "input": {
277 | "X": np.array([50, 120, 280]),
278 | },
279 | "expected": {
280 | "Y": np.array([[9.40942557], [12.7369904], [20.34285287]]),
281 | }
282 | },
283 | {
284 | "name": "extra_check",
285 | "input": {
286 | "X": np.array([-5, 0, 1, 5])
287 | },
288 | "expected": {
289 | "Y": np.array([[6.79491035], [7.03259355], [7.08013019], [7.27027675]]),
290 | }
291 | },
292 | ]
293 |
294 | for test_case in test_cases:
295 | result = target_pred_sklearn(test_case["input"]["X"], input_lr_sklearn)
296 |
297 | try:
298 | assert result.shape == test_case["expected"]["Y"].shape
299 | successful_cases += 1
300 | except:
301 | failed_cases.append(
302 | {
303 | "name": test_case["name"],
304 | "expected": test_case["expected"]["Y"].shape,
305 | "got": result.shape,
306 | }
307 | )
308 | print(
309 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of pred_sklearn output. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
310 | )
311 |
312 | try:
313 | assert np.allclose(result, test_case["expected"]["Y"])
314 | successful_cases += 1
315 |
316 | except:
317 | failed_cases.append(
318 | {
319 | "name": test_case["name"],
320 | "expected": test_case["expected"]["Y"],
321 | "got": result,
322 | }
323 | )
324 | print(
325 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of pred_sklearn for X = {test_case['input']['X']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
326 | )
327 |
328 | if len(failed_cases) == 0:
329 | print("\033[92m All tests passed")
330 | else:
331 | print("\033[92m", successful_cases, " Tests passed")
332 | print("\033[91m", len(failed_cases), " Tests failed")
333 |
334 |
335 | def test_partial_derivatives(target_dEdm, target_dEdb, input_X_norm, input_Y_norm):
336 | successful_cases = 0
337 | failed_cases = []
338 |
339 | test_cases = [
340 | {
341 | "name": "default_check",
342 | "input": {
343 | "m": 0,
344 | "b": 0,
345 | },
346 | "expected": {
347 | "dEdm": -0.7822244248616065,
348 | "dEdb": 1.687538997430238e-16,
349 | }
350 | },
351 | {
352 | "name": "extra_check",
353 | "input": {
354 | "m": 1,
355 | "b": 5,
356 | },
357 | "expected": {
358 | "dEdm": 0.21777557513839416,
359 | "dEdb": 5.000000000000001,
360 | }
361 | },
362 | ]
363 |
364 | for test_case in test_cases:
365 | result_dEdm = target_dEdm(test_case["input"]["m"], test_case["input"]["b"], input_X_norm, input_Y_norm)
366 | result_dEdb = target_dEdb(test_case["input"]["m"], test_case["input"]["b"], input_X_norm, input_Y_norm)
367 |
368 | try:
369 | assert np.allclose(result_dEdm, test_case["expected"]["dEdm"])
370 | successful_cases += 1
371 |
372 | except:
373 | failed_cases.append(
374 | {
375 | "name": test_case["name"],
376 | "expected": test_case["expected"]["dEdm"],
377 | "got": result_dEdm,
378 | }
379 | )
380 | print(
381 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of dEdm for m = {test_case['input']['m']}, b = {test_case['input']['b']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
382 | )
383 |
384 | try:
385 | assert np.allclose(result_dEdb, test_case["expected"]["dEdb"])
386 | successful_cases += 1
387 |
388 | except:
389 | failed_cases.append(
390 | {
391 | "name": test_case["name"],
392 | "expected": test_case["expected"]["dEdb"],
393 | "got": result_dEdb,
394 | }
395 | )
396 | print(
397 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of dEdb for m = {test_case['input']['m']}, b = {test_case['input']['b']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
398 | )
399 |
400 | if len(failed_cases) == 0:
401 | print("\033[92m All tests passed")
402 | else:
403 | print("\033[92m", successful_cases, " Tests passed")
404 | print("\033[91m", len(failed_cases), " Tests failed")
405 |
406 |
407 | def test_gradient_descent(target_gradient_descent, input_dEdm, input_dEdb, input_X_norm, input_Y_norm):
408 | successful_cases = 0
409 | failed_cases = []
410 |
411 | test_cases = [
412 | {
413 | "name": "default_check",
414 | "input": {
415 | "m": 0,
416 | "b": 0,
417 | "learning_rate": 0.001,
418 | "num_iterations": 1000,
419 | },
420 | "expected": {
421 | "m": 0.49460408269589484,
422 | "b": -1.367306268207353e-16,
423 | }
424 | },
425 | {
426 | "name": "extra_check",
427 | "input": {
428 | "m": 1,
429 | "b": 5,
430 | "learning_rate": 0.01,
431 | "num_iterations": 10,
432 | },
433 | "expected": {
434 | "m": 0.9791767513915026,
435 | "b": 4.521910375044022,
436 | }
437 | },
438 | ]
439 |
440 | for test_case in test_cases:
441 | result_m, result_b = target_gradient_descent(
442 | input_dEdm, input_dEdb, test_case["input"]["m"], test_case["input"]["b"],
443 | input_X_norm, input_Y_norm, test_case["input"]["learning_rate"], test_case["input"]["num_iterations"]
444 | )
445 |
446 | try:
447 | assert np.allclose(result_m, test_case["expected"]["m"])
448 | successful_cases += 1
449 |
450 | except:
451 | failed_cases.append(
452 | {
453 | "name": test_case["name"],
454 | "expected": test_case["expected"]["m"],
455 | "got": result_m,
456 | }
457 | )
458 | print(
459 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output value m of the function gradient_descent.\nm = {test_case['input']['m']}, b = {test_case['input']['b']}, learning_rate = {test_case['input']['learning_rate']}, num_iterations = {test_case['input']['num_iterations']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
460 | )
461 |
462 | try:
463 | assert np.allclose(result_b, test_case["expected"]["b"])
464 | successful_cases += 1
465 |
466 | except:
467 | failed_cases.append(
468 | {
469 | "name": test_case["name"],
470 | "expected": test_case["expected"]["b"],
471 | "got": result_b,
472 | }
473 | )
474 | print(
475 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output value b of the function gradient_descent.\nm = {test_case['input']['m']}, b = {test_case['input']['b']}, learning_rate = {test_case['input']['learning_rate']}, num_iterations = {test_case['input']['num_iterations']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
476 | )
477 |
478 | if len(failed_cases) == 0:
479 | print("\033[92m All tests passed")
480 | else:
481 | print("\033[92m", successful_cases, " Tests passed")
482 | print("\033[91m", len(failed_cases), " Tests failed")
483 |
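484 | # --- Illustrative usage sketch (not part of the original file) ---
485 | # These functions are meant to be called from the Week 2 assignment notebook,
486 | # roughly as below; the argument names (adv, pred_numpy, dEdm, ...) are the
487 | # learner-defined objects from that notebook and are assumptions here:
488 | #
489 | #   import w2_unittest
490 | #   w2_unittest.test_load_data(adv)
491 | #   w2_unittest.test_pred_numpy(pred_numpy)
492 | #   w2_unittest.test_sklearn_fit(lr_sklearn)
493 | #   w2_unittest.test_sklearn_predict(pred_sklearn, lr_sklearn)
494 | #   w2_unittest.test_partial_derivatives(dEdm, dEdb, X_norm, Y_norm)
495 | #   w2_unittest.test_gradient_descent(gradient_descent, dEdm, dEdb, X_norm, Y_norm)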
--------------------------------------------------------------------------------
/Week3/data/tvmarketing.csv:
--------------------------------------------------------------------------------
1 | TV,Sales
2 | 230.1,22.1
3 | 44.5,10.4
4 | 17.2,9.3
5 | 151.5,18.5
6 | 180.8,12.9
7 | 8.7,7.2
8 | 57.5,11.8
9 | 120.2,13.2
10 | 8.6,4.8
11 | 199.8,10.6
12 | 66.1,8.6
13 | 214.7,17.4
14 | 23.8,9.2
15 | 97.5,9.7
16 | 204.1,19
17 | 195.4,22.4
18 | 67.8,12.5
19 | 281.4,24.4
20 | 69.2,11.3
21 | 147.3,14.6
22 | 218.4,18
23 | 237.4,12.5
24 | 13.2,5.6
25 | 228.3,15.5
26 | 62.3,9.7
27 | 262.9,12
28 | 142.9,15
29 | 240.1,15.9
30 | 248.8,18.9
31 | 70.6,10.5
32 | 292.9,21.4
33 | 112.9,11.9
34 | 97.2,9.6
35 | 265.6,17.4
36 | 95.7,9.5
37 | 290.7,12.8
38 | 266.9,25.4
39 | 74.7,14.7
40 | 43.1,10.1
41 | 228,21.5
42 | 202.5,16.6
43 | 177,17.1
44 | 293.6,20.7
45 | 206.9,12.9
46 | 25.1,8.5
47 | 175.1,14.9
48 | 89.7,10.6
49 | 239.9,23.2
50 | 227.2,14.8
51 | 66.9,9.7
52 | 199.8,11.4
53 | 100.4,10.7
54 | 216.4,22.6
55 | 182.6,21.2
56 | 262.7,20.2
57 | 198.9,23.7
58 | 7.3,5.5
59 | 136.2,13.2
60 | 210.8,23.8
61 | 210.7,18.4
62 | 53.5,8.1
63 | 261.3,24.2
64 | 239.3,15.7
65 | 102.7,14
66 | 131.1,18
67 | 69,9.3
68 | 31.5,9.5
69 | 139.3,13.4
70 | 237.4,18.9
71 | 216.8,22.3
72 | 199.1,18.3
73 | 109.8,12.4
74 | 26.8,8.8
75 | 129.4,11
76 | 213.4,17
77 | 16.9,8.7
78 | 27.5,6.9
79 | 120.5,14.2
80 | 5.4,5.3
81 | 116,11
82 | 76.4,11.8
83 | 239.8,12.3
84 | 75.3,11.3
85 | 68.4,13.6
86 | 213.5,21.7
87 | 193.2,15.2
88 | 76.3,12
89 | 110.7,16
90 | 88.3,12.9
91 | 109.8,16.7
92 | 134.3,11.2
93 | 28.6,7.3
94 | 217.7,19.4
95 | 250.9,22.2
96 | 107.4,11.5
97 | 163.3,16.9
98 | 197.6,11.7
99 | 184.9,15.5
100 | 289.7,25.4
101 | 135.2,17.2
102 | 222.4,11.7
103 | 296.4,23.8
104 | 280.2,14.8
105 | 187.9,14.7
106 | 238.2,20.7
107 | 137.9,19.2
108 | 25,7.2
109 | 90.4,8.7
110 | 13.1,5.3
111 | 255.4,19.8
112 | 225.8,13.4
113 | 241.7,21.8
114 | 175.7,14.1
115 | 209.6,15.9
116 | 78.2,14.6
117 | 75.1,12.6
118 | 139.2,12.2
119 | 76.4,9.4
120 | 125.7,15.9
121 | 19.4,6.6
122 | 141.3,15.5
123 | 18.8,7
124 | 224,11.6
125 | 123.1,15.2
126 | 229.5,19.7
127 | 87.2,10.6
128 | 7.8,6.6
129 | 80.2,8.8
130 | 220.3,24.7
131 | 59.6,9.7
132 | 0.7,1.6
133 | 265.2,12.7
134 | 8.4,5.7
135 | 219.8,19.6
136 | 36.9,10.8
137 | 48.3,11.6
138 | 25.6,9.5
139 | 273.7,20.8
140 | 43,9.6
141 | 184.9,20.7
142 | 73.4,10.9
143 | 193.7,19.2
144 | 220.5,20.1
145 | 104.6,10.4
146 | 96.2,11.4
147 | 140.3,10.3
148 | 240.1,13.2
149 | 243.2,25.4
150 | 38,10.9
151 | 44.7,10.1
152 | 280.7,16.1
153 | 121,11.6
154 | 197.6,16.6
155 | 171.3,19
156 | 187.8,15.6
157 | 4.1,3.2
158 | 93.9,15.3
159 | 149.8,10.1
160 | 11.7,7.3
161 | 131.7,12.9
162 | 172.5,14.4
163 | 85.7,13.3
164 | 188.4,14.9
165 | 163.5,18
166 | 117.2,11.9
167 | 234.5,11.9
168 | 17.9,8
169 | 206.8,12.2
170 | 215.4,17.1
171 | 284.3,15
172 | 50,8.4
173 | 164.5,14.5
174 | 19.6,7.6
175 | 168.4,11.7
176 | 222.4,11.5
177 | 276.9,27
178 | 248.4,20.2
179 | 170.2,11.7
180 | 276.7,11.8
181 | 165.6,12.6
182 | 156.6,10.5
183 | 218.5,12.2
184 | 56.2,8.7
185 | 287.6,26.2
186 | 253.8,17.6
187 | 205,22.6
188 | 139.5,10.3
189 | 191.1,17.3
190 | 286,15.9
191 | 18.7,6.7
192 | 39.5,10.8
193 | 75.5,9.9
194 | 17.2,5.9
195 | 166.8,19.6
196 | 149.7,17.3
197 | 38.2,7.6
198 | 94.2,9.7
199 | 177,12.8
200 | 283.6,25.5
201 | 232.1,13.4
202 |
--------------------------------------------------------------------------------
/Week3/images/nn_model_2_layers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week3/images/nn_model_2_layers.png
--------------------------------------------------------------------------------
/Week3/images/nn_model_classification_1_layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week3/images/nn_model_classification_1_layer.png
--------------------------------------------------------------------------------
/Week3/images/nn_model_linear_regression_multiple.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week3/images/nn_model_linear_regression_multiple.png
--------------------------------------------------------------------------------
/Week3/images/nn_model_linear_regression_simple.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sagardevaraju/Calculus-for-Machine-Learning-and-Data-Science/0bd255e2dd6f7557d2b3cdd829c994e869a6acfd/Week3/images/nn_model_linear_regression_simple.png
--------------------------------------------------------------------------------
/Week3/w3_unittest.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.datasets import make_blobs
3 |
4 | # +
5 | # variables for the default_check test cases
6 | m = 2000
7 | samples, labels = make_blobs(n_samples=m,
8 | centers=([2.5, 3], [6.7, 7.9], [2.1, 7.9], [7.4, 2.8]),
9 | cluster_std=1.1,
10 | random_state=0)
11 | labels[(labels == 0) | (labels == 1)] = 1
12 | labels[(labels == 2) | (labels == 3)] = 0
13 | X = np.transpose(samples)
14 | Y = labels.reshape((1, m))
15 |
16 | n_x = X.shape[0]
17 | n_h = 2
18 | n_y = Y.shape[0]
19 |
20 |
21 | # -
22 |
23 | def test_sigmoid(target_sigmoid):
24 | successful_cases = 0
25 | failed_cases = []
26 |
27 | test_cases = [
28 | {
29 | "name": "default_check",
30 | "input": {"z": -2,},
31 | "expected": {"sigmoid": 0.11920292202211755,},
32 | },
33 | {
34 | "name": "extra_check_1",
35 | "input": {"z": 0,},
36 | "expected": {"sigmoid": 0.5,},
37 | },
38 | {
39 | "name": "extra_check_2",
40 | "input": {"z": 3.5,},
41 | "expected": {"sigmoid": 0.9706877692486436,},
42 | },
43 | ]
44 |
45 | for test_case in test_cases:
46 | result = target_sigmoid(test_case["input"]["z"])
47 |
48 | try:
49 | assert np.allclose(result, test_case["expected"]["sigmoid"])
50 | successful_cases += 1
51 |
52 | except:
53 | failed_cases.append(
54 | {
55 | "name": test_case["name"],
56 | "expected": test_case["expected"]["sigmoid"],
57 | "got": result,
58 | }
59 | )
60 | print(
61 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of sigmoid for z = {test_case['input']['z']}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
62 | )
63 |
64 | if len(failed_cases) == 0:
65 | print("\033[92m All tests passed")
66 | else:
67 | print("\033[92m", successful_cases, " Tests passed")
68 | print("\033[91m", len(failed_cases), " Tests failed")
69 |
70 | def test_layer_sizes(target_layer_sizes):
71 | successful_cases = 0
72 | failed_cases = []
73 |
74 | test_cases = [
75 | {
76 | "name": "default_check",
77 | "input": {
78 | "X": X,
79 | "Y": Y
80 | },
81 | "expected": {
82 | "n_x": n_x,
83 | "n_h": n_h,
84 | "n_y": n_y
85 | },
86 | },
87 | {
88 | "name": "extra_check",
89 | "input": {
90 | "X": np.ones((5, 100)),
91 | "Y": np.ones((3, 100))
92 | },
93 | "expected": {
94 | "n_x": 5,
95 | "n_h": 2,
96 | "n_y": 3
97 | },
98 | },
99 | ]
100 |
101 | for test_case in test_cases:
102 | (result_n_x, result_n_h, result_n_y) = target_layer_sizes(test_case["input"]["X"], test_case["input"]["Y"])
103 |
104 | try:
105 | assert (
106 | result_n_x == test_case["expected"]["n_x"]
107 | )
108 | successful_cases += 1
109 | except:
110 | failed_cases.append(
111 | {
112 | "name": test_case["name"],
113 | "expected": test_case["expected"]["n_x"],
114 | "got": result_n_x,
115 | }
116 | )
117 | print(
118 |                 f"Test case \"{failed_cases[-1].get('name')}\". Wrong size of the input layer n_x for the test case where array X has shape {test_case['input']['X'].shape}. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
119 | )
120 |
121 | try:
122 | assert (
123 | result_n_h == test_case["expected"]["n_h"]
124 | )
125 | successful_cases += 1
126 | except:
127 | failed_cases.append(
128 | {
129 | "name": test_case["name"],
130 | "expected": test_case["expected"]["n_h"],
131 | "got": result_n_h,
132 | }
133 | )
134 | print(
135 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong size of the hidden layer n_h. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
136 | )
137 |
138 | try:
139 | assert (
140 | result_n_y == test_case["expected"]["n_y"]
141 | )
142 | successful_cases += 1
143 | except:
144 | failed_cases.append(
145 | {
146 | "name": test_case["name"],
147 | "expected": test_case["expected"]["n_y"],
148 | "got": result_n_y,
149 | }
150 | )
151 | print(
152 |                 f"Test case \"{failed_cases[-1].get('name')}\". Wrong size of the output layer n_y for the test case where array Y has shape {test_case['input']['Y'].shape}. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
153 | )
154 |
155 | if len(failed_cases) == 0:
156 | print("\033[92m All tests passed")
157 | else:
158 | print("\033[92m", successful_cases, " Tests passed")
159 | print("\033[91m", len(failed_cases), " Tests failed")
160 |
161 | def test_initialize_parameters(target_initialize_parameters):
162 | successful_cases = 0
163 | failed_cases = []
164 |
165 | test_cases = [
166 | {
167 | "name": "default_check",
168 | "input": {
169 | "n_x": n_x,
170 | "n_h": n_h,
171 | "n_y": n_y,
172 | },
173 | "expected": {
174 | "W1": np.zeros((n_h, n_x)), # no check of the actual values in the unit tests
175 | "b1": np.zeros((n_h, 1)),
176 | "W2": np.zeros((n_y, n_h)), # no check of the actual values in the unit tests
177 | "b2": np.zeros((n_y, 1)),
178 | },
179 | },
180 | {
181 | "name": "extra_check",
182 | "input": {
183 | "n_x": 5,
184 | "n_h": 4,
185 | "n_y": 3,
186 | },
187 | "expected": {
188 | "W1": np.zeros((4, 5)), # no check of the actual values in the unit tests
189 | "b1": np.zeros((4, 1)),
190 | "W2": np.zeros((3, 4)), # no check of the actual values in the unit tests
191 | "b2": np.zeros((3, 1)),
192 | },
193 | },
194 | ]
195 |
196 | for test_case in test_cases:
197 | result = target_initialize_parameters(test_case["input"]["n_x"], test_case["input"]["n_h"], test_case["input"]["n_y"])
198 |
199 | try:
200 | assert result["W1"].shape == test_case["expected"]["W1"].shape
201 | successful_cases += 1
202 | except:
203 | failed_cases.append(
204 | {
205 | "name": test_case["name"],
206 | "expected": test_case["expected"]["W1"].shape,
207 | "got": result["W1"].shape,
208 | }
209 | )
210 | print(
211 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the weights matrix W1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
212 | )
213 |
214 | try:
215 | assert result["b1"].shape == test_case["expected"]["b1"].shape
216 | successful_cases += 1
217 | except:
218 | failed_cases.append(
219 | {
220 | "name": test_case["name"],
221 | "expected": test_case["expected"]["b1"].shape,
222 | "got": result["b1"].shape,
223 | }
224 | )
225 | print(
226 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the bias vector b1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
227 | )
228 |
229 | try:
230 | assert np.allclose(result["b1"], test_case["expected"]["b1"])
231 | successful_cases += 1
232 |
233 | except:
234 | failed_cases.append(
235 | {
236 | "name": test_case["name"],
237 | "expected": test_case["expected"]["b1"],
238 | "got": result["b1"],
239 | }
240 | )
241 | print(
242 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong bias vector b1. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
243 | )
244 |
245 | try:
246 | assert result["W2"].shape == test_case["expected"]["W2"].shape
247 | successful_cases += 1
248 | except:
249 | failed_cases.append(
250 | {
251 | "name": test_case["name"],
252 | "expected": test_case["expected"]["W2"].shape,
253 | "got": result["W2"].shape,
254 | }
255 | )
256 | print(
257 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the weights matrix W2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
258 | )
259 |
260 | try:
261 | assert result["b2"].shape == test_case["expected"]["b2"].shape
262 | successful_cases += 1
263 | except:
264 | failed_cases.append(
265 | {
266 | "name": test_case["name"],
267 | "expected": test_case["expected"]["b2"].shape,
268 | "got": result["b2"].shape,
269 | }
270 | )
271 | print(
272 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the bias vector b2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
273 | )
274 |
275 | try:
276 | assert np.allclose(result["b2"], test_case["expected"]["b2"])
277 | successful_cases += 1
278 |
279 | except:
280 | failed_cases.append(
281 | {
282 | "name": test_case["name"],
283 | "expected": test_case["expected"]["b2"],
284 | "got": result["b2"],
285 | }
286 | )
287 | print(
288 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong bias vector b2. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
289 | )
290 |
291 | if len(failed_cases) == 0:
292 | print("\033[92m All tests passed")
293 | else:
294 | print("\033[92m", successful_cases, " Tests passed")
295 | print("\033[91m", len(failed_cases), " Tests failed")
296 |
297 | def test_forward_propagation(target_forward_propagation):
298 | successful_cases = 0
299 | failed_cases = []
300 |
301 | test_cases = [
302 | {
303 | "name": "default_check",
304 | "input": {
305 | "X": X,
306 | "parameters": {
307 | "W1": np.array([[0.01788628, 0.0043651], [0.00096497, -0.01863493]]),
308 | "b1": np.zeros((n_h, 1)),
309 | "W2": np.array([[-0.00277388, -0.00354759]]),
310 | "b2": np.zeros((n_y, 1)),
311 | },
312 | },
313 | "expected": {
314 | "Z1_array": {
315 | "shape": (2, 2000),
316 | "Z1": [
317 | {"i": 0, "j": 0, "Z1_i_j": 0.11050400276471689,},
318 | {"i": 1, "j": 1999, "Z1_i_j": -0.11866556808051022,},
319 | {"i": 0, "j": 100, "Z1_i_j": 0.08570563958483839,},
320 | ],},
321 | "A1_array": {
322 | "shape": (2, 2000),
323 | "A1": [
324 | {"i": 0, "j": 0, "A1_i_j": 0.5275979229090347,},
325 | {"i": 1, "j": 1999, "A1_i_j": 0.47036837134568177,},
326 | {"i": 0, "j": 100, "A1_i_j": 0.521413303959268,},
327 | ],},
328 | "Z2_array": {
329 | "shape": (1, 2000),
330 | "Z2": [
331 | {"i": 0, "Z2_i": -0.003193737045395555,},
332 | {"i": 400, "Z2_i": -0.003221924688299396,},
333 | {"i": 1999, "Z2_i": -0.00317339213692169,},
334 | ],},
335 | "A2_array": {
336 | "shape": (1, 2000),
337 | "A2": [
338 | {"i": 0, "A2_i": 0.4992015664173166,},
339 | {"i": 400, "A2_i": 0.49919451952471916,},
340 | {"i": 1999, "A2_i": 0.4992066526315478,},
341 | ],},
342 | },
343 | },
344 | {
345 | "name": "change_weights_check",
346 | "input": {
347 | "X": X,
348 | "parameters": {
349 | "W1": np.array([[-0.00082741, -0.00627001], [-0.00043818, -0.00477218]]),
350 | "b1": np.zeros((n_h, 1)),
351 | "W2": np.array([[-0.01313865, 0.00884622]]),
352 | "b2": np.zeros((n_y, 1)),
353 | },
354 | },
355 | "expected": {
356 | "Z1_array": {
357 | "shape": (2, 2000),
358 | "Z1": [
359 | {"i": 0, "j": 0, "Z1_i_j": -0.022822666781157443,},
360 | {"i": 1, "j": 1999, "Z1_i_j": -0.03577862954823146,},
361 | {"i": 0, "j": 100, "Z1_i_j": -0.05872690929597483,},
362 | ],},
363 | "A1_array": {
364 | "shape": (2, 2000),
365 | "A1": [
366 | {"i": 0, "j": 0, "A1_i_j": 0.49429458095298745,},
367 | {"i": 1, "j": 1999, "A1_i_j": 0.4910562966698409,},
368 | {"i": 0, "j": 100, "A1_i_j": 0.4853224908106953,},
369 | ],},
370 | "Z2_array": {
371 | "shape": (1, 2000),
372 | "Z2": [
373 | {"i": 0, "Z2_i": -0.0021073530226891173,},
374 | {"i": 400, "Z2_i": -0.002110285690191978,},
375 | {"i": 1999, "Z2_i": -0.0020644562143733863,},
376 | ],},
377 | "A2_array": {
378 | "shape": (1, 2000),
379 | "A2": [
380 | {"i": 0, "A2_i": 0.4994731619392989,},
381 | {"i": 400, "A2_i": 0.49947242877323833,},
382 | {"i": 1999, "A2_i": 0.49948388612971223,},
383 | ],},
384 | },
385 | },
386 | {
387 | "name": "change_dataset_check",
388 | "input": {
389 | "X": np.array([[0, 1, 0, 0, 1], [0, 0, 0, 0, 1]]),
390 | "parameters": {
391 | "W1": np.array([[-0.00082741, -0.00627001], [-0.00043818, -0.00477218]]),
392 | "b1": np.zeros((n_h, 1)),
393 | "W2": np.array([[-0.01313865, 0.00884622]]),
394 | "b2": np.zeros((n_y, 1)),
395 | },
396 | },
397 | "expected": {
398 | "Z1_array": {
399 | "shape": (2, 5),
400 | "Z1": [
401 | {"i": 0, "j": 0, "Z1_i_j": 0.0,},
402 | {"i": 1, "j": 4, "Z1_i_j": -0.00521036,},
403 | {"i": 0, "j": 4, "Z1_i_j": -0.00709742,},
404 | ],},
405 | "A1_array": {
406 | "shape": (2, 5),
407 | "A1": [
408 | {"i": 0, "j": 0, "A1_i_j": 0.5,},
409 | {"i": 1, "j": 4, "A1_i_j": 0.49869741294686865,},
410 | {"i": 0, "j": 4, "A1_i_j": 0.49822565244831607,},
411 | ],},
412 | "Z2_array": {
413 | "shape": (1, 5),
414 | "Z2": [
415 | {"i": 0, "Z2_i": -0.002146215,},
416 | {"i": 1, "Z2_i": -0.0021444662967103198,},
417 | {"i": 4, "Z2_i": -0.00213442544018122,},
418 | ],},
419 | "A2_array": {
420 | "shape": (1, 5),
421 | "A2": [
422 | {"i": 0, "A2_i": 0.4994634464559578,},
423 | {"i": 1, "A2_i": 0.4994638836312772,},
424 | {"i": 4, "A2_i": 0.49946639384253705,},
425 | ],},
426 | },
427 | },
428 | ]
429 |
430 | for test_case in test_cases:
431 | result_A2, result_cache = target_forward_propagation(test_case["input"]["X"], test_case["input"]["parameters"])
432 |
433 | try:
434 | assert result_cache["Z1"].shape == test_case["expected"]["Z1_array"]["shape"]
435 | successful_cases += 1
436 | except:
437 | failed_cases.append(
438 | {
439 | "name": test_case["name"],
440 | "expected": test_case["expected"]["Z1_array"]["shape"],
441 | "got": result_cache["Z1"].shape,
442 | }
443 | )
444 | print(
445 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the array Z1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}.")
446 |
447 | for test_case_i_j in test_case["expected"]["Z1_array"]["Z1"]:
448 | i = test_case_i_j["i"]
449 | j = test_case_i_j["j"]
450 |
451 | try:
452 |                 assert np.isclose(result_cache["Z1"][i, j], test_case_i_j["Z1_i_j"])
453 | successful_cases += 1
454 |
455 | except:
456 | failed_cases.append(
457 | {
458 | "name": test_case["name"],
459 | "expected": test_case_i_j["Z1_i_j"],
460 | "got": result_cache["Z1"][i, j],
461 | }
462 | )
463 | print(
464 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of Z1 for X = \n{test_case['input']['X']}\nTest for i = {i}, j = {j}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
465 | )
466 |
467 | try:
468 | assert result_cache["A1"].shape == test_case["expected"]["A1_array"]["shape"]
469 | successful_cases += 1
470 | except:
471 | failed_cases.append(
472 | {
473 | "name": test_case["name"],
474 | "expected": test_case["expected"]["A1_array"]["shape"],
475 | "got": result_cache["A1"].shape,
476 | }
477 | )
478 | print(
479 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the array A1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}.")
480 |
481 | for test_case_i_j in test_case["expected"]["A1_array"]["A1"]:
482 | i = test_case_i_j["i"]
483 | j = test_case_i_j["j"]
484 |
485 | try:
486 |                 assert np.isclose(result_cache["A1"][i, j], test_case_i_j["A1_i_j"])
487 | successful_cases += 1
488 |
489 | except:
490 | failed_cases.append(
491 | {
492 | "name": test_case["name"],
493 | "expected": test_case_i_j["A1_i_j"],
494 | "got": result_cache["A1"][i, j],
495 | }
496 | )
497 | print(
498 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of A1 for X = \n{test_case['input']['X']}\nTest for i = {i}, j = {j}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
499 | )
500 |
501 | try:
502 | assert result_cache["Z2"].shape == test_case["expected"]["Z2_array"]["shape"]
503 | successful_cases += 1
504 | except:
505 | failed_cases.append(
506 | {
507 | "name": test_case["name"],
508 | "expected": test_case["expected"]["Z2_array"]["shape"],
509 | "got": result_cache["Z2"].shape,
510 | }
511 | )
512 | print(
513 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the array Z2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}.")
514 |
515 | for test_case_i in test_case["expected"]["Z2_array"]["Z2"]:
516 | i = test_case_i["i"]
517 |
518 | try:
519 |                 assert np.isclose(result_cache["Z2"][0, i], test_case_i["Z2_i"])
520 | successful_cases += 1
521 |
522 | except:
523 | failed_cases.append(
524 | {
525 | "name": test_case["name"],
526 | "expected": test_case_i["Z2_i"],
527 | "got": result_cache["Z2"][0, i],
528 | }
529 | )
530 | print(
531 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of Z2. Test for i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
532 | )
533 |
534 | try:
535 | assert result_A2.shape == test_case["expected"]["A2_array"]["shape"]
536 | successful_cases += 1
537 | except:
538 | failed_cases.append(
539 | {
540 | "name": test_case["name"],
541 | "expected": test_case["expected"]["A2_array"]["shape"],
542 | "got": result_A2.shape,
543 | }
544 | )
545 | print(
546 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the array A2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}.")
547 |
548 | for test_case_i in test_case["expected"]["A2_array"]["A2"]:
549 | i = test_case_i["i"]
550 |
551 | try:
552 |                 assert np.isclose(result_A2[0, i], test_case_i["A2_i"])
553 | successful_cases += 1
554 |
555 | except:
556 | failed_cases.append(
557 | {
558 | "name": test_case["name"],
559 | "expected": test_case_i["A2_i"],
560 | "got": result_A2[0, i],
561 | }
562 | )
563 | print(
564 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of A2. Test for i = {i}. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
565 | )
566 |
567 | if len(failed_cases) == 0:
568 | print("\033[92m All tests passed")
569 | else:
570 | print("\033[92m", successful_cases, " Tests passed")
571 | print("\033[91m", len(failed_cases), " Tests failed")
572 |
573 | def test_compute_cost(target_compute_cost, input_A2):
574 | successful_cases = 0
575 | failed_cases = []
576 |
577 | test_cases = [
578 | {
579 | "name": "default_check",
580 | "input": {
581 | "A2": input_A2,
582 | "Y": Y,
583 | },
584 | "expected": {"cost": 0.6931477703826823,},
585 | },
586 | {
587 | "name": "extra_check",
588 | "input": {
589 | "A2": np.array([[0.64, 0.60, 0.35, 0.15, 0.95]]),
590 | "Y": np.array([[0.58, 0.01, 0.42, 0.24, 0.99]])
591 | },
592 | "expected": {"cost": 0.5901032749748385,},
593 | },
594 | ]
595 |
596 | for test_case in test_cases:
597 | result = target_compute_cost(test_case["input"]["A2"], test_case["input"]["Y"])
598 |
599 | try:
600 | assert np.allclose(result, test_case["expected"]["cost"])
601 | successful_cases += 1
602 |
603 | except:
604 | failed_cases.append(
605 | {
606 | "name": test_case["name"],
607 | "expected": test_case["expected"]["cost"],
608 | "got": result,
609 | }
610 | )
611 | print(
612 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output of compute_cost. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
613 | )
614 |
615 | if len(failed_cases) == 0:
616 | print("\033[92m All tests passed")
617 | else:
618 | print("\033[92m", successful_cases, " Tests passed")
619 | print("\033[91m", len(failed_cases), " Tests failed")
620 |
621 | def test_update_parameters(target_update_parameters):
622 | successful_cases = 0
623 | failed_cases = []
624 |
625 | test_cases = [
626 | {
627 | "name": "default_check",
628 | "input": {
629 | "parameters": {
630 | "W1": np.array([[0.01788628, 0.0043651], [0.00096497, -0.01863493]]),
631 | "b1": np.zeros((n_h, 1)),
632 | "W2": np.array([[-0.00277388, -0.00354759]]),
633 | "b2": np.zeros((n_y, 1)),
634 | },
635 | "grads": {
636 | "dW1": np.array([[-1.49856632e-05, 1.67791519e-05], [-2.12394543e-05, 2.43895135e-05]]),
637 | "db1": np.array([[5.11207671e-07], [7.06236219e-07]]),
638 | "dW2": np.array([[-0.00032641, -0.0002606]]),
639 | "db2": np.array([[-0.00078732]]),
640 | },
641 | "learning_rate": 1.2,
642 | },
643 | "expected": {
644 | "parameters": {
645 | "W1": np.array([[0.01790426, 0.00434497], [0.00099046, -0.0186642]]),
646 | "b1": np.array([[-6.13449205e-07], [-8.47483463e-07]]),
647 | "W2": np.array([[-0.00238219, -0.00323487]]),
648 | "b2": np.array([[0.00094478]]),
649 | },
650 | }
651 | },
652 | {
653 | "name": "extra_check",
654 | "input": {
655 | "parameters": {
656 | "W1": np.array([[-0.00082741, -0.00627001], [-0.00043818, -0.00477218]]),
657 | "b1": np.zeros((n_h, 1)),
658 | "W2": np.array([[-0.01313865, 0.00884622]]),
659 | "b2": np.zeros((n_y, 1)),
660 | },
661 | "grads": {
662 | "dW1": np.array([[-7.56054712e-05, 8.48587435e-05], [5.05322772e-05, -5.72665231e-05]]),
663 | "db1": np.array([[1.68002224e-06], [-1.14292837e-06]]),
664 | "dW2": np.array([[-0.0002246, -0.00023206]]),
665 | "db2": np.array([[-0.000521]]),
666 | },
667 | "learning_rate": 0.1,
668 | },
669 | "expected": {
670 | "parameters": {
671 | "W1": np.array([[-0.00081985, -0.0062785], [-0.00044323, -0.00476645]]),
672 | "b1": np.array([[-1.68002224e-07], [1.14292837e-07]]),
673 | "W2": np.array([[-0.01311619, 0.00886943]]),
674 | "b2": np.array([[5.21e-05]]),
675 | },
676 | }
677 | },
678 | ]
679 |
680 | for test_case in test_cases:
681 | result = target_update_parameters(test_case["input"]["parameters"], test_case["input"]["grads"], test_case["input"]["learning_rate"])
682 |
683 | try:
684 | assert result["W1"].shape == test_case["expected"]["parameters"]["W1"].shape
685 | successful_cases += 1
686 | except:
687 | failed_cases.append(
688 | {
689 | "name": test_case["name"],
690 | "expected": test_case["expected"]["parameters"]["W1"].shape,
691 | "got": result["W1"].shape,
692 | }
693 | )
694 | print(
695 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the output array W1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
696 | )
697 |
698 | try:
699 | assert np.allclose(result["W1"], test_case["expected"]["parameters"]["W1"])
700 | successful_cases += 1
701 |
702 | except:
703 | failed_cases.append(
704 | {
705 | "name": test_case["name"],
706 | "expected": test_case["expected"]["parameters"]["W1"],
707 | "got": result["W1"],
708 | }
709 | )
710 | print(
711 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output array W1. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
712 | )
713 |
714 | try:
715 | assert result["b1"].shape == test_case["expected"]["parameters"]["b1"].shape
716 | successful_cases += 1
717 | except:
718 | failed_cases.append(
719 | {
720 | "name": test_case["name"],
721 | "expected": test_case["expected"]["parameters"]["b1"].shape,
722 | "got": result["b1"].shape,
723 | }
724 | )
725 | print(
726 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the output array b1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
727 | )
728 |
729 | try:
730 | assert np.allclose(result["b1"], test_case["expected"]["parameters"]["b1"])
731 | successful_cases += 1
732 |
733 | except:
734 | failed_cases.append(
735 | {
736 | "name": test_case["name"],
737 | "expected": test_case["expected"]["parameters"]["b1"],
738 | "got": result["b1"],
739 | }
740 | )
741 | print(
742 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output array b1. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
743 |             )
744 |
745 |
746 | try:
747 | assert result["W2"].shape == test_case["expected"]["parameters"]["W2"].shape
748 | successful_cases += 1
749 | except:
750 | failed_cases.append(
751 | {
752 | "name": test_case["name"],
753 | "expected": test_case["expected"]["parameters"]["W2"].shape,
754 | "got": result["W2"].shape,
755 | }
756 | )
757 | print(
758 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the output array W2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
759 | )
760 |
761 | try:
762 | assert np.allclose(result["W2"], test_case["expected"]["parameters"]["W2"])
763 | successful_cases += 1
764 |
765 | except:
766 | failed_cases.append(
767 | {
768 | "name": test_case["name"],
769 | "expected": test_case["expected"]["parameters"]["W2"],
770 | "got": result["W2"],
771 | }
772 | )
773 | print(
774 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output array W2. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
775 | )
776 |
777 | try:
778 | assert result["b2"].shape == test_case["expected"]["parameters"]["b2"].shape
779 | successful_cases += 1
780 | except:
781 | failed_cases.append(
782 | {
783 | "name": test_case["name"],
784 | "expected": test_case["expected"]["parameters"]["b2"].shape,
785 | "got": result["b2"].shape,
786 | }
787 | )
788 | print(
789 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the output array b2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
790 | )
791 |
792 | try:
793 | assert np.allclose(result["b2"], test_case["expected"]["parameters"]["b2"])
794 | successful_cases += 1
795 |
796 | except:
797 | failed_cases.append(
798 | {
799 | "name": test_case["name"],
800 | "expected": test_case["expected"]["parameters"]["b2"],
801 | "got": result["b2"],
802 | }
803 | )
804 | print(
805 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output array b2. \n\tExpected: \n{failed_cases[-1].get('expected')}\n\tGot: \n{failed_cases[-1].get('got')}"
806 | )
807 |
808 |
809 | if len(failed_cases) == 0:
810 | print("\033[92m All tests passed")
811 | else:
812 | print("\033[92m", successful_cases, " Tests passed")
813 | print("\033[91m", len(failed_cases), " Tests failed")
814 |
815 | def test_nn_model(target_nn_model):
816 | successful_cases = 0
817 | failed_cases = []
818 |
819 | test_cases = [
820 | {
821 | "name": "default_check",
822 | "input": {
823 | "X": X,
824 | "Y": Y,
825 | "n_h": 2,
826 | "num_iterations": 3000,
827 | "learning_rate": 1.2,
828 | },
829 | "expected": {
830 | "W1": np.zeros((n_h, n_x)), # no check of the actual values in the unit tests
831 | "b1": np.zeros((n_h, 1)), # no check of the actual values in the unit tests
832 | "W2": np.zeros((n_y, n_h)), # no check of the actual values in the unit tests
833 | "b2": np.zeros((n_y, 1)), # no check of the actual values in the unit tests
834 | },
835 | },
836 | {
837 | "name": "extra_check",
838 | "input": {
839 | "X": np.array([[0, 1, 0, 0, 1],[0, 0, 0, 0, 1]]),
840 | "Y": np.array([[0, 0, 0, 0, 1]]),
841 | "n_h": 3,
842 | "num_iterations": 100,
843 | "learning_rate": 0.1,
844 | },
845 | "expected": {
846 | "W1": np.zeros((3, 2)), # no check of the actual values in the unit tests
847 | "b1": np.zeros((3, 1)), # no check of the actual values in the unit tests
848 | "W2": np.zeros((1, 3)), # no check of the actual values in the unit tests
849 | "b2": np.zeros((1, 1)), # no check of the actual values in the unit tests
850 | },
851 | },
852 | ]
853 |
854 | for test_case in test_cases:
855 |
856 | result = target_nn_model(test_case["input"]["X"], test_case["input"]["Y"], test_case["input"]["n_h"],
857 | test_case["input"]["num_iterations"], test_case["input"]["learning_rate"], False)
858 |
859 | try:
860 | assert result["W1"].shape == test_case["expected"]["W1"].shape
861 | successful_cases += 1
862 | except:
863 | failed_cases.append(
864 | {
865 | "name": test_case["name"],
866 | "expected": test_case["expected"]["W1"].shape,
867 | "got": result["W1"].shape,
868 | }
869 | )
870 | print(
871 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the weights matrix W1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
872 | )
873 |
874 | try:
875 | assert result["b1"].shape == test_case["expected"]["b1"].shape
876 | successful_cases += 1
877 | except:
878 | failed_cases.append(
879 | {
880 | "name": test_case["name"],
881 | "expected": test_case["expected"]["b1"].shape,
882 | "got": result["b1"].shape,
883 | }
884 | )
885 | print(
886 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the bias vector b1. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
887 | )
888 |
889 | try:
890 | assert result["W2"].shape == test_case["expected"]["W2"].shape
891 | successful_cases += 1
892 | except:
893 | failed_cases.append(
894 | {
895 | "name": test_case["name"],
896 | "expected": test_case["expected"]["W2"].shape,
897 | "got": result["W2"].shape,
898 | }
899 | )
900 | print(
901 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the weights matrix W2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
902 | )
903 |
904 | try:
905 | assert result["b2"].shape == test_case["expected"]["b2"].shape
906 | successful_cases += 1
907 | except:
908 | failed_cases.append(
909 | {
910 | "name": test_case["name"],
911 | "expected": test_case["expected"]["b2"].shape,
912 | "got": result["b2"].shape,
913 | }
914 | )
915 | print(
916 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the bias vector b2. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
917 | )
918 |
919 | if len(failed_cases) == 0:
920 | print("\033[92m All tests passed")
921 | else:
922 | print("\033[92m", successful_cases, " Tests passed")
923 | print("\033[91m", len(failed_cases), " Tests failed")
924 |
925 |
926 | def test_predict(target_predict):
927 | successful_cases = 0
928 | failed_cases = []
929 |
930 | test_cases = [
931 | {
932 | "name": "default_check",
933 | "input": {
934 | "X": np.array([[2, 8, 2, 8], [2, 8, 8, 2]]),
935 | "parameters": {
936 | "W1": np.array([[2.14274251, -1.93155541], [2.20268789, -2.1131799]]),
937 | "b1": np.array([[-4.83079243], [6.2845223]]),
938 | "W2": np.array([[-7.21370685, 7.0898022]]),
939 | "b2": np.array([[-3.48755239]]),
940 | },
941 | },
942 | "expected": {
943 | "predictions": np.array([[True, True, False, False]]),
944 | },
945 | },
946 | {
947 | "name": "extra_check",
948 | "input": {
949 | "X": np.array([[0, 10, 0, 0, 10],[0, 0, 0, 0, 10]]),
950 | "parameters": {
951 | "W1": np.array([[2.15345603, -2.02993877], [2.24191569, -1.89471923]]),
952 | "b1": np.array([[6.29905582], [-4.80909975]]),
953 | "W2": np.array([[7.07457688, -7.23061969]]),
954 | "b2": np.array([[-3.50971507]]),
955 | },
956 | },
957 | "expected": {
958 | "predictions": np.array([[True, False, True, True, True]]),
959 | },
960 | },
961 | ]
962 |
963 | for test_case in test_cases:
964 |
965 | result = target_predict(test_case["input"]["X"], test_case["input"]["parameters"])
966 |
967 | try:
968 | assert result.shape == test_case["expected"]["predictions"].shape
969 | successful_cases += 1
970 | except:
971 | failed_cases.append(
972 | {
973 | "name": test_case["name"],
974 | "expected": test_case["expected"]["predictions"].shape,
975 | "got": result.shape,
976 | }
977 | )
978 | print(
979 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong shape of the output array. Input: X = \n{test_case['input']['X']},\nparameters = {test_case['input']['parameters']}. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
980 | )
981 |
982 | try:
983 | assert np.allclose(result, test_case["expected"]["predictions"])
984 | successful_cases += 1
985 |
986 | except:
987 | failed_cases.append(
988 | {
989 | "name": test_case["name"],
990 | "expected": test_case["expected"]["predictions"],
991 | "got": result,
992 | }
993 | )
994 | print(
995 | f"Test case \"{failed_cases[-1].get('name')}\". Wrong output array. Input: X = \n{test_case['input']['X']},\nparameters = {test_case['input']['parameters']}. \n\tExpected: {failed_cases[-1].get('expected')}.\n\tGot: {failed_cases[-1].get('got')}."
996 | )
997 |
998 | if len(failed_cases) == 0:
999 | print("\033[92m All tests passed")
1000 | else:
1001 | print("\033[92m", successful_cases, " Tests passed")
1002 | print("\033[91m", len(failed_cases), " Tests failed")
1003 |
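1004 | # --- Illustrative usage sketch (not part of the original file) ---
1005 | # These functions are meant to be called from the Week 3 assignment notebook,
1006 | # roughly as below; the argument names (sigmoid, nn_model, A2, ...) are the
1007 | # learner-defined objects from that notebook and are assumptions here:
1008 | #
1009 | #   import w3_unittest
1010 | #   w3_unittest.test_sigmoid(sigmoid)
1011 | #   w3_unittest.test_layer_sizes(layer_sizes)
1012 | #   w3_unittest.test_initialize_parameters(initialize_parameters)
1013 | #   w3_unittest.test_forward_propagation(forward_propagation)
1014 | #   w3_unittest.test_compute_cost(compute_cost, A2)
1015 | #   w3_unittest.test_update_parameters(update_parameters)
1016 | #   w3_unittest.test_nn_model(nn_model)
1017 | #   w3_unittest.test_predict(predict)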
--------------------------------------------------------------------------------