├── .gitignore ├── LICENSE ├── Notebooks ├── Source.gv ├── Source.gv.pdf ├── ch2_statistical_learning_applied.ipynb ├── ch2_statistical_learning_conceptual.ipynb ├── ch3_linear_regression_applied.ipynb ├── ch3_linear_regression_conceptual.ipynb ├── ch4_classification_applied.ipynb ├── ch4_classification_conceptual.ipynb ├── ch5_resampling_methods_applied.ipynb ├── ch5_resampling_methods_conceptual.ipynb ├── ch6_linear_model_selection_and_regularisation_applied.ipynb ├── ch6_linear_model_selection_and_regularisation_conceptual.ipynb ├── ch6_linear_model_selection_and_regularisation_labs.ipynb ├── ch7_moving_beyond_linearity_applied.ipynb ├── ch7_moving_beyond_linearity_labs.ipynb ├── ch8_tree_based_methods_applied.ipynb ├── ch8_tree_based_methods_conceptual.ipynb ├── ch8_tree_based_methods_labs.ipynb ├── ch9_support_vector_machines_applied.ipynb ├── ch9_support_vector_machines_conceptual.ipynb ├── ch9_support_vector_machines_labs.ipynb ├── data │ ├── Advertising.csv │ ├── Auto.csv │ ├── Caravan.csv │ ├── Carseats.csv │ ├── Ch10Ex11.csv │ ├── College.csv │ ├── Credit.csv │ ├── Default.csv │ ├── Heart.csv │ ├── Hitters.csv │ ├── Income1.csv │ ├── Income2.csv │ ├── OJ.csv │ ├── Wage.csv │ ├── Weekly.csv │ ├── khan_test.csv │ └── khan_train.csv └── images │ ├── 2_3.jpg │ ├── 3_5.jpg │ ├── 3_6.jpg │ ├── 3_7.jpg │ ├── 3_table3.4.png │ ├── 4_1.JPG │ ├── 4_2.JPG │ ├── 4_3.jpg │ ├── 4_4abcd.JPG │ ├── 4_4e.JPG │ ├── 4_6b.JPG │ ├── 5_1.jpg │ ├── 5_2.jpg │ ├── 6_5.jpg │ ├── 6_5bi.jpg │ ├── 6_5bii.jpg │ ├── 6_5cd.jpg │ ├── 8_1a.jpg │ ├── 8_1b.jpg │ ├── 8_4a.JPG │ └── 8_4b.JPG └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # DS store 2 | .DS_Store 3 | /.DS_Store 4 | /out 5 | .*.DS_Store 6 | 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | MANIFEST 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | 65 | # Flask stuff: 66 | instance/ 67 | .webassets-cache 68 | 69 | # Scrapy stuff: 70 | .scrapy 71 | 72 | # Sphinx documentation 73 | docs/_build/ 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # pyenv 82 | .python-version 83 | 84 | # celery beat schedule file 85 | celerybeat-schedule 86 | 87 | # SageMath parsed files 88 | *.sage.py 89 | 90 | # Environments 91 | .env 92 | .venv 93 | env/ 94 | venv/ 95 | ENV/ 96 | env.bak/ 97 | venv.bak/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | .spyproject 102 | 103 | # Rope project settings 104 | .ropeproject 105 | 106 | # mkdocs documentation 107 | /site 108 | 109 | # mypy 110 | .mypy_cache/ 111 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 a-martyn 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Notebooks/Source.gv: -------------------------------------------------------------------------------- 1 | digraph Tree { 2 | node [shape=box] ; 3 | 0 [label="X[1] <= 0.5\ngini = 0.484\nsamples = 400\nvalue = [236, 164]"] ; 4 | 1 [label="X[9] <= 92.5\ngini = 0.429\nsamples = 315\nvalue = [217, 98]"] ; 5 | 0 -> 1 [labeldistance=2.5, labelangle=45, headlabel="True"] ; 6 | 2 [label="X[6] <= 57.0\ngini = 0.423\nsamples = 46\nvalue = [14, 32]"] ; 7 | 1 -> 2 ; 8 | 3 [label="X[2] <= 0.5\ngini = 0.42\nsamples = 10\nvalue = [7, 3]"] ; 9 | 2 -> 3 ; 10 | 4 [label="gini = 0.0\nsamples = 7\nvalue = [7, 0]"] ; 11 | 3 -> 4 ; 12 | 5 [label="gini = 0.0\nsamples = 3\nvalue = [0, 3]"] ; 13 | 3 -> 5 ; 14 | 6 [label="X[8] <= 207.5\ngini = 0.313\nsamples = 36\nvalue = [7, 29]"] ; 15 | 2 -> 6 ; 16 | 7 [label="X[10] <= 56.5\ngini = 0.469\nsamples = 16\nvalue = [6, 10]"] ; 17 | 6 -> 7 ; 18 | 8 [label="X[9] <= 88.0\ngini = 0.219\nsamples = 8\nvalue = [1, 7]"] ; 19 | 7 -> 8 ; 20 | 9 [label="gini = 0.0\nsamples = 7\nvalue = [0, 7]"] ; 21 | 8 -> 9 ; 22 | 10 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 23 | 8 -> 10 ; 24 | 11 [label="X[8] <= 38.5\ngini = 0.469\nsamples = 8\nvalue = [5, 3]"] ; 25 | 7 -> 11 ; 26 | 12 [label="gini = 0.0\nsamples = 2\nvalue = [0, 2]"] ; 27 | 11 -> 12 ; 28 | 13 [label="X[9] <= 68.5\ngini = 0.278\nsamples = 6\nvalue = [5, 1]"] ; 29 | 11 -> 13 ; 30 | 14 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 31 | 13 -> 14 ; 32 | 15 [label="gini = 0.0\nsamples = 5\nvalue = [5, 0]"] ; 33 | 13 -> 15 ; 34 | 16 [label="X[6] <= 115.0\ngini = 0.095\nsamples = 20\nvalue = [1, 19]"] ; 35 | 6 -> 16 ; 36 | 17 [label="gini = 0.0\nsamples = 19\nvalue = [0, 19]"] ; 37 | 16 -> 17 ; 38 | 18 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 39 | 16 -> 18 ; 40 | 19 [label="X[7] <= 13.5\ngini = 0.37\nsamples = 269\nvalue = [203, 66]"] ; 41 | 1 -> 19 ; 42 | 20 [label="X[5] <= 124.5\ngini = 0.299\nsamples = 224\nvalue = [183, 41]"] ; 43 | 19 -> 20 ; 44 | 21 [label="X[9] <= 106.5\ngini = 0.117\nsamples = 96\nvalue = [90, 6]"] ; 45 | 20 -> 21 ; 46 | 22 [label="X[8] <= 177.0\ngini = 0.266\nsamples = 38\nvalue = [32, 6]"] ; 47 | 21 -> 22 ; 48 | 23 [label="X[7] <= 0.5\ngini = 0.486\nsamples = 12\nvalue = [7, 5]"] ; 49 | 22 -> 23 ; 50 | 24 [label="gini = 0.0\nsamples = 6\nvalue = [6, 0]"] ; 51 | 23 -> 24 ; 52 | 25 [label="X[10] <= 70.5\ngini = 0.278\nsamples = 6\nvalue = [1, 5]"] ; 53 | 23 -> 25 ; 54 | 26 [label="gini = 0.0\nsamples = 5\nvalue = [0, 5]"] ; 55 | 25 -> 26 ; 56 | 27 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 57 | 25 -> 27 ; 58 | 28 [label="X[11] <= 10.5\ngini = 0.074\nsamples = 26\nvalue = [25, 1]"] ; 59 | 22 -> 28 ; 60 | 29 [label="X[10] <= 66.5\ngini = 0.444\nsamples = 3\nvalue = [2, 1]"] ; 61 | 28 -> 29 ; 62 | 30 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 63 | 29 -> 30 ; 64 | 31 [label="gini = 0.0\nsamples = 2\nvalue = [2, 0]"] ; 65 | 29 -> 31 ; 66 | 32 [label="gini = 0.0\nsamples = 23\nvalue = [23, 0]"] ; 67 | 28 -> 32 ; 68 | 33 [label="gini = 0.0\nsamples = 58\nvalue = [58, 0]"] ; 69 | 21 -> 33 ; 70 | 34 [label="X[9] <= 109.5\ngini = 0.397\nsamples = 128\nvalue = [93, 35]"] ; 71 | 20 -> 34 ; 72 | 35 [label="X[2] <= 0.5\ngini = 0.408\nsamples = 21\nvalue = [6, 15]"] ; 73 | 34 -> 35 ; 74 | 36 [label="gini = 0.0\nsamples = 5\nvalue = [5, 0]"] ; 75 | 35 -> 36 ; 76 | 37 [label="X[5] <= 127.5\ngini = 0.117\nsamples = 16\nvalue = [1, 15]"] ; 77 | 35 -> 37 ; 78 | 38 [label="X[5] <= 126.0\ngini = 
0.5\nsamples = 2\nvalue = [1, 1]"] ; 79 | 37 -> 38 ; 80 | 39 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 81 | 38 -> 39 ; 82 | 40 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 83 | 38 -> 40 ; 84 | 41 [label="gini = 0.0\nsamples = 14\nvalue = [0, 14]"] ; 85 | 37 -> 41 ; 86 | 42 [label="X[9] <= 126.5\ngini = 0.304\nsamples = 107\nvalue = [87, 20]"] ; 87 | 34 -> 42 ; 88 | 43 [label="X[10] <= 49.5\ngini = 0.444\nsamples = 42\nvalue = [28, 14]"] ; 89 | 42 -> 43 ; 90 | 44 [label="X[5] <= 137.0\ngini = 0.48\nsamples = 20\nvalue = [8, 12]"] ; 91 | 43 -> 44 ; 92 | 45 [label="X[6] <= 95.0\ngini = 0.49\nsamples = 14\nvalue = [8, 6]"] ; 93 | 44 -> 45 ; 94 | 46 [label="X[8] <= 194.0\ngini = 0.397\nsamples = 11\nvalue = [8, 3]"] ; 95 | 45 -> 46 ; 96 | 47 [label="X[6] <= 72.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]"] ; 97 | 46 -> 47 ; 98 | 48 [label="gini = 0.0\nsamples = 3\nvalue = [0, 3]"] ; 99 | 47 -> 48 ; 100 | 49 [label="gini = 0.0\nsamples = 3\nvalue = [3, 0]"] ; 101 | 47 -> 49 ; 102 | 50 [label="gini = 0.0\nsamples = 5\nvalue = [5, 0]"] ; 103 | 46 -> 50 ; 104 | 51 [label="gini = 0.0\nsamples = 3\nvalue = [0, 3]"] ; 105 | 45 -> 51 ; 106 | 52 [label="gini = 0.0\nsamples = 6\nvalue = [0, 6]"] ; 107 | 44 -> 52 ; 108 | 53 [label="X[7] <= 11.0\ngini = 0.165\nsamples = 22\nvalue = [20, 2]"] ; 109 | 43 -> 53 ; 110 | 54 [label="X[9] <= 125.0\ngini = 0.095\nsamples = 20\nvalue = [19, 1]"] ; 111 | 53 -> 54 ; 112 | 55 [label="gini = 0.0\nsamples = 18\nvalue = [18, 0]"] ; 113 | 54 -> 55 ; 114 | 56 [label="X[5] <= 142.5\ngini = 0.5\nsamples = 2\nvalue = [1, 1]"] ; 115 | 54 -> 56 ; 116 | 57 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 117 | 56 -> 57 ; 118 | 58 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 119 | 56 -> 58 ; 120 | 59 [label="X[7] <= 12.5\ngini = 0.5\nsamples = 2\nvalue = [1, 1]"] ; 121 | 53 -> 59 ; 122 | 60 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 123 | 59 -> 60 ; 124 | 61 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 125 | 59 -> 61 ; 126 | 62 [label="X[5] <= 147.5\ngini = 0.168\nsamples = 65\nvalue = [59, 6]"] ; 127 | 42 -> 62 ; 128 | 63 [label="X[8] <= 97.5\ngini = 0.039\nsamples = 50\nvalue = [49, 1]"] ; 129 | 62 -> 63 ; 130 | 64 [label="X[9] <= 130.5\ngini = 0.219\nsamples = 8\nvalue = [7, 1]"] ; 131 | 63 -> 64 ; 132 | 65 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 133 | 64 -> 65 ; 134 | 66 [label="gini = 0.0\nsamples = 7\nvalue = [7, 0]"] ; 135 | 64 -> 66 ; 136 | 67 [label="gini = 0.0\nsamples = 42\nvalue = [42, 0]"] ; 137 | 63 -> 67 ; 138 | 68 [label="X[9] <= 147.0\ngini = 0.444\nsamples = 15\nvalue = [10, 5]"] ; 139 | 62 -> 68 ; 140 | 69 [label="X[2] <= 0.5\ngini = 0.469\nsamples = 8\nvalue = [3, 5]"] ; 141 | 68 -> 69 ; 142 | 70 [label="gini = 0.0\nsamples = 2\nvalue = [2, 0]"] ; 143 | 69 -> 70 ; 144 | 71 [label="X[8] <= 172.5\ngini = 0.278\nsamples = 6\nvalue = [1, 5]"] ; 145 | 69 -> 71 ; 146 | 72 [label="X[9] <= 138.5\ngini = 0.5\nsamples = 2\nvalue = [1, 1]"] ; 147 | 71 -> 72 ; 148 | 73 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 149 | 72 -> 73 ; 150 | 74 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 151 | 72 -> 74 ; 152 | 75 [label="gini = 0.0\nsamples = 4\nvalue = [0, 4]"] ; 153 | 71 -> 75 ; 154 | 76 [label="gini = 0.0\nsamples = 7\nvalue = [7, 0]"] ; 155 | 68 -> 76 ; 156 | 77 [label="X[10] <= 54.5\ngini = 0.494\nsamples = 45\nvalue = [20, 25]"] ; 157 | 19 -> 77 ; 158 | 78 [label="X[5] <= 130.5\ngini = 0.32\nsamples = 25\nvalue = [5, 20]"] ; 159 | 77 -> 78 ; 160 | 79 [label="X[6] <= 100.0\ngini = 0.459\nsamples = 
14\nvalue = [5, 9]"] ; 161 | 78 -> 79 ; 162 | 80 [label="X[5] <= 113.5\ngini = 0.494\nsamples = 9\nvalue = [5, 4]"] ; 163 | 79 -> 80 ; 164 | 81 [label="gini = 0.0\nsamples = 2\nvalue = [2, 0]"] ; 165 | 80 -> 81 ; 166 | 82 [label="X[2] <= 0.5\ngini = 0.49\nsamples = 7\nvalue = [3, 4]"] ; 167 | 80 -> 82 ; 168 | 83 [label="gini = 0.0\nsamples = 2\nvalue = [2, 0]"] ; 169 | 82 -> 83 ; 170 | 84 [label="X[11] <= 17.0\ngini = 0.32\nsamples = 5\nvalue = [1, 4]"] ; 171 | 82 -> 84 ; 172 | 85 [label="gini = 0.0\nsamples = 4\nvalue = [0, 4]"] ; 173 | 84 -> 85 ; 174 | 86 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 175 | 84 -> 86 ; 176 | 87 [label="gini = 0.0\nsamples = 5\nvalue = [0, 5]"] ; 177 | 79 -> 87 ; 178 | 88 [label="gini = 0.0\nsamples = 11\nvalue = [0, 11]"] ; 179 | 78 -> 88 ; 180 | 89 [label="X[5] <= 122.5\ngini = 0.375\nsamples = 20\nvalue = [15, 5]"] ; 181 | 77 -> 89 ; 182 | 90 [label="gini = 0.0\nsamples = 10\nvalue = [10, 0]"] ; 183 | 89 -> 90 ; 184 | 91 [label="X[9] <= 125.0\ngini = 0.5\nsamples = 10\nvalue = [5, 5]"] ; 185 | 89 -> 91 ; 186 | 92 [label="gini = 0.0\nsamples = 5\nvalue = [0, 5]"] ; 187 | 91 -> 92 ; 188 | 93 [label="gini = 0.0\nsamples = 5\nvalue = [5, 0]"] ; 189 | 91 -> 93 ; 190 | 94 [label="X[9] <= 142.5\ngini = 0.347\nsamples = 85\nvalue = [19, 66]"] ; 191 | 0 -> 94 [labeldistance=2.5, labelangle=-45, headlabel="False"] ; 192 | 95 [label="X[6] <= 34.5\ngini = 0.236\nsamples = 73\nvalue = [10, 63]"] ; 193 | 94 -> 95 ; 194 | 96 [label="X[9] <= 109.5\ngini = 0.497\nsamples = 13\nvalue = [6, 7]"] ; 195 | 95 -> 96 ; 196 | 97 [label="gini = 0.0\nsamples = 6\nvalue = [0, 6]"] ; 197 | 96 -> 97 ; 198 | 98 [label="X[6] <= 22.5\ngini = 0.245\nsamples = 7\nvalue = [6, 1]"] ; 199 | 96 -> 98 ; 200 | 99 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 201 | 98 -> 99 ; 202 | 100 [label="gini = 0.0\nsamples = 6\nvalue = [6, 0]"] ; 203 | 98 -> 100 ; 204 | 101 [label="X[4] <= 0.5\ngini = 0.124\nsamples = 60\nvalue = [4, 56]"] ; 205 | 95 -> 101 ; 206 | 102 [label="X[9] <= 110.0\ngini = 0.397\nsamples = 11\nvalue = [3, 8]"] ; 207 | 101 -> 102 ; 208 | 103 [label="gini = 0.0\nsamples = 5\nvalue = [0, 5]"] ; 209 | 102 -> 103 ; 210 | 104 [label="X[5] <= 129.5\ngini = 0.5\nsamples = 6\nvalue = [3, 3]"] ; 211 | 102 -> 104 ; 212 | 105 [label="gini = 0.0\nsamples = 3\nvalue = [3, 0]"] ; 213 | 104 -> 105 ; 214 | 106 [label="gini = 0.0\nsamples = 3\nvalue = [0, 3]"] ; 215 | 104 -> 106 ; 216 | 107 [label="X[6] <= 98.5\ngini = 0.04\nsamples = 49\nvalue = [1, 48]"] ; 217 | 101 -> 107 ; 218 | 108 [label="gini = 0.0\nsamples = 40\nvalue = [0, 40]"] ; 219 | 107 -> 108 ; 220 | 109 [label="X[10] <= 61.5\ngini = 0.198\nsamples = 9\nvalue = [1, 8]"] ; 221 | 107 -> 109 ; 222 | 110 [label="gini = 0.0\nsamples = 8\nvalue = [0, 8]"] ; 223 | 109 -> 110 ; 224 | 111 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 225 | 109 -> 111 ; 226 | 112 [label="X[8] <= 65.5\ngini = 0.375\nsamples = 12\nvalue = [9, 3]"] ; 227 | 94 -> 112 ; 228 | 113 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 229 | 112 -> 113 ; 230 | 114 [label="X[8] <= 379.5\ngini = 0.298\nsamples = 11\nvalue = [9, 2]"] ; 231 | 112 -> 114 ; 232 | 115 [label="X[3] <= 0.5\ngini = 0.18\nsamples = 10\nvalue = [9, 1]"] ; 233 | 114 -> 115 ; 234 | 116 [label="X[10] <= 34.5\ngini = 0.5\nsamples = 2\nvalue = [1, 1]"] ; 235 | 115 -> 116 ; 236 | 117 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 237 | 116 -> 117 ; 238 | 118 [label="gini = 0.0\nsamples = 1\nvalue = [1, 0]"] ; 239 | 116 -> 118 ; 240 | 119 [label="gini = 0.0\nsamples = 8\nvalue = [8, 
0]"] ; 241 | 115 -> 119 ; 242 | 120 [label="gini = 0.0\nsamples = 1\nvalue = [0, 1]"] ; 243 | 114 -> 120 ; 244 | } 245 | -------------------------------------------------------------------------------- /Notebooks/Source.gv.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/a-martyn/ISL-python/ee156568a8f7307be71dad5390bae12b51dcd93f/Notebooks/Source.gv.pdf -------------------------------------------------------------------------------- /Notebooks/ch2_statistical_learning_conceptual.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 2. Statistical Learning\n", 8 | "\n", 9 | "Excercises from **Chapter 2** of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani.\n", 10 | "\n", 11 | "I've elected to use Python instead of R." 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "## Conceptual" 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "### Q1 \n", 26 | "For each of parts (a) through (d), indicate whether we would generally expect the performance of a flexible statistical learning method to be better or worse than an inflexible method. Justify your answer.\n", 27 | "\n", 28 | "a) The sample size n is extremely large, and the number of predictors p is small. \n", 29 | ">**Flexible**: we have enough observations to avoid overfitting, so assuming some there are non-linear relationships in our data a more flexible model should provide an improved fit. \n", 30 | "\n", 31 | "b) The number of predictors p is extremely large, and the number of observations n is small. \n", 32 | ">**Inflexible**: we *don't* have enough observations to avoid overfitting \n", 33 | "\n", 34 | "c) The relationship between the predictors and response is highly non-linear. \n", 35 | ">**Flexible**: a high variance model affords a better fit to non-linear relationships \n", 36 | "\n", 37 | "d) The variance of the error terms, i.e. σ2 = Var(ε), is extremely high. \n", 38 | ">**Inflexible**: a high bias model avoids overfitting to the noise in our dataset " 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "### Q2 \n", 46 | "Explain whether each scenario is a classification or regression problem, and indicate whether we are most interested in inference or prediction. Finally, provide n and p.\n", 47 | "\n", 48 | "(a) We collect a set of data on the top 500 firms in the US. For each firm we record profit, number of employees, industry and the CEO salary. We are interested in understanding which factors affect CEO salary. \n", 49 | "\n", 50 | ">regression, inference, n=500, p=4 \n", 51 | "\n", 52 | "(b) We are considering launching a new product and wish to know whether it will be a success or a failure. We collect data on 20 similar products that were previously launched. For each product we have recorded whether it was a success or failure, price charged for the product, marketing budget, competition price, and ten other variables. \n", 53 | "\n", 54 | ">classification, prediction, n=20, p=14\n", 55 | "\n", 56 | "(c) We are interested in predicting the % change in the USD/Euro exchange rate in relation to the weekly changes in the world stock markets. Hence we collect weekly data for all of 2012. 
For each week we record the % change in the USD/Euro, the % change in the US market, the % change in the British market, and the % change in the German market. \n", 57 | "\n", 58 | ">regression, prediction, n=52, p=4" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "### Q3\n", 66 | "\n", 67 | "We now revisit the bias-variance decomposition.\n", 68 | "\n", 69 | "(a) Provide a sketch of typical (squared) bias, variance, training error, test error, and Bayes (or irreducible) error curves, on a single plot, as we go from less flexible statistical learning methods towards more flexible approaches. The x-axis should represent the amount of flexibility in the method, and the y-axis should represent the values for each curve. There should be five curves. Make sure to label each one.\n", 70 | "\n", 71 | "![IMG_1908.jpg](./images/2_3.jpg)\n" 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "metadata": {}, 77 | "source": [ 78 | "(b) Explain why each of the five curves has the shape displayed in part (a).\n", 79 | "\n", 80 | "- **Bayes error**: the irreducible error which is a constant irrespective of model flexibility\n", 81 | "- **Variance**: the variance of a model increases with flexibility as the model picks up variation between training sets rsulting in more variaiton in f(X)\n", 82 | "- **Bias**: bias tends to decrease with flexibility as the model casn fit more complex relationships\n", 83 | "- **Test error**: tends to decreases as reduced bias allows the model to better fit non-linear relationships but then increases as an increasingly flexible model begins to fit the noise in the dataset (overfitting)\n", 84 | "- **Training error**: decreases monotonically with increaed flexibility as the model 'flexes' towards individual datapoints in the training set" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "### Q4 \n", 92 | "You will now think of some real-life applications for statistical learning\n", 93 | "\n", 94 | "(a) Describe three real-life applications in which classification might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. \n", 95 | "\n", 96 | " 1. Is this tumor malignant or benign?\n", 97 | " - response: boolean (is malign)\n", 98 | " - predictors (naive examples): tumor size, white blood cell count, change in adrenal mass, position in body\n", 99 | " - goal: prediction\n", 100 | " 2. What animals are in this image?\n", 101 | " - response: 'Cat', 'Dog', 'Fish'\n", 102 | " - predictors: image pixel values\n", 103 | " - goal: prediction\n", 104 | " 3. What test bench metrics are most indicative of a faulty Printed Circuit Board (PCB)?\n", 105 | " - response: boolean (is faulty)\n", 106 | " - predictors: current draw, voltage drawer, output noise, operating temp.\n", 107 | " - goal: inference\n", 108 | "\n", 109 | "\n", 110 | "(b) Describe three real-life applications in which regression might be useful. Describe the response, as well as the predictors. Is the goal of each application inference or prediction? Explain your answer. \n", 111 | "\n", 112 | " 1. How much is this house worth?\n", 113 | " - responce: SalePrice\n", 114 | " - predictors: LivingArea, BathroomCount, GarageCount, Neighbourhood, CrimeRate\n", 115 | " - goal: prediction\n", 116 | " 2. What attributes most affect the market cap. 
of a company?\n", 117 | " - response: MarketCap\n", 118 | " - predictors: Sector, Employees, FounderIsCEO, Age, TotalInvestment, Profitability, RONA\n", 119 | " - goal: inference\n", 120 | " 3. How long is this dairy cow likely to live?\n", 121 | " - response: years\n", 122 | " - predictors: past medical conditions, current weight, milk yield\n", 123 | " - goal: prediction\n", 124 | "\n", 125 | "(c) Describe three real-life applications in which cluster analysis might be useful. \n", 126 | "\n", 127 | " 1. This dataset contains observations of 3 different species of flower. Estimate which observations belong to the same species.\n", 128 | " - response: a, b, c (species class)\n", 129 | " - predictors: sepal length, petal length, number of petals\n", 130 | " - goal: prediction\n", 131 | " 2. which attributes of the flowers in dataset described above are most predictive of species?\n", 132 | " - response: a, b, c (species class)\n", 133 | " - predictors: sepal length, petal length, number of petals\n", 134 | " - goal: inference\n", 135 | " 3. Group these audio recordings of birdsong by species.\n", 136 | " - responce: (species classes)\n", 137 | " - predictors: audio sample values\n", 138 | " - goal: prediction\n", 139 | " " 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "### Q5 \n", 147 | "What are the advantages and disadvantages of a very flexible (versus a less flexible) approach for regression or classification? Under what circumstances might a more flexible approach be preferred to a less flexible approach? When might a less flexible approach be preferred?\n", 148 | "\n", 149 | "Less flexible\n", 150 | "\n", 151 | "+ (+) gives better results with few observations\n", 152 | "+ (+) simpler inference: the effect of each feature can be more easily understood\n", 153 | "+ (+) fewer parameters, faster optimisation\n", 154 | "- (-) performs poorly if observations contain highly non-linear relationships\n", 155 | "\n", 156 | "More flexible\n", 157 | "+ (+) gives better fit if observations contain non-linear relationships\n", 158 | "- (-) can overfit the data providing poor predictions for new observations\n" 159 | ] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "metadata": {}, 164 | "source": [ 165 | "### Q6 \n", 166 | "Describe the differences between a parametric and a non-parametric statistical learning approach. What are the advantages of a parametric approach to regression or classification (as opposed to a non-parametric approach)? What are its disadvantages?\n", 167 | "\n", 168 | "\n", 169 | ">A parametric approach simplifies the problem of estimating the best fit to the training data f(x) by making some assumptions about the functional form of f(x), this reduces the problem to estimating the parameters of the model. A non-parametric approach make no such assumptions and so f(x) can take any arbitrary shape.\n", 170 | "\n", 171 | ">The advantage of the parametric approach is that it simplifies the problem of estimating f(x) because it is easier to estimate paramters than an arbitrary function. The disadvantage of this approach is that the assumed form of the function f(X) could limit the degree of accuracy with which the model can fit the training data. If too many parameters are used, in an attempt to increase the models flexibility, then overfitting can occur – meaning that the model begins to fit noise in the training data that is not representive of unseen observations." 
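To make the parametric versus non-parametric contrast in Q6 concrete, here is a minimal sketch that is not part of the original notebook: it fits a linear regression (parametric, only two coefficients to estimate) and a KNN regression (non-parametric, no assumed functional form) to the same non-linear toy data. The dataset, the choice of `n_neighbors=10`, and the use of scikit-learn here are illustrative assumptions.

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import KNeighborsRegressor

# Toy data with a clearly non-linear relationship
rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 10, 200)).reshape(-1, 1)
y = np.sin(X).ravel() + rng.normal(scale=0.3, size=200)

# Parametric: assumes f(X) = b0 + b1*X, so only two parameters are estimated
linear = LinearRegression().fit(X, y)

# Non-parametric: no functional form assumed; predictions follow the local data
knn = KNeighborsRegressor(n_neighbors=10).fit(X, y)

# Training R^2 illustrates the bias of the mis-specified parametric form
print('linear R^2:', round(linear.score(X, y), 3))
print('knn R^2:   ', round(knn.score(X, y), 3))
```

On data like this the linear fit underfits (high bias) while KNN tracks the curve, which is the trade-off described in the answer above; with too few observations or too small a neighbourhood the flexible model would instead overfit.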
172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 148, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "import numpy as np\n", 181 | "import pandas as pd\n", 182 | "import seaborn as sns\n", 183 | "import warnings\n", 184 | "warnings.filterwarnings('ignore')" 185 | ] 186 | }, 187 | { 188 | "cell_type": "markdown", 189 | "metadata": {}, 190 | "source": [ 191 | "### Q7 \n", 192 | "The table below provides a training data set containing six observations, three predictors, and one qualitative response variable." 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": 50, 198 | "metadata": {}, 199 | "outputs": [ 200 | { 201 | "data": { 202 | "text/html": [ 203 | "
" 280 | ], 281 | "text/plain": [ 282 | " Obs X1 X2 X3 Y\n", 283 | "0 1 0 3 0 Red\n", 284 | "1 2 2 0 0 Red\n", 285 | "2 3 0 1 3 Red\n", 286 | "3 4 0 1 2 Green\n", 287 | "4 5 -1 0 1 Green\n", 288 | "5 6 1 -1 1 Red" 289 | ] 290 | }, 291 | "execution_count": 50, 292 | "metadata": {}, 293 | "output_type": "execute_result" 294 | } 295 | ], 296 | "source": [ 297 | "df = pd.DataFrame({'Obs': [1, 2, 3, 4, 5, 6],\n", 298 | " 'X1': [0, 2, 0, 0, -1, 1],\n", 299 | " 'X2': [3, 0, 1, 1, 0, -1],\n", 300 | " 'X3': [0, 0, 3, 2, 1, 1],\n", 301 | " 'Y': ['Red', 'Red', 'Red', 'Green', 'Green', 'Red']})\n", 302 | "\n", 303 | "df" 304 | ] 305 | }, 306 | { 307 | "cell_type": "markdown", 308 | "metadata": {}, 309 | "source": [ 310 | "Suppose we wish to use this data set to make a prediction for Y when X1 = X2 = X3 = 0 using K-nearest neighbors." 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 55, 316 | "metadata": {}, 317 | "outputs": [ 318 | { 319 | "data": { 320 | "text/html": [ 321 | "
" 405 | ], 406 | "text/plain": [ 407 | " Obs X1 X2 X3 Y EuclideanDist\n", 408 | "0 1 0 3 0 Red 3.000000\n", 409 | "1 2 2 0 0 Red 2.000000\n", 410 | "2 3 0 1 3 Red 3.162278\n", 411 | "3 4 0 1 2 Green 2.236068\n", 412 | "4 5 -1 0 1 Green 1.414214\n", 413 | "5 6 1 -1 1 Red 1.732051" 414 | ] 415 | }, 416 | "execution_count": 55, 417 | "metadata": {}, 418 | "output_type": "execute_result" 419 | } 420 | ], 421 | "source": [ 422 | "# (a) Compute the Euclidean distance between each observation and the test point,X1 =X2 =X3 =0. \n", 423 | "\n", 424 | "def euclidian_dist(x):\n", 425 | " \"\"\"Compute the row-wise euclidean distance\n", 426 | " from the origin\"\"\"\n", 427 | " return (np.sum(x**2, axis=1))**0.5\n", 428 | "\n", 429 | "euc_dist = pd.DataFrame({'EuclideanDist': euclidian_dist(df[['X1', 'X2', 'X3']])})\n", 430 | "df_euc = pd.concat([df, euc_dist], axis=1)\n", 431 | "df_euc" 432 | ] 433 | }, 434 | { 435 | "cell_type": "code", 436 | "execution_count": 78, 437 | "metadata": {}, 438 | "outputs": [ 439 | { 440 | "data": { 441 | "text/html": [ 442 | "
" 481 | ], 482 | "text/plain": [ 483 | " Obs X1 X2 X3 Y EuclideanDist\n", 484 | "4 5 -1 0 1 Green 1.414214" 485 | ] 486 | }, 487 | "execution_count": 78, 488 | "metadata": {}, 489 | "output_type": "execute_result" 490 | } 491 | ], 492 | "source": [ 493 | "# (b) What is our prediction with K = 1? Why? \n", 494 | "\n", 495 | "K = 1\n", 496 | "df_euc.nsmallest(K, 'EuclideanDist')\n", 497 | "\n", 498 | "# Our prediction is Y=Green because that is the response value of the \n", 499 | "# first nearest neighbour to the point X1 = X2 = X3 = 0" 500 | ] 501 | }, 502 | { 503 | "cell_type": "code", 504 | "execution_count": 80, 505 | "metadata": {}, 506 | "outputs": [ 507 | { 508 | "data": { 509 | "text/html": [ 510 | "
" 567 | ], 568 | "text/plain": [ 569 | " Obs X1 X2 X3 Y EuclideanDist\n", 570 | "4 5 -1 0 1 Green 1.414214\n", 571 | "5 6 1 -1 1 Red 1.732051\n", 572 | "1 2 2 0 0 Red 2.000000" 573 | ] 574 | }, 575 | "execution_count": 80, 576 | "metadata": {}, 577 | "output_type": "execute_result" 578 | } 579 | ], 580 | "source": [ 581 | "# (c) What is our prediction with K = 3? Why? \n", 582 | "\n", 583 | "K = 3\n", 584 | "df_euc.nsmallest(K, 'EuclideanDist')\n", 585 | "\n", 586 | "# Red, because majority of the 3 nearest neighbours are Red." 587 | ] 588 | }, 589 | { 590 | "cell_type": "markdown", 591 | "metadata": {}, 592 | "source": [ 593 | "(d) If the Bayes decision boundary in this problem is highly non-linear, then would we expect the best value for K to be large or small? Why? \n", 594 | "\n", 595 | "Small. A smaller value of K results in a more flexible classification model because the prediciton is based upon a smaller subset of all observations in the dataset." 596 | ] 597 | } 598 | ], 599 | "metadata": { 600 | "kernelspec": { 601 | "display_name": "Python 3", 602 | "language": "python", 603 | "name": "python3" 604 | }, 605 | "language_info": { 606 | "codemirror_mode": { 607 | "name": "ipython", 608 | "version": 3 609 | }, 610 | "file_extension": ".py", 611 | "mimetype": "text/x-python", 612 | "name": "python", 613 | "nbconvert_exporter": "python", 614 | "pygments_lexer": "ipython3", 615 | "version": "3.6.5" 616 | } 617 | }, 618 | "nbformat": 4, 619 | "nbformat_minor": 2 620 | } 621 | -------------------------------------------------------------------------------- /Notebooks/ch4_classification_conceptual.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 4. Classification – Conceptual\n", 8 | "\n", 9 | "Excercises from **Chapter 4** of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani.\n", 10 | "\n", 11 | "I've elected to use Python instead of R." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 71, 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "import numpy as np" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## 1. Using a little bit of algebra, prove that (4.2) is equivalent to (4.3). In other words, the logistic function representation and logit representation for the logistic regression model are equivalent.\n", 28 | "\n", 29 | "![1.JPG](./images/4_1.JPG)\n", 30 | "\n", 31 | "## 2. It was stated in the text that classifying an observation to the class for which (4.12) is largest is equivalent to classifying an observation to the class for which (4.13) is largest. Prove that this is the case. In other words, under the assumption that the observations in the kth class are drawn from a N(μk,σ2) distribution, the Bayes’ classifier assigns an observation to the class for which the discriminant function is maximized.\n", 32 | "\n", 33 | "![2.JPG](./images/4_2.JPG)\n", 34 | "\n", 35 | "## 3. This problem relates to the QDA model, in which the observations within each class are drawn from a normal distribution with a class- specific mean vector and a class specific covariance matrix. We con- sider the simple case where p = 1; i.e. 
there is only one feature.\n", 36 | "\n", 37 | "### Suppose that we have K classes, and that if an observation belongs to the kth class then X comes from a one-dimensional normal dis- tribution, X ∼ N(μk,σk2). Recall that the density function for the one-dimensional normal distribution is given in (4.11). Prove that in this case, the Bayes’ classifier is not linear. Argue that it is in fact quadratic.\n", 38 | "\n", 39 | "Hint: For this problem, you should follow the arguments laid out in Section 4.4.2, but without making the assumption that σ12 = . . . = σK2 .\n", 40 | "\n", 41 | "![3.jpg](./images/4_3.jpg)\n", 42 | "\n", 43 | "\n", 44 | "## 4. When the number of features p is large, there tends to be a deteri- oration in the performance of KNN and other local approaches that perform prediction using only observations that are near the test ob- servation for which a prediction must be made. This phenomenon is known as the curse of dimensionality, and it ties into the fact that non-parametric approaches often perform poorly when p is large. We will now investigate this curse.\n", 45 | "\n", 46 | "### (a) Suppose that we have a set of observations, each with measure- ments on p = 1 feature, X. We assume that X is uniformly (evenly) distributed on [0,1]. Associated with each observation is a response value. Suppose that we wish to predict a test obser- vation’s response using only observations that are within 10 % of the range of X closest to that test observation. For instance, in order to predict the response for a test observation with X = 0.6, we will use observations in the range [0.55,0.65]. On average, what fraction of the available observations will we use to make the prediction?\n", 47 | "\n", 48 | "### (b) Now suppose that we have a set of observations, each with measurements on p = 2 features, X1 and X2. We assume that (X1,X2) are uniformly distributed on [0,1]×[0,1]. We wish to predict a test observation’s response using only observations that are within 10 % of the range of X1 and within 10 % of the range of X2 closest to that test observation. For instance, in order to predict the response for a test observation with X1 = 0.6 and X2 = 0.35, we will use observations in the range [0.55, 0.65] for X1 and in the range [0.3, 0.4] for X2. On average, what fraction of the available observations will we use to make the prediction?\n", 49 | "\n", 50 | "### (c) Now suppose that we have a set of observations on p = 100 fea- tures. Again the observations are uniformly distributed on each feature, and again each feature ranges in value from 0 to 1. We wish to predict a test observation’s response using observations within the 10 % of each feature’s range that is closest to that test observation. What fraction of the available observations will we use to make the prediction?\n", 51 | "\n", 52 | "### (d) Using your answers to parts (a)–(c), argue that a drawback of KNN when p is large is that there are very few training obser- vations “near” any given test observation.\n", 53 | "\n", 54 | "![4abcd.JPG](./images/4_4abcd.JPG)\n", 55 | "\n", 56 | "### (e) Now suppose that we wish to make a prediction for a test obser- vation by creating a p-dimensional hypercube centered around the test observation that contains, on average, 10 % of the train- ing observations. For p = 1,2, and 100, what is the length of each side of the hypercube? Comment on your answer.\n", 57 | "\n", 58 | "Note: A hypercube is a generalization of a cube to an arbitrary number of dimensions. 
When p = 1, a hypercube is simply a line segment, when p = 2 it is a square, and when p = 100 it is a 100-dimensional cube.\n", 59 | "\n", 60 | "![4e.JPG](./images/4_4e.JPG)" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "## 5. We now examine the differences between LDA and QDA.\n", 68 | "\n", 69 | "### (a) If the Bayes decision boundary is linear, do we expect LDA or QDA to perform better on the training set? On the test set?\n", 70 | "\n", 71 | "- Training: QDA should perform best as higher variance model has increased flexibility to fit noise in the data\n", 72 | "- Test: LDA should perform best as increased bias is without cost if Bayes decision boundary is linear.\n", 73 | "\n", 74 | "\n", 75 | "### (b) If the Bayes decision boundary is non-linear, do we expect LDA or QDA to perform better on the training set? On the test set?\n", 76 | "\n", 77 | "- Training: QDA should perform best as higher variance model has increased flexibility to fit non-linear relationship in data and noise\n", 78 | "- Test: QDA should perform best as higher variance model has increased flexibility to fit non-linear relationship in data\n", 79 | "\n", 80 | "\n", 81 | "### (c) In general, as the sample size n increases, do we expect the test prediction accuracy of QDA relative to LDA to improve, decline, or be unchanged? Why?\n", 82 | "\n", 83 | "Improve, as increased sample size reduces a more flexible models tendency to overfit the training data.\n", 84 | "\n", 85 | "### (d) True or False: Even if the Bayes decision boundary for a given problem is linear, we will probably achieve a superior test error rate using QDA rather than LDA because QDA is flexible enough to model a linear decision boundary. Justify your answer.\n", 86 | "\n", 87 | "False. If the bayes decision boundary is linear, then a more flexible model is prone to overfit and take account of noise in the training data that will reduce its accuracy in making predictions during test.\n", 88 | "\n", 89 | "## 6. Suppose we collect data for a group of students in a statistics class with variables X1 = hours studied, X2 = undergrad GPA, and Y = receive an A. We fit a logistic regression and produce estimated coefficient, βˆ0 = −6, βˆ1 = 0.05, βˆ2 = 1.\n", 90 | "\n", 91 | "### (a) Estimate the probability that a student who studies for 40 h and has an undergrad GPA of 3.5 gets an A in the class.\n", 92 | "\n", 93 | "For multiple logistic regression a prediction p(X) is given by \n", 94 | "\n", 95 | "$$p(X) = \\frac{\\exp{(β_0+β_1 X_1 + β_2 X_2)}}{1 + \\exp{(β_0+β_1 X_1 + β_2 X_2)}}$$" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 43, 101 | "metadata": {}, 102 | "outputs": [ 103 | { 104 | "name": "stdout", 105 | "output_type": "stream", 106 | "text": [ 107 | "p(X) = 0.3775\n" 108 | ] 109 | } 110 | ], 111 | "source": [ 112 | "beta = np.array([-6, 0.05, 1])\n", 113 | "X = np.array([1, 40, 3.5])\n", 114 | "\n", 115 | "pX = np.exp(beta.T@X) / (1 + np.exp(beta.T@X))\n", 116 | "\n", 117 | "print('p(X) = ' + str(np.around(pX, 4)))" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "### (b) How many hours would the student in part (a) need to study to have a 50 % chance of getting an A in the class?\n", 125 | "\n", 126 | "![6b.JPG](./images/4_6b.JPG)\n", 127 | "\n", 128 | "50 hrs" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "## 7. 
Suppose that we wish to predict whether a given stock will issue a dividend this year (“Yes” or “No”) based on X, last year’s percent profit. We examine a large number of companies and discover that the mean value of X for companies that issued a dividend was X ̄ = 10, while the mean for those that didn’t was X ̄ = 0. In addition, the variance of X for these two sets of companies was σˆ2 = 36. Finally, 80 % of companies issued dividends. Assuming that X follows a nor- mal distribution, predict the probability that a company will issue a dividend this year given that its percentage profit was X = 4 last year.\n", 136 | "\n", 137 | "### Hint: Recall that the density function for a normal random variable is f(x) = √ 1 e−(x−μ)2/2σ2 . You will need to use Bayes’ theorem.\n", 138 | "\n", 139 | "$p_1(4) = 0.752$\n" 140 | ] 141 | }, 142 | { 143 | "cell_type": "markdown", 144 | "metadata": {}, 145 | "source": [ 146 | "### 8. Suppose that we take a data set, divide it into equally-sized training and test sets, and then try out two different classification procedures. First we use logistic regression and get an error rate of 20 % on the training data and 30 % on the test data. Next we use 1-nearest neighbors (i.e. K = 1) and get an average error rate (averaged over both test and training data sets) of 18%. Based on these results, which method should we prefer to use for classification of new observations? Why?\n", 147 | "\n", 148 | "KNN with k=1 is a highly flexible non-parametric model, it is prone to overfitting in which case we would observe a low training error and high test error.\n", 149 | "\n", 150 | "The test error will be most indicative of the models performance on new observations.\n", 151 | "\n", 152 | "We know that the average error rate for KNN is 18%. We expect the error in test to be higher than training therefore the best possible test error is 18% (assuming 18% error in training).\n", 153 | "\n", 154 | "The worst possible test error is 36% (assuming 0% error in training). \n", 155 | "\n", 156 | "Therefore the knn test error is somewhere in the range 18 - 36%.\n", 157 | "\n", 158 | "The logistic regression achieves a test error of 30%. This inflexible model is failing to account for some variance in the data, but we do no know if this variance is noise (an irreducible error), or variance in the true relationship which could be accounted for by a more flexible model.\n", 159 | "\n", 160 | "Without any further information we can calculate the probability that KNN produces lower than 30% error in test as:\n", 161 | "\n", 162 | "$p = \\frac{30-18}{36-18} = \\frac{2}{3}$\n", 163 | "\n", 164 | "Therefore we should prefere the KNN method.\n", 165 | "\n", 166 | "**INCORRECT: We know k=1 so training error will be 0%**\n" 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": {}, 172 | "source": [ 173 | "## 9. This problem has to do with odds.\n", 174 | "### (a) On average, what fraction of people with an odds of 0.37 of defaulting on their credit card payment will in fact default?\n", 175 | "\n", 176 | "$odds = \\frac{p(X)}{1 - p(X)} = 0.37$ \n", 177 | "\n", 178 | "Rearranging for p(X)\n", 179 | " \n", 180 | "$p(X) = 0.37 - 0.37p(X)$ \n", 181 | " \n", 182 | "$p(X) + 0.37p(X) = 0.37$ \n", 183 | " \n", 184 | "$p(X) = \\frac{0.37}{1 + 0.37}$ \n", 185 | " \n", 186 | "$p(X) = 0.27$\n", 187 | "\n", 188 | "### (b) Suppose that an individual has a 16% chance of defaulting on her credit card payment. 
What are the odds that she will default?\n", 189 | "\n", 190 | "$odds = \\frac{p(X)}{1 - p(X)} = \\frac{0.16}{1 - 0.16} = 0.19$" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [] 199 | } 200 | ], 201 | "metadata": { 202 | "kernelspec": { 203 | "display_name": "Python 3", 204 | "language": "python", 205 | "name": "python3" 206 | }, 207 | "language_info": { 208 | "codemirror_mode": { 209 | "name": "ipython", 210 | "version": 3 211 | }, 212 | "file_extension": ".py", 213 | "mimetype": "text/x-python", 214 | "name": "python", 215 | "nbconvert_exporter": "python", 216 | "pygments_lexer": "ipython3", 217 | "version": "3.6.5" 218 | } 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 2 222 | } 223 | -------------------------------------------------------------------------------- /Notebooks/ch5_resampling_methods_conceptual.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 5. Resampling Methods – Conceptual\n", 8 | "\n", 9 | "Excercises from **Chapter 5* of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import numpy as np\n", 19 | "import matplotlib.pyplot as plt\n", 20 | "import seaborn as sns" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## 1. Using basic statistical properties of the variance, as well as single- variable calculus, derive (5.6). In other words, prove that α given by (5.6) does indeed minimize Var(αX + (1 − α)Y ).\n", 28 | "\n", 29 | "![5_1.jpg](./images/5_1.jpg)\n", 30 | "\n", 31 | "\n", 32 | "## 2. We will now derive the probability that a given observation is part of a bootstrap sample. Suppose that we obtain a bootstrap sample from a set of n observations.\n", 33 | "\n", 34 | "- (a) What is the probability that the first bootstrap observation is not the jth observation from the original sample? Justify your answer.\n", 35 | "- (b) What is the probability that the second bootstrap observation is not the jth observation from the original sample?\n", 36 | "- (c) Argue that the probability that the jth observation is not in the bootstrap sample is (1 − 1/n)n.\n", 37 | "- (d) When n = 5, what is the probability that the jth observation is in the bootstrap sample?\n", 38 | "- (e) When n = 100, what is the probability that the jth observation is in the bootstrap sample?\n", 39 | "- (f) When n = 10, 000, what is the probability that the jth observa- tion is in the bootstrap sample?\n", 40 | "\n", 41 | "![5_2.jpg](./images/5_2.jpg)\n", 42 | "\n", 43 | "### 2(g) Create a plot that displays, for each integer value of n from 1 to 100,000, the probability that the jth observation is in the bootstrap sample. Comment on what you observe." 
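As a quick numerical cross-check of parts (d)-(f) above, which the original notebook answers in the scanned image rather than in code, the probability that the jth observation appears in a bootstrap sample of size n is 1 - (1 - 1/n)^n. This small snippet is supplementary and not part of the original notebook.

```python
# P(j in bootstrap sample) = 1 - (1 - 1/n)^n, which tends to 1 - 1/e ≈ 0.632 as n grows
for n in [5, 100, 10000]:
    print(n, round(1 - (1 - 1 / n) ** n, 3))
# 5     -> 0.672
# 100   -> 0.634
# 10000 -> 0.632
```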
44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 2, 49 | "metadata": {}, 50 | "outputs": [ 51 | { 52 | "data": { 53 | "text/plain": [ 54 | "Text(0,0.5,'probability')" 55 | ] 56 | }, 57 | "execution_count": 2, 58 | "metadata": {}, 59 | "output_type": "execute_result" 60 | }, 61 | { 62 | "data": { 63 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEKCAYAAAAB0GKPAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAGMhJREFUeJzt3X+QJ3V95/Hni8UFTUQXWQ1hF3fJrR4bNYJTiGL8ecDKpcREy9vNJaLxjsopemq8HJQWmM1ZehdjLAylrLmNaEUIh7lky1qLcPwwGn/tcPyS1YVhjTAuCeOBQEUUl33fH9+e0Awz8+1dtmdmd56Pqm99uz/d/f2+e3v4vujuT3enqpAkaZhD5rsASdKBwcCQJHViYEiSOjEwJEmdGBiSpE4MDElSJwaGJKkTA0OS1ImBIUnq5ND5LmB/Oeqoo2rVqlXzXYYkHVCuv/76H1bV8i7zHjSBsWrVKkZHR+e7DEk6oCT5ftd5PSQlSerEwJAkdWJgSJI6MTAkSZ0YGJKkTnoLjCSbk9yT5NszTE+SC5OMJbk5yYmtaWclub15ndVXjZKk7vrcw/gMsG6W6a8F1jSvs4FPAiQ5ErgAeDFwEnBBkmU91ilJ6qC3wKiqvwPunWWWM4HP1sA3gKcnORo4Hbiqqu6tqvuAq5g9eJ6QHz+8m4/97Q5uuPO+vr5Ckg4K83kO4xjgrtb4eNM2U/vjJDk7yWiS0YmJiX0q4qGHH+HCa8a45Qf379PykrRYzGdgZJq2mqX98Y1Vm6pqpKpGli/vdGW7JGkfzWdgjAMrW+MrgF2ztEuS5tF8BsYW4M1Nb6mTgfur6m7gSuC0JMuak92nNW2SpHnU280Hk1wKvBI4Ksk4g55PTwKoqk8BW4EzgDHgx8Bbm2n3JvlDYFvzURuraraT55KkOdBbYFTVhiHTC3jHDNM2A5v7qEuStG+80rtR055WlyRNWvSBkUzXKUuSNNWiDwxJUjcGhiSpEwNDktSJgSFJ6sTAaJTdpCRpVos+MOwjJUndLPrAkCR1Y2BIkjoxMCRJnRgYkqRODAxJUicGRsNOtZI0u0UfGN57UJK6WfSBIUnqxsCQJHXSa2AkWZdkR5KxJOdOM/3ZSa5OcnOS65KsaE17JMmNzWtLn3VKkobr85neS4CLgFOBcWBbki1Vtb0120eBz1bVJUleDXwY+O1m2kNV9cK+6pMk7Z0+9zBOAsaqamdVPQxcBpw5ZZ61wNXN8LXTTJckLRB9BsYxwF2t8fGmre0m4A3N8K8DT03yjGb88CSjSb6R5PU91gn4TG9JGqbPwJiuw+rUn+X3Aa9IcgPwCuAHwO5m2rFVNQL8JvDxJL/0uC9Izm5CZXRiYmIfi7RfrSR10WdgjAMrW+MrgF3tGapqV1X9RlWdALy/abt/clrzvhO4Djhh6hdU1aaqGqmqkeXLl/eyEpKkgT4DYxuwJsnqJEuB9cBjejslOSrJZA3nAZub9mVJDpucBzgFaJ8slyTNsd4Co6p2A+cAVwLfAS6vqluTbEzyuma2VwI7ktwGPAv4UNN+PDCa5CYGJ8M/MqV3lSRpjvXWrRagqrYCW6e0nd8avgK4YprlvgY8v8/aJEl7xyu9JUmdGBgNe9VK0uwMDHvVSlInBoYkqRMDQ5LUiYEhSerEwJAkdWJgNMq7D0rSrAwMSVIniz4wYrdaSepk0QeGJKkbA0OS1ImBIUnqxMCQJHViYEiSOln0gWEnKUnqZtEHhiSpGwNDktRJr4GRZF2SHUnGkpw7zfRnJ7k6yc1JrkuyojXtrCS3N6+z+qxTkjRcb4GRZAlwEfBaYC2wIcnaKbN9FPhsVb0A2Ah8uFn2SOAC4MXAScAFSZb1Vaskabg+9zBOAsaqamdVPQxcBpw5ZZ61wNXN8LWt6acDV1XVvVV1H3AVsK7HWiVJQ/QZGMcAd7XGx5u2tpuANzTDvw48NckzOi67X3mzWkmaXZ+BMV2P1ak/y+8DXpHkBuAVwA+A3R2XJcnZSUaTjE5MTOxbkd59UJI66TMwxoGVrfEVwK72DFW1q6p+o6pOAN7ftN3fZdlm3k1VNVJVI8uXL9/f9UuSWvoMjG3AmiSrkywF1gNb2jMkOSrJZA3nAZub4SuB05Isa052n9a0SZLmSW+BUVW7gXMY/NB/B7i8qm5NsjHJ65rZXgnsSHIb8CzgQ82y9wJ/yCB0tgEbmzZJ0jw5tM8Pr6qtwNYpbee3hq8Arphh2c08uschSZpnXuktSerEwGjU4zthSZJaFn1g2KlWkrpZ9IEhSerGwJAkdWJgSJI6MTAkSZ0YGA1vPihJszMwJEmdLPrA8Ga1ktTNog8MSVI3BoYkqRMDQ5LUiYEhSerEwGjYq1aSZtcpMJIs6buQ+RJvPyhJnXTdwxhL8kdJ1vZajSRpweoaGC8AbgP+LMk3kpyd5Ige65IkLTCdAqOqHqyqT1fVS4HfBy4A7k5ySZJ/NdNySdYl2ZFkLMm500w/Nsm1SW5IcnOSM5r2VUkeSnJj8/rUPq6fJGk/6fRM7+Ycxr8F3gqsAv4Y+AvgVxk8s/s5MyxzEXAqMA5sS7Klqra3ZvsAcHlVfbI53LW1+XyAO6rqhfuwTpKkHnQKDOB24Frgj6rqa632K5K8fIZlTgLGqmonQJLLgDOBdmAUMHlo62nArq6FS5LmVtdzGG+uqre1wyLJKQBV9a4ZljkGuKs1Pt60tX0Q+K0k4wz2Lt7Zmra6OVT15SS/2rHOfebdaiVpdl0D48Jp2j4xZJnp+qtO/VneAHymqlYAZwCfS3IIcDdwbFWdALwX+Px0J9mbk++jSUYnJiaGrsS0RdqrVpI6mfWQVJKXAC8Flid5b2vSEcCwazPGgZWt8RU8/pDT24B1AFX19SSHA0dV1T3AT5v265PcweA8yWh74araBGwCGBkZcR9Bkno0bA9jKfDzDILlqa3XA8Abhyy7DViTZHWSpcB6YMuUee4EXgOQ5HjgcGAiyfLJiwWTHAesAXZ2XSlJ0v436x5GVX0Z+HKSz1TV9/fmg6tqd5JzgCsZ7I1srqpbk2wERqtqC/B7wKeTvIfB4aq3VFU1J9I3JtkNPAL8blXdu/erJ0naX4Ydkvp4Vb0b+NMkjzvkU1Wvm235qtrK4GR2u+381vB24JRplvsC8IXZS5ckzaVh3Wo/17x/tO9CJEkL27BDUtc371+em3Lm
T3m/Wkma1bBDUrcwy52/q+oF+70iSdKCNOyQ1K/NSRWSpAVv2CGpveoZJUk6eM16HUaSrzbvDyZ5YOr73JQoSVoIhu1hvKx5f+rclCNJWqi63q2WJCcCL2NwEvyrVXVDb1XNA28+KEmz6/pM7/OBS4BnAEcBn0nygT4LkyQtLF33MDYAJ1TVTwCSfAT4v8B/66uwueLdaiWpm663N/8HBjcGnHQYcMd+r0aStGANu3DvEwzOWfwUuDXJVc34qcBX+y9PkrRQDDskNfn8ieuB/91qv66XaiRJC9awbrWXzFUhkqSFrdNJ7yRrgA8Da2mdy6iq43qqS5K0wHQ96f3nwCeB3cCrgM/y6K3PD2iZ9tHjkqSpugbGk6vqaiBV9f2q+iDw6v7KkiQtNF2vw/hJkkOA25vHrv4AeGZ/ZUmSFpquexjvBp4CvAt4EfDbwFnDFkqyLsmOJGNJzp1m+rFJrk1yQ5Kbk5zRmnZes9yOJKd3rFOS1JNOexhVtQ2g2ct4V1U9OGyZJEuAixhcszEObEuypXmO96QPAJdX1SeTrGXw/O9VzfB64JeBXwT+T5LnVNUje7FukqT9qOu9pEaap+/dDNyS5KYkLxqy2EnAWFXtrKqHgcuAM6fMU8ARzfDTgF3N8JnAZVX106r6HjDWfJ4kaZ50PSS1GXh7Va2qqlXAOxj0nJrNMcBdrfHxpq3tg8BvJRlnsHfxzr1Ydr8qb1crSbPqGhgPVtVXJkeq6qvAsMNS0/VXnfqrvAH4TFWtAM4APtcc9uqyLEnOTjKaZHRiYmJIOTMUaa9aSepk2L2kTmwGv5XkYuBSBj/c/47htwcZB1a2xlfw6CGnSW8D1gFU1deTHM7g9uldlqWqNgGbAEZGRtxFkKQeDTvp/cdTxi9oDQ/7gd4GrEmymkE33PXAb06Z507gNQyer3E8g6vIJ4AtwOeTfIzBSe81wLeGfJ8kqUfD7iX1qn394Kra3VyzcSWwBNhcVbcm2QiMVtUW4PeATyd5D4MAeksNTibcmuRyYDuDq8vfYQ8pSZpfXe8l9TQGexcvb5q+DGysqvtnW66qtjI4md1uO781vB04ZYZlPwR8qEt9kqT+7U0vqQeBNzWvBxjeS0qSdBDpemuQX6qqN7TG/yDJjX0UNF/sVStJs+u6h/FQkpdNjiQ5BXion5Lmlr1qJambrnsYvwt8tjmXAXAfHe4lJUk6eAwNjOZCuudW1a8kOQKgqh7ovTJJ0oIy9JBUVe0BzmmGHzAsJGlx6noO46ok70uyMsmRk69eK5MkLShdz2H8DoML694+pf2geaa3naQkaXZdA2Mtg7B4GYPf1q8An+qrKEnSwtM1MC5hcLHehc34hqbtTX0UNZfi7WolqZOugfHcqvqV1vi1SW7qoyBJ0sLU9aT3DUlOnhxJ8mLg7/spSZK0EHXdw3gx8OYkdzbjxwLfaR7bWlX1gl6qkyQtGF0DY12vVUiSFrxOgVFV3++7kPnmzQclaXZdz2EctOwjJUndLPrAkCR1Y2BIkjrpNTCSrEuyI8lYknOnmf4nSW5sXrcl+VFr2iOtaVv6rFOSNFzXXlJ7LckS4CLgVGAc2JZkS/McbwCq6j2t+d8JnND6iIeq6oV91SdJ2jt97mGcBIxV1c6qehi4DDhzlvk3AJf2WI8k6QnoMzCOAe5qjY83bY+T5NnAauCaVvPhSUaTfCPJ6/src6C8X60kzaq3Q1JM32N1pl/l9cAVVfVIq+3YqtqV5DjgmiS3VNUdj/mC5GzgbIBjjz1234q0X60kddLnHsY4sLI1vgLYNcO865lyOKqqdjXvO4HreOz5jcl5NlXVSFWNLF++fH/ULEmaQZ+BsQ1Yk2R1kqUMQuFxvZ2SPBdYBny91bYsyWHN8FHAKcD2qctKkuZOb4ekqmp3knOAK4ElwOaqujXJRmC0qibDYwNwWdVjbs5xPHBxkj0MQu0j7d5VkqS51+c5DKpqK7B1Stv5U8Y/OM1yXwOe32dtkqS945XekqRODIyGd6uVpNkt+sDwmd6S1M2iDwxJUjcGhiSpEwNDktSJgSFJ6sTAaNhJSpJmZ2BIkjoxMCRJnRgYkqRODAxJUicGhiSpEwNDktSJgTHJuw9K0qwMDHyutyR1YWBIkjoxMCRJnfQaGEnWJdmRZCzJudNM/5MkNzav25L8qDXtrCS3N6+z+qxTkjRcb8/0TrIEuAg4FRgHtiXZUlXbJ+epqve05n8ncEIzfCRwATDC4DZP1zfL3tdXvZKk2fW5h3ESMFZVO6vqYeAy4MxZ5t8AXNoMnw5cVVX3NiFxFbCux1olSUP0GRjHAHe1xsebtsdJ8mxgNXDN3iyb5Owko0lGJyYmnlCxdqqVpNn1GRjTdVad6Xd5PXBFVT2yN8tW1aaqGqmqkeXLl+9jmdN/mSTpsfoMjHFgZWt8BbBrhnnX8+jhqL1dVpI0B/oMjG3AmiSrkyxlEApbps6U5LnAMuDrreYrgdOSLEuyDDitaZMkzZPeeklV1e4k5zD4oV8CbK6qW5NsBEarajI8NgCXVT16b46qujfJHzIIHYCNVXVvX7VKkobrLTAAqmorsHVK2/lTxj84w7Kbgc29FSdJ2ite6S1J6sTAaHizWkmanYEBxNvVStJQBoYkqRMDQ5LUiYEhSerEwJAkdWJgNMrbD0rSrAwMSVInBgberVaSujAwJEmdGBiSpE4MDElSJwaGJKkTA6PhzQclaXYGBuC9ByVpOANDktSJgSFJ6qTXwEiyLsmOJGNJzp1hnjcl2Z7k1iSfb7U/kuTG5rVlumUlSXOnt2d6J1kCXAScCowD25JsqartrXnWAOcBp1TVfUme2fqIh6rqhX3VJ0naO33uYZwEjFXVzqp6GLgMOHPKPP8RuKiq7gOoqnt6rEeS9AT0GRjHAHe1xsebtrbnAM9J8vdJvpFkXWva4UlGm/bXT/cFSc5u5hmdmJh4QsXaq1aSZtfbISmmv6ff1N/lQ4E1wCuBFcBXkjyvqn4EHFtVu5IcB1yT5JaquuMxH1a1CdgEMDIyss+/+fH2g5I0VJ97GOPAytb4CmDXNPP8TVX9rKq+B+xgECBU1a7mfSdwHXBCj7VKkoboMzC2AWuSrE6yFFgPTO3t9NfAqwCSHMXgENXOJMuSHNZqPwXYjiRp3vR2SKqqdic5B7gSWAJsrqpbk2wERqtqSzPttCTbgUeA/1JV/y/JS4GLk+xhEGofafeukiTNvT7PYVBVW4GtU9rObw0X8N7m1Z7na8Dz+6xNkrR3vNJbktSJgQEsPfQQfvKzR+a7DEla0AwM4FlHHMZt//QgD+/eM9+lSNKC1es5jAPFa45/Fpv+bifP+cCXOPxJh/CUpYdySEIChwQOSVrj6XQ79K5XdqTDh3X6rP1UU5d6JC0sxx99BJ/Y0P+VBwYG8F/X/WtOPHYZO/7xQf754d38+OHd7CmoKvbsgaLYU7Cnij17hl8f2PUKwi4PberyWdXhgzrV5OXu0gFp5bInz8n3GBjAkkPCuuf9Auue9wvzXYokLView5AkdWJgSJI6MTAkSZ0YGJK
kTgwMSVInBoYkqRMDQ5LUiYEhSeokXa4SPhAkmQC+/wQ+4ijgh/upnAPFYlvnxba+4DovFk9knZ9dVcu7zHjQBMYTlWS0qkbmu465tNjWebGtL7jOi8VcrbOHpCRJnRgYkqRODIxHbZrvAubBYlvnxba+4DovFnOyzp7DkCR14h6GJKmTRR8YSdYl2ZFkLMm5813P3kqyMsm1Sb6T5NYk/7lpPzLJVUlub96XNe1JcmGzvjcnObH1WWc189+e5KxW+4uS3NIsc2EWwGP5kixJckOSLzbjq5N8s6n9L5MsbdoPa8bHmumrWp9xXtO+I8nprfYF9zeR5OlJrkjy3WZbv2QRbOP3NH/T305yaZLDD7btnGRzknuSfLvV1vt2nek7hqqqRfsClgB3AMcBS4GbgLXzXddersPRwInN8FOB24C1wP8Azm3azwX+ezN8BvAlBk9sPRn4ZtN+JLCzeV/WDC9rpn0LeEmzzJeA1y6A9X4v8Hngi8345cD6ZvhTwH9qht8OfKoZXg/8ZTO8ttnehwGrm7+DJQv1bwK4BPgPzfBS4OkH8zYGjgG+Bzy5tX3fcrBtZ+DlwInAt1ttvW/Xmb5jaL3z/R/CPP9RvgS4sjV+HnDefNf1BNfpb4BTgR3A0U3b0cCOZvhiYENr/h3N9A3Axa32i5u2o4HvttofM988reMK4Grg1cAXm/8YfggcOnW7AlcCL2mGD23my9RtPTnfQvybAI5ofjwzpf1g3sbHAHc1P4KHNtv59INxOwOreGxg9L5dZ/qOYa/Ffkhq8o9y0njTdkBqdsNPAL4JPKuq7gZo3p/ZzDbTOs/WPj5N+3z6OPD7wJ5m/BnAj6pqdzPervFf1quZfn8z/97+O8yn44AJ4M+bw3B/luTnOIi3cVX9APgocCdwN4Ptdj0H93aeNBfbdabvmNViD4zpjtMekN3Gkvw88AXg3VX1wGyzTtNW+9A+L5L8GnBPVV3fbp5m1hoy7YBY38ahDA5bfLKqTgD+mcFhhJkc8OvcHFM/k8FhpF8Efg547TSzHkzbeZh5X8fFHhjjwMrW+Apg1zzVss+SPIlBWPxFVf1V0/xPSY5uph8N3NO0z7TOs7WvmKZ9vpwCvC7JPwCXMTgs9XHg6UkObeZp1/gv69VMfxpwL3v/7zCfxoHxqvpmM34FgwA5WLcxwL8BvldVE1X1M+CvgJdycG/nSXOxXWf6jlkt9sDYBqxpel4sZXCybMs817RXml4P/xP4TlV9rDVpCzDZW+IsBuc2Jtvf3PS4OBm4v9klvRI4Lcmy5v/uTmNwjPdu4MEkJzff9ebWZ825qjqvqlZU1SoG2+uaqvr3wLXAG5vZpq7v5L/DG5v5q2lf3/SuWQ2sYXCCcMH9TVTVPwJ3JXlu0/QaYDsH6TZu3AmcnOQpTU2T63zQbueWudiuM33H7ObzxNZCeDHoeXAbgx4T75/vevah/pcx2M28GbixeZ3B4Pjt1cDtzfuRzfwBLmrW9xZgpPVZvwOMNa+3ttpHgG83y/wpU06+zuO6v5JHe0kdx+CHYAz4X8BhTfvhzfhYM/241vLvb9ZpB61eQQvxbwJ4ITDabOe/ZtAb5qDexsAfAN9t6vocg55OB9V2Bi5lcI7mZwz2CN42F9t1pu8Y9vJKb0lSJ4v9kJQkqSMDQ5LUiYEhSerEwJAkdWJgSJI6MTAkSZ0YGJKkTgwMqUdJVmXw/IpPN892+NskT57vuqR9YWBI/VsDXFRVvwz8CHjDPNcj7RMDQ+rf96rqxmb4egbPP5AOOAaG1L+ftoYfYXC7cumAY2BIkjoxMCRJnXi3WklSJ+5hSJI6MTAkSZ0YGJKkTgwMSVInBoYkqRMDQ5LUiYEhSerEwJAkdfL/Ab5Bi83pB09DAAAAAElFTkSuQmCC\n", 64 | "text/plain": [ 65 | "
" 66 | ] 67 | }, 68 | "metadata": {}, 69 | "output_type": "display_data" 70 | } 71 | ], 72 | "source": [ 73 | "def prob_j_in_sample(n):\n", 74 | " return 1 - (1 - 1/n)**n\n", 75 | "\n", 76 | "x = np.arange(1, 100000)\n", 77 | "y = np.array([prob_j_in_sample(n) for n in x])\n", 78 | "\n", 79 | "ax = sns.lineplot(x=x, y=prob_j_in_sample(x))\n", 80 | "plt.xlabel('n')\n", 81 | "plt.ylabel('probability')" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "### 2(h) We will now investigate numerically the probability that a bootstrap sample of size n = 100 contains the jth observation. Here j = 4. We repeatedly create bootstrap samples, and each time we record whether or not the fourth observation is contained in the bootstrap sample." 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 15, 94 | "metadata": {}, 95 | "outputs": [ 96 | { 97 | "data": { 98 | "text/plain": [ 99 | "0.6353635363536354" 100 | ] 101 | }, 102 | "execution_count": 15, 103 | "metadata": {}, 104 | "output_type": "execute_result" 105 | } 106 | ], 107 | "source": [ 108 | "store = [] \n", 109 | "for i in np.arange(1, 10000):\n", 110 | " store += [np.sum((np.random.randint(low=1, high=101, size=100) == 4)) > 0]\n", 111 | "\n", 112 | "np.mean(store)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "**Comment**\n", 120 | "\n", 121 | "The result observed from a numerical approach above is simimilar to to our probabilistic estimation for a sample size of 100 which was P = 0.634.\n", 122 | "\n", 123 | "It is interesting to note that there is a suprisingly high level of variability between results given that results are averaged over 10000 tests. This can be observed by running the above cell multiple time (note that no random seed is set)." 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "metadata": {}, 129 | "source": [ 130 | "## 3. We now review k-fold cross-validation.\n", 131 | "\n", 132 | "### (a) Explain how k-fold cross-validation is implemented.\n", 133 | "\n", 134 | "In k-fold cross validation k independant samples are taken from the set of all available observations each of size, $1 - (\\frac{n}{k})$\n", 135 | "\n", 136 | "The model is then fitted to each of these training samples, and then tested on the observations that were excluded from that sample. This produces k error scores which are then averaged to produce the final cross-validation score.\n", 137 | "\n", 138 | "Note that the proportion of observations that are included in each training increases increases with k.\n", 139 | "\n", 140 | "### (b) What are the advantages and disadvantages of k-fold cross- validation relative to:\n", 141 | "\n", 142 | "### - i. The validation set approach?\n", 143 | "\n", 144 | "When $k>2$, cross-validation provides a larger training set than the validation set approach. This means that there is less bias in the training setting. This means crossvalidation can produce more accurate estimates for more flexible models that benefit from a larger number of observations in the training set.\n", 145 | "\n", 146 | "Cross-validation results will exhibit more variability than the validation set approach. The approach is also more computationally expensive because the model must be fitted and tested for each fold in k.\n", 147 | "\n", 148 | "### - ii. LOOCV?\n", 149 | "\n", 150 | "Cross-validation for k
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
7 | 8 | This repository contains my solutions to the labs and exercises as Jupyter Notebooks written in Python using: 9 | 10 | - NumPy 11 | - Pandas 12 | - Matplotlib 13 | - Seaborn 14 | - Patsy 15 | - StatsModels 16 | - Scikit-learn 17 | 18 | 19 | Perhaps of most interest will be the recreation of some functions from the R language that I couldn't find in the Python ecosystem. These took me some time to reproduce, but the implementation details are not essential to the concepts taught in the book, so please feel free to reuse them. For example, Chapter 3 includes a reproduction of R's `lm()` four-way diagnostic plot for linear regression. Also, a collection of [all required datasets](./Notebooks/data) is provided in .csv format. 20 | 21 | 22 | ## To view notebooks 23 | 24 | Links to view each notebook are below. The code is provided [here](./Notebooks). 25 | 26 | [Chapter 2 - Statistical Learning: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch2_statistical_learning_conceptual.ipynb) 27 | [Chapter 2 - Statistical Learning: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch2_statistical_learning_applied.ipynb) 28 | 29 | 30 | [Chapter 3 - Linear Regression: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch3_linear_regression_conceptual.ipynb) 31 | [Chapter 3 - Linear Regression: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch3_linear_regression_applied.ipynb) 32 | 33 | 34 | [Chapter 4 - Classification: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch4_classification_conceptual.ipynb) 35 | [Chapter 4 - Classification: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch4_classification_applied.ipynb) 36 | 37 | 38 | [Chapter 5 - Resampling Methods: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch5_resampling_methods_conceptual.ipynb) 39 | [Chapter 5 - Resampling Methods: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch5_resampling_methods_applied.ipynb) 40 | 41 | 42 | [Chapter 6 - Linear Model Selection and Regularization: Labs](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch6_linear_model_selection_and_regularisation_labs.ipynb) 43 | [Chapter 6 - Linear Model Selection and Regularization: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch6_linear_model_selection_and_regularisation_conceptual.ipynb) 44 | [Chapter 6 - Linear Model Selection and Regularization: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch6_linear_model_selection_and_regularisation_applied.ipynb) 45 | 46 | 47 | [Chapter 7 - Moving Beyond Linearity: Labs](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch7_moving_beyond_linearity_labs.ipynb) 48 | [Chapter 7 - Moving Beyond Linearity: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch7_moving_beyond_linearity_applied.ipynb) 49 | 50 | 51 | [Chapter 8 - Tree-Based Methods: Labs](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch8_tree_based_methods_labs.ipynb) 52 | [Chapter 8 - Tree-Based Methods: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch8_tree_based_methods_conceptual.ipynb) 53 | [Chapter 8 - Tree-Based Methods: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch8_tree_based_methods_applied.ipynb) 54 | 55 | 56 | [Chapter 9 - Support Vector Machines: Labs](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch9_support_vector_machines_labs.ipynb) 57 | [Chapter 9 - Support Vector Machines: Conceptual](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch9_support_vector_machines_conceptual.ipynb) 58 | [Chapter 9 - Support Vector Machines: Applied](https://nbviewer.jupyter.org/github/a-martyn/ISL-python/blob/master/Notebooks/ch9_support_vector_machines_applied.ipynb) 59 | 60 | 61 | ## To run notebooks 62 | 63 | Running the notebooks enables you to execute the code and play around with any interactive features. 64 | 65 | To run: 66 | 67 | 1. [Install Jupyter Notebooks](https://jupyter.readthedocs.io/en/latest/install.html). I recommend doing this via the Anaconda/Conda method to ensure that package versions play nicely together. 68 | 2. `cd` into this repo. 69 | 3. Run `jupyter notebook` to start the Jupyter server locally on your machine. It should launch in your browser. 70 | 4. In the Jupyter browser app, navigate to the notebook you'd like to explore. 71 | 72 | 73 | --------------------------------------------------------------------------------