├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── big-data └── streaming │ ├── anomaly_detection.ipynb │ ├── nlp.ipynb │ └── online_stats.ipynb ├── data-mining ├── clustering │ ├── affinity-propagation.ipynb │ ├── hierarchical.ipynb │ ├── k-means.ipynb │ └── lab.ipynb ├── data-viz │ ├── altair.ipynb │ ├── lab.ipynb │ └── table.html ├── feature-engineering │ ├── basketball.ipynb │ └── taxis.ipynb ├── finding-structure │ ├── ca.ipynb │ ├── lab.ipynb │ ├── pca-anomaly-detection.ipynb │ ├── pca-data-analysis.ipynb │ ├── pca-faces.ipynb │ └── skyline.ipynb ├── supervised-learning │ └── lightgbm.ipynb └── text-processing │ ├── record-linkage.ipynb │ ├── regex.ipynb │ ├── shoes.ipynb │ ├── spelling-correction.ipynb │ └── tfidf.ipynb ├── data ├── .gitkeep ├── agribalyse-31-synthese.csv ├── bias-in-bios.zip ├── mens-machine-learning-competition-2019.zip ├── recipe_embeddings.csv ├── tents.csv └── wowah.zip ├── deep-learning ├── backprop.ipynb ├── brad-pitt.jpg ├── cat.jpg ├── charseq.jpeg ├── complexity.png ├── cross-val.png ├── duck_rabbit.jpg ├── intro.ipynb ├── learning-rate.png ├── mini-batch.png ├── momentum.gif ├── nlp.ipynb ├── noisette-loo.jpg ├── skip-gram.png ├── stuff.ipynb └── word2vec.png ├── house-prices-regression.ipynb ├── introduction-to-python ├── Introduction to Python.ipynb ├── my_package │ ├── __init__.py │ └── my_sequences.py └── my_sequences.py ├── online-learning └── river-workshop.ipynb ├── optimization ├── part-1.ipynb ├── part-2.ipynb └── part-3.ipynb └── project-assignments.ipynb /.gitattributes: -------------------------------------------------------------------------------- 1 | data/wowah.zip filter=lfs diff=lfs merge=lfs -text 2 | data/mens-machine-learning-competition-2019.zip filter=lfs diff=lfs merge=lfs -text 3 | data/bias-in-bios.zip filter=lfs diff=lfs merge=lfs -text 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints/ 2 | *.csv 3 | !data/*csv 4 | *.pyc 5 | introduction-to-python/*.txt 6 | introduction-to-python/*.json 7 | *.h5 8 | *.pkl 9 | .DS_Store 10 | *.txt 11 | *.json* 12 | *.db 13 | *.wal 14 | .env 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. 
The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. 
Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 
152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. 
To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. 
if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 
386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. Nothing in this Public License constitutes or may be interpreted 414 | as a limitation upon, or waiver of, any privileges and immunities 415 | that apply to the Licensor or You, including from the legal 416 | processes of any jurisdiction or authority. 417 | 418 | ======================================================================= 419 | 420 | Creative Commons is not a party to its public 421 | licenses. Notwithstanding, Creative Commons may elect to apply one of 422 | its public licenses to material it publishes and in those instances 423 | will be considered the “Licensor.” The text of the Creative Commons 424 | public licenses is dedicated to the public domain under the CC0 Public 425 | Domain Dedication. Except for the limited purpose of indicating that 426 | material is shared under a Creative Commons public license or as 427 | otherwise permitted by the Creative Commons policies published at 428 | creativecommons.org/policies, Creative Commons does not authorize the 429 | use of the trademark "Creative Commons" or any other trademark or logo 430 | of Creative Commons without its prior written consent including, 431 | without limitation, in connection with any unauthorized modifications 432 | to any of its public licenses or any other arrangements, 433 | understandings, or agreements concerning use of licensed material. For 434 | the avoidance of doubt, this paragraph does not form part of the 435 | public licenses. 436 | 437 | Creative Commons may be contacted at creativecommons.org. 438 | 439 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Creative Commons License
This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 2 | -------------------------------------------------------------------------------- /big-data/streaming/anomaly_detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "# Streaming anomaly detection" 9 | ] 10 | }, 11 | { 12 | "attachments": {}, 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "Anomaly detection is a very common ML task. Here we will consider streaming tabular data.\n", 17 | "\n", 18 | "## Streaming a dataset\n", 19 | "\n", 20 | "As an example, we'll use a credit card transactions dataset." 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "metadata": {}, 27 | "outputs": [ 28 | { 29 | "data": { 30 | "text/plain": [ 31 | "Credit card frauds.\n", 32 | "\n", 33 | "The datasets contains transactions made by credit cards in September 2013 by european\n", 34 | "cardholders. This dataset presents transactions that occurred in two days, where we have 492\n", 35 | "frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class\n", 36 | "(frauds) account for 0.172% of all transactions.\n", 37 | "\n", 38 | "It contains only numerical input variables which are the result of a PCA transformation.\n", 39 | "Unfortunately, due to confidentiality issues, we cannot provide the original features and more\n", 40 | "background information about the data. Features V1, V2, ... V28 are the principal components\n", 41 | "obtained with PCA, the only features which have not been transformed with PCA are 'Time' and\n", 42 | "'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first\n", 43 | "transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be\n", 44 | "used for example-dependant cost-senstive learning. 
Feature 'Class' is the response variable and\n", 45 | "it takes value 1 in case of fraud and 0 otherwise.\n", 46 | "\n", 47 | " Name CreditCard \n", 48 | " Task Binary classification \n", 49 | " Samples 284,807 \n", 50 | " Features 30 \n", 51 | " Sparse False \n", 52 | " Path /Users/max/river_data/CreditCard/creditcard.csv \n", 53 | " URL https://maxhalford.github.io/files/datasets/creditcardfraud.zip\n", 54 | " Size 143.84 MB \n", 55 | "Downloaded True " 56 | ] 57 | }, 58 | "execution_count": 1, 59 | "metadata": {}, 60 | "output_type": "execute_result" 61 | } 62 | ], 63 | "source": [ 64 | "from river import datasets\n", 65 | "\n", 66 | "dataset = datasets.CreditCard()\n", 67 | "dataset" 68 | ] 69 | }, 70 | { 71 | "attachments": {}, 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "**Question 🤔: in production, what would be the output of an anomaly detector on this dataset?**\n", 76 | "\n", 77 | "**Question 🤔: how would humans and the model interact with each other?**\n", 78 | "\n", 79 | "**Question 🤔: how could you exploit human feedback?**" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 14, 85 | "metadata": {}, 86 | "outputs": [ 87 | { 88 | "data": { 89 | "text/plain": [ 90 | "river.datasets.credit_card.CreditCard" 91 | ] 92 | }, 93 | "execution_count": 14, 94 | "metadata": {}, 95 | "output_type": "execute_result" 96 | } 97 | ], 98 | "source": [ 99 | "type(dataset)" 100 | ] 101 | }, 102 | { 103 | "attachments": {}, 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "The dataset is special in that it isn't loaded in memory. When you loop over it with `for`, it will stream the dataset from the disk, one row at a time." 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 12, 113 | "metadata": {}, 114 | "outputs": [ 115 | { 116 | "data": { 117 | "text/plain": [ 118 | "{'Time': 0.0,\n", 119 | " 'V1': -1.3598071336738,\n", 120 | " 'V2': -0.0727811733098497,\n", 121 | " 'V3': 2.53634673796914,\n", 122 | " 'V4': 1.37815522427443,\n", 123 | " 'V5': -0.338320769942518,\n", 124 | " 'V6': 0.462387777762292,\n", 125 | " 'V7': 0.239598554061257,\n", 126 | " 'V8': 0.0986979012610507,\n", 127 | " 'V9': 0.363786969611213,\n", 128 | " 'V10': 0.0907941719789316,\n", 129 | " 'V11': -0.551599533260813,\n", 130 | " 'V12': -0.617800855762348,\n", 131 | " 'V13': -0.991389847235408,\n", 132 | " 'V14': -0.311169353699879,\n", 133 | " 'V15': 1.46817697209427,\n", 134 | " 'V16': -0.470400525259478,\n", 135 | " 'V17': 0.207971241929242,\n", 136 | " 'V18': 0.0257905801985591,\n", 137 | " 'V19': 0.403992960255733,\n", 138 | " 'V20': 0.251412098239705,\n", 139 | " 'V21': -0.018306777944153,\n", 140 | " 'V22': 0.277837575558899,\n", 141 | " 'V23': -0.110473910188767,\n", 142 | " 'V24': 0.0669280749146731,\n", 143 | " 'V25': 0.128539358273528,\n", 144 | " 'V26': -0.189114843888824,\n", 145 | " 'V27': 0.133558376740387,\n", 146 | " 'V28': -0.0210530534538215,\n", 147 | " 'Amount': 149.62}" 148 | ] 149 | }, 150 | "execution_count": 12, 151 | "metadata": {}, 152 | "output_type": "execute_result" 153 | } 154 | ], 155 | "source": [ 156 | "for transaction, is_fraud in dataset.take(1):\n", 157 | " ...\n", 158 | "\n", 159 | "transaction" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 13, 165 | "metadata": {}, 166 | "outputs": [ 167 | { 168 | "data": { 169 | "text/plain": [ 170 | "0" 171 | ] 172 | }, 173 | "execution_count": 13, 174 | "metadata": {}, 175 | "output_type": "execute_result" 176 | } 177 | 
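[Editor's note: illustrative addition, not part of the original notebook.] Because the labels stream alongside the features, summary statistics such as the fraud rate can be computed on the fly without ever materialising the dataset. Below is a minimal sketch of one way to do it with `river.stats.Mean` (a plain counter would work just as well); the 100,000-sample cap simply mirrors the `take` calls used later in the notebook to keep the run short.

```python
# Sketch: estimate the fraud rate in a single streaming pass.
from river import datasets, stats

dataset = datasets.CreditCard()
fraud_rate = stats.Mean()  # running mean of the 0/1 labels

for _, is_fraud in dataset.take(100_000):
    fraud_rate.update(is_fraud)

print(f"Observed fraud rate: {fraud_rate.get():.4%}")
```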
], 178 | "source": [ 179 | "is_fraud" 180 | ] 181 | }, 182 | { 183 | "attachments": {}, 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "**Question 🤔: what is the fraud rate?**" 188 | ] 189 | }, 190 | { 191 | "attachments": {}, 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "## Progressive validation" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": 23, 201 | "metadata": {}, 202 | "outputs": [ 203 | { 204 | "data": { 205 | "text/plain": [ 206 | "ROCAUC: 91.49%" 207 | ] 208 | }, 209 | "execution_count": 23, 210 | "metadata": {}, 211 | "output_type": "execute_result" 212 | } 213 | ], 214 | "source": [ 215 | "from river import anomaly\n", 216 | "from river import compose\n", 217 | "from river import metrics\n", 218 | "from river import preprocessing\n", 219 | "\n", 220 | "model = compose.Pipeline(\n", 221 | " preprocessing.MinMaxScaler(),\n", 222 | " anomaly.HalfSpaceTrees(seed=42)\n", 223 | ")\n", 224 | "\n", 225 | "metric = metrics.ROCAUC()\n", 226 | "\n", 227 | "for x, y in dataset.take(100_000):\n", 228 | " score = model.score_one(x)\n", 229 | " model = model.learn_one(x)\n", 230 | " metric = metric.update(y, score)\n", 231 | "\n", 232 | "metric" 233 | ] 234 | }, 235 | { 236 | "attachments": {}, 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "**Question 🤔: what do you think of this way of evaluating a model?**" 241 | ] 242 | }, 243 | { 244 | "attachments": {}, 245 | "cell_type": "markdown", 246 | "metadata": {}, 247 | "source": [ 248 | "Normally, an anomaly detection task is tackled with an unsupervised model due to a lack of labels. Here, we have labels, which allows to evaluate the model's performance. However, we can also train a supervised model and see if it performs any better." 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 24, 254 | "metadata": {}, 255 | "outputs": [ 256 | { 257 | "data": { 258 | "text/plain": [ 259 | "ROCAUC: 89.20%" 260 | ] 261 | }, 262 | "execution_count": 24, 263 | "metadata": {}, 264 | "output_type": "execute_result" 265 | } 266 | ], 267 | "source": [ 268 | "from river import linear_model\n", 269 | "from river import preprocessing\n", 270 | "\n", 271 | "model = compose.Pipeline(\n", 272 | " preprocessing.StandardScaler(),\n", 273 | " linear_model.LogisticRegression()\n", 274 | ")\n", 275 | "\n", 276 | "metric = metrics.ROCAUC()\n", 277 | "\n", 278 | "for x, y in dataset.take(100_000):\n", 279 | " score = model.predict_proba_one(x)[True]\n", 280 | " model = model.learn_one(x, y)\n", 281 | " metric = metric.update(y, score)\n", 282 | "\n", 283 | "metric" 284 | ] 285 | }, 286 | { 287 | "attachments": {}, 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "**Question 🤔: why do you think the performance is worse?**" 292 | ] 293 | }, 294 | { 295 | "attachments": {}, 296 | "cell_type": "markdown", 297 | "metadata": {}, 298 | "source": [ 299 | "River also has an `evaluate` module with a `progressive_val_score` function." 
300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": 25, 305 | "metadata": {}, 306 | "outputs": [ 307 | { 308 | "name": "stdout", 309 | "output_type": "stream", 310 | "text": [ 311 | "[10,000] ROCAUC: 94.57% – 00:00:00 – 10.3 KB\n", 312 | "[20,000] ROCAUC: 89.21% – 00:00:01 – 10.3 KB\n", 313 | "[30,000] ROCAUC: 87.08% – 00:00:01 – 10.3 KB\n", 314 | "[40,000] ROCAUC: 87.39% – 00:00:02 – 10.3 KB\n", 315 | "[50,000] ROCAUC: 90.46% – 00:00:03 – 10.3 KB\n", 316 | "[60,000] ROCAUC: 89.19% – 00:00:03 – 10.3 KB\n", 317 | "[70,000] ROCAUC: 89.08% – 00:00:04 – 10.3 KB\n", 318 | "[80,000] ROCAUC: 89.23% – 00:00:05 – 10.3 KB\n", 319 | "[90,000] ROCAUC: 89.76% – 00:00:05 – 10.3 KB\n", 320 | "[100,000] ROCAUC: 89.20% – 00:00:06 – 10.3 KB\n" 321 | ] 322 | }, 323 | { 324 | "data": { 325 | "text/plain": [ 326 | "ROCAUC: 89.20%" 327 | ] 328 | }, 329 | "execution_count": 25, 330 | "metadata": {}, 331 | "output_type": "execute_result" 332 | } 333 | ], 334 | "source": [ 335 | "from river import evaluate\n", 336 | "\n", 337 | "evaluate.progressive_val_score(\n", 338 | " dataset.take(100_000),\n", 339 | " model=compose.Pipeline(\n", 340 | " preprocessing.StandardScaler(),\n", 341 | " linear_model.LogisticRegression()\n", 342 | " ),\n", 343 | " metric=metrics.ROCAUC(),\n", 344 | " print_every=10_000,\n", 345 | " show_time=True,\n", 346 | " show_memory=True\n", 347 | ")" 348 | ] 349 | }, 350 | { 351 | "attachments": {}, 352 | "cell_type": "markdown", 353 | "metadata": {}, 354 | "source": [ 355 | "## Improving the supervised approach" 356 | ] 357 | }, 358 | { 359 | "attachments": {}, 360 | "cell_type": "markdown", 361 | "metadata": {}, 362 | "source": [ 363 | "In an anomaly detection task, the number of positive cases is usually much lower than the amount of negatives. This penalizes many supervised classification models, because many are based on the assumption that the data is somewhat balanced. In the case of logistic regression, it's possible to adjust the loss function to increase the importance of positive samples on the learning process." 
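[Editor's note: illustrative addition, not part of the original notebook.] To make the loss-weighting idea concrete, here is a small sketch (not River's actual implementation) of how a `weight_pos` factor scales the penalty on positive examples in the binary log-loss. With `weight_pos=5`, as used in the next cell, missing a fraud costs roughly five times as much as a comparable mistake on a legitimate transaction.

```python
import math

def weighted_log_loss(y_true, p_pred, weight_pos=5.0):
    """Binary log-loss with positive examples up-weighted by `weight_pos`.
    A sketch of the idea behind optim.losses.Log(weight_pos=5)."""
    p_pred = min(max(p_pred, 1e-15), 1 - 1e-15)  # clip to avoid log(0)
    if y_true == 1:
        return -weight_pos * math.log(p_pred)
    return -math.log(1 - p_pred)

# Predicting 0.1 for an actual fraud is punished much harder than
# predicting 0.9 for an actual legitimate transaction:
print(weighted_log_loss(1, 0.1), weighted_log_loss(0, 0.9))
```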
364 | ] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "execution_count": 26, 369 | "metadata": {}, 370 | "outputs": [ 371 | { 372 | "name": "stdout", 373 | "output_type": "stream", 374 | "text": [ 375 | "[10,000] ROCAUC: 95.90% – 00:00:00 – 10.26 KB\n", 376 | "[20,000] ROCAUC: 92.71% – 00:00:01 – 10.26 KB\n", 377 | "[30,000] ROCAUC: 91.84% – 00:00:01 – 10.26 KB\n", 378 | "[40,000] ROCAUC: 92.17% – 00:00:02 – 10.26 KB\n", 379 | "[50,000] ROCAUC: 94.16% – 00:00:03 – 10.26 KB\n", 380 | "[60,000] ROCAUC: 92.55% – 00:00:03 – 10.26 KB\n", 381 | "[70,000] ROCAUC: 92.21% – 00:00:04 – 10.26 KB\n", 382 | "[80,000] ROCAUC: 92.28% – 00:00:05 – 10.26 KB\n", 383 | "[90,000] ROCAUC: 92.59% – 00:00:06 – 10.26 KB\n", 384 | "[100,000] ROCAUC: 91.87% – 00:00:06 – 10.26 KB\n" 385 | ] 386 | }, 387 | { 388 | "data": { 389 | "text/plain": [ 390 | "ROCAUC: 91.87%" 391 | ] 392 | }, 393 | "execution_count": 26, 394 | "metadata": {}, 395 | "output_type": "execute_result" 396 | } 397 | ], 398 | "source": [ 399 | "from river import optim\n", 400 | "\n", 401 | "evaluate.progressive_val_score(\n", 402 | " dataset.take(100_000),\n", 403 | " model=compose.Pipeline(\n", 404 | " preprocessing.StandardScaler(),\n", 405 | " linear_model.LogisticRegression(\n", 406 | " loss=optim.losses.Log(weight_pos=5)\n", 407 | " )\n", 408 | " ),\n", 409 | " metric=metrics.ROCAUC(),\n", 410 | " print_every=10_000,\n", 411 | " show_time=True,\n", 412 | " show_memory=True\n", 413 | ")" 414 | ] 415 | }, 416 | { 417 | "attachments": {}, 418 | "cell_type": "markdown", 419 | "metadata": {}, 420 | "source": [ 421 | "An alternative is to under-sample the majority class. The idea is that the model is being drowned with negative examples. Adjusting the class distribution can help a model. Note that one could also over-sample the minority class. However, the advantage of under-sampling is that it reduces the processing time, because less data has to be processed." 
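[Editor's note: illustrative addition, not part of the original notebook.] As a rough sketch of what under-sampling means on a stream, the generator below keeps every positive example and only a small random fraction of negatives. This is deliberately simplistic: River's `imblearn.RandomUnderSampler`, used in the next cell, instead tracks the observed class distribution and adapts its sampling rate to reach the `desired_dist` you specify, and it only applies the sampling to the learning step, so predictions are still made on every incoming example.

```python
import random

def undersample_stream(stream, keep_neg_prob=0.05, seed=42):
    """Yield every positive example, and each negative with probability
    `keep_neg_prob`. A toy illustration of streaming under-sampling."""
    rng = random.Random(seed)
    for x, y in stream:
        if y == 1 or rng.random() < keep_neg_prob:
            yield x, y
```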
422 | ] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": 27, 427 | "metadata": {}, 428 | "outputs": [ 429 | { 430 | "name": "stdout", 431 | "output_type": "stream", 432 | "text": [ 433 | "[10,000] ROCAUC: 94.55% – 00:00:00 – 14.33 KB\n", 434 | "[20,000] ROCAUC: 95.59% – 00:00:01 – 14.33 KB\n", 435 | "[30,000] ROCAUC: 95.40% – 00:00:01 – 14.33 KB\n", 436 | "[40,000] ROCAUC: 95.34% – 00:00:02 – 14.33 KB\n", 437 | "[50,000] ROCAUC: 96.72% – 00:00:02 – 14.33 KB\n", 438 | "[60,000] ROCAUC: 95.42% – 00:00:03 – 14.33 KB\n", 439 | "[70,000] ROCAUC: 95.14% – 00:00:03 – 14.33 KB\n", 440 | "[80,000] ROCAUC: 95.38% – 00:00:04 – 14.33 KB\n", 441 | "[90,000] ROCAUC: 95.72% – 00:00:05 – 14.33 KB\n", 442 | "[100,000] ROCAUC: 95.26% – 00:00:05 – 14.33 KB\n" 443 | ] 444 | }, 445 | { 446 | "data": { 447 | "text/plain": [ 448 | "ROCAUC: 95.26%" 449 | ] 450 | }, 451 | "execution_count": 27, 452 | "metadata": {}, 453 | "output_type": "execute_result" 454 | } 455 | ], 456 | "source": [ 457 | "from river import imblearn\n", 458 | "\n", 459 | "evaluate.progressive_val_score(\n", 460 | " dataset.take(100_000),\n", 461 | " model=compose.Pipeline(\n", 462 | " preprocessing.StandardScaler(),\n", 463 | " imblearn.RandomUnderSampler(\n", 464 | " classifier=linear_model.LogisticRegression(),\n", 465 | " desired_dist={0: .8, 1: .2},\n", 466 | " seed=42\n", 467 | " )\n", 468 | " ),\n", 469 | " metric=metrics.ROCAUC(),\n", 470 | " print_every=10_000,\n", 471 | " show_time=True,\n", 472 | " show_memory=True\n", 473 | ")" 474 | ] 475 | }, 476 | { 477 | "attachments": {}, 478 | "cell_type": "markdown", 479 | "metadata": {}, 480 | "source": [ 481 | "Nothing prevents us from combining the two approaches." 482 | ] 483 | }, 484 | { 485 | "cell_type": "code", 486 | "execution_count": 28, 487 | "metadata": {}, 488 | "outputs": [ 489 | { 490 | "name": "stdout", 491 | "output_type": "stream", 492 | "text": [ 493 | "[10,000] ROCAUC: 94.23% – 00:00:00 – 14.28 KB\n", 494 | "[20,000] ROCAUC: 96.77% – 00:00:01 – 14.28 KB\n", 495 | "[30,000] ROCAUC: 96.86% – 00:00:01 – 14.28 KB\n", 496 | "[40,000] ROCAUC: 96.54% – 00:00:02 – 14.28 KB\n", 497 | "[50,000] ROCAUC: 97.54% – 00:00:02 – 14.28 KB\n", 498 | "[60,000] ROCAUC: 97.15% – 00:00:03 – 14.28 KB\n", 499 | "[70,000] ROCAUC: 96.83% – 00:00:03 – 14.28 KB\n", 500 | "[80,000] ROCAUC: 96.77% – 00:00:04 – 14.28 KB\n", 501 | "[90,000] ROCAUC: 96.97% – 00:00:05 – 14.28 KB\n", 502 | "[100,000] ROCAUC: 96.49% – 00:00:05 – 14.28 KB\n" 503 | ] 504 | }, 505 | { 506 | "data": { 507 | "text/plain": [ 508 | "ROCAUC: 96.49%" 509 | ] 510 | }, 511 | "execution_count": 28, 512 | "metadata": {}, 513 | "output_type": "execute_result" 514 | } 515 | ], 516 | "source": [ 517 | "from river import imblearn\n", 518 | "\n", 519 | "evaluate.progressive_val_score(\n", 520 | " dataset.take(100_000),\n", 521 | " model=compose.Pipeline(\n", 522 | " preprocessing.StandardScaler(),\n", 523 | " imblearn.RandomUnderSampler(\n", 524 | " classifier=linear_model.LogisticRegression(\n", 525 | " loss=optim.losses.Log(weight_pos=5)\n", 526 | " ),\n", 527 | " desired_dist={0: .8, 1: .2},\n", 528 | " seed=42\n", 529 | " )\n", 530 | " ),\n", 531 | " metric=metrics.ROCAUC(),\n", 532 | " print_every=10_000,\n", 533 | " show_time=True,\n", 534 | " show_memory=True\n", 535 | ")" 536 | ] 537 | }, 538 | { 539 | "attachments": {}, 540 | "cell_type": "markdown", 541 | "metadata": {}, 542 | "source": [ 543 | "## Going further: active learning\n", 544 | "\n", 545 | "We started off with an unsupervised approach. 
We did so because we assumed we had no labels to train a supervised model. Next, we trained a supervised model, which performed with some tuning. In a real setup, labels wouldn't be available at first. One way to proceed would be to have both models running alongside. \n", 546 | "\n", 547 | "The first model would be unsupervised and rank samples according to their anomaly score. Humans would label the samples according to this ranking. These labels would then feed into the second model. A great way to prioritize this labelling effort is to use active learning. See a demo [here](https://next.databutton.com/v/13lkg6b6), with explanations [here](https://maxhalford.github.io/blog/online-active-learning-river-databutton/).\n", 548 | "\n", 549 | "**Question 🤔: if there are two models running alongside, how to determine which one's outputs should be used?**" 550 | ] 551 | } 552 | ], 553 | "metadata": { 554 | "kernelspec": { 555 | "display_name": "Python 3", 556 | "language": "python", 557 | "name": "python3" 558 | }, 559 | "language_info": { 560 | "codemirror_mode": { 561 | "name": "ipython", 562 | "version": 3 563 | }, 564 | "file_extension": ".py", 565 | "mimetype": "text/x-python", 566 | "name": "python", 567 | "nbconvert_exporter": "python", 568 | "pygments_lexer": "ipython3", 569 | "version": "3.11.0" 570 | }, 571 | "orig_nbformat": 4, 572 | "vscode": { 573 | "interpreter": { 574 | "hash": "55fbbcf542e06cc59ad76a1e0d5dc36ee204d6d2b704491656ee6b3487310122" 575 | } 576 | } 577 | }, 578 | "nbformat": 4, 579 | "nbformat_minor": 2 580 | } 581 | -------------------------------------------------------------------------------- /big-data/streaming/nlp.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "# NLP on streaming data" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 57, 14 | "metadata": {}, 15 | "outputs": [ 16 | { 17 | "data": { 18 | "text/plain": [ 19 | "3387" 20 | ] 21 | }, 22 | "execution_count": 57, 23 | "metadata": {}, 24 | "output_type": "execute_result" 25 | } 26 | ], 27 | "source": [ 28 | "from sklearn import datasets\n", 29 | "\n", 30 | "categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space']\n", 31 | "newsgroups = datasets.fetch_20newsgroups(\n", 32 | " subset='all',\n", 33 | " remove=['headers', 'footers', 'quotes'],\n", 34 | " categories=categories\n", 35 | ")\n", 36 | "stream = list(zip(\n", 37 | " newsgroups.data,\n", 38 | " (newsgroups.target_names[i] for i in newsgroups.target)\n", 39 | "))\n", 40 | "len(stream)" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 7, 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "name": "stdout", 50 | "output_type": "stream", 51 | "text": [ 52 | "My point is that you set up your views as the only way to believe. Saying \n", 53 | "that all eveil in this world is caused by atheism is ridiculous and \n", 54 | "counterproductive to dialogue in this newsgroups. I see in your posts a \n", 55 | "spirit of condemnation of the atheists in this newsgroup bacause they don'\n", 56 | "t believe exactly as you do. If you're here to try to convert the atheists \n", 57 | "here, you're failing miserably. 
Who wants to be in position of constantly \n", 58 | "defending themselves agaist insulting attacks, like you seem to like to do?!\n", 59 | "I'm sorry you're so blind that you didn't get the messgae in the quote, \n", 60 | "everyone else has seemed to.\n", 61 | "alt.atheism\n" 62 | ] 63 | } 64 | ], 65 | "source": [ 66 | "text, label = stream[0]\n", 67 | "print(text)\n", 68 | "print(label)" 69 | ] 70 | }, 71 | { 72 | "attachments": {}, 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "**Question 🤔: compared to the [anomaly detection notebook](anomaly_detection.ipynb), what is the practical difference with this dataset?**" 77 | ] 78 | }, 79 | { 80 | "attachments": {}, 81 | "cell_type": "markdown", 82 | "metadata": {}, 83 | "source": [ 84 | "## Bag of words extraction" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 8, 90 | "metadata": {}, 91 | "outputs": [ 92 | { 93 | "data": { 94 | "text/plain": [ 95 | "{'my': 1,\n", 96 | " 'point': 1,\n", 97 | " 'is': 3,\n", 98 | " 'that': 3,\n", 99 | " 'you': 7,\n", 100 | " 'set': 1,\n", 101 | " 'up': 1,\n", 102 | " 'your': 2,\n", 103 | " 'views': 1,\n", 104 | " 'as': 2,\n", 105 | " 'the': 5,\n", 106 | " 'only': 1,\n", 107 | " 'way': 1,\n", 108 | " 'to': 8,\n", 109 | " 'believe': 2,\n", 110 | " 'saying': 1,\n", 111 | " 'all': 1,\n", 112 | " 'eveil': 1,\n", 113 | " 'in': 6,\n", 114 | " 'this': 3,\n", 115 | " 'world': 1,\n", 116 | " 'caused': 1,\n", 117 | " 'by': 1,\n", 118 | " 'atheism': 1,\n", 119 | " 'ridiculous': 1,\n", 120 | " 'and': 1,\n", 121 | " 'counterproductive': 1,\n", 122 | " 'dialogue': 1,\n", 123 | " 'newsgroups': 1,\n", 124 | " 'see': 1,\n", 125 | " 'posts': 1,\n", 126 | " 'spirit': 1,\n", 127 | " 'of': 3,\n", 128 | " 'condemnation': 1,\n", 129 | " 'atheists': 2,\n", 130 | " 'newsgroup': 1,\n", 131 | " 'bacause': 1,\n", 132 | " 'they': 1,\n", 133 | " 'don': 1,\n", 134 | " 'exactly': 1,\n", 135 | " 'do': 2,\n", 136 | " 'if': 1,\n", 137 | " 're': 3,\n", 138 | " 'here': 2,\n", 139 | " 'try': 1,\n", 140 | " 'convert': 1,\n", 141 | " 'failing': 1,\n", 142 | " 'miserably': 1,\n", 143 | " 'who': 1,\n", 144 | " 'wants': 1,\n", 145 | " 'be': 1,\n", 146 | " 'position': 1,\n", 147 | " 'constantly': 1,\n", 148 | " 'defending': 1,\n", 149 | " 'themselves': 1,\n", 150 | " 'agaist': 1,\n", 151 | " 'insulting': 1,\n", 152 | " 'attacks': 1,\n", 153 | " 'like': 2,\n", 154 | " 'seem': 1,\n", 155 | " 'sorry': 1,\n", 156 | " 'so': 1,\n", 157 | " 'blind': 1,\n", 158 | " 'didn': 1,\n", 159 | " 'get': 1,\n", 160 | " 'messgae': 1,\n", 161 | " 'quote': 1,\n", 162 | " 'everyone': 1,\n", 163 | " 'else': 1,\n", 164 | " 'has': 1,\n", 165 | " 'seemed': 1}" 166 | ] 167 | }, 168 | "execution_count": 8, 169 | "metadata": {}, 170 | "output_type": "execute_result" 171 | } 172 | ], 173 | "source": [ 174 | "from river import feature_extraction\n", 175 | "\n", 176 | "vectorizer = feature_extraction.BagOfWords()\n", 177 | "\n", 178 | "for text, label in stream:\n", 179 | " vectorizer = vectorizer.learn_one(text)\n", 180 | " vector = vectorizer.transform_one(text)\n", 181 | " break\n", 182 | "\n", 183 | "vector" 184 | ] 185 | }, 186 | { 187 | "attachments": {}, 188 | "cell_type": "markdown", 189 | "metadata": {}, 190 | "source": [ 191 | "**Question 🤔: what do you notice about these tokens?**" 192 | ] 193 | }, 194 | { 195 | "attachments": {}, 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "## TF-IDF" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 9, 205 | "metadata": {}, 206 | 
"outputs": [ 207 | { 208 | "data": { 209 | "text/plain": [ 210 | "{'my': 0.05754353376484363,\n", 211 | " 'point': 0.05754353376484363,\n", 212 | " 'is': 0.1726306012945309,\n", 213 | " 'that': 0.1726306012945309,\n", 214 | " 'you': 0.4028047363539054,\n", 215 | " 'set': 0.05754353376484363,\n", 216 | " 'up': 0.05754353376484363,\n", 217 | " 'your': 0.11508706752968725,\n", 218 | " 'views': 0.05754353376484363,\n", 219 | " 'as': 0.11508706752968725,\n", 220 | " 'the': 0.2877176688242182,\n", 221 | " 'only': 0.05754353376484363,\n", 222 | " 'way': 0.05754353376484363,\n", 223 | " 'to': 0.460348270118749,\n", 224 | " 'believe': 0.11508706752968725,\n", 225 | " 'saying': 0.05754353376484363,\n", 226 | " 'all': 0.05754353376484363,\n", 227 | " 'eveil': 0.05754353376484363,\n", 228 | " 'in': 0.3452612025890618,\n", 229 | " 'this': 0.1726306012945309,\n", 230 | " 'world': 0.05754353376484363,\n", 231 | " 'caused': 0.05754353376484363,\n", 232 | " 'by': 0.05754353376484363,\n", 233 | " 'atheism': 0.05754353376484363,\n", 234 | " 'ridiculous': 0.05754353376484363,\n", 235 | " 'and': 0.05754353376484363,\n", 236 | " 'counterproductive': 0.05754353376484363,\n", 237 | " 'dialogue': 0.05754353376484363,\n", 238 | " 'newsgroups': 0.05754353376484363,\n", 239 | " 'see': 0.05754353376484363,\n", 240 | " 'posts': 0.05754353376484363,\n", 241 | " 'spirit': 0.05754353376484363,\n", 242 | " 'of': 0.1726306012945309,\n", 243 | " 'condemnation': 0.05754353376484363,\n", 244 | " 'atheists': 0.11508706752968725,\n", 245 | " 'newsgroup': 0.05754353376484363,\n", 246 | " 'bacause': 0.05754353376484363,\n", 247 | " 'they': 0.05754353376484363,\n", 248 | " 'don': 0.05754353376484363,\n", 249 | " 'exactly': 0.05754353376484363,\n", 250 | " 'do': 0.11508706752968725,\n", 251 | " 'if': 0.05754353376484363,\n", 252 | " 're': 0.1726306012945309,\n", 253 | " 'here': 0.11508706752968725,\n", 254 | " 'try': 0.05754353376484363,\n", 255 | " 'convert': 0.05754353376484363,\n", 256 | " 'failing': 0.05754353376484363,\n", 257 | " 'miserably': 0.05754353376484363,\n", 258 | " 'who': 0.05754353376484363,\n", 259 | " 'wants': 0.05754353376484363,\n", 260 | " 'be': 0.05754353376484363,\n", 261 | " 'position': 0.05754353376484363,\n", 262 | " 'constantly': 0.05754353376484363,\n", 263 | " 'defending': 0.05754353376484363,\n", 264 | " 'themselves': 0.05754353376484363,\n", 265 | " 'agaist': 0.05754353376484363,\n", 266 | " 'insulting': 0.05754353376484363,\n", 267 | " 'attacks': 0.05754353376484363,\n", 268 | " 'like': 0.11508706752968725,\n", 269 | " 'seem': 0.05754353376484363,\n", 270 | " 'sorry': 0.05754353376484363,\n", 271 | " 'so': 0.05754353376484363,\n", 272 | " 'blind': 0.05754353376484363,\n", 273 | " 'didn': 0.05754353376484363,\n", 274 | " 'get': 0.05754353376484363,\n", 275 | " 'messgae': 0.05754353376484363,\n", 276 | " 'quote': 0.05754353376484363,\n", 277 | " 'everyone': 0.05754353376484363,\n", 278 | " 'else': 0.05754353376484363,\n", 279 | " 'has': 0.05754353376484363,\n", 280 | " 'seemed': 0.05754353376484363}" 281 | ] 282 | }, 283 | "execution_count": 9, 284 | "metadata": {}, 285 | "output_type": "execute_result" 286 | } 287 | ], 288 | "source": [ 289 | "from river import feature_extraction\n", 290 | "\n", 291 | "vectorizer = feature_extraction.TFIDF()\n", 292 | "\n", 293 | "for text, label in stream:\n", 294 | " vectorizer = vectorizer.learn_one(text)\n", 295 | " vector = vectorizer.transform_one(text)\n", 296 | " break\n", 297 | "\n", 298 | "vector" 299 | ] 300 | }, 301 | { 302 | "attachments": {}, 303 | 
"cell_type": "markdown", 304 | "metadata": {}, 305 | "source": [ 306 | "**Question 🤔: knowing how TF-IDF works, what difference does its online variant have?**" 307 | ] 308 | }, 309 | { 310 | "attachments": {}, 311 | "cell_type": "markdown", 312 | "metadata": {}, 313 | "source": [ 314 | "## Progressive validation" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 10, 320 | "metadata": {}, 321 | "outputs": [ 322 | { 323 | "name": "stdout", 324 | "output_type": "stream", 325 | "text": [ 326 | "[1,000] Accuracy: 68.47%, MacroF1: 67.29%\n", 327 | "[2,000] Accuracy: 72.49%, MacroF1: 71.03%\n", 328 | "[3,000] Accuracy: 74.66%, MacroF1: 73.16%\n", 329 | "[3,387] Accuracy: 74.96%, MacroF1: 73.49%\n" 330 | ] 331 | }, 332 | { 333 | "data": { 334 | "text/plain": [ 335 | "Accuracy: 74.96%, MacroF1: 73.49%" 336 | ] 337 | }, 338 | "execution_count": 10, 339 | "metadata": {}, 340 | "output_type": "execute_result" 341 | } 342 | ], 343 | "source": [ 344 | "from river import evaluate\n", 345 | "from river import metrics\n", 346 | "from river import naive_bayes\n", 347 | "\n", 348 | "model = (\n", 349 | " feature_extraction.BagOfWords() |\n", 350 | " naive_bayes.MultinomialNB()\n", 351 | ")\n", 352 | "\n", 353 | "metric = metrics.Accuracy() + metrics.MacroF1()\n", 354 | "\n", 355 | "evaluate.progressive_val_score(stream, model, metric, print_every=1000)" 356 | ] 357 | }, 358 | { 359 | "attachments": {}, 360 | "cell_type": "markdown", 361 | "metadata": {}, 362 | "source": [ 363 | "**Question 🤔: what makes the comparison with a batch approach difficult?**" 364 | ] 365 | }, 366 | { 367 | "attachments": {}, 368 | "cell_type": "markdown", 369 | "metadata": {}, 370 | "source": [ 371 | "## Mini-batching" 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": 53, 377 | "metadata": {}, 378 | "outputs": [ 379 | { 380 | "name": "stdout", 381 | "output_type": "stream", 382 | "text": [ 383 | "1000\n", 384 | "1000\n", 385 | "1000\n", 386 | "387\n" 387 | ] 388 | } 389 | ], 390 | "source": [ 391 | "def batch(stream, size):\n", 392 | " batch = []\n", 393 | " for x, y in stream:\n", 394 | " batch.append((x, y))\n", 395 | " if len(batch) == size:\n", 396 | " yield batch\n", 397 | " batch = []\n", 398 | " if batch:\n", 399 | " yield batch\n", 400 | "\n", 401 | "for mini_batch in batch(stream, size=1000):\n", 402 | " print(len(mini_batch))" 403 | ] 404 | }, 405 | { 406 | "cell_type": "code", 407 | "execution_count": 74, 408 | "metadata": {}, 409 | "outputs": [ 410 | { 411 | "ename": "ValueError", 412 | "evalue": "X has 17444 features, but GaussianNB is expecting 19939 features as input.", 413 | "output_type": "error", 414 | "traceback": [ 415 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 416 | "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", 417 | "Cell \u001b[0;32mIn [74], line 10\u001b[0m\n\u001b[1;32m 8\u001b[0m X, y \u001b[39m=\u001b[39m \u001b[39mzip\u001b[39m(\u001b[39m*\u001b[39mmini_batch)\n\u001b[1;32m 9\u001b[0m X \u001b[39m=\u001b[39m vectorizer\u001b[39m.\u001b[39mfit_transform(X)\u001b[39m.\u001b[39mtoarray()\n\u001b[0;32m---> 10\u001b[0m model\u001b[39m.\u001b[39;49mpartial_fit(X, y, classes\u001b[39m=\u001b[39;49mcategories)\n", 418 | "File \u001b[0;32m~/.pyenv/versions/3.11.0/lib/python3.11/site-packages/sklearn/naive_bayes.py:389\u001b[0m, in \u001b[0;36mGaussianNB.partial_fit\u001b[0;34m(self, X, y, classes, sample_weight)\u001b[0m\n\u001b[1;32m 348\u001b[0m \u001b[39m\"\"\"Incremental 
fit on a batch of samples.\u001b[39;00m\n\u001b[1;32m 349\u001b[0m \n\u001b[1;32m 350\u001b[0m \u001b[39mThis method is expected to be called several times consecutively\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[39m Returns the instance itself.\u001b[39;00m\n\u001b[1;32m 386\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[1;32m 387\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_validate_params()\n\u001b[0;32m--> 389\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_partial_fit(\n\u001b[1;32m 390\u001b[0m X, y, classes, _refit\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m, sample_weight\u001b[39m=\u001b[39;49msample_weight\n\u001b[1;32m 391\u001b[0m )\n", 419 | "File \u001b[0;32m~/.pyenv/versions/3.11.0/lib/python3.11/site-packages/sklearn/naive_bayes.py:426\u001b[0m, in \u001b[0;36mGaussianNB._partial_fit\u001b[0;34m(self, X, y, classes, _refit, sample_weight)\u001b[0m\n\u001b[1;32m 423\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mclasses_ \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 425\u001b[0m first_call \u001b[39m=\u001b[39m _check_partial_fit_first_call(\u001b[39mself\u001b[39m, classes)\n\u001b[0;32m--> 426\u001b[0m X, y \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_validate_data(X, y, reset\u001b[39m=\u001b[39;49mfirst_call)\n\u001b[1;32m 427\u001b[0m \u001b[39mif\u001b[39;00m sample_weight \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 428\u001b[0m sample_weight \u001b[39m=\u001b[39m _check_sample_weight(sample_weight, X)\n", 420 | "File \u001b[0;32m~/.pyenv/versions/3.11.0/lib/python3.11/site-packages/sklearn/base.py:558\u001b[0m, in \u001b[0;36mBaseEstimator._validate_data\u001b[0;34m(self, X, y, reset, validate_separately, **check_params)\u001b[0m\n\u001b[1;32m 555\u001b[0m out \u001b[39m=\u001b[39m X, y\n\u001b[1;32m 557\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m no_val_X \u001b[39mand\u001b[39;00m check_params\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39mensure_2d\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mTrue\u001b[39;00m):\n\u001b[0;32m--> 558\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_check_n_features(X, reset\u001b[39m=\u001b[39;49mreset)\n\u001b[1;32m 560\u001b[0m \u001b[39mreturn\u001b[39;00m out\n", 421 | "File \u001b[0;32m~/.pyenv/versions/3.11.0/lib/python3.11/site-packages/sklearn/base.py:359\u001b[0m, in \u001b[0;36mBaseEstimator._check_n_features\u001b[0;34m(self, X, reset)\u001b[0m\n\u001b[1;32m 356\u001b[0m \u001b[39mreturn\u001b[39;00m\n\u001b[1;32m 358\u001b[0m \u001b[39mif\u001b[39;00m n_features \u001b[39m!=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mn_features_in_:\n\u001b[0;32m--> 359\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m 360\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mX has \u001b[39m\u001b[39m{\u001b[39;00mn_features\u001b[39m}\u001b[39;00m\u001b[39m features, but \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__class__\u001b[39m\u001b[39m.\u001b[39m\u001b[39m__name__\u001b[39m\u001b[39m}\u001b[39;00m\u001b[39m \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 361\u001b[0m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mis expecting \u001b[39m\u001b[39m{\u001b[39;00m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mn_features_in_\u001b[39m}\u001b[39;00m\u001b[39m features as 
input.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 362\u001b[0m )\n", 422 | "\u001b[0;31mValueError\u001b[0m: X has 17444 features, but GaussianNB is expecting 19939 features as input." 423 | ] 424 | } 425 | ], 426 | "source": [ 427 | "from sklearn import feature_extraction\n", 428 | "from sklearn import naive_bayes\n", 429 | "\n", 430 | "vectorizer = feature_extraction.text.CountVectorizer()\n", 431 | "model = naive_bayes.GaussianNB()\n", 432 | "\n", 433 | "for mini_batch in batch(stream, size=1000):\n", 434 | " X, y = zip(*mini_batch)\n", 435 | " X = vectorizer.fit_transform(X).toarray()\n", 436 | " model.partial_fit(X, y, classes=categories)" 437 | ] 438 | }, 439 | { 440 | "attachments": {}, 441 | "cell_type": "markdown", 442 | "metadata": {}, 443 | "source": [ 444 | "**Question 🤔: what is the issue?**" 445 | ] 446 | }, 447 | { 448 | "attachments": {}, 449 | "cell_type": "markdown", 450 | "metadata": {}, 451 | "source": [ 452 | "A common way of dealing with a varying number of features is called the [\"hashing trick\"](https://www.wikiwand.com/en/Feature_hashing). scikit-learn has a `HashingVectorizer`, which is a combination of [`CountVectorizer`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) and [`FeatureHasher`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.FeatureHasher.html)." 453 | ] 454 | }, 455 | { 456 | "cell_type": "code", 457 | "execution_count": 73, 458 | "metadata": {}, 459 | "outputs": [], 460 | "source": [ 461 | "from sklearn import pipeline\n", 462 | "\n", 463 | "from sklearn import feature_extraction\n", 464 | "from sklearn import naive_bayes\n", 465 | "\n", 466 | "vectorizer = feature_extraction.text.HashingVectorizer(n_features=2000)\n", 467 | "model = naive_bayes.GaussianNB()\n", 468 | "\n", 469 | "for mini_batch in batch(stream, size=1000):\n", 470 | " X, y = zip(*mini_batch)\n", 471 | " X = vectorizer.fit_transform(X).toarray()\n", 472 | " model.partial_fit(X, y, classes=categories)" 473 | ] 474 | } 475 | ], 476 | "metadata": { 477 | "kernelspec": { 478 | "display_name": "Python 3", 479 | "language": "python", 480 | "name": "python3" 481 | }, 482 | "language_info": { 483 | "codemirror_mode": { 484 | "name": "ipython", 485 | "version": 3 486 | }, 487 | "file_extension": ".py", 488 | "mimetype": "text/x-python", 489 | "name": "python", 490 | "nbconvert_exporter": "python", 491 | "pygments_lexer": "ipython3", 492 | "version": "3.11.0" 493 | }, 494 | "orig_nbformat": 4, 495 | "vscode": { 496 | "interpreter": { 497 | "hash": "55fbbcf542e06cc59ad76a1e0d5dc36ee204d6d2b704491656ee6b3487310122" 498 | } 499 | } 500 | }, 501 | "nbformat": 4, 502 | "nbformat_minor": 2 503 | } 504 | -------------------------------------------------------------------------------- /data-mining/clustering/lab.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Clustering lab" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "!git lfs pull --include ../../data/wowah.zip\n" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": {}, 23 | "outputs": [ 24 | { 25 | "data": { 26 | "text/html": [ 27 | "
\n", 28 | "\n", 41 | "\n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | " \n", 57 | " \n", 58 | " \n", 59 | " \n", 60 | " \n", 61 | " \n", 62 | " \n", 63 | " \n", 64 | " \n", 65 | " \n", 66 | " \n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | "
charlevelracecharclasszoneguildtimestamp
0594251OrcRogueOrgrimmar16501/01/08 00:02:04
1654949OrcHunterDurotar-101/01/08 00:02:04
26532514OrcWarriorGhostlands-101/01/08 00:02:04
36549018OrcHunterGhostlands-101/01/08 00:02:04
4228860OrcHunterHellfire Peninsula-101/01/08 00:02:09
........................
108267298676680Blood ElfDeath KnightHalls of Lightning10112/31/08 23:50:18
108267308649777Blood ElfDeath KnightThe Storm Peaks35812/31/08 23:50:18
108267313489380Blood ElfDeath KnightThe Storm Peaks18912/31/08 23:50:18
108267328688180Blood ElfDeath KnightDragonblight47812/31/08 23:50:18
108267338645780Blood ElfDeath KnightDragonblight20412/31/08 23:50:18
\n", 167 | "

10826734 rows × 7 columns

\n", 168 | "
" 169 | ], 170 | "text/plain": [ 171 | " char level race charclass zone guild \\\n", 172 | "0 59425 1 Orc Rogue Orgrimmar 165 \n", 173 | "1 65494 9 Orc Hunter Durotar -1 \n", 174 | "2 65325 14 Orc Warrior Ghostlands -1 \n", 175 | "3 65490 18 Orc Hunter Ghostlands -1 \n", 176 | "4 2288 60 Orc Hunter Hellfire Peninsula -1 \n", 177 | "... ... ... ... ... ... ... \n", 178 | "10826729 86766 80 Blood Elf Death Knight Halls of Lightning 101 \n", 179 | "10826730 86497 77 Blood Elf Death Knight The Storm Peaks 358 \n", 180 | "10826731 34893 80 Blood Elf Death Knight The Storm Peaks 189 \n", 181 | "10826732 86881 80 Blood Elf Death Knight Dragonblight 478 \n", 182 | "10826733 86457 80 Blood Elf Death Knight Dragonblight 204 \n", 183 | "\n", 184 | " timestamp \n", 185 | "0 01/01/08 00:02:04 \n", 186 | "1 01/01/08 00:02:04 \n", 187 | "2 01/01/08 00:02:04 \n", 188 | "3 01/01/08 00:02:04 \n", 189 | "4 01/01/08 00:02:09 \n", 190 | "... ... \n", 191 | "10826729 12/31/08 23:50:18 \n", 192 | "10826730 12/31/08 23:50:18 \n", 193 | "10826731 12/31/08 23:50:18 \n", 194 | "10826732 12/31/08 23:50:18 \n", 195 | "10826733 12/31/08 23:50:18 \n", 196 | "\n", 197 | "[10826734 rows x 7 columns]" 198 | ] 199 | }, 200 | "execution_count": 2, 201 | "metadata": {}, 202 | "output_type": "execute_result" 203 | } 204 | ], 205 | "source": [ 206 | "import zipfile\n", 207 | "import pandas as pd\n", 208 | "\n", 209 | "with zipfile.ZipFile('../../data/wowah.zip') as z:\n", 210 | " with z.open('wowah_data.csv') as f:\n", 211 | " wowah = pd.read_csv(f)\n", 212 | "wowah\n" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "metadata": {}, 218 | "source": [ 219 | "Instructions: be creative! Use the tools we've learned so far to explore the data and find interesting patterns. You can use the clustering methods we've learned so far, or any other methods you find in scikit-learn.\n", 220 | "\n", 221 | "You will likely have to process the data in some way to get it into a form that can be used by the clustering algorithms. Indeed, there are several samples per user, so you will have to find a way to aggregate the data. Here are a couple of suggestions:\n", 222 | "\n", 223 | "1. Look at the last sample per character, and try to cluster characters based on the basic available properties.\n", 224 | "2. Add some aggregate features: the total time played, the most common time of day, the speed at which the player levelled up, etc.\n", 225 | "3. Split the data into sessions, and try to cluster the sessions themselves. Transforming events into sessions is a good skill to have, as it is a common problem in data science: you have a log of events, and you want to identify meaningful sessions of activity." 
226 | ] 227 | } 228 | ], 229 | "metadata": { 230 | "kernelspec": { 231 | "display_name": "Python 3", 232 | "language": "python", 233 | "name": "python3" 234 | }, 235 | "language_info": { 236 | "codemirror_mode": { 237 | "name": "ipython", 238 | "version": 3 239 | }, 240 | "file_extension": ".py", 241 | "mimetype": "text/x-python", 242 | "name": "python", 243 | "nbconvert_exporter": "python", 244 | "pygments_lexer": "ipython3", 245 | "version": "3.11.0" 246 | } 247 | }, 248 | "nbformat": 4, 249 | "nbformat_minor": 2 250 | } 251 | -------------------------------------------------------------------------------- /data-mining/data-viz/altair.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "https://github.com/datascience-course/2023-datascience-lectures/blob/main/12-practical-data-visualization/12-practical_visualization.ipynb\n" 10 | ] 11 | } 12 | ], 13 | "metadata": { 14 | "language_info": { 15 | "name": "python" 16 | } 17 | }, 18 | "nbformat": 4, 19 | "nbformat_minor": 2 20 | } 21 | -------------------------------------------------------------------------------- /data-mining/data-viz/lab.ipynb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/data-mining/data-viz/lab.ipynb -------------------------------------------------------------------------------- /data-mining/data-viz/table.html: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/data-mining/data-viz/table.html -------------------------------------------------------------------------------- /data-mining/finding-structure/lab.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Lab: analyzing tents data\n", 8 | "\n", 9 | "## Data extraction\n", 10 | "\n", 11 | "Let's fetch data from Decathlon. We'll do it in two phases. First, we'll make a list of all the tents they have. Then, we'll fetch the data for each tent.\n", 12 | "\n", 13 | "Let's start by listing the tents they have." 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 3, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "import json\n", 23 | "import re\n", 24 | "import bs4\n", 25 | "import requests\n", 26 | "\n", 27 | "url = 'https://www.decathlon.fr/tous-les-sports/camping-bivouac/tentes-et-abris'\n", 28 | "response = requests.get(url)\n", 29 | "soup = bs4.BeautifulSoup(response.content, 'html.parser')\n", 30 | "script_tag = soup.find('script', id='__dkt')\n", 31 | "raw_json = re.search(r'{(.+)}', script_tag.string).group(0)\n", 32 | "data = json.loads(raw_json)\n" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "Here's a tool explore the JSON: https://jsonhero.io/j/VTrWj5vx53Ys\n", 40 | "\n", 41 | "The data is quite deeply nested, but it's not difficult to extract:" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 4, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "# Note: if the following doesn't work, try modifying the index. 
It's possible that Decathlon has changed the structure of the page.\n", 51 | "idx = 6\n", 52 | "tents = {\n", 53 | " item['webLabel']: f\"https://www.decathlon.fr/{item['url']}\"\n", 54 | " for item in data['_ctx']['data'][idx]['data']['blocks']['items']\n", 55 | "}\n" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 5, 61 | "metadata": {}, 62 | "outputs": [ 63 | { 64 | "data": { 65 | "text/plain": [ 66 | "{'Séjour à arceaux de camping - Arpenaz Base - 6 Personnes': 'https://www.decathlon.fr/p/sejour-a-arceaux-de-camping-arpenaz-base-6-personnes/_/R-p-157674',\n", 67 | " 'Tente de camping - 2 SECONDS - 2 places': 'https://www.decathlon.fr/p/tente-de-camping-2-seconds-2-places/_/R-p-340082',\n", 68 | " 'Tente dôme de trekking - 1 place - MT900': 'https://www.decathlon.fr/p/tente-dome-de-trekking-1-place-mt900/_/R-p-305777',\n", 69 | " 'Tente de camping - MH100 - 2 places': 'https://www.decathlon.fr/p/tente-de-camping-mh100-2-places/_/R-p-303295',\n", 70 | " 'Housse de compression de trekking imperméable - 20 Litres': 'https://www.decathlon.fr/p/housse-de-compression-de-trekking-impermeable-20-litres/_/R-p-311987',\n", 71 | " 'Lot de 2 housses de rangement de trekking - Demi-lune imperméables - 2x7L': 'https://www.decathlon.fr/p/lot-de-2-housses-de-rangement-de-trekking-demi-lune-impermeables-2x7l/_/R-p-309832',\n", 72 | " 'Tente dôme de trekking - 3 places - MT900': 'https://www.decathlon.fr/p/tente-dome-de-trekking-3-places-mt900/_/R-p-301559',\n", 73 | " 'Social Bivvy L Pêche de la carpe': 'https://www.decathlon.fr/p/social-bivvy-l-peche-de-la-carpe/_/R-p-327052',\n", 74 | " 'Lot de 2 housses de rangement de trekking - Demi-lune imperméables - 2x15L': 'https://www.decathlon.fr/p/lot-de-2-housses-de-rangement-de-trekking-demi-lune-impermeables-2x15l/_/R-p-309853',\n", 75 | " 'Tente de camping - 2 SECONDS XL - 3 places - Fresh & Black': 'https://www.decathlon.fr/p/tente-de-camping-2-seconds-xl-3-places-fresh-black/_/R-p-172537',\n", 76 | " 'Tente tarp de trekking - 1 place - MT900 Minimal Editions - Undyed': 'https://www.decathlon.fr/p/tente-tarp-de-trekking-1-place-mt900-minimal-editions-undyed/_/R-p-343262',\n", 77 | " 'Tente de camping - 2 SECONDS - 3 places - Fresh & Black': 'https://www.decathlon.fr/p/tente-de-camping-2-seconds-3-places-fresh-black/_/R-p-142651',\n", 78 | " 'Tente de camping - MH100 XL - 3 places - Fresh & Black': 'https://www.decathlon.fr/p/tente-de-camping-mh100-xl-3-places-fresh-black/_/R-p-331796',\n", 79 | " 'Abri de camping instantané 4 places - Base Easy 4P UltraFresh': 'https://www.decathlon.fr/p/abri-de-camping-instantane-4-places-base-easy-4p-ultrafresh/_/R-p-342385',\n", 80 | " 'Tente de camping - MH100 - 3 places - Fresh & Black': 'https://www.decathlon.fr/p/tente-de-camping-mh100-3-places-fresh-black/_/R-p-313085',\n", 81 | " 'Tente de camping - MH100 - 4 places': 'https://www.decathlon.fr/p/tente-de-camping-mh100-4-places/_/R-p-343524',\n", 82 | " 'Tente de camping - MH100 - 3 places': 'https://www.decathlon.fr/p/tente-de-camping-mh100-3-places/_/R-p-308317',\n", 83 | " 'TARP ABRI CAMPING MULTIFONCTION': 'https://www.decathlon.fr/p/tarp-abri-camping-multifonction/_/R-p-307422',\n", 84 | " '6 PIQUETS DE TENTE - SOLS DURS': 'https://www.decathlon.fr/p/6-piquets-de-tente-sols-durs/_/R-p-105665',\n", 85 | " 'Lot de 2 housses de rangement universelles - 2x10L': 'https://www.decathlon.fr/p/lot-de-2-housses-de-rangement-universelles-2x10l/_/R-p-191091',\n", 86 | " 'PELLE-PIOCHE PLIANTE POUR LE CAMPING': 
'https://www.decathlon.fr/p/pelle-pioche-pliante-pour-le-camping/_/R-p-302835',\n", 87 | " 'MAILLET DE CAMPING': 'https://www.decathlon.fr/p/maillet-de-camping/_/R-p-147151',\n", 88 | " 'Abri à arceaux de camping - 1 place - Arpenaz 1P': 'https://www.decathlon.fr/p/abri-a-arceaux-de-camping-1-place-arpenaz-1p/_/R-p-336276',\n", 89 | " 'MAILLET DE CAMPING TERRAIN DUR': 'https://www.decathlon.fr/p/maillet-de-camping-terrain-dur/_/R-p-343552',\n", 90 | " 'KIT BALAI - PELLE POUR LE CAMPING': 'https://www.decathlon.fr/p/kit-balai-pelle-pour-le-camping/_/R-p-334078',\n", 91 | " '10 CORNIÈRES DE TENTE - SOLS MOUS OU MEUBLES': 'https://www.decathlon.fr/p/10-cornieres-de-tente-sols-mous-ou-meubles/_/R-p-3735',\n", 92 | " 'Piquets alu non-anodisés - MT500 - 15 g (x5)': 'https://www.decathlon.fr/p/piquets-alu-non-anodises-mt500-15-g-x5/_/R-p-311281',\n", 93 | " 'Tente bulle de camping - AirSeconds Skyview Polycoton - 2 Personnes - 1 Chambre': 'https://www.decathlon.fr/p/tente-bulle-de-camping-airseconds-skyview-polycoton-2-personnes-1-chambre/_/R-p-342404',\n", 94 | " 'TENTE DE TOIT MH500 FRESH & BLACK 2P': 'https://www.decathlon.fr/p/tente-de-toit-mh500-fresh-black-2p/_/R-p-331938',\n", 95 | " 'Tente dôme de trekking - 2 places - MT900': 'https://www.decathlon.fr/p/tente-dome-de-trekking-2-places-mt900/_/R-p-301558',\n", 96 | " 'TENTE DE TOIT GONFLABLE MH900 FRESH & BLACK 2 PERSONNES': 'https://www.decathlon.fr/p/tente-de-toit-gonflable-mh900-fresh-black-2-personnes/_/R-p-344868',\n", 97 | " 'Tente à arceaux de camping - Arpenaz 4.1 - 4 Personnes - 1 Chambre': 'https://www.decathlon.fr/p/tente-a-arceaux-de-camping-arpenaz-4-1-4-personnes-1-chambre/_/R-p-4123',\n", 98 | " 'Séjour à arceaux de camping - Arpenaz Base Fresh - 10 Personnes': 'https://www.decathlon.fr/p/sejour-a-arceaux-de-camping-arpenaz-base-fresh-10-personnes/_/R-p-308178',\n", 99 | " 'Tente dôme de trekking - 2 places - MT500 Fresh & Black': 'https://www.decathlon.fr/p/tente-dome-de-trekking-2-places-mt500-fresh-black/_/R-p-311346',\n", 100 | " 'Tente gonflable de camping - Air Seconds 6.3 F&B - 6 Personnes - 3 Chambres': 'https://www.decathlon.fr/p/tente-gonflable-de-camping-air-seconds-6-3-f-b-6-personnes-3-chambres/_/R-p-324957',\n", 101 | " 'Tente à arceaux de camping - Arpenaz 4 - 4 Personnes - 1 Chambre': 'https://www.decathlon.fr/p/tente-a-arceaux-de-camping-arpenaz-4-4-personnes-1-chambre/_/R-p-105650',\n", 102 | " 'Tente de camping - 2 SECONDS EASY - 2 places - Fresh & Black': 'https://www.decathlon.fr/p/tente-de-camping-2-seconds-easy-2-places-fresh-black/_/R-p-308355',\n", 103 | " 'Tente à arceaux de camping - Arpenaz 4.1 F&B - 4 Personnes - 1 Chambre': 'https://www.decathlon.fr/p/tente-a-arceaux-de-camping-arpenaz-4-1-f-b-4-personnes-1-chambre/_/R-p-177332',\n", 104 | " 'Tente gonflable de camping - Air Seconds 5.2 F&B - 5 Personnes - 2 Chambres': 'https://www.decathlon.fr/p/tente-gonflable-de-camping-air-seconds-5-2-f-b-5-personnes-2-chambres/_/R-p-324972',\n", 105 | " 'Tente gonflable de camping - Air Seconds 4.2 F&B - 4 Personnes - 2 Chambres': 'https://www.decathlon.fr/p/tente-gonflable-de-camping-air-seconds-4-2-f-b-4-personnes-2-chambres/_/R-p-157654'}" 106 | ] 107 | }, 108 | "execution_count": 5, 109 | "metadata": {}, 110 | "output_type": "execute_result" 111 | } 112 | ], 113 | "source": [ 114 | "tents\n" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "metadata": {}, 120 | "source": [ 121 | "So now we have a URL for each tent. Let's grab some data for the first tent." 
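A side note on the `idx = 6` lookup used above: it is brittle, since the block order can change whenever Decathlon updates the page. Here is a hedged sketch of a more defensive lookup (the `'blocks'`/`'items'`/`'webLabel'` keys are taken from the structure observed above; the helper itself is hypothetical):

```python
# Hypothetical helper: scan the blocks for the one that actually holds the
# product list instead of hard-coding its position in the list.
def find_items(listing_data):
    for block in listing_data['_ctx']['data']:
        try:
            items = block['data']['blocks']['items']
            if items and 'webLabel' in items[0]:
                return items
        except (KeyError, TypeError):
            continue
    raise ValueError('no product block found; the page structure may have changed')

tents = {
    item['webLabel']: f"https://www.decathlon.fr/{item['url']}"
    for item in find_items(data)
}
```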
122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": 9, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "url = tents['Tente de camping - MH100 - 2 places']\n", 131 | "response = requests.get(url)\n", 132 | "soup = bs4.BeautifulSoup(response.content, 'html.parser')\n", 133 | "script_tag = soup.find('script', id='__dkt')\n", 134 | "raw_json = re.search(r'{(.+)}', script_tag.string).group(0)\n", 135 | "data = json.loads(raw_json)\n" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "This JSON can be explored here: https://jsonhero.io/j/QeKMElLudiaA" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 24, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "{'rating': 4.45,\n", 154 | " 'price': 30,\n", 155 | " 'weight': '2.6',\n", 156 | " 'composition': 'Tissu principal\\n75% Polyester, 25% Polyéthylène\\nArceau\\n100% Fibre de verre',\n", 157 | " 'packed_size': 'Dimensions de la housse : 58cm x 16cm x 16cm / 15 L. Poids : 2,6 kg',\n", 158 | " 'size': 'Chambre 130 X 210 cm. (2 couchages de 65cm) Hauteur max. utile : 107 cm'}" 159 | ] 160 | }, 161 | "execution_count": 24, 162 | "metadata": {}, 163 | "output_type": "execute_result" 164 | } 165 | ], 166 | "source": [ 167 | "benefits = {\n", 168 | " b['label']: b\n", 169 | " for b in data['_ctx']['data'][10]['data']['benefits']\n", 170 | "}\n", 171 | "{\n", 172 | " 'rating': data['_ctx']['data'][4]['data']['reviews']['notation'],\n", 173 | " 'price': data['_ctx']['data'][4]['data']['models'][0]['price'],\n", 174 | " 'weight': data['_ctx']['data'][4]['data']['models'][0]['grossWeight'],\n", 175 | " 'composition': data['_ctx']['data'][4]['data']['models'][0]['composition'],\n", 176 | " 'packed_size': benefits['Facilité de transport']['value'],\n", 177 | " 'size': benefits['Habitabilité']['value']\n", 178 | "}\n" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "Ok great, we can extract data for a single tent. Now let's do it for all of them!\n", 186 | "\n", 187 | "First, let's list all the tents." 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 14, 193 | "metadata": {}, 194 | "outputs": [ 195 | { 196 | "name": "stderr", 197 | "output_type": "stream", 198 | "text": [ 199 | " 0%| | 0/11 [00:00 1\u001b[0m tents_df \u001b[39m=\u001b[39m pd\u001b[39m.\u001b[39mDataFrame\u001b[39m.\u001b[39mfrom_dict(tents_info, orient\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mindex\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[1;32m 2\u001b[0m tents_df\u001b[39m.\u001b[39misnull()\u001b[39m.\u001b[39msum()\n", 374 | "\u001b[0;31mNameError\u001b[0m: name 'tents_info' is not defined" 375 | ] 376 | } 377 | ], 378 | "source": [ 379 | "tents_df = pd.DataFrame.from_dict(tents_info, orient='index')\n", 380 | "tents_df.isnull().sum()\n" 381 | ] 382 | }, 383 | { 384 | "cell_type": "code", 385 | "execution_count": 35, 386 | "metadata": {}, 387 | "outputs": [ 388 | { 389 | "data": { 390 | "text/plain": [ 391 | "68" 392 | ] 393 | }, 394 | "execution_count": 35, 395 | "metadata": {}, 396 | "output_type": "execute_result" 397 | } 398 | ], 399 | "source": [ 400 | "len(tents_df[~tents_df.isnull().any(axis=1)])\n" 401 | ] 402 | }, 403 | { 404 | "cell_type": "code", 405 | "execution_count": 78, 406 | "metadata": {}, 407 | "outputs": [ 408 | { 409 | "data": { 410 | "text/html": [ 411 | "
\n", 412 | "\n", 425 | "\n", 426 | " \n", 427 | " \n", 428 | " \n", 429 | " \n", 430 | " \n", 431 | " \n", 432 | " \n", 433 | " \n", 434 | " \n", 435 | " \n", 436 | " \n", 437 | " \n", 438 | " \n", 439 | " \n", 440 | " \n", 441 | " \n", 442 | " \n", 443 | " \n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | " \n", 471 | " \n", 472 | " \n", 473 | " \n", 474 | " \n", 475 | " \n", 476 | " \n", 477 | " \n", 478 | " \n", 479 | " \n", 480 | " \n", 481 | " \n", 482 | " \n", 483 | " \n", 484 | "
ratingpriceweightcompositionpacked_sizesize
Tente à arceaux de camping - Arpenaz 4.1 - 4 Personnes - 1 Chambre4.13120.010.200Tissu principal\\n100% Polyester\\nArceau\\n100% ...Housse rectangulaire | 60 x 24 x 24 cm | 35 li...Chambre : 240 x 210 cm | Séjour debout : 5 m² ...
Tente de camping - MH100 - 2 places4.4730.02.600Tissu principal\\n75% Polyester, 25% Polyéthylè...Dimensions de la housse : 58cm x 16cm x 16cm /...Chambre 130 X 210 cm. (2 couchages de 65cm) Ha...
Tente de camping - 2 SECONDS - 3 places4.60100.03.562Double toit\\n100% Polyester\\nChambre intérieur...Dimension de la housse : Ø77x9 cm / 41,9 L. Po...Chambre 180 X 210 cm.\\nHauteur max. utile : 10...
Tente de camping - 2 SECONDS - 2 places4.4065.02.900Tissu principal\\n75% Polyester, 25% Polyéthylè...Dimension de la housse : Ø65x7 cm / 23,2 L. Po...Chambre 120 x 210 cm.\\nHauteur max utile : 102...
Séjour à arceaux de camping - Arpenaz Base - 6 Personnes4.20120.07.950Arceau\\n100% Fibre de verre\\nTissu principal\\n...Housse cylindrique | 57 x 18 cm | 18 litres | ...Hauteur : 2,15 m | Surface au sol : 6,25 m² | ...
\n", 485 | "
" 486 | ], 487 | "text/plain": [ 488 | " rating price weight \\\n", 489 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... 4.13 120.0 10.200 \n", 490 | "Tente de camping - MH100 - 2 places 4.47 30.0 2.600 \n", 491 | "Tente de camping - 2 SECONDS - 3 places 4.60 100.0 3.562 \n", 492 | "Tente de camping - 2 SECONDS - 2 places 4.40 65.0 2.900 \n", 493 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... 4.20 120.0 7.950 \n", 494 | "\n", 495 | " composition \\\n", 496 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Tissu principal\\n100% Polyester\\nArceau\\n100% ... \n", 497 | "Tente de camping - MH100 - 2 places Tissu principal\\n75% Polyester, 25% Polyéthylè... \n", 498 | "Tente de camping - 2 SECONDS - 3 places Double toit\\n100% Polyester\\nChambre intérieur... \n", 499 | "Tente de camping - 2 SECONDS - 2 places Tissu principal\\n75% Polyester, 25% Polyéthylè... \n", 500 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Arceau\\n100% Fibre de verre\\nTissu principal\\n... \n", 501 | "\n", 502 | " packed_size \\\n", 503 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Housse rectangulaire | 60 x 24 x 24 cm | 35 li... \n", 504 | "Tente de camping - MH100 - 2 places Dimensions de la housse : 58cm x 16cm x 16cm /... \n", 505 | "Tente de camping - 2 SECONDS - 3 places Dimension de la housse : Ø77x9 cm / 41,9 L. Po... \n", 506 | "Tente de camping - 2 SECONDS - 2 places Dimension de la housse : Ø65x7 cm / 23,2 L. Po... \n", 507 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Housse cylindrique | 57 x 18 cm | 18 litres | ... \n", 508 | "\n", 509 | " size \n", 510 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Chambre : 240 x 210 cm | Séjour debout : 5 m² ... \n", 511 | "Tente de camping - MH100 - 2 places Chambre 130 X 210 cm. (2 couchages de 65cm) Ha... \n", 512 | "Tente de camping - 2 SECONDS - 3 places Chambre 180 X 210 cm.\\nHauteur max. utile : 10... \n", 513 | "Tente de camping - 2 SECONDS - 2 places Chambre 120 x 210 cm.\\nHauteur max utile : 102... \n", 514 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Hauteur : 2,15 m | Surface au sol : 6,25 m² | ... " 515 | ] 516 | }, 517 | "execution_count": 78, 518 | "metadata": {}, 519 | "output_type": "execute_result" 520 | } 521 | ], 522 | "source": [ 523 | "tents_df.to_csv('../../data/tents.csv')\n", 524 | "tents_df.head()\n" 525 | ] 526 | }, 527 | { 528 | "cell_type": "markdown", 529 | "metadata": {}, 530 | "source": [ 531 | "☝️ If you were not able to extract the data, you can use the already extracted data:" 532 | ] 533 | }, 534 | { 535 | "cell_type": "code", 536 | "execution_count": 1, 537 | "metadata": {}, 538 | "outputs": [ 539 | { 540 | "data": { 541 | "text/html": [ 542 | "
\n", 543 | "\n", 556 | "\n", 557 | " \n", 558 | " \n", 559 | " \n", 560 | " \n", 561 | " \n", 562 | " \n", 563 | " \n", 564 | " \n", 565 | " \n", 566 | " \n", 567 | " \n", 568 | " \n", 569 | " \n", 570 | " \n", 571 | " \n", 572 | " \n", 573 | " \n", 574 | " \n", 575 | " \n", 576 | " \n", 577 | " \n", 578 | " \n", 579 | " \n", 580 | " \n", 581 | " \n", 582 | " \n", 583 | " \n", 584 | " \n", 585 | " \n", 586 | " \n", 587 | " \n", 588 | " \n", 589 | " \n", 590 | " \n", 591 | " \n", 592 | " \n", 593 | " \n", 594 | " \n", 595 | " \n", 596 | " \n", 597 | " \n", 598 | " \n", 599 | " \n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | "
ratingpriceweightcompositionpacked_sizesize
Tente à arceaux de camping - Arpenaz 4.1 - 4 Personnes - 1 Chambre4.13120.010.200Tissu principal\\n100% Polyester\\nArceau\\n100% ...Housse rectangulaire | 60 x 24 x 24 cm | 35 li...Chambre : 240 x 210 cm | Séjour debout : 5 m² ...
Tente de camping - MH100 - 2 places4.4730.02.600Tissu principal\\n75% Polyester, 25% Polyéthylè...Dimensions de la housse : 58cm x 16cm x 16cm /...Chambre 130 X 210 cm. (2 couchages de 65cm) Ha...
Tente de camping - 2 SECONDS - 3 places4.60100.03.562Double toit\\n100% Polyester\\nChambre intérieur...Dimension de la housse : Ø77x9 cm / 41,9 L. Po...Chambre 180 X 210 cm.\\nHauteur max. utile : 10...
Tente de camping - 2 SECONDS - 2 places4.4065.02.900Tissu principal\\n75% Polyester, 25% Polyéthylè...Dimension de la housse : Ø65x7 cm / 23,2 L. Po...Chambre 120 x 210 cm.\\nHauteur max utile : 102...
Séjour à arceaux de camping - Arpenaz Base - 6 Personnes4.20120.07.950Arceau\\n100% Fibre de verre\\nTissu principal\\n...Housse cylindrique | 57 x 18 cm | 18 litres | ...Hauteur : 2,15 m | Surface au sol : 6,25 m² | ...
\n", 616 | "
" 617 | ], 618 | "text/plain": [ 619 | " rating price weight \\\n", 620 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... 4.13 120.0 10.200 \n", 621 | "Tente de camping - MH100 - 2 places 4.47 30.0 2.600 \n", 622 | "Tente de camping - 2 SECONDS - 3 places 4.60 100.0 3.562 \n", 623 | "Tente de camping - 2 SECONDS - 2 places 4.40 65.0 2.900 \n", 624 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... 4.20 120.0 7.950 \n", 625 | "\n", 626 | " composition \\\n", 627 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Tissu principal\\n100% Polyester\\nArceau\\n100% ... \n", 628 | "Tente de camping - MH100 - 2 places Tissu principal\\n75% Polyester, 25% Polyéthylè... \n", 629 | "Tente de camping - 2 SECONDS - 3 places Double toit\\n100% Polyester\\nChambre intérieur... \n", 630 | "Tente de camping - 2 SECONDS - 2 places Tissu principal\\n75% Polyester, 25% Polyéthylè... \n", 631 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Arceau\\n100% Fibre de verre\\nTissu principal\\n... \n", 632 | "\n", 633 | " packed_size \\\n", 634 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Housse rectangulaire | 60 x 24 x 24 cm | 35 li... \n", 635 | "Tente de camping - MH100 - 2 places Dimensions de la housse : 58cm x 16cm x 16cm /... \n", 636 | "Tente de camping - 2 SECONDS - 3 places Dimension de la housse : Ø77x9 cm / 41,9 L. Po... \n", 637 | "Tente de camping - 2 SECONDS - 2 places Dimension de la housse : Ø65x7 cm / 23,2 L. Po... \n", 638 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Housse cylindrique | 57 x 18 cm | 18 litres | ... \n", 639 | "\n", 640 | " size \n", 641 | "Tente à arceaux de camping - Arpenaz 4.1 - 4 Pe... Chambre : 240 x 210 cm | Séjour debout : 5 m² ... \n", 642 | "Tente de camping - MH100 - 2 places Chambre 130 X 210 cm. (2 couchages de 65cm) Ha... \n", 643 | "Tente de camping - 2 SECONDS - 3 places Chambre 180 X 210 cm.\\nHauteur max. utile : 10... \n", 644 | "Tente de camping - 2 SECONDS - 2 places Chambre 120 x 210 cm.\\nHauteur max utile : 102... \n", 645 | "Séjour à arceaux de camping - Arpenaz Base - 6 ... Hauteur : 2,15 m | Surface au sol : 6,25 m² | ... " 646 | ] 647 | }, 648 | "execution_count": 1, 649 | "metadata": {}, 650 | "output_type": "execute_result" 651 | } 652 | ], 653 | "source": [ 654 | "import pandas as pd\n", 655 | "\n", 656 | "tents_df = pd.read_csv('../../data/tents.csv', index_col=0)\n", 657 | "tents_df.head()\n" 658 | ] 659 | }, 660 | { 661 | "cell_type": "code", 662 | "execution_count": 16, 663 | "metadata": {}, 664 | "outputs": [ 665 | { 666 | "data": { 667 | "text/plain": [ 668 | "70" 669 | ] 670 | }, 671 | "execution_count": 16, 672 | "metadata": {}, 673 | "output_type": "execute_result" 674 | } 675 | ], 676 | "source": [ 677 | "tents_df = tents_df[~tents_df.isnull().any(axis=1)]\n", 678 | "len(tents_df)\n" 679 | ] 680 | }, 681 | { 682 | "cell_type": "code", 683 | "execution_count": 18, 684 | "metadata": {}, 685 | "outputs": [ 686 | { 687 | "data": { 688 | "text/plain": [ 689 | "70" 690 | ] 691 | }, 692 | "execution_count": 18, 693 | "metadata": {}, 694 | "output_type": "execute_result" 695 | } 696 | ], 697 | "source": [ 698 | "len(tents_df)\n" 699 | ] 700 | }, 701 | { 702 | "cell_type": "markdown", 703 | "metadata": {}, 704 | "source": [ 705 | "## Data analysis\n", 706 | "\n", 707 | "This is where the lab starts:\n", 708 | "\n", 709 | "1. Do a bit of analysis on the numeric fields\n", 710 | "2. Run a skyline.\n", 711 | "3. Run a PCA.\n", 712 | "4. 
Difficult: extract the size and/or the packed of the tent, and then rerun the same analysis\n", 713 | "5. Bonus: build a simple regression model" 714 | ] 715 | } 716 | ], 717 | "metadata": { 718 | "kernelspec": { 719 | "display_name": "Python 3", 720 | "language": "python", 721 | "name": "python3" 722 | }, 723 | "language_info": { 724 | "codemirror_mode": { 725 | "name": "ipython", 726 | "version": 3 727 | }, 728 | "file_extension": ".py", 729 | "mimetype": "text/x-python", 730 | "name": "python", 731 | "nbconvert_exporter": "python", 732 | "pygments_lexer": "ipython3", 733 | "version": "3.11.0" 734 | }, 735 | "orig_nbformat": 4 736 | }, 737 | "nbformat": 4, 738 | "nbformat_minor": 2 739 | } 740 | -------------------------------------------------------------------------------- /data-mining/text-processing/regex.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Regex\n", 8 | "\n", 9 | "Regex stands for *regular expressions*. The regex language is a powerful tool for text processing. It's as important for text as SQL is for databases.\n", 10 | "\n", 11 | "Here we'll give a brief introduction to how regex works in Python. But it's important to understand is implemented and available in many other languages/tools.\n", 12 | "\n", 13 | "Regular expressions are a way to match strings. They are very useful to find (and replace) text, to extract structured information such as e-mails, phone numbers, etc., or for cleaning up text that was entered by humans." 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "The basic feature is to search for a pattern in a string:" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## Searching" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 3, 33 | "metadata": {}, 34 | "outputs": [ 35 | { 36 | "data": { 37 | "text/plain": [ 38 | "" 39 | ] 40 | }, 41 | "execution_count": 3, 42 | "metadata": {}, 43 | "output_type": "execute_result" 44 | } 45 | ], 46 | "source": [ 47 | "import re\n", 48 | "\n", 49 | "pattern = r'\\d\\d\\d\\d-\\d\\d-\\d\\d'\n", 50 | "text = 'Kurt Gödel was born on 1906-04-28 in Brno'\n", 51 | "\n", 52 | "match = re.search(pattern, text)\n", 53 | "match\n" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 7, 59 | "metadata": {}, 60 | "outputs": [ 61 | { 62 | "data": { 63 | "text/plain": [ 64 | "'1906-04-28'" 65 | ] 66 | }, 67 | "execution_count": 7, 68 | "metadata": {}, 69 | "output_type": "execute_result" 70 | } 71 | ], 72 | "source": [ 73 | "match.group()\n" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "By default, the `search` function returns the first match:" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 4, 86 | "metadata": {}, 87 | "outputs": [ 88 | { 89 | "data": { 90 | "text/plain": [ 91 | "" 92 | ] 93 | }, 94 | "execution_count": 4, 95 | "metadata": {}, 96 | "output_type": "execute_result" 97 | } 98 | ], 99 | "source": [ 100 | "re.search(\n", 101 | " r'\\d\\d\\d\\d-\\d\\d-\\d\\d',\n", 102 | " 'Kurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ'\n", 103 | ")\n" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "You can also search for multiple matches:" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | 
"execution_count": 6, 116 | "metadata": {}, 117 | "outputs": [ 118 | { 119 | "data": { 120 | "text/plain": [ 121 | "['1906-04-28', '1978-01-14']" 122 | ] 123 | }, 124 | "execution_count": 6, 125 | "metadata": {}, 126 | "output_type": "execute_result" 127 | } 128 | ], 129 | "source": [ 130 | "re.findall(\n", 131 | " r'\\d\\d\\d\\d-\\d\\d-\\d\\d',\n", 132 | " 'Kurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ'\n", 133 | ")\n" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "The `match` function only matches at the beginning of the string:" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "## Matching" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 9, 153 | "metadata": {}, 154 | "outputs": [ 155 | { 156 | "ename": "AssertionError", 157 | "evalue": "", 158 | "output_type": "error", 159 | "traceback": [ 160 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 161 | "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", 162 | "\u001b[1;32m/Users/max/projects/data-science-tutorials/data-mining/text-processing/regex.ipynb Cell 10\u001b[0m line \u001b[0;36m1\n\u001b[0;32m----> 1\u001b[0m \u001b[39massert\u001b[39;00m re\u001b[39m.\u001b[39mmatch(\n\u001b[1;32m 2\u001b[0m \u001b[39mr\u001b[39m\u001b[39m'\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md-\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md-\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m\\\u001b[39m\u001b[39md\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[1;32m 3\u001b[0m \u001b[39m'\u001b[39m\u001b[39mKurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ\u001b[39m\u001b[39m'\u001b[39m\n\u001b[1;32m 4\u001b[0m )\n", 163 | "\u001b[0;31mAssertionError\u001b[0m: " 164 | ] 165 | } 166 | ], 167 | "source": [ 168 | "assert re.match(\n", 169 | " r'\\d\\d\\d\\d-\\d\\d-\\d\\d',\n", 170 | " 'Kurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ'\n", 171 | ")\n" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 10, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "assert re.match(\n", 181 | " r'\\d\\d\\d\\d-\\d\\d-\\d\\d',\n", 182 | " '1978-01-14'\n", 183 | ")\n" 184 | ] 185 | }, 186 | { 187 | "cell_type": "markdown", 188 | "metadata": {}, 189 | "source": [ 190 | "## Examples" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "Repetition." 
198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 17, 203 | "metadata": {}, 204 | "outputs": [ 205 | { 206 | "data": { 207 | "text/plain": [ 208 | "" 209 | ] 210 | }, 211 | "execution_count": 17, 212 | "metadata": {}, 213 | "output_type": "execute_result" 214 | } 215 | ], 216 | "source": [ 217 | "re.search(r'\\d+', 'abc123def')\n" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 12, 223 | "metadata": {}, 224 | "outputs": [ 225 | { 226 | "data": { 227 | "text/plain": [ 228 | "" 229 | ] 230 | }, 231 | "execution_count": 12, 232 | "metadata": {}, 233 | "output_type": "execute_result" 234 | } 235 | ], 236 | "source": [ 237 | "re.search(r'\\w+', 'abc123def')\n" 238 | ] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "metadata": {}, 243 | "source": [ 244 | "Character classes." 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": 29, 250 | "metadata": {}, 251 | "outputs": [ 252 | { 253 | "data": { 254 | "text/plain": [ 255 | "" 256 | ] 257 | }, 258 | "execution_count": 29, 259 | "metadata": {}, 260 | "output_type": "execute_result" 261 | } 262 | ], 263 | "source": [ 264 | "re.search(r'[abcdefghijklmnopqrstuvwxyz]+', 'abc123def')\n" 265 | ] 266 | }, 267 | { 268 | "cell_type": "markdown", 269 | "metadata": {}, 270 | "source": [ 271 | "Ranges." 272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": 30, 277 | "metadata": {}, 278 | "outputs": [ 279 | { 280 | "data": { 281 | "text/plain": [ 282 | "" 283 | ] 284 | }, 285 | "execution_count": 30, 286 | "metadata": {}, 287 | "output_type": "execute_result" 288 | } 289 | ], 290 | "source": [ 291 | "re.search(r'[a-z]+', 'abc123def')\n" 292 | ] 293 | }, 294 | { 295 | "cell_type": "markdown", 296 | "metadata": {}, 297 | "source": [ 298 | "Groups." 299 | ] 300 | }, 301 | { 302 | "cell_type": "code", 303 | "execution_count": 21, 304 | "metadata": {}, 305 | "outputs": [ 306 | { 307 | "data": { 308 | "text/plain": [ 309 | "'123def'" 310 | ] 311 | }, 312 | "execution_count": 21, 313 | "metadata": {}, 314 | "output_type": "execute_result" 315 | } 316 | ], 317 | "source": [ 318 | "re.search(r'\\d+([a-z]+)', 'abc123def').group(0)\n" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": 22, 324 | "metadata": {}, 325 | "outputs": [ 326 | { 327 | "data": { 328 | "text/plain": [ 329 | "'def'" 330 | ] 331 | }, 332 | "execution_count": 22, 333 | "metadata": {}, 334 | "output_type": "execute_result" 335 | } 336 | ], 337 | "source": [ 338 | "re.search(r'\\d+([a-z]+)', 'abc123def').group(1)\n" 339 | ] 340 | }, 341 | { 342 | "cell_type": "markdown", 343 | "metadata": {}, 344 | "source": [ 345 | "Quantifiers." 
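Quantifiers such as `{n}` and `{m,n}` are a shorthand for repetition. As a small illustration, the date pattern from the beginning of this notebook can be rewritten more compactly:

```python
import re

# \d{4}-\d{2}-\d{2} is equivalent to the earlier \d\d\d\d-\d\d-\d\d.
re.findall(
    r'\d{4}-\d{2}-\d{2}',
    'Kurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ'
)
# ['1906-04-28', '1978-01-14']
```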
346 | ] 347 | }, 348 | { 349 | "cell_type": "code", 350 | "execution_count": 32, 351 | "metadata": {}, 352 | "outputs": [ 353 | { 354 | "data": { 355 | "text/plain": [ 356 | "" 357 | ] 358 | }, 359 | "execution_count": 32, 360 | "metadata": {}, 361 | "output_type": "execute_result" 362 | } 363 | ], 364 | "source": [ 365 | "re.search('\\d{2}', 'This sentence contains a number 42')\n" 366 | ] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": 37, 371 | "metadata": {}, 372 | "outputs": [ 373 | { 374 | "data": { 375 | "text/plain": [ 376 | "['420', '420', '0', '42']" 377 | ] 378 | }, 379 | "execution_count": 37, 380 | "metadata": {}, 381 | "output_type": "execute_result" 382 | } 383 | ], 384 | "source": [ 385 | "re.findall(r'\\d{1,3}', 'This sentence contains 420, 4200, 42')\n" 386 | ] 387 | }, 388 | { 389 | "cell_type": "markdown", 390 | "metadata": {}, 391 | "source": [ 392 | "Word boundaries." 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 38, 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "data": { 402 | "text/plain": [ 403 | "['420', '420', '42']" 404 | ] 405 | }, 406 | "execution_count": 38, 407 | "metadata": {}, 408 | "output_type": "execute_result" 409 | } 410 | ], 411 | "source": [ 412 | "re.findall(r'\\b\\d{1,3}', 'This sentence contains 420, 4200, 42')\n" 413 | ] 414 | }, 415 | { 416 | "cell_type": "markdown", 417 | "metadata": {}, 418 | "source": [ 419 | "Group naming." 420 | ] 421 | }, 422 | { 423 | "cell_type": "code", 424 | "execution_count": 26, 425 | "metadata": {}, 426 | "outputs": [ 427 | { 428 | "data": { 429 | "text/plain": [ 430 | "" 431 | ] 432 | }, 433 | "execution_count": 26, 434 | "metadata": {}, 435 | "output_type": "execute_result" 436 | } 437 | ], 438 | "source": [ 439 | "match = re.search(\n", 440 | " r\"\"\"\n", 441 | " (?P\\d)\n", 442 | " \\s\n", 443 | " (?P\\d\\d)\n", 444 | " \\s\n", 445 | " (?P\\d\\d)\n", 446 | " \\s\n", 447 | " (?P\\d\\d)\n", 448 | " \\s\n", 449 | " (?P\\d\\d\\d)\n", 450 | " \\s\n", 451 | " (?P\\d\\d\\d)\n", 452 | " \\s\n", 453 | " (?P\\d\\d)\n", 454 | " \"\"\",\n", 455 | " '1 94 08 99 135 241 51',\n", 456 | " re.VERBOSE\n", 457 | ")\n", 458 | "match\n" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": 39, 464 | "metadata": {}, 465 | "outputs": [ 466 | { 467 | "data": { 468 | "text/plain": [ 469 | "{'gender': '1',\n", 470 | " 'annee': '94',\n", 471 | " 'mois': '08',\n", 472 | " 'departement': '99',\n", 473 | " 'commune': '135',\n", 474 | " 'ordre': '241',\n", 475 | " 'cle': '51'}" 476 | ] 477 | }, 478 | "execution_count": 39, 479 | "metadata": {}, 480 | "output_type": "execute_result" 481 | } 482 | ], 483 | "source": [ 484 | "match.groupdict()\n" 485 | ] 486 | }, 487 | { 488 | "cell_type": "markdown", 489 | "metadata": {}, 490 | "source": [ 491 | "The regex language is very powerful. It's worth learning it well. You can find a good tutorial [here](https://docs.python.org/3/howto/regex.html). Nowadays, with LLMs, it's really easy to generate complex regexes." 
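The introduction mentioned finding *and replacing* text; `re.sub` is the function for that. A short sketch reusing the date pattern and groups seen above:

```python
import re

text = 'Kurt Gödel was born on 1906-04-28 in Brno, and died on 1978-01-14 in Princeton, NJ'

# Backreferences \1, \2, \3 reuse the captured groups to reorder each date.
re.sub(r'(\d{4})-(\d{2})-(\d{2})', r'\3/\2/\1', text)
# 'Kurt Gödel was born on 28/04/1906 in Brno, and died on 14/01/1978 in Princeton, NJ'
```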
492 | ] 493 | } 494 | ], 495 | "metadata": { 496 | "kernelspec": { 497 | "display_name": "Python 3", 498 | "language": "python", 499 | "name": "python3" 500 | }, 501 | "language_info": { 502 | "codemirror_mode": { 503 | "name": "ipython", 504 | "version": 3 505 | }, 506 | "file_extension": ".py", 507 | "mimetype": "text/x-python", 508 | "name": "python", 509 | "nbconvert_exporter": "python", 510 | "pygments_lexer": "ipython3", 511 | "version": "3.11.0" 512 | } 513 | }, 514 | "nbformat": 4, 515 | "nbformat_minor": 2 516 | } 517 | -------------------------------------------------------------------------------- /data-mining/text-processing/spelling-correction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Spelling correction" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "This is adapted from Peter Norvig's [post](http://norvig.com/spell-correct.html) on spelling correction." 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import requests\n", 24 | "\n", 25 | "big = requests.get('http://norvig.com/big.txt').text\n" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 4, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "The Project Gutenberg EBook of The Adventures of Sherlock Holmes\n", 38 | "by Sir Arthur Conan Doyle\n", 39 | "(#15 in our series by Sir Arthur Conan Doyle)\n", 40 | "\n", 41 | "Copyright laws are changing all over the world. Be sure to check the\n", 42 | "copyright laws for your country before downloading or redistributing\n", 43 | "this or any other Project Gutenberg eBook.\n", 44 | "\n", 45 | "This header should be the first thing seen when viewing this Project\n", 46 | "Gutenberg file. Please do not remove it. Do not change or edit the\n", 47 | "header without written permission.\n", 48 | "\n", 49 | "Please read the \"legal small print,\" and other information about the\n", 50 | "eBook and Project Gutenberg at the bottom of this file. Included is\n", 51 | "important information about your specific rights and restrictions in\n", 52 | "how the file may be used. 
You can also find out about how to make a\n", 53 | "donation to Project Gutenberg, and how to get involved.\n", 54 | "\n", 55 | "\n", 56 | "**Welcome To The World of Free Plain Vanilla Electronic Texts**\n", 57 | "\n", 58 | "**eBooks Readable By Both Humans and By Computers, Since 1971**\n", 59 | "\n", 60 | "*****These eBooks Were Prepared By Thousan\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "print(big[:1000])\n" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 50, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "import re\n", 75 | "from collections import Counter\n", 76 | "\n", 77 | "def words(text):\n", 78 | " return re.findall(r'\\w+', text.lower())\n", 79 | "\n", 80 | "WORDS = Counter(words(big))\n", 81 | "\n", 82 | "def correct(word):\n", 83 | " \"Most probable spelling correction for word.\"\n", 84 | " return max(candidates(word), key=P)\n", 85 | "\n", 86 | "def P(word, N=sum(WORDS.values())):\n", 87 | " \"Probability of `word`.\"\n", 88 | " return WORDS[word] / N\n", 89 | "\n", 90 | "def candidates(word):\n", 91 | " \"Generate possible spelling corrections for word.\"\n", 92 | " return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])\n", 93 | "\n", 94 | "def known(words):\n", 95 | " \"The subset of `words` that appear in the dictionary of WORDS.\"\n", 96 | " return set(w for w in words if w in WORDS)\n", 97 | "\n", 98 | "def edits1(word):\n", 99 | " \"All edits that are one edit away from `word`.\"\n", 100 | " letters = 'abcdefghijklmnopqrstuvwxyz'\n", 101 | " splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n", 102 | " deletes = [L + R[1:] for L, R in splits if R]\n", 103 | " transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]\n", 104 | " replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\n", 105 | " inserts = [L + c + R for L, R in splits for c in letters]\n", 106 | " return set(deletes + transposes + replaces + inserts)\n", 107 | "\n", 108 | "def edits2(word):\n", 109 | " \"All edits that are two edits away from `word`.\"\n", 110 | " return (e2 for e1 in edits1(word) for e2 in edits1(e1))\n" 111 | ] 112 | }, 113 | { 114 | "cell_type": "markdown", 115 | "metadata": {}, 116 | "source": [ 117 | "Some examples of spelling correction:" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 51, 123 | "metadata": {}, 124 | "outputs": [ 125 | { 126 | "data": { 127 | "text/plain": [ 128 | "'spelling'" 129 | ] 130 | }, 131 | "execution_count": 51, 132 | "metadata": {}, 133 | "output_type": "execute_result" 134 | } 135 | ], 136 | "source": [ 137 | "correct('speling')\n" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 52, 143 | "metadata": {}, 144 | "outputs": [ 145 | { 146 | "data": { 147 | "text/plain": [ 148 | "'corrected'" 149 | ] 150 | }, 151 | "execution_count": 52, 152 | "metadata": {}, 153 | "output_type": "execute_result" 154 | } 155 | ], 156 | "source": [ 157 | "correct('korrectud')\n" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "How does this work? Well, the first idea to generate candidates. We can do this by generating all possible words that are one edit away from the original word. We then filter these candidates by only keeping the ones that are in our vocabulary. Next, we rank these candidates by some score. In this case, the score is frequency of the word. We then pick the candidate with the highest score." 
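To get a feel for how large the candidate space is: for a word of length $n$, `edits1` builds $n$ deletions, $n-1$ transpositions, $26n$ replacements and $26(n+1)$ insertions, i.e. $54n+25$ strings before deduplication (the resulting set is slightly smaller). A small sketch using the functions defined above:

```python
word = 'candl'
n = len(word)

print(54 * n + 25)               # upper bound on the 1-edit neighbourhood
print(len(edits1(word)))         # distinct 1-edit candidates
print(len(known(edits1(word))))  # ...of which only a few are actual words
```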
165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 53, 170 | "metadata": {}, 171 | "outputs": [ 172 | { 173 | "data": { 174 | "text/plain": [ 175 | "['canda',\n", 176 | " 'cnadl',\n", 177 | " 'candjl',\n", 178 | " 'candzl',\n", 179 | " 'candls',\n", 180 | " 'candln',\n", 181 | " 'canfl',\n", 182 | " 'lcandl',\n", 183 | " 'cacndl',\n", 184 | " 'cabndl']" 185 | ] 186 | }, 187 | "execution_count": 53, 188 | "metadata": {}, 189 | "output_type": "execute_result" 190 | } 191 | ], 192 | "source": [ 193 | "import random\n", 194 | "\n", 195 | "random.choices(list(edits1('candl')), k=10)\n" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "We can also consider words that are two edits away." 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": 54, 208 | "metadata": {}, 209 | "outputs": [ 210 | { 211 | "data": { 212 | "text/plain": [ 213 | "['caandil',\n", 214 | " 'caazdl',\n", 215 | " 'vaqndl',\n", 216 | " 'candlfy',\n", 217 | " 'calndm',\n", 218 | " 'caqndd',\n", 219 | " 'cagdl',\n", 220 | " 'cawdo',\n", 221 | " 'cacbdl',\n", 222 | " 'canldla']" 223 | ] 224 | }, 225 | "execution_count": 54, 226 | "metadata": {}, 227 | "output_type": "execute_result" 228 | } 229 | ], 230 | "source": [ 231 | "random.choices(list(edits2('candl')), k=10)\n" 232 | ] 233 | }, 234 | { 235 | "cell_type": "markdown", 236 | "metadata": {}, 237 | "source": [ 238 | "The `candidates` function lists all the 1-edits and 2-edits, and filter those that are not in the vocabulary." 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": 55, 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "data": { 248 | "text/plain": [ 249 | "{'canal', 'candle', 'candy'}" 250 | ] 251 | }, 252 | "execution_count": 55, 253 | "metadata": {}, 254 | "output_type": "execute_result" 255 | } 256 | ], 257 | "source": [ 258 | "candidates('candl')\n" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "So in this case, there's three of them." 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": 56, 271 | "metadata": {}, 272 | "outputs": [ 273 | { 274 | "name": "stdout", 275 | "output_type": "stream", 276 | "text": [ 277 | "candy 8.963906829152417e-07\n", 278 | "canal 6.454012916989741e-05\n", 279 | "candle 3.2270064584948705e-05\n" 280 | ] 281 | } 282 | ], 283 | "source": [ 284 | "for candidate in candidates('candl'):\n", 285 | " print(candidate, P(candidate))\n" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": 57, 291 | "metadata": {}, 292 | "outputs": [ 293 | { 294 | "data": { 295 | "text/plain": [ 296 | "'canal'" 297 | ] 298 | }, 299 | "execution_count": 57, 300 | "metadata": {}, 301 | "output_type": "execute_result" 302 | } 303 | ], 304 | "source": [ 305 | "correct('candl')\n" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": {}, 311 | "source": [ 312 | "This model is actually a simple Bayes classifier. We want to find the most probable correction given a word. We can do this by using Bayes rule:\n", 313 | "\n", 314 | "$$argmax_{c \\in candidates} P(c|w) = \\frac{P(c)P(w|c)}{P(w)}$$\n", 315 | "\n", 316 | "We can ignore the denominator since it's the same for all candidates. 
So we can simplify this to:\n", 317 | "\n", 318 | "$$argmax_{c \\in candidates} P(c)P(w|c)$$\n", 319 | "\n", 320 | "This basic model estimates $P(c)$ by the frequency of the word in the vocabulary:" 321 | ] 322 | }, 323 | { 324 | "cell_type": "code", 325 | "execution_count": 58, 326 | "metadata": {}, 327 | "outputs": [ 328 | { 329 | "name": "stdout", 330 | "output_type": "stream", 331 | "text": [ 332 | "7.15%\n" 333 | ] 334 | } 335 | ], 336 | "source": [ 337 | "print(f\"{P('the'):.2%}\")\n" 338 | ] 339 | }, 340 | { 341 | "cell_type": "markdown", 342 | "metadata": {}, 343 | "source": [ 344 | "The $P(w|c)$ is more complicated. It's the likelihood of observing a typo. The thing is, we don't really have access to a list of typos people made. So our basic model simply says that a 1-edit is more likely than a 2-edit. A more sophisticated model would use a corpus of misspellings to learn typical typos." 345 | ] 346 | }, 347 | { 348 | "cell_type": "markdown", 349 | "metadata": {}, 350 | "source": [ 351 | "## Application" 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": 59, 357 | "metadata": {}, 358 | "outputs": [], 359 | "source": [ 360 | "from bs4 import BeautifulSoup\n", 361 | "import requests\n", 362 | "\n", 363 | "url = 'https://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/For_machines'\n", 364 | "\n", 365 | "content = requests.get(url).content.decode()\n", 366 | "soup = BeautifulSoup(content)\n" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": 60, 372 | "metadata": {}, 373 | "outputs": [ 374 | { 375 | "name": "stdout", 376 | "output_type": "stream", 377 | "text": [ 378 | "3,156 words\n", 379 | "4,291 misspellings\n" 380 | ] 381 | } 382 | ], 383 | "source": [ 384 | "from collections import defaultdict\n", 385 | "\n", 386 | "typos = defaultdict(list)\n", 387 | "\n", 388 | "for line in soup.find(name='pre').text.splitlines():\n", 389 | " typo, correction = line.split('->')\n", 390 | " typos[correction].append(typo)\n", 391 | "\n", 392 | "typos = dict(typos)\n", 393 | "\n", 394 | "print(f'{len(typos):,d} words')\n", 395 | "print(f'{sum(map(len, typos.values())):,d} misspellings')\n" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": 63, 401 | "metadata": {}, 402 | "outputs": [ 403 | { 404 | "name": "stdout", 405 | "output_type": "stream", 406 | "text": [ 407 | "✅ consituted -> constituted == constituted\n", 408 | "❌ tast -> last != taste\n", 409 | "✅ appearences -> appearances == appearances\n", 410 | "✅ apperances -> appearances == appearances\n", 411 | "✅ appereances -> appearances == appearances\n", 412 | "❌ Pucini -> mucin != Puccini\n", 413 | "❌ rechargable -> rechargable != rechargeable\n", 414 | "✅ casette -> cassette == cassette\n", 415 | "✅ verisons -> versions == versions\n", 416 | "✅ nowe -> now == now\n", 417 | "❌ regardes -> regarded != regards\n", 418 | "✅ mataphysical -> metaphysical == metaphysical\n", 419 | "✅ coform -> conform == conform\n", 420 | "❌ shoudln -> shouldn != should, shouldn't\n", 421 | "❌ homogeneize -> homogeneize != homogenize\n", 422 | "✅ guerrila -> guerrilla == guerrilla\n", 423 | "❌ implimented -> complimented != implemented\n", 424 | "✅ threee -> three == three\n", 425 | "✅ inocence -> innocence == innocence\n", 426 | "❌ maneouvres -> maneuvers != manoeuvres\n", 427 | "❌ deteoriated -> deteoriated != deteriorated\n", 428 | "❌ scoll -> scold != scroll\n", 429 | "❌ Malcom -> falcon != Malcolm\n", 430 | "✅ buisness -> business == business\n", 431 | "✅ busines -> 
business == business\n", 432 | "✅ busness -> business == business\n", 433 | "✅ bussiness -> business == business\n", 434 | "❌ omniverously -> omniverously != omnivorously\n", 435 | "✅ soudns -> sounds == sounds\n", 436 | "❌ souveniers -> souvenir != souvenirs\n" 437 | ] 438 | } 439 | ], 440 | "source": [ 441 | "import random\n", 442 | "\n", 443 | "keys = random.sample(list(typos.keys()), 25)\n", 444 | "\n", 445 | "for truth in keys:\n", 446 | " misspellings = typos[truth]\n", 447 | " for misspelling in misspellings:\n", 448 | " correction = correct(misspelling)\n", 449 | " if correction == truth:\n", 450 | " print(f'✅ {misspelling} -> {correction} == {truth}')\n", 451 | " else:\n", 452 | " print(f'❌ {misspelling} -> {correction} != {truth}')\n" 453 | ] 454 | } 455 | ], 456 | "metadata": { 457 | "kernelspec": { 458 | "display_name": "Python 3", 459 | "language": "python", 460 | "name": "python3" 461 | }, 462 | "language_info": { 463 | "codemirror_mode": { 464 | "name": "ipython", 465 | "version": 3 466 | }, 467 | "file_extension": ".py", 468 | "mimetype": "text/x-python", 469 | "name": "python", 470 | "nbconvert_exporter": "python", 471 | "pygments_lexer": "ipython3", 472 | "version": "3.11.0" 473 | } 474 | }, 475 | "nbformat": 4, 476 | "nbformat_minor": 2 477 | } 478 | -------------------------------------------------------------------------------- /data-mining/text-processing/tfidf.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# TF-IDF for dummies" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "We'll use a dataset which was used for a [Kaggle InClass competition](https://www.kaggle.com/competitions/defi-ia-insa-toulouse/overview) from a few years ago. The goal is to predict a person's job based on their resume. The competition's purpose was to build a classifier that was biased towards gender. But in this notebook, we'll just focus on the TF-IDF part." 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": {}, 21 | "outputs": [ 22 | { 23 | "data": { 24 | "text/html": [ 25 | "
\n", 26 | "\n", 39 | "\n", 40 | " \n", 41 | " \n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | " \n", 57 | " \n", 58 | " \n", 59 | " \n", 60 | " \n", 61 | " \n", 62 | " \n", 63 | " \n", 64 | " \n", 65 | " \n", 66 | " \n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | "
descriptiongenderjob
Id
0She is also a Ronald D. Asmus Policy Entrepre...Fprofessor
1He is a member of the AICPA and WICPA. Brent ...Maccountant
2Dr. Aster has held teaching and research posi...Mprofessor
3He runs a boutique design studio attending cl...Marchitect
4He focuses on cloud security, identity and ac...Marchitect
\n", 87 | "
" 88 | ], 89 | "text/plain": [ 90 | " description gender job\n", 91 | "Id \n", 92 | "0 She is also a Ronald D. Asmus Policy Entrepre... F professor\n", 93 | "1 He is a member of the AICPA and WICPA. Brent ... M accountant\n", 94 | "2 Dr. Aster has held teaching and research posi... M professor\n", 95 | "3 He runs a boutique design studio attending cl... M architect\n", 96 | "4 He focuses on cloud security, identity and ac... M architect" 97 | ] 98 | }, 99 | "execution_count": 1, 100 | "metadata": {}, 101 | "output_type": "execute_result" 102 | } 103 | ], 104 | "source": [ 105 | "import pathlib\n", 106 | "import zipfile\n", 107 | "import pandas as pd\n", 108 | "\n", 109 | "data_dir = pathlib.Path('../../data/bias-in-bios.zip')\n", 110 | "\n", 111 | "with zipfile.ZipFile(data_dir, 'r') as z:\n", 112 | " with z.open('train.json') as f:\n", 113 | " train = pd.read_json(f).set_index('Id')\n", 114 | " with z.open('categories_string.csv') as f:\n", 115 | " names = pd.read_csv(f)['0'].to_dict()\n", 116 | " with z.open('train_label.csv') as f:\n", 117 | " jobs = pd.read_csv(f, index_col='Id')['Category']\n", 118 | " jobs = jobs.map(names)\n", 119 | " jobs = jobs.rename('job')\n", 120 | " train['job'] = jobs\n", 121 | "\n", 122 | "train.head()\n" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 2, 128 | "metadata": {}, 129 | "outputs": [ 130 | { 131 | "data": { 132 | "text/plain": [ 133 | "'217,197'" 134 | ] 135 | }, 136 | "execution_count": 2, 137 | "metadata": {}, 138 | "output_type": "execute_result" 139 | } 140 | ], 141 | "source": [ 142 | "f\"{len(train):,d}\"\n" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 3, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "professor 70016\n", 154 | "attorney 18820\n", 155 | "photographer 14646\n", 156 | "nurse 12622\n", 157 | "journalist 12295\n", 158 | "physician 11607\n", 159 | "psychologist 10391\n", 160 | "teacher 9145\n", 161 | "surgeon 6616\n", 162 | "architect 5841\n", 163 | "dentist 5450\n", 164 | "painter 4621\n", 165 | "poet 4292\n", 166 | "filmmaker 4124\n", 167 | "model 4115\n", 168 | "software_engineer 4060\n", 169 | "composer 3395\n", 170 | "accountant 3121\n", 171 | "dietitian 2288\n", 172 | "comedian 1639\n", 173 | "pastor 1497\n", 174 | "chiropractor 1406\n", 175 | "paralegal 967\n", 176 | "yoga_teacher 944\n", 177 | "interior_designer 858\n", 178 | "dj 831\n", 179 | "personal_trainer 807\n", 180 | "rapper 783\n", 181 | "Name: job, dtype: int64" 182 | ] 183 | }, 184 | "execution_count": 3, 185 | "metadata": {}, 186 | "output_type": "execute_result" 187 | } 188 | ], 189 | "source": [ 190 | "train['job'].value_counts()\n" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "No machine learning model takes as input text directly. The text always has to be transformed. In particular, for text, the act of transforming text into a vector of numbers is called **vectorization**. There are many ways to vectorize text, but the most common one is called **TF-IDF**. Before we go into that, let's first look at a simpler method called **Bag of Words**." 
198 | ] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "execution_count": 4, 203 | "metadata": {}, 204 | "outputs": [], 205 | "source": [ 206 | "from sklearn.feature_extraction.text import CountVectorizer\n", 207 | "\n", 208 | "vectorizer = CountVectorizer()\n" 209 | ] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "metadata": {}, 214 | "source": [ 215 | "A vectorizer does two things. First it normalizes the text:" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 5, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "data": { 225 | "text/plain": [ 226 | "' she is also a ronald d. asmus policy entrepreneur fellow with the german marshall fund and is a visiting fellow at the centre for international studies (cis) at the university of oxford. this commentary first appeared at sada, an online journal published by the carnegie endowment for international peace.'" 227 | ] 228 | }, 229 | "execution_count": 5, 230 | "metadata": {}, 231 | "output_type": "execute_result" 232 | } 233 | ], 234 | "source": [ 235 | "clean = vectorizer.build_preprocessor()(train['description'][0])\n", 236 | "clean\n" 237 | ] 238 | }, 239 | { 240 | "cell_type": "markdown", 241 | "metadata": {}, 242 | "source": [ 243 | "Next, it splits the text into tokens:" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": 6, 249 | "metadata": {}, 250 | "outputs": [ 251 | { 252 | "data": { 253 | "text/plain": [ 254 | "['she',\n", 255 | " 'is',\n", 256 | " 'also',\n", 257 | " 'ronald',\n", 258 | " 'asmus',\n", 259 | " 'policy',\n", 260 | " 'entrepreneur',\n", 261 | " 'fellow',\n", 262 | " 'with',\n", 263 | " 'the']" 264 | ] 265 | }, 266 | "execution_count": 6, 267 | "metadata": {}, 268 | "output_type": "execute_result" 269 | } 270 | ], 271 | "source": [ 272 | "tokens = vectorizer.build_tokenizer()(clean)\n", 273 | "tokens[:10]\n" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "The idea is then to build a matrix where each row corresponds to a document and each column corresponds to a token. The value of each cell is the number of times the token appears in the document. This is called a **Bag of Words** representation because we lose the order of the words in the text. We only keep track of the number of times each word appears in the text." 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": 7, 286 | "metadata": {}, 287 | "outputs": [ 288 | { 289 | "data": { 290 | "text/plain": [ 291 | "<217197x230368 sparse matrix of type ''\n", 292 | "\twith 9851657 stored elements in Compressed Sparse Row format>" 293 | ] 294 | }, 295 | "execution_count": 7, 296 | "metadata": {}, 297 | "output_type": "execute_result" 298 | } 299 | ], 300 | "source": [ 301 | "counts = vectorizer.fit_transform(raw_documents=train['description'])\n", 302 | "counts\n" 303 | ] 304 | }, 305 | { 306 | "cell_type": "markdown", 307 | "metadata": {}, 308 | "source": [ 309 | "This is a sparse matrix, because that's a data structure which makes sense in this case: most documents will only contain a small subset of the tokens, so it's a waste of memory to store all the zeros. Sparse matrices are very common in text processing, so some machine learning algorithms are optimized to work with them.\n", 310 | "\n", 311 | "It's important to think about the data in terms of a sparse matrix. For instance, regular standard scaling should be avoided. 
Indeed, if you subtract the mean from a sparse matrix, you get a dense matrix, which takes up a lot of memory. Instead, you should use a scaler that preserves sparsity, such as `MaxAbsScaler` (scikit-learn's `MinMaxScaler` does not accept sparse input). Dividing each feature by its maximum absolute value leaves zeros untouched, so the data stays sparse." 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": 8, 317 | "metadata": {}, 318 | "outputs": [ 319 | { 320 | "data": { 321 | "text/html": [ 322 | "
" 331 | ], 332 | "text/plain": [ 333 | "Pipeline(steps=[('countvectorizer', CountVectorizer()),\n", 334 | " ('standardscaler', StandardScaler(with_mean=False)),\n", 335 | " ('normalizer', Normalizer()),\n", 336 | " ('sgdclassifier',\n", 337 | " SGDClassifier(loss='log_loss', max_iter=100))])" 338 | ] 339 | }, 340 | "execution_count": 8, 341 | "metadata": {}, 342 | "output_type": "execute_result" 343 | } 344 | ], 345 | "source": [ 346 | "from sklearn import linear_model\n", 347 | "from sklearn import pipeline\n", 348 | "from sklearn import preprocessing\n", 349 | "\n", 350 | "model = pipeline.make_pipeline(\n", 351 | " CountVectorizer(),\n", 352 | " preprocessing.StandardScaler(with_mean=False),\n", 353 | " preprocessing.Normalizer(),\n", 354 | " linear_model.SGDClassifier(loss='log_loss', max_iter=100, tol=1e-3)\n", 355 | ")\n", 356 | "model.fit(train['description'], train['job'])\n" 357 | ] 358 | }, 359 | { 360 | "cell_type": "markdown", 361 | "metadata": {}, 362 | "source": [ 363 | "The Bag of Words representation is very simple, but it has a few drawbacks. First, it doesn't take into account the order of the words. Second, it doesn't take into account the fact that some words are more common than others. For instance, the word \"the\" is very common, but it doesn't carry much information. TF-IDF is a way to fix that.\n", 364 | "\n", 365 | "TF-IDF stands for **Term Frequency - Inverse Document Frequency**. It's a way to normalize the Bag of Words representation. The idea is to divide each value by the number of times the token appears in the document. This is called the **Term Frequency**. But we also divide by the number of documents in which the token appears. This is called the **Inverse Document Frequency**. The intuition is that if a token appears in many documents, it's not very informative. On the other hand, if it appears in only a few documents, it's more informative.\n", 366 | "\n", 367 | "There are actually many ways to compute the TF-IDF. The most common one is called **smoothed TF-IDF**. It's computed as follows:\n", 368 | "\n", 369 | "$$\n", 370 | "\\text{TF-IDF}(d, t) = \\frac{\\text{TF}(d, t)}{\\text{max}(\\text{TF}(d, t'))} \\times \\log\\left(\\frac{N}{\\text{DF}(t)}\\right)\n", 371 | "$$\n", 372 | "\n", 373 | "where $d$ is a document, $t$ is a token, $t'$ is a token in the document $d$, $N$ is the number of documents, $\\text{TF}(d, t)$ is the number of times the token $t$ appears in the document $d$, $\\text{DF}(t)$ is the number of documents in which the token $t$ appears.\n", 374 | "\n", 375 | "The first part is the **Term Frequency**. The second part is the **Inverse Document Frequency**. The $\\log$ is here to make sure that the values are not too large. The $\\text{max}$ is here to make sure that the values are not too small. The $\\text{max}$ is computed over all the tokens in the document $d$.\n", 376 | "\n", 377 | "**TLDR: TF-IDF is a way to normalize the Bag of Words representation. 
It's a way to give more importance to rare words.**" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": 9, 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "from sklearn.feature_extraction.text import TfidfVectorizer\n", 387 | "\n", 388 | "vectorizer = TfidfVectorizer()\n", 389 | "tfidf_matrix = vectorizer.fit_transform(raw_documents=train['description'])\n" 390 | ] 391 | }, 392 | { 393 | "cell_type": "code", 394 | "execution_count": 15, 395 | "metadata": {}, 396 | "outputs": [ 397 | { 398 | "data": { 399 | "text/plain": [ 400 | "<1x230368 sparse matrix of type ''\n", 401 | "\twith 37 stored elements in Compressed Sparse Row format>" 402 | ] 403 | }, 404 | "execution_count": 15, 405 | "metadata": {}, 406 | "output_type": "execute_result" 407 | } 408 | ], 409 | "source": [ 410 | "tfidf_matrix[0]\n" 411 | ] 412 | }, 413 | { 414 | "cell_type": "code", 415 | "execution_count": 33, 416 | "metadata": {}, 417 | "outputs": [ 418 | { 419 | "data": { 420 | "text/plain": [ 421 | "array([160507, 75843, 47025, 44714, 170236, 113882, 154903, 24702,\n", 422 | " 181876, 26907, 83165, 55279, 207078, 157328, 153755, 214671,\n", 423 | " 52562, 199296, 108467, 84635, 48901, 29976, 219339, 24992,\n", 424 | " 86829, 133833, 89664, 206175, 224453, 81756, 76468, 165639,\n", 425 | " 29401, 179670, 23555, 109703, 189090], dtype=int32)" 426 | ] 427 | }, 428 | "execution_count": 33, 429 | "metadata": {}, 430 | "output_type": "execute_result" 431 | } 432 | ], 433 | "source": [ 434 | "tfidf_matrix[0].indices\n" 435 | ] 436 | }, 437 | { 438 | "cell_type": "code", 439 | "execution_count": 32, 440 | "metadata": {}, 441 | "outputs": [ 442 | { 443 | "data": { 444 | "text/plain": [ 445 | "array([0.17587834, 0.209706 , 0.18732235, 0.08337532, 0.09836813,\n", 446 | " 0.11498371, 0.13353337, 0.064915 , 0.31659522, 0.12865291,\n", 447 | " 0.10931213, 0.21066895, 0.09937576, 0.16555669, 0.03304113,\n", 448 | " 0.05916193, 0.26172833, 0.10305093, 0.2036051 , 0.09570113,\n", 449 | " 0.14782546, 0.15956879, 0.15093547, 0.02909765, 0.18124474,\n", 450 | " 0.19921095, 0.17256723, 0.13280117, 0.05378592, 0.24756103,\n", 451 | " 0.2139983 , 0.12751501, 0.32776078, 0.22907902, 0.07296681,\n", 452 | " 0.09074152, 0.05282196])" 453 | ] 454 | }, 455 | "execution_count": 32, 456 | "metadata": {}, 457 | "output_type": "execute_result" 458 | } 459 | ], 460 | "source": [ 461 | "tfidf_matrix[0].data\n" 462 | ] 463 | }, 464 | { 465 | "cell_type": "code", 466 | "execution_count": 55, 467 | "metadata": {}, 468 | "outputs": [ 469 | { 470 | "name": "stdout", 471 | "output_type": "stream", 472 | "text": [ 473 | "asmus 0.328\n", 474 | "sada 0.317\n", 475 | "cis 0.262\n", 476 | "fellow 0.248\n", 477 | "ronald 0.229\n", 478 | "entrepreneur 0.214\n", 479 | "commentary 0.211\n", 480 | "endowment 0.210\n", 481 | "international 0.204\n", 482 | "marshall 0.199\n", 483 | "carnegie 0.187\n", 484 | "fund 0.181\n", 485 | "peace 0.176\n", 486 | "german 0.173\n", 487 | "oxford 0.166\n", 488 | "at 0.160\n", 489 | "visiting 0.151\n", 490 | "centre 0.148\n", 491 | "online 0.134\n", 492 | "the 0.133\n", 493 | "appeared 0.129\n", 494 | "policy 0.128\n", 495 | "journal 0.115\n", 496 | "first 0.109\n", 497 | "studies 0.103\n", 498 | "this 0.099\n", 499 | "published 0.098\n", 500 | "for 0.096\n", 501 | "is 0.091\n", 502 | "by 0.083\n", 503 | "also 0.073\n", 504 | "an 0.065\n", 505 | "university 0.059\n", 506 | "with 0.054\n", 507 | "she 0.053\n", 508 | "of 0.033\n", 509 | "and 0.029\n" 510 | ] 511 | } 512 | ], 513 | "source": [ 
514 | "feature_names = vectorizer.get_feature_names_out()\n", 515 | "\n", 516 | "indices = tfidf_matrix[0].indices\n", 517 | "scores = tfidf_matrix[0].data\n", 518 | "\n", 519 | "for i in scores.argsort()[::-1]:\n", 520 | " print(f\"{feature_names[indices[i]]:<20} {scores[i]:.3f}\")\n" 521 | ] 522 | }, 523 | { 524 | "cell_type": "markdown", 525 | "metadata": {}, 526 | "source": [ 527 | "Compare this to a Bag of Words representation:" 528 | ] 529 | }, 530 | { 531 | "cell_type": "code", 532 | "execution_count": 57, 533 | "metadata": {}, 534 | "outputs": [ 535 | { 536 | "name": "stdout", 537 | "output_type": "stream", 538 | "text": [ 539 | "the 4.000\n", 540 | "at 3.000\n", 541 | "international 2.000\n", 542 | "is 2.000\n", 543 | "fellow 2.000\n", 544 | "for 2.000\n", 545 | "german 1.000\n", 546 | "visiting 1.000\n", 547 | "and 1.000\n", 548 | "fund 1.000\n", 549 | "marshall 1.000\n", 550 | "with 1.000\n", 551 | "entrepreneur 1.000\n", 552 | "policy 1.000\n", 553 | "asmus 1.000\n", 554 | "ronald 1.000\n", 555 | "also 1.000\n", 556 | "centre 1.000\n", 557 | "peace 1.000\n", 558 | "endowment 1.000\n", 559 | "studies 1.000\n", 560 | "carnegie 1.000\n", 561 | "by 1.000\n", 562 | "published 1.000\n", 563 | "journal 1.000\n", 564 | "online 1.000\n", 565 | "an 1.000\n", 566 | "sada 1.000\n", 567 | "appeared 1.000\n", 568 | "first 1.000\n", 569 | "commentary 1.000\n", 570 | "this 1.000\n", 571 | "oxford 1.000\n", 572 | "of 1.000\n", 573 | "university 1.000\n", 574 | "cis 1.000\n", 575 | "she 1.000\n" 576 | ] 577 | } 578 | ], 579 | "source": [ 580 | "indices = counts[0].indices\n", 581 | "scores = counts[0].data\n", 582 | "\n", 583 | "for i in scores.argsort()[::-1]:\n", 584 | " print(f\"{feature_names[indices[i]]:<20} {scores[i]:.3f}\")\n" 585 | ] 586 | }, 587 | { 588 | "cell_type": "markdown", 589 | "metadata": {}, 590 | "source": [ 591 | "One last thing to mention is that the tokenization can be customized. For instance, in search engines, it's common to use n-grams instead of tokens. An n-gram is a sequence of n tokens. In particular, trigrams are quite common." 
592 | ] 593 | }, 594 | { 595 | "cell_type": "code", 596 | "execution_count": 74, 597 | "metadata": {}, 598 | "outputs": [], 599 | "source": [ 600 | "trigrammer = CountVectorizer(ngram_range=(1, 3))\n", 601 | "trigrams = trigrammer.fit_transform(raw_documents=train['description'][:100])\n" 602 | ] 603 | }, 604 | { 605 | "cell_type": "code", 606 | "execution_count": 72, 607 | "metadata": {}, 608 | "outputs": [ 609 | { 610 | "name": "stdout", 611 | "output_type": "stream", 612 | "text": [ 613 | "the 4.000\n", 614 | "at 3.000\n", 615 | "is 2.000\n", 616 | "international 2.000\n", 617 | "for 2.000\n", 618 | "for international 2.000\n", 619 | "fellow 2.000\n", 620 | "at the 2.000\n", 621 | "asmus policy 1.000\n", 622 | "fellow with 1.000\n", 623 | "entrepreneur fellow 1.000\n", 624 | "policy entrepreneur 1.000\n", 625 | "for international peace 1.000\n", 626 | "ronald asmus 1.000\n", 627 | "also ronald 1.000\n", 628 | "the german 1.000\n", 629 | "is also 1.000\n", 630 | "she is 1.000\n", 631 | "peace 1.000\n", 632 | "endowment 1.000\n" 633 | ] 634 | } 635 | ], 636 | "source": [ 637 | "feature_names = trigrammer.get_feature_names_out()\n", 638 | "\n", 639 | "indices = trigrams[0].indices\n", 640 | "scores = trigrams[0].data\n", 641 | "\n", 642 | "for i in scores.argsort()[::-1][:20]:\n", 643 | " print(f\"{feature_names[indices[i]]:<20} {scores[i]:.3f}\")\n" 644 | ] 645 | }, 646 | { 647 | "cell_type": "markdown", 648 | "metadata": {}, 649 | "source": [ 650 | "This can also be done at the character level." 651 | ] 652 | }, 653 | { 654 | "cell_type": "code", 655 | "execution_count": 85, 656 | "metadata": {}, 657 | "outputs": [], 658 | "source": [ 659 | "trigrammer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char_wb', )\n", 660 | "trigrams = trigrammer.fit_transform(raw_documents=train['description'][:10_000])\n" 661 | ] 662 | }, 663 | { 664 | "cell_type": "code", 665 | "execution_count": 88, 666 | "metadata": {}, 667 | "outputs": [ 668 | { 669 | "name": "stdout", 670 | "output_type": "stream", 671 | "text": [ 672 | " 0.725\n", 673 | "e 0.215\n", 674 | "a 0.185\n", 675 | "n 0.170\n", 676 | "t 0.155\n", 677 | "i 0.148\n", 678 | "r 0.133\n", 679 | "o 0.118\n", 680 | "s 0.118\n", 681 | "l 0.104\n", 682 | " a 0.074\n", 683 | "d 0.074\n", 684 | "h 0.067\n", 685 | "na 0.063\n", 686 | "f 0.060\n", 687 | "e 0.059\n", 688 | "rna 0.056\n", 689 | "smu 0.056\n", 690 | "(ci 0.054\n", 691 | "nal 0.054\n", 692 | "is) 0.053\n", 693 | "fel 0.052\n", 694 | "rn 0.052\n", 695 | "u 0.052\n", 696 | "wme 0.050\n", 697 | "nt 0.049\n", 698 | " f 0.049\n", 699 | "owm 0.049\n", 700 | "al 0.048\n", 701 | "pea 0.048\n" 702 | ] 703 | } 704 | ], 705 | "source": [ 706 | "feature_names = trigrammer.get_feature_names_out()\n", 707 | "\n", 708 | "indices = trigrams[0].indices\n", 709 | "scores = trigrams[0].data\n", 710 | "\n", 711 | "for i in scores.argsort()[::-1][:30]:\n", 712 | " print(f\"{feature_names[indices[i]]:<20} {scores[i]:.3f}\")\n" 713 | ] 714 | } 715 | ], 716 | "metadata": { 717 | "kernelspec": { 718 | "display_name": "Python 3", 719 | "language": "python", 720 | "name": "python3" 721 | }, 722 | "language_info": { 723 | "codemirror_mode": { 724 | "name": "ipython", 725 | "version": 3 726 | }, 727 | "file_extension": ".py", 728 | "mimetype": "text/x-python", 729 | "name": "python", 730 | "nbconvert_exporter": "python", 731 | "pygments_lexer": "ipython3", 732 | "version": "3.11.0" 733 | } 734 | }, 735 | "nbformat": 4, 736 | "nbformat_minor": 2 737 | } 738 | 
-------------------------------------------------------------------------------- /data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/data/.gitkeep -------------------------------------------------------------------------------- /data/bias-in-bios.zip: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:ffe3459b453cd4c05c92b592f984923247213b78196928d11886a8712d54fa00 3 | size 44411352 4 | -------------------------------------------------------------------------------- /data/mens-machine-learning-competition-2019.zip: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:3472f2e2d40e27d81611fafb634e5600aa2881467bc1e60b2267fc6641c311d3 3 | size 283441226 4 | -------------------------------------------------------------------------------- /data/wowah.zip: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:f2b7177fb4408168c9ca2c3a96ac105af548a988447cf906cd45956f56a8899f 3 | size 74950617 4 | -------------------------------------------------------------------------------- /deep-learning/backprop.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Yet another explanation of backprop\n", 8 | "\n", 9 | "There are many tutorials on backpropagation out there. I've skimmed through a bunch of them, and overall my favorite was [this one](https://www.ritchievink.com/blog/2017/07/10/programming-a-neural-network-from-scratch/) by Ritchie Vink. I preferred because the code examples are of good quality and give a lot of leeway for improvement. [This](https://victorzhou.com/blog/intro-to-neural-networks/) blogpost by Victor Zhou also helped me develop a mental model of what's going on." 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## Neural networks in a nutshell\n", 17 | "\n", 18 | "A neural network is a sequence of layers. Every layer takes as input $x$ and outputs $z$. We can denote this by a function which we call $f$:\n", 19 | "\n", 20 | "$$z = f(x)$$\n", 21 | "\n", 22 | "Note that the input $x$ can be a set of features, as well as the output from another layer. In the case of a dense layer, $f$ is an affine transformation:\n", 23 | "\n", 24 | "$$z = w x + b$$\n", 25 | "\n", 26 | "When we stack layers, we are simply chaining functions:\n", 27 | "\n", 28 | "$$\\hat{y} = f(f(f(\\dots(f(x)))))$$\n", 29 | "\n", 30 | "In the case of dense layers, which are linear, chaining them essentially results in a linear function. This means that even if we have a million dense layers stacked together, we still won't be able to learn non-linear patterns such as the XOR function. To add non-linearity, we add an *activation function* after each layer. Let's call these activation functions $g$. The output from the activation functions will be called $a$.\n", 31 | "\n", 32 | "$$a = g(f(x))$$\n", 33 | "\n", 34 | "When we stack layers, our final output is:\n", 35 | "\n", 36 | "$$\\hat{y} = g(f(g(f(\\dots(g(f(x)))))))$$\n", 37 | "\n", 38 | "Of course there are many more flavors of neural networks but that's the general idea. 
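To see the chaining in code, here is a minimal sketch of a forward pass through two dense layers with sigmoid activations; the shapes and random weights are made up for illustration and are unrelated to the implementation further down:

```python
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

rng = np.random.default_rng(0)

x = rng.normal(size=3)                          # input features
w1, b1 = rng.normal(size=(3, 4)), np.zeros(4)   # first dense layer
w2, b2 = rng.normal(size=(4, 1)), np.zeros(1)   # second dense layer

a1 = sigmoid(x @ w1 + b1)       # a1 = g(f(x))
y_hat = sigmoid(a1 @ w2 + b2)   # y_hat = g(f(g(f(x))))
```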
In the case of using dense layers, we're looking to tune the weights $w$ and biases $b$. That's where backpropagation comes in.\n", 39 | "\n", 40 | "## Backpropagation\n", 41 | "\n", 42 | "First of all, let's get the chain rule out of the way. Say you have a function $f$, a function $g$, and an input $x$. If we compose our functions and apply them to $x$ we get $g(f(x))$. Now say we want to find the derivative of $g$ with respect to $x$. The trick is that there the function $f$ in between $g$ and $x$. In this case we use the chain rule, which gives us:\n", 43 | "\n", 44 | "$$\\frac{\\partial g}{\\partial x} = \\frac{\\partial g}{\\partial f} \\times \\frac{\\partial f}{\\partial x}$$\n", 45 | "\n", 46 | "In other words, in order to compute $\\frac{\\partial g}{\\partial x}$, we have to compute $\\frac{\\partial g}{\\partial f}$ and $\\frac{\\partial f}{\\partial x}$ and multiply them together. The chain rule is thus just a tool that we can add to our toolkit. In the case of neural networks it's super useful because we're basically just chaining functions. \n", 47 | "\n", 48 | "Let's say we're looking at the weights of the final layer. We'll call them $w$. The output of the network is denoted as $\\hat{y}$ whilst the ground truth is $y$. We have a loss function $L$ which indicates the error between $y$ and $\\hat{y}$. To update the weights, we need to calculate the gradient of the loss function with respect to the weights:\n", 49 | "\n", 50 | "$$\\frac{\\partial L}{\\partial w}$$\n", 51 | "\n", 52 | "In between $w_i$ and $L$, there is the application of the dense layer and the activation function. We can thus apply the chain rule:\n", 53 | "\n", 54 | "$$\\frac{\\partial L}{\\partial w} = \\frac{\\partial L}{\\partial a} \\times \\frac{\\partial a}{\\partial z} \\times \\frac{\\partial z}{\\partial w}$$\n", 55 | "\n", 56 | "In the case where our loss function is the mean squared error, the derivative is:\n", 57 | "\n", 58 | "$$\\frac{\\partial L}{\\partial a} = 2 \\times (a - y)$$\n", 59 | "\n", 60 | "For a sigmoid activation function, the derivative is:\n", 61 | "\n", 62 | "$$\\frac{\\partial a}{\\partial z} = \\sigma(z) (1 - \\sigma(z))$$\n", 63 | "\n", 64 | "where $\\sigma$ is in fact the sigmoid function. In the case of a dense layer, the derivative is:\n", 65 | "\n", 66 | "$$\\frac{\\partial z}{\\partial w} = x$$\n", 67 | "\n", 68 | "We simply have to multiply all these elements together in order to obtain $\\frac{\\partial L}{\\partial w}$:\n", 69 | "\n", 70 | "$$\\frac{\\partial L}{\\partial w} = (2 \\times (a - y)) \\times (\\sigma(z) (1 - \\sigma(z))) \\times x$$\n", 71 | "\n", 72 | "Recall that $a$ is the output of the network after having been processed by the activation function. We could have as well called it $\\hat{y}$ because we're looking at the final layer, but we use $a$ because it's more generic and applies to each layer in the network. $z$ is the output of the network *before* being processed by the activation function. Note that implementation wise we thus have to keep both in memory. We can't just obtain $a$ and erase $z$.\n", 73 | "\n", 74 | "If we plug in a different activation function and/or a different loss function, then everything will still work as long as each element is differentiable. Note that if we use the identity activation function (which doesn't change the input and has a derivative of 1), then we're simply doing linear regression!\n", 75 | "\n", 76 | "Now how about the weights of the penultimate layer (the one just before the last one). 
Well we \"just\" have write it down using the chain rule. Here goes:\n", 77 | "\n", 78 | "$$\\frac{\\partial L}{\\partial w_2} = \\frac{\\partial L}{\\partial a_3} \\times \\frac{\\partial a_3}{\\partial z_3} \\times \\frac{\\partial z_3}{\\partial a_2} \\times \\frac{\\partial a_2}{\\partial z_2} \\times \\frac{\\partial z_2}{\\partial w_2}$$\n", 79 | "\n", 80 | "We've indexed the $a$s and $z$s because we're looking at multiple layer. In this case $a_3$ is the output of the 3rd layer (we called it $a$ before) whilst $a_2$ is the output of the 2nd layer. An important thing to notice is that we're using $\\frac{\\partial L}{\\partial a_3} \\times \\frac{\\partial a_3}{\\partial z_3}$, which we already calculated previously. We can exploit this when we implement backpropagation in order to speed up our code but also make it shorter.\n", 81 | "\n", 82 | "Here is the gradients for the weights of the 1st layer:\n", 83 | "\n", 84 | "$$\\frac{\\partial L}{\\partial w_2} = \\frac{\\partial L}{\\partial a_3} \\times \\frac{\\partial a_3}{\\partial z_3} \\times \\frac{\\partial z_3}{\\partial a_2} \\times \\frac{\\partial a_2}{\\partial z_2} \\times \\frac{\\partial z_2}{\\partial a_1} \\times \\frac{\\partial a_1}{\\partial z_1} \\times \\frac{\\partial z_1}{\\partial w_1}$$\n", 85 | "\n", 86 | "Again the first four elements of the product have already been computed.\n", 87 | "\n", 88 | "How about the biases $b_i$? Well in a dense layer the derivative with respect to the biases is 1 (it was $x$ with respect to the weights). For the 3rd layer this will result in:\n", 89 | "\n", 90 | "$$\\frac{\\partial L}{\\partial b} = (2 \\times (a - y)) \\times (\\sigma(z) (1 - \\sigma(z))) \\times 1$$" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 45, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "import numpy as np\n", 100 | "from sklearn import datasets\n", 101 | "from sklearn import metrics\n", 102 | "from sklearn import model_selection\n", 103 | "from sklearn import preprocessing\n", 104 | "\n", 105 | "\n", 106 | "class ReLU:\n", 107 | " \"\"\"Rectified Linear Unit (ReLU) activation function.\"\"\"\n", 108 | "\n", 109 | " @staticmethod\n", 110 | " def activation(z):\n", 111 | " z[z < 0] = 0\n", 112 | " return z\n", 113 | "\n", 114 | " @staticmethod\n", 115 | " def gradient(z):\n", 116 | " z[z < 0] = 0\n", 117 | " z[z > 0] = 1\n", 118 | " return z\n", 119 | "\n", 120 | "\n", 121 | "class Sigmoid:\n", 122 | " \"\"\"Sigmoid activation function.\"\"\"\n", 123 | "\n", 124 | " @staticmethod\n", 125 | " def activation(z):\n", 126 | " return 1 / (1 + np.exp(-z))\n", 127 | "\n", 128 | " @staticmethod\n", 129 | " def gradient(z):\n", 130 | " s = Sigmoid.activation(z)\n", 131 | " return s * (1 - s)\n", 132 | "\n", 133 | "\n", 134 | "class Identity:\n", 135 | " \"\"\"Identity activation function.\"\"\"\n", 136 | "\n", 137 | " @staticmethod\n", 138 | " def activation(z):\n", 139 | " return z\n", 140 | "\n", 141 | " @staticmethod\n", 142 | " def gradient(z):\n", 143 | " return np.ones_like(z)\n", 144 | "\n", 145 | "\n", 146 | "class MSE:\n", 147 | " \"\"\"Mean Squared Error (MSE) loss function.\"\"\"\n", 148 | "\n", 149 | " @staticmethod\n", 150 | " def loss(y_true, y_pred):\n", 151 | " return np.mean((y_pred - y_true) ** 2)\n", 152 | "\n", 153 | " @staticmethod\n", 154 | " def gradient(y_true, y_pred):\n", 155 | " return 2 * (y_pred - y_true)\n", 156 | "\n", 157 | "\n", 158 | "class SGD:\n", 159 | " \"\"\"Stochastic Gradient Descent (SGD).\"\"\"\n", 160 | "\n", 161 | " def 
__init__(self, learning_rate):\n", 162 | " self.learning_rate = learning_rate\n", 163 | "\n", 164 | " def step(self, weights, gradients):\n", 165 | " weights -= self.learning_rate * gradients\n", 166 | "\n", 167 | "\n", 168 | "class NN:\n", 169 | " \"\"\"\n", 170 | "\n", 171 | " Parameters:\n", 172 | " dimensions (tuples of ints of length n_layers)\n", 173 | "\n", 174 | " \"\"\"\n", 175 | "\n", 176 | " def __init__(self, dimensions, activations, loss, optimizer):\n", 177 | " self.n_layers = len(dimensions)\n", 178 | " self.loss = loss\n", 179 | " self.optimizer = optimizer\n", 180 | "\n", 181 | " # Weights and biases are initiated by index. For a one hidden layer net you will have a w[1] and w[2]\n", 182 | " self.w = {}\n", 183 | " self.b = {}\n", 184 | "\n", 185 | " # Activations are also initiated by index. For the example we will have activations[2] and activations[3]\n", 186 | " self.activations = {}\n", 187 | " for i in range(len(dimensions) - 1):\n", 188 | " self.w[i + 1] = np.random.randn(dimensions[i], dimensions[i + 1]) / np.sqrt(dimensions[i])\n", 189 | " self.b[i + 1] = np.zeros(dimensions[i + 1])\n", 190 | " self.activations[i + 2] = activations[i]\n", 191 | "\n", 192 | " def _feed_forward(self, X):\n", 193 | " \"\"\"Executes a forward pass through the neural network.\n", 194 | "\n", 195 | " This will return the state at each layer of the network, which includes the output of the\n", 196 | " network.\n", 197 | "\n", 198 | " Parameters:\n", 199 | " X (array of shape (batch_size, n_features))\n", 200 | "\n", 201 | " \"\"\"\n", 202 | "\n", 203 | " # z = w(x) + b\n", 204 | " z = {}\n", 205 | "\n", 206 | " # a = f(z)\n", 207 | " a = {1: X} # First layer has no activations as input\n", 208 | "\n", 209 | " for i in range(2, self.n_layers + 1):\n", 210 | " z[i] = np.dot(a[i - 1], self.w[i - 1]) + self.b[i - 1]\n", 211 | " a[i] = self.activations[i].activation(z[i])\n", 212 | "\n", 213 | " return z, a\n", 214 | "\n", 215 | " def _backprop(self, z, a, y_true):\n", 216 | " \"\"\"Backpropagation.\n", 217 | "\n", 218 | " Parameters:\n", 219 | " z (dict of length n_layers - 1):\n", 220 | "\n", 221 | " z = {\n", 222 | " 2: w1 * x + b1\n", 223 | " 3: w2 * (w1 * x + b1) + b2\n", 224 | " 4: w3 * (w2 * (w1 * x + b1) + b2) + b3\n", 225 | " ...\n", 226 | " }\n", 227 | "\n", 228 | " a (dict of length n_layers):\n", 229 | "\n", 230 | " a = {\n", 231 | " 1: x,\n", 232 | " 2: f(w1 * x + b1)\n", 233 | " 3: f(w2 * (w1 * x + b1) + b2)\n", 234 | " 4: f(w3 * (w2 * (w1 * x + b1) + b2) + b3)\n", 235 | " ...\n", 236 | " }\n", 237 | "\n", 238 | " y_true (array of shape (batch_size, n_targets))\n", 239 | "\n", 240 | " \"\"\"\n", 241 | "\n", 242 | " # Determine the partial derivative and delta for the output layer\n", 243 | " y_pred = a[self.n_layers]\n", 244 | " final_activation = self.activations[self.n_layers]\n", 245 | " delta = self.loss.gradient(y_true, y_pred) * final_activation.gradient(y_pred)\n", 246 | " dw = np.dot(a[self.n_layers - 1].T, delta)\n", 247 | "\n", 248 | " update_params = {\n", 249 | " self.n_layers - 1: (dw, delta)\n", 250 | " }\n", 251 | "\n", 252 | " # Go through the layers in reverse order\n", 253 | " for i in range(self.n_layers - 2, 0, -1):\n", 254 | " delta = np.dot(delta, self.w[i + 1].T) * self.activations[i + 1].gradient(z[i + 1])\n", 255 | " dw = np.dot(a[i].T, delta)\n", 256 | " update_params[i] = (dw, delta)\n", 257 | "\n", 258 | " # Update the parameters\n", 259 | " for k, (dw, delta) in update_params.items():\n", 260 | " self.optimizer.step(weights=self.w[k], 
gradients=dw)\n", 261 | " self.optimizer.step(weights=self.b[k], gradients=np.mean(delta, axis=0))\n", 262 | "\n", 263 | " def fit(self, X, y, epochs, batch_size, print_every=np.inf):\n", 264 | " \"\"\"Trains the neural network.\n", 265 | "\n", 266 | " Parameters:\n", 267 | " X (array of shape (n_samples, n_features))\n", 268 | " y (array of shape (n_samples, n_targets))\n", 269 | " epochs (int)\n", 270 | " batch_size (int)\n", 271 | "\n", 272 | " \"\"\"\n", 273 | "\n", 274 | " # As a convention we expect y to be 2D, even if there is only one target to predict\n", 275 | " if y.ndim == 1:\n", 276 | " y = np.expand_dims(y, axis=1)\n", 277 | "\n", 278 | " # Go through the epochs\n", 279 | " for i in range(epochs):\n", 280 | "\n", 281 | " # Shuffle the data\n", 282 | " idx = np.arange(X.shape[0])\n", 283 | " np.random.shuffle(idx)\n", 284 | " x_ = X[idx]\n", 285 | " y_ = y[idx]\n", 286 | "\n", 287 | " # Iterate over the training data in mini-batches\n", 288 | " for j in range(X.shape[0] // batch_size):\n", 289 | " start = j * batch_size\n", 290 | " stop = (j + 1) * batch_size\n", 291 | " z, a = self._feed_forward(x_[start:stop])\n", 292 | " self._backprop(z, a, y_[start:stop])\n", 293 | "\n", 294 | " # Display the performance every print_every eooch\n", 295 | " if (i + 1) % print_every == 0:\n", 296 | " y_pred = self.predict(X)\n", 297 | " print(f'[{i+1}] train loss: {self.loss.loss(y, y_pred)}')\n", 298 | "\n", 299 | " def predict(self, X):\n", 300 | " \"\"\"Predicts an output for each sample in X.\n", 301 | "\n", 302 | " Parameters:\n", 303 | " X (array of shape (n_samples, n_features))\n", 304 | "\n", 305 | " \"\"\"\n", 306 | " _, a = self._feed_forward(X)\n", 307 | " return a[self.n_layers]" 308 | ] 309 | }, 310 | { 311 | "cell_type": "markdown", 312 | "metadata": {}, 313 | "source": [ 314 | "Boston." 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": 46, 320 | "metadata": {}, 321 | "outputs": [ 322 | { 323 | "name": "stdout", 324 | "output_type": "stream", 325 | "text": [ 326 | "[10] train loss: 11.796707532482444\n", 327 | "[20] train loss: 9.700941500985953\n", 328 | "[30] train loss: 9.023612069639709\n", 329 | "2.505495393489851\n" 330 | ] 331 | } 332 | ], 333 | "source": [ 334 | "np.random.seed(1)\n", 335 | "\n", 336 | "X, y = datasets.load_boston(return_X_y=True)\n", 337 | "X = preprocessing.scale(X)\n", 338 | "\n", 339 | "# Split into train and test\n", 340 | "X_train, X_test, y_train, y_test = model_selection.train_test_split(\n", 341 | " X, y,\n", 342 | " test_size=.3,\n", 343 | " shuffle=True,\n", 344 | " random_state=42\n", 345 | ")\n", 346 | "\n", 347 | "nn = NN(\n", 348 | " dimensions=(13, 10, 1),\n", 349 | " activations=(ReLU, Identity),\n", 350 | " loss=MSE,\n", 351 | " optimizer=SGD(learning_rate=1e-3)\n", 352 | ")\n", 353 | "nn.fit(X_train, y_train, epochs=30, batch_size=8, print_every=10)\n", 354 | "\n", 355 | "y_pred = nn.predict(X_test)\n", 356 | "\n", 357 | "print(metrics.mean_absolute_error(y_test, y_pred))" 358 | ] 359 | }, 360 | { 361 | "cell_type": "markdown", 362 | "metadata": {}, 363 | "source": [ 364 | "Digits." 
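The next cell one-hot encodes the labels with `np.eye(10)[y]`: indexing the identity matrix with an array of class labels picks out one row per label, which is exactly the one-hot vector of that class. A tiny sketch with made-up labels:

```python
import numpy as np

y_toy = np.array([2, 0, 1])

# Each row of the identity matrix is the one-hot vector of the corresponding class
print(np.eye(3)[y_toy])
# [[0. 0. 1.]
#  [1. 0. 0.]
#  [0. 1. 0.]]
```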
365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": 47, 370 | "metadata": {}, 371 | "outputs": [ 372 | { 373 | "name": "stdout", 374 | "output_type": "stream", 375 | "text": [ 376 | "[10] train loss: 0.008308476136280957\n", 377 | "[20] train loss: 0.004984925198988307\n", 378 | "[30] train loss: 0.004102445263740696\n", 379 | "[40] train loss: 0.0029634369443098745\n", 380 | "[50] train loss: 0.0018708680417568045\n", 381 | " precision recall f1-score support\n", 382 | "\n", 383 | " 0 1.00 1.00 1.00 53\n", 384 | " 1 0.96 0.98 0.97 50\n", 385 | " 2 0.94 1.00 0.97 47\n", 386 | " 3 0.96 0.96 0.96 54\n", 387 | " 4 0.98 1.00 0.99 60\n", 388 | " 5 0.94 0.97 0.96 66\n", 389 | " 6 0.98 0.98 0.98 53\n", 390 | " 7 1.00 0.98 0.99 55\n", 391 | " 8 1.00 0.93 0.96 43\n", 392 | " 9 0.98 0.93 0.96 59\n", 393 | "\n", 394 | " accuracy 0.97 540\n", 395 | " macro avg 0.98 0.97 0.97 540\n", 396 | "weighted avg 0.97 0.97 0.97 540\n", 397 | "\n" 398 | ] 399 | } 400 | ], 401 | "source": [ 402 | "np.random.seed(1)\n", 403 | "\n", 404 | "X, y = datasets.load_digits(return_X_y=True)\n", 405 | "\n", 406 | "# One-hot encode y\n", 407 | "y = np.eye(10)[y]\n", 408 | "\n", 409 | "# Split into train and test\n", 410 | "X_train, X_test, y_train, y_test = model_selection.train_test_split(\n", 411 | " X, y,\n", 412 | " test_size=.3,\n", 413 | " shuffle=True,\n", 414 | " random_state=42\n", 415 | ")\n", 416 | "\n", 417 | "nn = NN(\n", 418 | " dimensions=(64, 15, 10),\n", 419 | " activations=(ReLU, Sigmoid),\n", 420 | " loss=MSE,\n", 421 | " optimizer=SGD(learning_rate=1e-3)\n", 422 | ")\n", 423 | "nn.fit(X_train, y_train, epochs=50, batch_size=16, print_every=10)\n", 424 | "\n", 425 | "y_pred = nn.predict(X_test)\n", 426 | "\n", 427 | "print(metrics.classification_report(y_test.argmax(1), y_pred.argmax(1)))" 428 | ] 429 | } 430 | ], 431 | "metadata": { 432 | "kernelspec": { 433 | "display_name": "Python 3", 434 | "language": "python", 435 | "name": "python3" 436 | }, 437 | "language_info": { 438 | "codemirror_mode": { 439 | "name": "ipython", 440 | "version": 3 441 | }, 442 | "file_extension": ".py", 443 | "mimetype": "text/x-python", 444 | "name": "python", 445 | "nbconvert_exporter": "python", 446 | "pygments_lexer": "ipython3", 447 | "version": "3.7.4" 448 | } 449 | }, 450 | "nbformat": 4, 451 | "nbformat_minor": 2 452 | } 453 | -------------------------------------------------------------------------------- /deep-learning/brad-pitt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/brad-pitt.jpg -------------------------------------------------------------------------------- /deep-learning/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/cat.jpg -------------------------------------------------------------------------------- /deep-learning/charseq.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/charseq.jpeg -------------------------------------------------------------------------------- /deep-learning/complexity.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/complexity.png -------------------------------------------------------------------------------- /deep-learning/cross-val.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/cross-val.png -------------------------------------------------------------------------------- /deep-learning/duck_rabbit.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/duck_rabbit.jpg -------------------------------------------------------------------------------- /deep-learning/learning-rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/learning-rate.png -------------------------------------------------------------------------------- /deep-learning/mini-batch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/mini-batch.png -------------------------------------------------------------------------------- /deep-learning/momentum.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/momentum.gif -------------------------------------------------------------------------------- /deep-learning/noisette-loo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/noisette-loo.jpg -------------------------------------------------------------------------------- /deep-learning/skip-gram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/skip-gram.png -------------------------------------------------------------------------------- /deep-learning/stuff.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Yet another practical introduction to deep learning\n", 8 | "\n", 9 | "## Stuff nobody teaches you\n", 10 | "\n", 11 | "### Max Halford\n", 12 | "\n", 13 | "#### Toulouse School of Economics Master's degree" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "## Handling streaming data" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## Putting a model into production" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "## Squeezing all the juice: stacking and blending" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "## The hashing trick for handling text" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | 
"metadata": {}, 47 | "source": [ 48 | "## Monitoring a model" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "## Online clouds and GPUs" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "## How to keep in touch with the latest trends" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "## Being a good programmer" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [] 78 | } 79 | ], 80 | "metadata": { 81 | "kernelspec": { 82 | "display_name": "Python 3", 83 | "language": "python", 84 | "name": "python3" 85 | }, 86 | "language_info": { 87 | "codemirror_mode": { 88 | "name": "ipython", 89 | "version": 3 90 | }, 91 | "file_extension": ".py", 92 | "mimetype": "text/x-python", 93 | "name": "python", 94 | "nbconvert_exporter": "python", 95 | "pygments_lexer": "ipython3", 96 | "version": "3.7.4" 97 | } 98 | }, 99 | "nbformat": 4, 100 | "nbformat_minor": 2 101 | } 102 | -------------------------------------------------------------------------------- /deep-learning/word2vec.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MaxHalford/data-science-tutorials/73a731845de39b89b8852ba2dcc38b67c8f1e1e1/deep-learning/word2vec.png -------------------------------------------------------------------------------- /introduction-to-python/my_package/__init__.py: -------------------------------------------------------------------------------- 1 | from . import my_sequences 2 | -------------------------------------------------------------------------------- /introduction-to-python/my_package/my_sequences.py: -------------------------------------------------------------------------------- 1 | # Fibonacci sequence 2 | def fibonacci(n): 3 | s = [0] 4 | a, b = 0, 1 5 | while len(s) < n: 6 | s.append(b) 7 | a, b = b, a + b 8 | return s 9 | 10 | 11 | # Syracuse sequence 12 | def syracuse(n): 13 | s = [] 14 | u = n 15 | while u != 1: 16 | s.append(u) 17 | u = u // 2 if u % 2 == 0 else 3 * u + 1 18 | s.append(1) 19 | return s 20 | -------------------------------------------------------------------------------- /introduction-to-python/my_sequences.py: -------------------------------------------------------------------------------- 1 | # Fibonacci sequence 2 | def fibonacci(n): 3 | s = [0] 4 | a, b = 0, 1 5 | while len(s) < n: 6 | s.append(b) 7 | a, b = b, a + b 8 | return s 9 | 10 | 11 | # Syracuse sequence 12 | def syracuse(n): 13 | s = [] 14 | u = n 15 | while u != 1: 16 | s.append(u) 17 | u = u // 2 if u % 2 == 0 else 3 * u + 1 18 | s.append(1) 19 | return s 20 | -------------------------------------------------------------------------------- /project-assignments.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "Assigning projects to student groups. We'll do with the Hungarian algorithm, which minimizes the total cost of the assignment. The cost matrix is the rankings of student preferences for projects. 
The Hungarian algorithm is implemented in scipy.optimize.linear_sum_assignment.\n", 8 | "\n", 9 | "https://docs.google.com/spreadsheets/d/10FtHxUDOMJFE8EtAe-BRW8Qdei34KObC7-gaDR7sEOw/edit#gid=0" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 11, 15 | "metadata": {}, 16 | "outputs": [ 17 | { 18 | "data": { 19 | "text/html": [ 20 | "
\n", 21 | "\n", 34 | "\n", 35 | " \n", 36 | " \n", 37 | " \n", 38 | " \n", 39 | " \n", 40 | " \n", 41 | " \n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | " \n", 57 | " \n", 58 | " \n", 59 | " \n", 60 | " \n", 61 | " \n", 62 | " \n", 63 | " \n", 64 | " \n", 65 | " \n", 66 | " \n", 67 | " \n", 68 | " \n", 69 | " \n", 70 | " \n", 71 | " \n", 72 | " \n", 73 | " \n", 74 | " \n", 75 | " \n", 76 | " \n", 77 | " \n", 78 | " \n", 79 | " \n", 80 | " \n", 81 | " \n", 82 | " \n", 83 | " \n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | " \n", 119 | " \n", 120 | " \n", 121 | " \n", 122 | " \n", 123 | " \n", 124 | " \n", 125 | " \n", 126 | " \n", 127 | " \n", 128 | " \n", 129 | " \n", 130 | " \n", 131 | " \n", 132 | " \n", 133 | " \n", 134 | " \n", 135 | " \n", 136 | " \n", 137 | " \n", 138 | " \n", 139 | " \n", 140 | " \n", 141 | " \n", 142 | " \n", 143 | " \n", 144 | " \n", 145 | " \n", 146 | " \n", 147 | " \n", 148 | " \n", 149 | " \n", 150 | " \n", 151 | " \n", 152 | " \n", 153 | " \n", 154 | " \n", 155 | " \n", 156 | " \n", 157 | " \n", 158 | " \n", 159 | " \n", 160 | " \n", 161 | " \n", 162 | " \n", 163 | " \n", 164 | " \n", 165 | " \n", 166 | " \n", 167 | " \n", 168 | " \n", 169 | " \n", 170 | " \n", 171 | " \n", 172 | " \n", 173 | " \n", 174 | " \n", 175 | " \n", 176 | " \n", 177 | " \n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | "
Wind turbines loobackWind turbines look aheadBike sharing tripsBike sharing forecastDPE surroguateDPE x AirbnbMenu footprintMenu vegetarianStreetname gendersCloud footprint
Group 1 Léa/Laure/Capucine86914235710
Group 2 Cindy Raphaël Eva21985743106
Group 3 Jingwen/Jiabao/Qianyao45127698310
Group 4 Ben / Théo B96127845103
Group 5 Rodrigue/Nikola83619472105
group 6 Shen/Mian/kolani21645891037
Group 7 Anna Maria Matthieu21534891067
Group 8 Basile Theo45617931028
Group 9 Anatole/Nam/Yaqi95108431726
Group 10 Alexis/Jade/Camille91324765108
\n", 183 | "
" 184 | ], 185 | "text/plain": [ 186 | " Wind turbines looback \\\n", 187 | "Group 1 Léa/Laure/Capucine 8 \n", 188 | "Group 2 Cindy Raphaël Eva 2 \n", 189 | "Group 3 Jingwen/Jiabao/Qianyao 4 \n", 190 | "Group 4 Ben / Théo B 9 \n", 191 | "Group 5 Rodrigue/Nikola 8 \n", 192 | "group 6 Shen/Mian/kolani 2 \n", 193 | "Group 7 Anna Maria Matthieu 2 \n", 194 | "Group 8 Basile Theo 4 \n", 195 | "Group 9 Anatole/Nam/Yaqi 9 \n", 196 | "Group 10 Alexis/Jade/Camille 9 \n", 197 | "\n", 198 | " Wind turbines look ahead Bike sharing trips \\\n", 199 | "Group 1 Léa/Laure/Capucine 6 9 \n", 200 | "Group 2 Cindy Raphaël Eva 1 9 \n", 201 | "Group 3 Jingwen/Jiabao/Qianyao 5 1 \n", 202 | "Group 4 Ben / Théo B 6 1 \n", 203 | "Group 5 Rodrigue/Nikola 3 6 \n", 204 | "group 6 Shen/Mian/kolani 1 6 \n", 205 | "Group 7 Anna Maria Matthieu 1 5 \n", 206 | "Group 8 Basile Theo 5 6 \n", 207 | "Group 9 Anatole/Nam/Yaqi 5 10 \n", 208 | "Group 10 Alexis/Jade/Camille 1 3 \n", 209 | "\n", 210 | " Bike sharing forecast DPE surroguate \\\n", 211 | "Group 1 Léa/Laure/Capucine 1 4 \n", 212 | "Group 2 Cindy Raphaël Eva 8 5 \n", 213 | "Group 3 Jingwen/Jiabao/Qianyao 2 7 \n", 214 | "Group 4 Ben / Théo B 2 7 \n", 215 | "Group 5 Rodrigue/Nikola 1 9 \n", 216 | "group 6 Shen/Mian/kolani 4 5 \n", 217 | "Group 7 Anna Maria Matthieu 3 4 \n", 218 | "Group 8 Basile Theo 1 7 \n", 219 | "Group 9 Anatole/Nam/Yaqi 8 4 \n", 220 | "Group 10 Alexis/Jade/Camille 2 4 \n", 221 | "\n", 222 | " DPE x Airbnb Menu footprint Menu vegetarian \\\n", 223 | "Group 1 Léa/Laure/Capucine 2 3 5 \n", 224 | "Group 2 Cindy Raphaël Eva 7 4 3 \n", 225 | "Group 3 Jingwen/Jiabao/Qianyao 6 9 8 \n", 226 | "Group 4 Ben / Théo B 8 4 5 \n", 227 | "Group 5 Rodrigue/Nikola 4 7 2 \n", 228 | "group 6 Shen/Mian/kolani 8 9 10 \n", 229 | "Group 7 Anna Maria Matthieu 8 9 10 \n", 230 | "Group 8 Basile Theo 9 3 10 \n", 231 | "Group 9 Anatole/Nam/Yaqi 3 1 7 \n", 232 | "Group 10 Alexis/Jade/Camille 7 6 5 \n", 233 | "\n", 234 | " Streetname genders Cloud footprint \n", 235 | "Group 1 Léa/Laure/Capucine 7 10 \n", 236 | "Group 2 Cindy Raphaël Eva 10 6 \n", 237 | "Group 3 Jingwen/Jiabao/Qianyao 3 10 \n", 238 | "Group 4 Ben / Théo B 10 3 \n", 239 | "Group 5 Rodrigue/Nikola 10 5 \n", 240 | "group 6 Shen/Mian/kolani 3 7 \n", 241 | "Group 7 Anna Maria Matthieu 6 7 \n", 242 | "Group 8 Basile Theo 2 8 \n", 243 | "Group 9 Anatole/Nam/Yaqi 2 6 \n", 244 | "Group 10 Alexis/Jade/Camille 10 8 " 245 | ] 246 | }, 247 | "execution_count": 11, 248 | "metadata": {}, 249 | "output_type": "execute_result" 250 | } 251 | ], 252 | "source": [ 253 | "import pandas as pd\n", 254 | "\n", 255 | "choices = pd.read_clipboard()\n", 256 | "choices = choices.fillna(10)\n", 257 | "choices\n" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 12, 263 | "metadata": {}, 264 | "outputs": [ 265 | { 266 | "name": "stdout", 267 | "output_type": "stream", 268 | "text": [ 269 | "Assignments:\n", 270 | "Group 1 Léa/Laure/Capucine -> DPE x Airbnb (2)\n", 271 | "Group 2 Cindy Raphaël Eva -> Wind turbines look ahead (1)\n", 272 | "Group 3 Jingwen/Jiabao/Qianyao -> Bike sharing trips (1)\n", 273 | "Group 4 Ben / Théo B -> Cloud footprint (3)\n", 274 | "Group 5 Rodrigue/Nikola -> Menu vegetarian (2)\n", 275 | "group 6 Shen/Mian/kolani -> Streetname genders (3)\n", 276 | "Group 7 Anna Maria Matthieu -> Wind turbines looback (2)\n", 277 | "Group 8 Basile Theo -> Bike sharing forecast (1)\n", 278 | "Group 9 Anatole/Nam/Yaqi -> Menu footprint (1)\n", 279 | "Group 10 Alexis/Jade/Camille -> DPE surroguate (4)\n" 280 | 
] 281 | } 282 | ], 283 | "source": [ 284 | "import numpy as np\n", 285 | "from scipy.optimize import linear_sum_assignment\n", 286 | "\n", 287 | "# Apply the Hungarian Algorithm\n", 288 | "row_indices, col_indices = linear_sum_assignment(choices)\n", 289 | "\n", 290 | "# The row_indices represent the assigned students, and col_indices represent the assigned projects\n", 291 | "assignments = [(row, col) for row, col in zip(row_indices, col_indices)]\n", 292 | "\n", 293 | "print(\"Assignments:\")\n", 294 | "for i, j in assignments:\n", 295 | " group = choices.index[i]\n", 296 | " project = choices.columns[j]\n", 297 | " print(f\"{group} -> {project} ({choices.loc[group, project]})\")\n" 298 | ] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "execution_count": 13, 303 | "metadata": {}, 304 | "outputs": [ 305 | { 306 | "name": "stdout", 307 | "output_type": "stream", 308 | "text": [ 309 | "Score: 20\n", 310 | "Best: 10\n", 311 | "Worst: 100\n" 312 | ] 313 | } 314 | ], 315 | "source": [ 316 | "print(\n", 317 | " f\"Score: {sum([choices.iloc[i, j] for i, j in assignments])}\\n\"\n", 318 | " f\"Best: {1 * choices.shape[0]}\\n\"\n", 319 | " f\"Worst: {10 * choices.shape[0]}\"\n", 320 | ")\n" 321 | ] 322 | } 323 | ], 324 | "metadata": { 325 | "kernelspec": { 326 | "display_name": "Python 3", 327 | "language": "python", 328 | "name": "python3" 329 | }, 330 | "language_info": { 331 | "codemirror_mode": { 332 | "name": "ipython", 333 | "version": 3 334 | }, 335 | "file_extension": ".py", 336 | "mimetype": "text/x-python", 337 | "name": "python", 338 | "nbconvert_exporter": "python", 339 | "pygments_lexer": "ipython3", 340 | "version": "3.11.0" 341 | } 342 | }, 343 | "nbformat": 4, 344 | "nbformat_minor": 2 345 | } 346 | --------------------------------------------------------------------------------
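The notebook above reads the rankings from the clipboard, so it isn't reproducible as-is. Here is a self-contained sketch of the same idea on a made-up 3×3 cost matrix, using the same `scipy.optimize.linear_sum_assignment` function (the Hungarian algorithm):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Made-up rankings: one row per group, one column per project, 1 = favourite
cost = np.array([
    [1, 2, 3],
    [2, 1, 3],
    [1, 3, 2],
])

rows, cols = linear_sum_assignment(cost)
for group, project in zip(rows, cols):
    print(f'group {group} -> project {project} (rank {cost[group, project]})')

# The chosen assignment minimises cost[rows, cols].sum() over all one-to-one assignments
```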