├── .gitignore
├── Abhishek Thakur ML Book Notes
│   └── Abhishek_Thakur_ML_Book_Chapter_1.ipynb
├── Introduction to Neural Networks
│   ├── Introduction to Neural Networks.pptx
│   ├── img
│   │   ├── 01 graph.png
│   │   ├── 02 graph line .png
│   │   ├── 03 graph line x1 x2.png
│   │   ├── 04 graph equations.png
│   │   ├── 05 graph general eq.png
│   │   ├── 06 multi-dim.png
│   │   ├── 07 multi-dim equations.png
│   │   ├── 08 hyper-dim equations.png
│   │   ├── 09 features and targets .png
│   │   ├── 10 perceptron basic .png
│   │   ├── 11 perceptron generic.png
│   │   ├── 12 perceptron generic mathematical.png
│   │   ├── 13 perceptron generic mathematical abstract.png
│   │   ├── 14 misclassified point.png
│   │   ├── 15 perceptron algo 01 .png
│   │   ├── 16 perceptron algo 02.png
│   │   ├── 17 perceptron algo 03.png
│   │   ├── 18 perceptron algo 04.png
│   │   ├── 19 NonLinear Boundary .png
│   │   ├── 20 NonLinear Combination.png
│   │   ├── 21 NonLinear Combination 2.png
│   │   ├── 22 NonLinear Combination 3.png
│   │   ├── 23 NonLinear Combination 4.png
│   │   ├── 24 NonLinear Combination 5.png
│   │   ├── 25 NonLinear Combination 6.png
│   │   ├── 26 NonLinear Combination 7 - DNN.png
│   │   ├── 27 piko.png
│   │   ├── 28 robin.png
│   │   ├── 29 goku.png
│   │   ├── 30 trollface.png
│   │   ├── 31 simpson.png
│   │   ├── 32 DNN.png
│   │   ├── 33 NN black box.png
│   │   ├── 34 non-linear input output.png
│   │   ├── 35 classification .png
│   │   ├── 36 regression.png
│   │   ├── 37 best weights .png
│   │   ├── 38 a generic DNN.png
│   │   ├── 39 weight matrix.png
│   │   ├── 40 simple feedforward.png
│   │   ├── 41 neuron internals.png
│   │   ├── 42 simple ff1.png
│   │   ├── 43 simple ff2.png
│   │   ├── 44 simple ff3.png
│   │   ├── 45 simple ff4.png
│   │   ├── 46 ff calc 1.png
│   │   ├── 47 ff calc 2.png
│   │   ├── 48 ff calc 3.png
│   │   ├── 49 ff calc 4.png
│   │   ├── 50 ff calc 5.png
│   │   ├── 51 ff calc 6.png
│   │   ├── 52 ff calc 7.png
│   │   ├── 53 ff calc 8.png
│   │   ├── 54 ff calc 9.png
│   │   ├── 55 ff calc 10.png
│   │   ├── 56 weight matrix.png
│   │   ├── 57 err 1.png
│   │   ├── 58 err 2.png
│   │   ├── 59 is this skynet.png
│   │   ├── 60 skynet.png
│   │   ├── 61 newton.png
│   │   ├── 62 GD 1.png
│   │   ├── 63 GD 2.png
│   │   ├── 64 GD 3.png
│   │   ├── 65 GD 4.png
│   │   ├── 66 GD 5.png
│   │   ├── 67 GD 6.png
│   │   ├── 68 GD 7.png
│   │   ├── 69 BP 1.png
│   │   ├── 70 BP 2.png
│   │   ├── 71 BP 3.png
│   │   ├── 72 BP 4.png
│   │   ├── 73 GD vs BP.png
│   │   ├── repo1.png
│   │   └── repo2.png
│   └── img2
│       ├── 01 nn.png
│       ├── 02 error.png
│       ├── 03 sse.png
│       ├── 04 sse 2.png
│       ├── 05 features labels.png
│       ├── 06 error=f(weights).png
│       ├── 07 GD.png
│       ├── 08 weights update .png
│       ├── 09 de_by_dw.png
│       ├── 10 chain rule .png
│       ├── 11 chain rule 2.png
│       ├── 12 error term .png
│       ├── 13 multiple outputs 1.png
│       ├── 14 multiple outputs 2.png
│       ├── 15 GD Algo.png
│       ├── 16 mlp.png
│       ├── 17 mlp structure.png
│       ├── 18 hidden layer activation .png
│       ├── 19 hidden layer activation .png
│       ├── 20 mlp output.png
│       ├── 21 mlp weight matrix .png
│       ├── 22 matrix multiplication .png
│       ├── 23 BP 01.png
│       ├── 24 BP 02.png
│       ├── 25 BP 03.png
│       ├── 26 BP 04.png
│       ├── 27 BP 05.png
│       ├── 28 GD 01 .png
│       ├── 29 GD 02.png
│       ├── 29 GD 03.png
│       └── 30 GD 04.png
├── Kevin's Data Science Class Notes
│   ├── Kevin's_Q&A_Session_Notes_1.ipynb
│   ├── Kevin's_Q&A_Session_Notes_2.ipynb
│   ├── Kevin_Class_1_and_2_Code_with_Notes.ipynb
│   ├── README.md
│   └── img
│       └── class_description.png
├── PyTorch Image Classification in 2020
│   ├── Image_Classification_master.ipynb
│   ├── Image_Classification_practice.ipynb
│   ├── README.md
│   └── img
│       ├── 01_torchvision_mnist.png
│       ├── 02_dataloader.png
│       ├── 03_dataloader_analogy.jpg
│       ├── 04_matplotlib.png
│       ├── 05_conv2d.png
│       ├── 06_maxpool2d.png
│       ├── 07_dropout.png
│       ├── 08_linear .png
│       ├── 09_batch_visualization.png
│       ├── 10_poutyne.png
│       ├── 11_fitting examples.png
│       ├── 12_overfitting_matrix.png
│       ├── 13_overfitting_validation.png
│       ├── 14_overfitting_model.png
│       ├── 15_early_stopping .png
│       ├── 16_early_stopping .png
│       ├── 17_1_vgg_architecture.png
│       ├── 17_cnn_architecture.jpeg
│       ├── 18_mnist_cnn.jpeg
│       ├── 19_3channel_image.png
│       ├── 20_single_channel_convolution.gif
│       ├── 21_3channel_convolution.gif
│       ├── 22_3channel_convolution_summation.gif
│       ├── 23_cnn_stride_1.gif
│       ├── 24_cnn_stride_2.gif
│       ├── 25_padding_1.gif
│       ├── 26_max_pooling.gif
│       ├── 27_pooling_types.png
│       ├── 28_fc_layer.jpeg
│       ├── 28_tensors.png
│       └── 29_forward_backward.png
├── README.md
├── Transformers
│   ├── doc
│   │   ├── Rough Sheet - Keys Values Queries.docx
│   │   ├── Rough Sheet - Self Attention.docx
│   │   └── ~$ugh Sheet - Self Attention.docx
│   └── img
│       └── rasa
│           ├── 01 Attention for Time Series.png
│           ├── 02 Basic Text Reweighing Idea.png
│           ├── 03 Word Embeddings King Queen.png
│           ├── 04 Self Attention 01 .png
│           ├── 04 Self Attention 02.png
│           ├── 04 Self Attention 03.png
│           ├── 05 Basic Self Attention.png
│           ├── 06 Calculating Scores .png
│           ├── 07 Calculating Weights.png
│           ├── 08 Calculating All Outputs .png
│           ├── 08 Calculating Outputs .png
│           ├── 09 Database Analogy.png
│           ├── 10 Matrices for Keys Queries Values .png
│           └── 11 Neural Self Attention Block .png
└── Udacity Deep Learning Nano Degree Notes
    └── Udacity-Deep-Learning-Notes.pdf

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

# PPT Files
# *.ppt
# *.pptx
*temp/
--------------------------------------------------------------------------------
[Binary assets — "Introduction to Neural Networks/Introduction to Neural Networks.pptx" and the images under "Introduction to Neural Networks/img" and "Introduction to Neural Networks/img2" listed in the tree above — are stored in the repository at URLs of the form https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/<file path>, as in the entry below.]
--------------------------------------------------------------------------------
/Introduction to Neural Networks/img2/30 GD 04.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Introduction to Neural Networks/img2/30 GD 04.png -------------------------------------------------------------------------------- /Kevin's Data Science Class Notes/Kevin's_Q&A_Session_Notes_1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Kevin's Q&A Session Notes 1.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | } 13 | }, 14 | "cells": [ 15 | { 16 | "cell_type": "code", 17 | "metadata": { 18 | "id": "DhDnME-iUs3j", 19 | "colab_type": "code", 20 | "colab": {} 21 | }, 22 | "source": [ 23 | "df['Survived'] # Single Frame - Series 1D \n", 24 | "df[['Survived']] # Double Frame - Pandas DF 2D \n", 25 | "\n", 26 | "df.to_numpy() # Officially the best way to convert DF to numpy" 27 | ], 28 | "execution_count": 0, 29 | "outputs": [] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": { 34 | "id": "wOFbihZwVjf8", 35 | "colab_type": "text" 36 | }, 37 | "source": [ 38 | "In a multi label problem requires a 2D array - A DataFrame. \n", 39 | "We are dealing with a single label problem here. - Hence a 1D Series. " 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "metadata": { 45 | "id": "fTWoYKoiViao", 46 | "colab_type": "code", 47 | "colab": {} 48 | }, 49 | "source": [ 50 | "# sklearn has 5 different solvers \n", 51 | "\n", 52 | "# convergence warning is often related to solver - simply change the solver \n", 53 | "\n", 54 | "# Any time a pseudo-random process in the code - set the random state for \n", 55 | "# reproducibility " 56 | ], 57 | "execution_count": 0, 58 | "outputs": [] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "metadata": { 63 | "id": "fgO9VoF5Wt7p", 64 | "colab_type": "code", 65 | "colab": {} 66 | }, 67 | "source": [ 68 | "# LrCV a variation of Lr Class \n", 69 | "# LrCV doesnt integrate well with the rest of sklearn \n", 70 | "# Lr is super flexible. " 71 | ], 72 | "execution_count": 0, 73 | "outputs": [] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "metadata": { 78 | "id": "AzJEk0G8WxEV", 79 | "colab_type": "code", 80 | "colab": {} 81 | }, 82 | "source": [ 83 | "# CV Goal - estimate future model performance \n", 84 | "# You should fit model to the entire Dataset after tuning the hyperparameters " 85 | ], 86 | "execution_count": 0, 87 | "outputs": [] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "metadata": { 92 | "id": "-A0Y5P4lYH9G", 93 | "colab_type": "code", 94 | "colab": {} 95 | }, 96 | "source": [ 97 | "# Find the evauation metric that matches your 'PRIORITIES' \n", 98 | "# Spam Filter - Precision \n", 99 | "# Credit Card Fraud - Recall " 100 | ], 101 | "execution_count": 0, 102 | "outputs": [] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "metadata": { 107 | "id": "rW4-p3DYZYaL", 108 | "colab_type": "code", 109 | "colab": {} 110 | }, 111 | "source": [ 112 | "# If you train a model with X features, you can ONLY test the model with X features. \n", 113 | "# When handle_unknown='ignore', All unknown categories are represented as zeroes.\n", 114 | "# Typically leave handle_unknown to deafault, ie Error. \n", 115 | "# Either you remove that feature, or find training data that contains that feature. 
" 116 | ], 117 | "execution_count": 0, 118 | "outputs": [] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "metadata": { 123 | "id": "fIfAFcDCbsbt", 124 | "colab_type": "code", 125 | "colab": {} 126 | }, 127 | "source": [ 128 | "# Oridnal Data - Natural Logical Ordering\n", 129 | "# - if numerical - just leave it as is\n", 130 | "# - if string\n", 131 | "# - import ordinal encoder and tell the logical \n", 132 | "# progression of the categories\n", 133 | "# - turns the cateogries into numbers, can be added in col_transformer \n", 134 | "# What if we get new oridnal categories? Kevin doesn't know yet. \n", 135 | "# Nominal Data - Unordered - use OHE " 136 | ], 137 | "execution_count": 0, 138 | "outputs": [] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "metadata": { 143 | "id": "IIQIh_JXefQM", 144 | "colab_type": "code", 145 | "colab": {} 146 | }, 147 | "source": [ 148 | "# OHE - unordered categorcial data \n", 149 | "# OE - ordered categorical data\n", 150 | "# LE - Similar to OE, except: \n", 151 | "# 1. LE is for labels - Targets! \n", 152 | "# 2. LE is for alphabetical only \n", 153 | "# Not much use of LE in the new sklearn " 154 | ], 155 | "execution_count": 0, 156 | "outputs": [] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "metadata": { 161 | "id": "DqHbGpw6fQgn", 162 | "colab_type": "code", 163 | "colab": {} 164 | }, 165 | "source": [ 166 | "# in col_transformer, 'remainder' has 3 options \n", 167 | "# 1 - passthrough \n", 168 | "# 2 - drop \n", 169 | "# 3 - col_tran object - transform every unknown col " 170 | ], 171 | "execution_count": 0, 172 | "outputs": [] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "metadata": { 177 | "id": "iBnOXKlhfzq0", 178 | "colab_type": "code", 179 | "colab": {} 180 | }, 181 | "source": [ 182 | "# ct.get_feature_names() - gets the name of all the transformed cols \n", 183 | "# - doesnt work with remainder='passthrough' \n", 184 | "# - 0.22.3 will fix this (coming out soon)" 185 | ], 186 | "execution_count": 0, 187 | "outputs": [] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "metadata": { 192 | "id": "pTbZsFtcgjLs", 193 | "colab_type": "code", 194 | "colab": {} 195 | }, 196 | "source": [ 197 | "# col_transform - ways to specify cols \n", 198 | "# specify cols by position \n", 199 | "# specify by slicing \n", 200 | "# make_column_selector - use regex to select cols " 201 | ], 202 | "execution_count": 0, 203 | "outputs": [] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "metadata": { 208 | "id": "CZ3K7xKVhMhJ", 209 | "colab_type": "code", 210 | "colab": {} 211 | }, 212 | "source": [ 213 | "# pipeline pipe.fit() modifies the underlying objects " 214 | ], 215 | "execution_count": 0, 216 | "outputs": [] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "metadata": { 221 | "id": "B99f0CEbhk63", 222 | "colab_type": "code", 223 | "colab": {} 224 | }, 225 | "source": [ 226 | "# make_pipeline - assigns the names on its own - lowercase class name\n", 227 | "# - results in more readable code \n", 228 | "# Piepline - forces you to name the steps \n", 229 | "# - custom names are good for clarity when using grid_search()" 230 | ], 231 | "execution_count": 0, 232 | "outputs": [] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "metadata": { 237 | "id": "ibyg1fI7i-jG", 238 | "colab_type": "code", 239 | "colab": {} 240 | }, 241 | "source": [ 242 | "# sklearn has 5 documentation pages \n", 243 | "# 1 - API Reference - find something \n", 244 | "# 2 - Class Documentation \n", 245 | "# 3 - User Guide - Context and advice \n", 246 | "# 4 - Examples - 
When 2,3 dont have complex enough examples \n", 247 | "# 5 - Glossary - When there is a term that I don't understand" 248 | ], 249 | "execution_count": 0, 250 | "outputs": [] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "metadata": { 255 | "id": "9SFYXrBSkd9f", 256 | "colab_type": "code", 257 | "colab": {} 258 | }, 259 | "source": [ 260 | "# Dont usually create feature interactions \n", 261 | "# Doesnt scale really well \n", 262 | "# Tree based model can learn them anyway " 263 | ], 264 | "execution_count": 0, 265 | "outputs": [] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "metadata": { 270 | "id": "I-35EBpHlGK2", 271 | "colab_type": "code", 272 | "colab": {} 273 | }, 274 | "source": [ 275 | "# OHE - 2D input - most sklearn preprocessors require 2D data\n", 276 | "# CVect - 1D input - not built to transform multiple docs (multiple cols)at once \n", 277 | "# - pass multiple tuples for multiple docs/cols" 278 | ], 279 | "execution_count": 0, 280 | "outputs": [] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": { 285 | "id": "VFMJIosKhG5P", 286 | "colab_type": "text" 287 | }, 288 | "source": [ 289 | "Happy to connect \n", 290 | "\n", 291 | "https://www.linkedin.com/in/pranjall/" 292 | ] 293 | } 294 | ] 295 | } -------------------------------------------------------------------------------- /Kevin's Data Science Class Notes/Kevin's_Q&A_Session_Notes_2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Kevin's_Q&A_Session_Notes_2.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [], 9 | "authorship_tag": "ABX9TyMtXiNl0+vGM/yf/DplIeZ2", 10 | "include_colab_link": true 11 | }, 12 | "kernelspec": { 13 | "name": "python3", 14 | "display_name": "Python 3" 15 | } 16 | }, 17 | "cells": [ 18 | { 19 | "cell_type": "markdown", 20 | "metadata": { 21 | "id": "view-in-github", 22 | "colab_type": "text" 23 | }, 24 | "source": [ 25 | "\"Open" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "metadata": { 31 | "id": "d3hiIDhbDvmb", 32 | "colab_type": "code", 33 | "colab": {} 34 | }, 35 | "source": [ 36 | "import pandas as pd \n", 37 | "from sklearn.impute import SimpleImputer\n", 38 | "from sklearn.preprocessing import OneHotEncoder \n", 39 | "from sklearn.feature_extraction.text import CountVectorizer \n", 40 | "from sklearn.linear_model import LogisticRegression\n", 41 | "from sklearn.compose import make_column_transformer\n", 42 | "from sklearn.pipeline import make_pipeline " 43 | ], 44 | "execution_count": 0, 45 | "outputs": [] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "metadata": { 50 | "id": "Cckw3TYXS-5V", 51 | "colab_type": "code", 52 | "colab": {} 53 | }, 54 | "source": [ 55 | "cols = ['Parch', 'Fare', 'Embarked', 'Sex', 'Name', 'Age']" 56 | ], 57 | "execution_count": 0, 58 | "outputs": [] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "metadata": { 63 | "id": "DRIPyMoATO3P", 64 | "colab_type": "code", 65 | "colab": {} 66 | }, 67 | "source": [ 68 | "df = pd.read_csv('http://bit.ly/kaggletrain')\n", 69 | "X = df[cols]\n", 70 | "y = df['Survived']" 71 | ], 72 | "execution_count": 0, 73 | "outputs": [] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "metadata": { 78 | "id": "P2JVVb0ATZpG", 79 | "colab_type": "code", 80 | "colab": {} 81 | }, 82 | "source": [ 83 | "df_new = pd.read_csv('http://bit.ly/kaggletest')\n", 84 | "X_new = df_new[cols]" 85 | ], 86 | "execution_count": 0, 87 | "outputs": [] 88 | }, 89 | { 90 | "cell_type": "code", 
91 | "metadata": { 92 | "id": "8H4oYDVtTj16", 93 | "colab_type": "code", 94 | "colab": {} 95 | }, 96 | "source": [ 97 | "imp_constant = SimpleImputer(strategy='constant', fill_value='missing')\n", 98 | "ohe = OneHotEncoder()" 99 | ], 100 | "execution_count": 0, 101 | "outputs": [] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "metadata": { 106 | "id": "cAKhXCBuT2az", 107 | "colab_type": "code", 108 | "colab": {} 109 | }, 110 | "source": [ 111 | "imp_ohe = make_pipeline(imp_constant, ohe)\n", 112 | "vect = CountVectorizer()\n", 113 | "imp = SimpleImputer()" 114 | ], 115 | "execution_count": 0, 116 | "outputs": [] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "metadata": { 121 | "id": "_OzQTLi-UBed", 122 | "colab_type": "code", 123 | "colab": {} 124 | }, 125 | "source": [ 126 | "ct = make_column_transformer(\n", 127 | " (imp_ohe, ['Embarked', 'Sex']),\n", 128 | " (vect, 'Name'),\n", 129 | " (imp, ['Age', 'Fare']),\n", 130 | " remainder='passthrough'\n", 131 | ")" 132 | ], 133 | "execution_count": 0, 134 | "outputs": [] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "metadata": { 139 | "id": "2IWrRhLAUUxF", 140 | "colab_type": "code", 141 | "colab": {} 142 | }, 143 | "source": [ 144 | "logreg = LogisticRegression(solver='liblinear', random_state=1)" 145 | ], 146 | "execution_count": 0, 147 | "outputs": [] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "metadata": { 152 | "id": "sxVyLIP1ZL03", 153 | "colab_type": "code", 154 | "colab": { 155 | "base_uri": "https://localhost:8080/", 156 | "height": 372 157 | }, 158 | "outputId": "f5fd23d9-4669-48d7-e573-32d247797a25" 159 | }, 160 | "source": [ 161 | "pipe = make_pipeline(ct, logreg)\n", 162 | "pipe.fit(X, y)\n", 163 | "pipe.predict(X_new)" 164 | ], 165 | "execution_count": 23, 166 | "outputs": [ 167 | { 168 | "output_type": "execute_result", 169 | "data": { 170 | "text/plain": [ 171 | "array([0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n", 172 | " 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n", 173 | " 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n", 174 | " 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1,\n", 175 | " 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0,\n", 176 | " 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0,\n", 177 | " 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0,\n", 178 | " 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,\n", 179 | " 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1,\n", 180 | " 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0,\n", 181 | " 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n", 182 | " 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1,\n", 183 | " 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0,\n", 184 | " 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1,\n", 185 | " 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,\n", 186 | " 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0,\n", 187 | " 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0,\n", 188 | " 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1,\n", 189 | " 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1])" 190 | ] 191 | }, 192 | "metadata": { 193 | "tags": [] 194 | }, 195 | "execution_count": 23 196 | } 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "metadata": { 202 | "id": "HsoSGudXZVRE", 
203 | "colab_type": "code", 204 | "colab": {} 205 | }, 206 | "source": [ 207 | "# A column transformer operates in PARALLEL\n", 208 | "# A pipeline operates SEQUENTIALLY\n", 209 | "\n", 210 | "# Therefore, different steps in a column transformer \n", 211 | "# are executed IN PARALELL, and NOT SEQUENTIALLY. \n", 212 | "\n", 213 | "# Output columns from the various 'steps' of a \n", 214 | "# Column Transformer are STACKED SIDE BY SIDE! " 215 | ], 216 | "execution_count": 0, 217 | "outputs": [] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "metadata": { 222 | "id": "3Y6gT7P4cf4X", 223 | "colab_type": "code", 224 | "colab": {} 225 | }, 226 | "source": [ 227 | "ct_no_name = make_column_transformer(\n", 228 | " (imp_ohe, ['Embarked', 'Sex']),\n", 229 | " ('drop', 'Name'), # Neat little trick to drop a Column from the transformer\n", 230 | " # Great for quick experimentation \n", 231 | " (imp, ['Age', 'Fare']),\n", 232 | " remainder='passthrough'\n", 233 | ")" 234 | ], 235 | "execution_count": 0, 236 | "outputs": [] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "metadata": { 241 | "id": "KV1aBHX2hwr-", 242 | "colab_type": "code", 243 | "colab": {} 244 | }, 245 | "source": [ 246 | "# There is no way to know how accurate your model COULD GET on a given dataset. " 247 | ], 248 | "execution_count": 0, 249 | "outputs": [] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "metadata": { 254 | "id": "EdgLMYGKh32e", 255 | "colab_type": "code", 256 | "colab": {} 257 | }, 258 | "source": [ 259 | "# cross_val_score() under the hood: \n", 260 | "# for each fold in the dataset:\n", 261 | "# 1. a new 20% is set aside for testing, rest 80% for training --> SPLIT\n", 262 | "# 2. pipe.fit() runs of the 80% and learns --> TRANSFORM + FIT\n", 263 | "# 3. pipe.predict() runs on the 20% testing data --> PREDICTION\n", 264 | "# 4. Prediction Accuracy calculated on the 20% test data --> ACCURACY\n", 265 | "\n", 266 | "# For each SPLIT step, the imputation values are calculated separately for each fold. " 267 | ], 268 | "execution_count": 0, 269 | "outputs": [] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "metadata": { 274 | "id": "b03K5CRtkVfg", 275 | "colab_type": "code", 276 | "colab": {} 277 | }, 278 | "source": [ 279 | "# Pipelines save you from Data Leakage. " 280 | ], 281 | "execution_count": 0, 282 | "outputs": [] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "metadata": { 287 | "id": "wvUHzRz_lAAI", 288 | "colab_type": "code", 289 | "colab": {} 290 | }, 291 | "source": [ 292 | "# Stratified - Sampling in such a way so that 'class frequencies' are the same \n", 293 | "# in EACH SPLIT. \n", 294 | "\n", 295 | "# By default, StratifiedKFold DOES NOT shuffle rows. " 296 | ], 297 | "execution_count": 0, 298 | "outputs": [] 299 | }, 300 | { 301 | "cell_type": "code", 302 | "metadata": { 303 | "id": "PhS2ZMM4mrOu", 304 | "colab_type": "code", 305 | "colab": {} 306 | }, 307 | "source": [ 308 | "from sklearn.pipeline import make_union\n", 309 | "\n", 310 | "# make_union - Feature Union - Stacks the features side-by-side\n", 311 | "# - Applies MULTIPLE TRANSFORMATIONS to a SINGLE COLUMN and \n", 312 | "# stacks the results side by side\n", 313 | "\n", 314 | "# column_transformer - Applies a DIFFERENT TRANSFORMATION to EACH input column\n", 315 | "# and stacks the results side-by-side. \n", 316 | "\n", 317 | "# A Feature Union works on ONLY ONE COLUMN. 
\n", 318 | "# A column transformer works on an ARBITRARY NUMBER OF COLUMNS.\n", 319 | "# Both apply the transformations in PARALLEL!!!\n", 320 | "\n", 321 | "# FU are not much useful in front of column transformers. \n", 322 | "# So a BIG FU to FUs. Sorry. " 323 | ], 324 | "execution_count": 0, 325 | "outputs": [] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "metadata": { 330 | "id": "bttVDkn2qps7", 331 | "colab_type": "code", 332 | "colab": {} 333 | }, 334 | "source": [ 335 | "from sklearn.experimental import enable_iterative_imputer\n", 336 | "from sklearn.impute import IterativeImputer \n", 337 | "\n", 338 | "# Iterative Imputer - runs a regression model and uses it to predict the missing values. \n", 339 | "# Needs other cols apart from the target column, as features \n", 340 | "# for the regression model. \n", 341 | "# Supplied columns CAN HAVE missing values. It will run as \n", 342 | "# many regression models as the cols that have missing values. \n", 343 | "# You can also specify the regression model being used. \n", 344 | "\n", 345 | "from sklearn.impute import KNNImputer # NOT experimental \n", 346 | "\n", 347 | "# KNNImputer - Utilizes the k-Nearest Neighbors to find the missing values. \n", 348 | "\n", 349 | "# In most cases, Iterative and KNN imputers will produce better results than the \n", 350 | "# SimpleImputer. \n", 351 | "\n", 352 | "# ONLY numerical columns work with Iterative and KNN Imputer. " 353 | ], 354 | "execution_count": 0, 355 | "outputs": [] 356 | }, 357 | { 358 | "cell_type": "code", 359 | "metadata": { 360 | "id": "G7GNciidq6D1", 361 | "colab_type": "code", 362 | "colab": {} 363 | }, 364 | "source": [ 365 | "# We can automate the process of feature selection through pipelines. \n", 366 | "from sklearn.feature_selection import SelectPercentile, chi2\n", 367 | "\n", 368 | "# SelectPercentile - Uses statistical tests (chi2 in this case) to score the \n", 369 | "# the features.\n", 370 | "\n", 371 | "selection = SelectPercentile(chi2, percentile=50) # Keep top 50% of the features\n", 372 | "pipe_selection = make_pipeline(ct, selection, logreg)" 373 | ], 374 | "execution_count": 0, 375 | "outputs": [] 376 | }, 377 | { 378 | "cell_type": "code", 379 | "metadata": { 380 | "id": "_F07KR8ktVDR", 381 | "colab_type": "code", 382 | "colab": {} 383 | }, 384 | "source": [ 385 | "# We can also select the features based of _coef importance of the models \n", 386 | "from sklearn.feature_selection import SelectFromModel\n", 387 | "\n", 388 | "logreg_selection = LogisticRegression(solver='liblinear', penalty='l1', random_state=1)\n", 389 | "selection = SelectFromModel(logreg_selection, threshold='mean')\n", 390 | "pipe_selection = make_pipeline(ct, selection, logreg)" 391 | ], 392 | "execution_count": 0, 393 | "outputs": [] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "metadata": { 398 | "id": "cgOUGUF5tbTR", 399 | "colab_type": "code", 400 | "colab": {} 401 | }, 402 | "source": [ 403 | "# Some models benefit from Feature Standardization \n", 404 | "from sklearn.preprocessing import StandardScaler\n", 405 | "\n", 406 | "scaler = StandardScaler()\n", 407 | "imp_scaler = make_pipeline(imp, scaler)\n", 408 | "\n", 409 | "ct_scaler = make_column_transformer(\n", 410 | " (imp_ohe, ['Embarked', 'Sex']),\n", 411 | " (vect, 'Name'),\n", 412 | " (imp_scaler, ['Age', 'Fare']),\n", 413 | " remainder='passthrough'\n", 414 | ")\n", 415 | "\n", 416 | "pipe_scaler = make_pipeline(ct_scaler, logreg)\n", 417 | "\n", 418 | "# You do not always have to do the Standardization\n", 419 | 
"# Our solver, liblinear, is pretty robust. \n", 420 | "# liblinear doesn't require standardized inputs MOST OF THE TIMES. \n", 421 | "\n", 422 | "# DO NOT use the StandardScaler for a sparse matrix. \n", 423 | "# StandardScaler DESTROYS the sparsity of the matrix. " 424 | ], 425 | "execution_count": 0, 426 | "outputs": [] 427 | }, 428 | { 429 | "cell_type": "code", 430 | "metadata": { 431 | "id": "Z3mfnMih3kKd", 432 | "colab_type": "code", 433 | "colab": {} 434 | }, 435 | "source": [ 436 | "# An alternative way to scale, is to scale ALL THE COLUMNS. \n", 437 | "from sklearn.preprocessing import MaxAbsScaler\n", 438 | "\n", 439 | "scaler = MaxAbsScaler() # PRESERVES SPARSITY\n", 440 | " # Scales everything coming out of the column transformer\n", 441 | "pipe_scaler = make_pipeline(ct, scaler, logreg)" 442 | ], 443 | "execution_count": 0, 444 | "outputs": [] 445 | }, 446 | { 447 | "cell_type": "code", 448 | "metadata": { 449 | "id": "VQD_iKTi5OgK", 450 | "colab_type": "code", 451 | "colab": {} 452 | }, 453 | "source": [ 454 | "# To Handle Outliers \n", 455 | "# 1. Use ROBUST SCALER \n", 456 | "# 2. Identify and remove them \n", 457 | "\n", 458 | "# column transformers DO NOT REMOVE ROWS. \n", 459 | "# That's what they are called column transformers. \n", 460 | "\n", 461 | "# You CAN USE pandas to remove outliers. No issue of data leakage here. " 462 | ], 463 | "execution_count": 0, 464 | "outputs": [] 465 | }, 466 | { 467 | "cell_type": "code", 468 | "metadata": { 469 | "id": "UqSbn3NJ6FsJ", 470 | "colab_type": "code", 471 | "colab": {} 472 | }, 473 | "source": [ 474 | "# Custom Transformation Functions \n", 475 | "from sklearn.preprocessing import FunctionTransformer\n", 476 | "import numpy as np \n", 477 | "\n", 478 | "get_floor = FunctionTransformer(np.floor) # Custom Transformation " 479 | ], 480 | "execution_count": 0, 481 | "outputs": [] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "metadata": { 486 | "id": "b6-EYBnW7-Fn", 487 | "colab_type": "code", 488 | "colab": {} 489 | }, 490 | "source": [ 491 | "# You should alwyas try to ensure that your custom tranformations \n", 492 | "# work well with both, pandas dataframes and the numpy arrays. \n", 493 | "def first_letter(df):\n", 494 | " return pd.DataFrame(df).apply(lambda x: x.str.slice(0,1))\n", 495 | "\n", 496 | "get_first_letter = FunctionTransformer(first_letter)\n", 497 | "\n", 498 | "# Shape considerations for custom transformer functions \n", 499 | "# 1. They should ideally accept 2D inputs --> can pass multiple columns \n", 500 | "# 2. column transformer REQUIRES that all the functions output a 2D object" 501 | ], 502 | "execution_count": 0, 503 | "outputs": [] 504 | }, 505 | { 506 | "cell_type": "code", 507 | "metadata": { 508 | "id": "nILTRSbKEHIs", 509 | "colab_type": "code", 510 | "colab": {} 511 | }, 512 | "source": [ 513 | "# If there are some rare categories in transformed dataset, that may \n", 514 | "# cause a problem during the cross validation. \n", 515 | "# It might happen that all of them might end up in the testing fold.\n", 516 | "# We need to be prepared for such a case. 
\n", 517 | "ohe_ignore = OneHotEncoder(handle_unknown='ignore')" 518 | ], 519 | "execution_count": 0, 520 | "outputs": [] 521 | } 522 | ] 523 | } -------------------------------------------------------------------------------- /Kevin's Data Science Class Notes/README.md: -------------------------------------------------------------------------------- 1 | # Kevin Markham's Data Science Class Notes 2 | 3 | These notes were taken during the live data science classes conducted by [Kevin Markham](https://www.linkedin.com/in/justmarkham/ "Kevin Markham") ([Data School](https://www.dataschool.io/ "Data School")). 4 | 5 |
The classes were nothing short of phenomenal. Kevin has mastered the art of teaching; learning from him live was an experience to remember. 6 | The notes represent what I understood during the live class. They might have a few errors here and there. 7 |
You can know more about the course [here](https://gumroad.com/l/ML-course?variant=Live%20Course%20%2B%20Advanced%20Course%20%3D%20%24129 "here"). 8 | 9 | [![Kevin's Data Science Class](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Kevin's%20Data%20Science%20Class%20Notes/img/class_description.png "Kevin's Data Science Class")](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Kevin's%20Data%20Science%20Class%20Notes/img/class_description.png "Kevin's Data Science Class") 10 | 11 | PS - The course ran for almost 8 hours, instead of 4. Kevin overdelivered by a factor of 2. 12 | -------------------------------------------------------------------------------- /Kevin's Data Science Class Notes/img/class_description.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Kevin's Data Science Class Notes/img/class_description.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/Image_Classification_practice.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Image Classification_practice.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [], 9 | "authorship_tag": "ABX9TyPy0pItKubxoPUu15qhfObI", 10 | "include_colab_link": true 11 | }, 12 | "kernelspec": { 13 | "name": "python3", 14 | "display_name": "Python 3" 15 | } 16 | }, 17 | "cells": [ 18 | { 19 | "cell_type": "markdown", 20 | "metadata": { 21 | "id": "view-in-github", 22 | "colab_type": "text" 23 | }, 24 | "source": [ 25 | "\"Open" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "metadata": { 31 | "id": "ut1uDoXJzCTu", 32 | "colab_type": "text" 33 | }, 34 | "source": [ 35 | "# Image Classification using PyTorch in 2020 \n", 36 | "\n", 37 | "In this notebook we will be utilizing some of the latest advancements in the \n", 38 | "[PyTorch Ecosystem](https://pytorch.org/ecosystem/ \"Click to visit the PyTorch Ecosystem homepage\") to build a simple image classifier using CNNs. \n", 39 | "\n", 40 | "Along the way, we will learn some PyTorch and CNN (Convolution Neural \n", 41 | "Networks) basics. \n", 42 | "\n", 43 | "
Note: You can find this notebook along with the master notebook (with \n", 44 | "all the code) in this Github Repository, \n", 45 | "https://github.com/pranjalchaubey/Deep-Learning-Notes\n", 46 | "\n", 47 | "Please checkout the `PyTorch Image Classification in 2020` folder. \t " 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": { 53 | "id": "rFhLAYud0CQK", 54 | "colab_type": "text" 55 | }, 56 | "source": [ 57 | "### 1. Get the Dataset Onboard\n", 58 | "\n", 59 | "In any Machine Learning/Data Science problem, the first step is always to get \n", 60 | "the dataset. \n", 61 | "\n", 62 | "In our case, to get things started, we will initially use the simple [MNIST Dataset](https://en.wikipedia.org/wiki/MNIST_database \"Wikipedia to the rescue!\"). \n", 63 | "MNIST is largely considered the _'Hello World!'_ of AI/ML. The dataset was \n", 64 | "created way back in the late 90s. The [official description](http://yann.lecun.com/exdb/mnist/ \"Yann Lecun is God.\") states, \n", 65 | "\n", 66 | "_\"The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image._ \n", 67 | "\n", 68 | "_It is a good database for people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal efforts on preprocessing and formatting.\"_ \n", 69 | "\n", 70 | "
You might be wondering, how to get this dataset in our Colab Workspace? \n", 71 | "PyTorch comes with a _datasets_ module called, [Torchvision.Datasets](https://pytorch.org/docs/stable/torchvision/datasets.html \"Official Documentation\"). \n", 72 | "Torchvision.Datasets module contains a number of publically available datasets \n", 73 | "including the one we are looking for, MNIST. You are encouraged to explore the \n", 74 | "Torchvision.Datasets documentation page. " 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "metadata": { 80 | "id": "tia9JXE46rJF", 81 | "colab_type": "code", 82 | "colab": {} 83 | }, 84 | "source": [ 85 | "# Lets import some libraries \n", 86 | "import torch # PyTorch \n", 87 | "from torchvision import datasets # Datasets module \n", 88 | "import torchvision.transforms as transforms # Image Transforms \n", 89 | "from torch.utils.data.sampler import SubsetRandomSampler # Sampler " 90 | ], 91 | "execution_count": 0, 92 | "outputs": [] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "metadata": { 97 | "id": "rVP4itgp7jC9", 98 | "colab_type": "code", 99 | "colab": {} 100 | }, 101 | "source": [ 102 | "# The Data Science Regulars\n", 103 | "# ---- FILL IN ----\n" 104 | ], 105 | "execution_count": 0, 106 | "outputs": [] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": { 111 | "id": "6h8vdeaZ7sj_", 112 | "colab_type": "text" 113 | }, 114 | "source": [ 115 | "Checking out the torchvision.datasets module documentation, we find \n", 116 | "![Torchvision.Dataset](https://drive.google.com/uc?id=1Zsgc5_PnO9BQQ5wqssf67A5Ge-qIXtLh)\n", 117 | "\n" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "metadata": { 123 | "id": "UapF3_qS67Pi", 124 | "colab_type": "code", 125 | "colab": {} 126 | }, 127 | "source": [ 128 | "# convert data to torch.FloatTensor\n", 129 | "transform = transforms.ToTensor()\n", 130 | "\n", 131 | "# choose the training and test datasets\n", 132 | "train_data = # ---- FILL IN ----\n", 133 | "test_data = # ---- FILL IN ----" 134 | ], 135 | "execution_count": 0, 136 | "outputs": [] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": { 141 | "id": "NnZrOAcMZRTp", 142 | "colab_type": "text" 143 | }, 144 | "source": [ 145 | "### 2. Train Validation Test Split \n", 146 | "\n", 147 | "Once the download is complete (usually instantaneous), you should be able to \n", 148 | "see the MNIST dataset downloaded inside the _'data'_ folder on the left hand \n", 149 | "side. (Click on the _Files_ icon on the left sidebar) \n", 150 | "\n", 151 | "We have both the training and the test sets. Now we need to bifurcate the \n", 152 | "training set in two parts, \n", 153 | "1. Training Set (80% images)\n", 154 | "2. Validation Set (20% images) \n", 155 | "\n", 156 | "The algorithm we use to do this is quite simple, \n", 157 | "1. Create a list of indices of the training data \n", 158 | "2. Randomly Shuffle those indices \n", 159 | "3. Slice the indices in 80-20 split \n", 160 | "\n", 161 | "[Why create a _Validation Set_ at all?](https://datascience.stackexchange.com/questions/18339/why-use-both-validation-set-and-test-set \"In order to avoid overfitting on the test set!\") " 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "metadata": { 167 | "id": "IrsPBIfRYwNc", 168 | "colab_type": "code", 169 | "colab": {} 170 | }, 171 | "source": [ 172 | "# obtain training indices that will be used for validation\n", 173 | "\n", 174 | "# 1. 
Create a list of indices of the training data \n", 175 | "# ---- FILL IN ----" 176 | ], 177 | "execution_count": 0, 178 | "outputs": [] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "metadata": { 183 | "id": "xjDu_dd35TZZ", 184 | "colab_type": "code", 185 | "colab": {} 186 | }, 187 | "source": [ 188 | "# 2. Randomly Shuffle those indices\n", 189 | "# ---- FILL IN ----" 190 | ], 191 | "execution_count": 0, 192 | "outputs": [] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "metadata": { 197 | "id": "UKqrNSg-5VMq", 198 | "colab_type": "code", 199 | "colab": {} 200 | }, 201 | "source": [ 202 | "# 3. Slice the indices in 80-20 split\n", 203 | "# percentage of training set to use as validation\n", 204 | "valid_size = 0.2 # ie Train Set divided into two parts \n", 205 | " # 80% Train 20% Validation \n", 206 | "# ---- FILL IN ----" 207 | ], 208 | "execution_count": 0, 209 | "outputs": [] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "metadata": { 214 | "id": "14RHH0Jt8_k_", 215 | "colab_type": "text" 216 | }, 217 | "source": [ 218 | "Please Note that so far we have just been fiddling around with the _'indices'_, \n", 219 | "not the actual images as such.....but Why? \n", 220 | "Answer below. " 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": { 226 | "id": "IlTpuhxVAfCK", 227 | "colab_type": "text" 228 | }, 229 | "source": [ 230 | "### 3. Prepare the Dataloaders \n", 231 | "\n", 232 | "We have downloaded the dataset, and created a train/valid/test split. \n", 233 | "Q: How do we _'push'_ this data into a PyTorch model? \n", 234 | "A: PyTorch has a mechanism to _'ingest'_ data from a dataset through a module \n", 235 | "known as `DataLoader`. \n", 236 | "\n", 237 | "A great analogy, \n", 238 | "\n", 239 | "![DataLoader](https://drive.google.com/uc?id=1U4IG-5lbFGQQS4xwQPU2QiYdR1hFGBZ5 \"Always remember, Deep Learning Model is Your Baby and you got to feed it well!\")\n", 240 | "\n", 241 | "[Great tutorial on DataLoaders.](https://www.journaldev.com/36576/pytorch-dataloader \"PyTorch DataLoader\") \n", 242 | "[Ultimate tutorial on DataLoaders.](https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel \"A detailed example of how to generate your data in parallel with PyTorch\") \n", 243 | "\n", 244 | "Time to prepare the _DataLoaders_ now! 
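Before filling in the real loaders, here is a tiny, self-contained sketch (not part of the exercise; the toy tensors are made up purely for illustration) of what a `DataLoader` does: it wraps a dataset and serves it up in shuffled batches.

```
# A self-contained DataLoader demo on toy data (illustration only)
import torch
from torch.utils.data import TensorDataset, DataLoader

fake_images = torch.rand(100, 1, 28, 28)        # 100 fake 1x28x28 'images'
fake_labels = torch.randint(0, 10, (100,))      # 100 fake digit labels

toy_dataset = TensorDataset(fake_images, fake_labels)
toy_loader = DataLoader(toy_dataset, batch_size=20, shuffle=True)

# Each iteration yields one batch of (images, labels)
images, labels = next(iter(toy_loader))
print(images.shape)   # torch.Size([20, 1, 28, 28])
print(labels.shape)   # torch.Size([20])
```
The real loaders below do the same thing, except they pull from the MNIST dataset and use the samplers built from our shuffled indices.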
\n", 245 | "\n", 246 | "![DataLoader Documentation](https://drive.google.com/uc?id=1YFbWIGwNlL5Kp4Zvt52Ck0_Wk4MNfxS9)" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "metadata": { 252 | "id": "5_Zzb55d8k7o", 253 | "colab_type": "code", 254 | "colab": {} 255 | }, 256 | "source": [ 257 | "# define samplers for obtaining training and validation batches\n", 258 | "# remember train_idx and valid_idx were the indices that we shuffled above\n", 259 | "# ---- FILL IN ----\n", 260 | "\n", 261 | "# prepare dataloaders\n", 262 | "# number of subprocesses to use for data loading\n", 263 | "num_workers = 0 # do not modify \n", 264 | "# how many samples per batch to load\n", 265 | "batch_size = 20 # ie 20 images per batch \n", 266 | "\n", 267 | "# Training Set \n", 268 | "train_loader = torch.utils.data.DataLoader(dataset=train_data, \\\n", 269 | " batch_size=batch_size, \\\n", 270 | " sampler=train_sampler, \\\n", 271 | " num_workers=num_workers)\n", 272 | "# Validation Set \n", 273 | "valid_loader = # ---- FILL IN ----\n", 274 | "\n", 275 | "# Test Set \n", 276 | "# Notice we have not used a 'sampler' here as it was not required \n", 277 | "test_loader = # ---- FILL IN ----" 278 | ], 279 | "execution_count": 0, 280 | "outputs": [] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": { 285 | "id": "ZBy_7mS_4hdc", 286 | "colab_type": "text" 287 | }, 288 | "source": [ 289 | "We got the dataloaders working, but how do we know that they are working indeed? \n", 290 | "Visualizing the data from the dataloaders would be a good check! " 291 | ] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "metadata": { 296 | "id": "JrRjUiQG5uOu", 297 | "colab_type": "code", 298 | "colab": {} 299 | }, 300 | "source": [ 301 | "# Visualize a whole batch of data from the dataloaders \n", 302 | "\n", 303 | "# ---- FILL IN ----\n", 304 | "\n", 305 | "print(len(images), len(labels)) # Should be equal to the batch size, 20\n", 306 | "print('Correct Labels: ', labels)\n", 307 | "images = images.numpy() # Convert the images to numpy array for matplotlib\n", 308 | "print('Shape of our images tensor =', images.shape)\n", 309 | "print('Batch Size =', images.shape[0], 'Image Height/Width =', \\\n", 310 | " images.shape[2])\n", 311 | "\n", 312 | "print()\n", 313 | "print('Squeezing the images tensor =', np.squeeze(images).shape)\n", 314 | "print('Un-squeezing the images tensor (axis=3) =', \\\n", 315 | " np.expand_dims(images, axis=3).shape)" 316 | ], 317 | "execution_count": 0, 318 | "outputs": [] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "metadata": { 323 | "id": "Y58P0-etIck4", 324 | "colab_type": "code", 325 | "colab": {} 326 | }, 327 | "source": [ 328 | "import matplotlib.pyplot as plt \n", 329 | "# Plots are plotted inside the notebooks, 'inline'\n", 330 | "%matplotlib inline " 331 | ], 332 | "execution_count": 0, 333 | "outputs": [] 334 | }, 335 | { 336 | "cell_type": "markdown", 337 | "metadata": { 338 | "id": "yL8sSrheGg9-", 339 | "colab_type": "text" 340 | }, 341 | "source": [ 342 | "With matplotlib, always remember that _figures contain axes which in turn \n", 343 | "contain the plots_. 
\n", 344 | "![Real Python](https://drive.google.com/uc?id=1KdlAGoCK8Lj9pFkrZf52oqOJK3sH3JuH \"Figure Contains the Axes Contains the Axis Contains the Plot\") \n", 345 | "\n", 346 | "[Great tutorial on Matplotlib.](https://realpython.com/python-matplotlib-guide/ \"Real Python has some of the best Python Tutorials on the Internet...no kidding!\")" 347 | ] 348 | }, 349 | { 350 | "cell_type": "code", 351 | "metadata": { 352 | "id": "cC5KTKSv6Y8T", 353 | "colab_type": "code", 354 | "colab": {} 355 | }, 356 | "source": [ 357 | "# Plot the whole batch \n", 358 | "fig = plt.figure(figsize=(25, 4))\n", 359 | "# Loop over all the images in the batch(20)\n", 360 | "for idx in np.arange(20):\n", 361 | " # Add a subplot for the image \n", 362 | " ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n", 363 | " # Populate the subplot with the image \n", 364 | " ax.imshow(np.squeeze(images[idx]), cmap='gray')\n", 365 | " # print out the correct label for each image\n", 366 | " # .item() gets the value contained in a Tensor\n", 367 | " ax.set_title(str(labels[idx].item()))" 368 | ], 369 | "execution_count": 0, 370 | "outputs": [] 371 | }, 372 | { 373 | "cell_type": "code", 374 | "metadata": { 375 | "id": "ls_AFh0FzMkG", 376 | "colab_type": "code", 377 | "colab": {} 378 | }, 379 | "source": [ 380 | "# A neat little numpy trick\n", 381 | "# Just for fun! \n", 382 | "\n", 383 | "dataiter = iter(train_loader) \n", 384 | "images, labels = dataiter.next() \n", 385 | "images = images.numpy() \n", 386 | "np.set_printoptions(precision=2, threshold=None, edgeitems=None, \\\n", 387 | " linewidth=180, suppress=None)\n", 388 | "print('Label', labels[0])\n", 389 | "print(images[0])" 390 | ], 391 | "execution_count": 0, 392 | "outputs": [] 393 | }, 394 | { 395 | "cell_type": "markdown", 396 | "metadata": { 397 | "id": "2APl70a5LH66", 398 | "colab_type": "text" 399 | }, 400 | "source": [ 401 | "Our dataloaders seem to be working fine and out data looks great! \n", 402 | "
\n", 403 | "Time to build our CNN based image classification model in PyTorch.....in 2020! " 404 | ] 405 | }, 406 | { 407 | "cell_type": "markdown", 408 | "metadata": { 409 | "id": "LGFnKhH5Lh9t", 410 | "colab_type": "text" 411 | }, 412 | "source": [ 413 | "### 4. Build a PyTorch CNN Model\n", 414 | "But first, we need to know how CNNs work and what are the components of a \n", 415 | "typical CNN based image classification architecture. \n", 416 | "\n", 417 | "![CNN Scan](https://media.giphy.com/media/vRINohj6YtmnkTQqHi/giphy.gif \"'Bad Guy' uses Tensorflow :P\")\n" 418 | ] 419 | }, 420 | { 421 | "cell_type": "markdown", 422 | "metadata": { 423 | "id": "e99lxoYpt9o8", 424 | "colab_type": "text" 425 | }, 426 | "source": [ 427 | "####4.1.1 CNN Architecture \n", 428 | "A typical CNN Architecture looks like this, \n", 429 | "\n", 430 | "![Block CNN Architecture](https://drive.google.com/uc?id=1RYYh27hsyY5Mx4L_lwGUVGQ3OYkSCqB- \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\") \n", 431 | "
A real life example of VGG-16, \n", 432 | "\n", 433 | "![VGG-16 Architecture](https://drive.google.com/uc?id=14OYg0ihFKHGsLQLcqoeSLt2nyPS4quss \"https://www.researchgate.net/figure/The-architecture-of-a-VGG-16-network_fig2_330467052\") \n", 434 | "
Another example, very close to what we are going to build today, \n", 435 | "\n", 436 | "![MNIST CNN Architecture](https://drive.google.com/uc?id=1cnDzorKeRmNUAUJYw2yvQiY2D4f-sDNL \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\")" 437 | ] 438 | }, 439 | { 440 | "cell_type": "markdown", 441 | "metadata": { 442 | "id": "obnRoku91VXj", 443 | "colab_type": "text" 444 | }, 445 | "source": [ 446 | "####4.1.2 Basic CNN Layer \n", 447 | "So how does a CNN layer actually _'work'_? \n", 448 | "\n", 449 | "![CNN Scan](https://drive.google.com/uc?id=1JictobCPmaIX_9pm2mQewc4QiAzl0pzO \"Remember Billie Eilish above?\") \n", 450 | "\n", 451 | "
A colored image has 3 channels, \n", 452 | "![RGB Image](https://drive.google.com/uc?id=1QlU04TZ6IN2IRqQJFB8m6GFJ2MmqYGhz \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\") \n", 453 | "\n", 454 | "
A 3-channel convolution, \n", 455 | "![RGB Convolution](https://drive.google.com/uc?id=1CGIqxGHjJGXr8aduPtwGd1-ikgA3WqJ5 \"https://towardsdatascience.com/a-comprehensive-introduction-to-different-types-of-convolutions-in-deep-learning-669281e58215\") \n", 456 | "\n", 457 | "![RGB Convolution Summation](https://drive.google.com/uc?id=1fpM3NLvyjeiKF6_at0Nv4XhYPRkwH1IY \"https://towardsdatascience.com/a-comprehensive-introduction-to-different-types-of-convolutions-in-deep-learning-669281e58215\")" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "metadata": { 463 | "id": "dsM9p5pN55Ht", 464 | "colab_type": "text" 465 | }, 466 | "source": [ 467 | "####4.1.3 Stride \n", 468 | "_'Stride'_ of the kernel while scanning the image. \n", 469 | "\n", 470 | "Here's an example with _stride=1_, \n", 471 | "![CNN Stride = 1](https://drive.google.com/uc?id=1wJd7VCYfiMDes0Ex0SlBL97yBP6P7ajt \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\") \n", 472 | "\n", 473 | "
Another example with _stride=2_, \n", 474 | "![CNN Stride = 2](https://drive.google.com/uc?id=1dG2i4WtxUzu9Wlsl3jyH4ZXxUSYVdKfk)" 475 | ] 476 | }, 477 | { 478 | "cell_type": "markdown", 479 | "metadata": { 480 | "colab_type": "text", 481 | "id": "0UTagwYo92ZK" 482 | }, 483 | "source": [ 484 | "####4.1.4 Padding \n", 485 | "Padding ensures that there is no loss of information while an image with a \n", 486 | "convolutional kernel. \n", 487 | "![Padding = 1](https://drive.google.com/uc?id=1xIQSJVRtAS7em_E387ZBd6sErjwA7RD6 \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\")" 488 | ] 489 | }, 490 | { 491 | "cell_type": "markdown", 492 | "metadata": { 493 | "colab_type": "text", 494 | "id": "kKK72-_H94bi" 495 | }, 496 | "source": [ 497 | "####4.1.5 Max Pooling \n", 498 | "Max Pooling layer primarily reduces the dimensionality of the input. \n", 499 | "\n", 500 | "![Max Pooling](https://drive.google.com/uc?id=11_1ThNaU4e4DAEFs7I9GsNoD7oFmtVES \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\") \n", 501 | "\n", 502 | "Max Pooling is not the only type of pooling layer out there. \n", 503 | "![Type of Pooling](https://drive.google.com/uc?id=1ECZrE8vAhTE1gEsp_3vPy03oLuSj0aOW) \n", 504 | "\n", 505 | "[A nice tutorial on pooling layers.](https://machinelearningmastery.com/pooling-layers-for-convolutional-neural-networks/ \"Jason Brownlee is an absolute genius. His blog is a damn goldmine!\") " 506 | ] 507 | }, 508 | { 509 | "cell_type": "markdown", 510 | "metadata": { 511 | "id": "98q9buSu-hxw", 512 | "colab_type": "text" 513 | }, 514 | "source": [ 515 | "####4.1.6 Rectified Linear Units aka ReLU \n", 516 | "The non-linear activation function. \n", 517 | "![ReLU Activation](https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2018/10/Line-Plot-of-Rectified-Linear-Activation-for-Negative-and-Positive-Inputs.png \"https://machinelearningmastery.com/rectified-linear-activation-function-for-deep-learning-neural-networks/\")" 518 | ] 519 | }, 520 | { 521 | "cell_type": "markdown", 522 | "metadata": { 523 | "id": "vvnlvaIQ_ECZ", 524 | "colab_type": "text" 525 | }, 526 | "source": [ 527 | "####4.1.7 _Fully Connected_ or _Linear Layers_ \n", 528 | "Final dimensionality reduction for either classification or regression tasks. \n", 529 | "\n", 530 | "![Fully Connected Layers](https://drive.google.com/uc?id=1Fwh-NqMDLx-xqKlqN2c-z780M9aBUHvO \"https://towardsdatascience.com/a-comprehensive-guide-to-convolutional-neural-networks-the-eli5-way-3bd2b1164a53\") " 531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "metadata": { 536 | "id": "Xct_yN3R_8pP", 537 | "colab_type": "text" 538 | }, 539 | "source": [ 540 | "Before we move forward, a few questions for everyone: \n", 541 | "1. What kind of features do the first few CNN layers capture? \n", 542 | "2. What kind of features do the last few CNN layers capture? \n", 543 | "3. What is the role of max-pool? \n", 544 | "4. Can we use _stride_ to perform a role similar to max-pool? \n", 545 | " " 546 | ] 547 | }, 548 | { 549 | "cell_type": "markdown", 550 | "metadata": { 551 | "id": "el1NUdaz0HrE", 552 | "colab_type": "text" 553 | }, 554 | "source": [ 555 | "####4.2.1 PyTorch \n", 556 | "PyTorch is currently the hottest Deep Learning library out there. In terms of \n", 557 | "popularity, it has even taken over Tensorflow. 
Tensorflow came before PyTorch \n", 558 | "and is backed by the engineering and marketing might of _**Google**_. \n", 559 | "\n", 560 | "_Why PyTorch got so darn famous?_ \n", 561 | "The answer lies in the fact that PyTorch is highly pythonic (due to dynamic \n", 562 | "computational graphs) which makes it extremely flexible and ideal for \n", 563 | "researchers and developers alike. " 564 | ] 565 | }, 566 | { 567 | "cell_type": "markdown", 568 | "metadata": { 569 | "id": "3iKCm6RQJUL5", 570 | "colab_type": "text" 571 | }, 572 | "source": [ 573 | "####4.2.2 Understanding Computational Graphs \n", 574 | "At the bottom of every Deep Neural Network training, there are only two things \n", 575 | "taking place, \n", 576 | "1. A forward pass - pushing images/data from the start of the network and \n", 577 | "generating an output (and a loss/error). \n", 578 | "2. Backpropagation - essentially a backward pass where we calculate gradients \n", 579 | "using partial derivatives with respect to the loss, and make changes to the \n", 580 | "weights of the network. In a nutshell, this is how deep learning networks \n", 581 | "train. \n", 582 | "\n", 583 | "![A Computational Graph](https://drive.google.com/uc?id=1dgVg08M02gfIkPm0JKyhm4k4gYVLB_Me \"Udacity Deep Learning Nano Degree\") \n", 584 | "The image above, is a simple neural network. But it is also a \n", 585 | "computational graph. \n", 586 | "\n", 587 | "We first make a forward pass through our network and then a backward pass \n", 588 | "to calculate how much loss was being contributed by _**W1**_ weight in \n", 589 | "particular. \n", 590 | "\n", 591 | "Every neural network you define, PyTorch _sees_ it as a computational graph \n", 592 | "similar to what we see above and keeps a track of all the operations \n", 593 | "performed by every node. This ensures that it calculates accurate gradients \n", 594 | "when make a backward pass. \n", 595 | "\n", 596 | "Good thing about PyTorch is that it creates these computational graphs on the \n", 597 | "fly! And this aspect makes PyTorch and extremely felxible (and pythonic) deep \n", 598 | "learning library. \n", 599 | "\n", 600 | "![Dynamic Computational Graphs](https://cdn.analyticsvidhya.com/wp-content/uploads/2019/09/dynamic_graph.gif \"PyTorch building dynamic computational graphs on the fly\")" 601 | ] 602 | }, 603 | { 604 | "cell_type": "markdown", 605 | "metadata": { 606 | "id": "HNCoUgD7R-wL", 607 | "colab_type": "text" 608 | }, 609 | "source": [ 610 | "####4.2.3 Tensors \n", 611 | "Tensors are the building blocks of every deep learning library including \n", 612 | "PyTorch. What are tensors though? \n", 613 | "\n", 614 | "![Tensors](https://drive.google.com/uc?id=1F5cLclu4RML7zj7axf8IGU7aUXJlI7N0 \"Udacity Deep Learning Nano Degree\") \n", 615 | "\n", 616 | "Creating tensors in PyTorch is easy \n", 617 | "```\n", 618 | "import torch \n", 619 | "x = torch.rand(3, 3)\n", 620 | "print(x)\n", 621 | "\n", 622 | ">>Prints out:\n", 623 | ">>tensor([[0.5264, 0.1839, 0.9907],\n", 624 | ">> [0.0343, 0.9839, 0.9294],\n", 625 | ">> [0.6938, 0.6755, 0.2258]])\n", 626 | "```\n", 627 | "Tensors in PyTorch are exactly like the numpy arrays, except that they can \n", 628 | "also live on a GPU which makes them realy really fast! 
\n", 629 | "```\n", 630 | "torch.FloatTensor([[20, 30, 40], [90, 60, 70]]) # Tensor on CPU\n", 631 | "torch.cuda.FloatTensor([[20, 30, 40], [90, 60, 70]]) # Tensor on GPU\n", 632 | "```\n", 633 | "Moving tensors (and complex deep learning models) to a GPU (or a CPU) is \n", 634 | "pretty straightforward in PyTorch. \n", 635 | "```\n", 636 | "x = torch.FloatTensor([[20, 30, 40], [90, 60, 70]]) # Tensor on CPU\n", 637 | "print('Is tensor x on GPU?', x.is_cuda) # False\n", 638 | "x = x.to('cuda') # Moves to GPU\n", 639 | "print('Is tensor x on GPU?', x.is_cuda) # True \n", 640 | "x = x.to('cpu') # Moves back to CPU\n", 641 | "print('Is tensor x on GPU?', x.is_cuda) # False\n", 642 | "```" 643 | ] 644 | }, 645 | { 646 | "cell_type": "markdown", 647 | "metadata": { 648 | "id": "E0VnwmbZVrLW", 649 | "colab_type": "text" 650 | }, 651 | "source": [ 652 | "####4.2.4 The Autograd Module \n", 653 | "Autograd is the real rockstar module in PyTorch. \n", 654 | "Autograd is the module that keeps a track of all the operations performed on a \n", 655 | "tensor and calculates the gradients through a technique called _**Automatic \n", 656 | "Differentiation**_. \n", 657 | "\n", 658 | "To enable tracking computation history on a tensor, set _**`.requires_grad`**_ \n", 659 | "to _**`True`**_. To detach a tensor from its computation history, call \n", 660 | "_**`.detach()`**_. \n", 661 | "\n", 662 | "In order to stop autograd from keeping history of computations on a deep \n", 663 | "learning model, wrap it around _**`torch.no_grad():`**_. This is usually done \n", 664 | "during inference. \n", 665 | "```\n", 666 | "with torch.no_grad():\n", 667 | " # inference code \n", 668 | "``` " 669 | ] 670 | }, 671 | { 672 | "cell_type": "markdown", 673 | "metadata": { 674 | "id": "9S0uLBsCZqjG", 675 | "colab_type": "text" 676 | }, 677 | "source": [ 678 | "####4.2.5 The nn.Module \n", 679 | "The nn module in PyTorch is used to 'build' the neural networks and contains \n", 680 | "all the deep learning layers. It obviously depends upon the autograd module to \n", 681 | "calculate gradients. \n", 682 | "\n", 683 | "When defining our custom models in PyTorch, we typically subclass the \n", 684 | "nn.Module class and override the `__init__()` and `forward()` functions. \n", 685 | "1. `__init__()` - This is where we define the layers of our network. \n", 686 | "2. `forward()` - This is where you actaully connect the layers together and \n", 687 | "make everything work. \n", 688 | "\n", 689 | "Don't worry if this sounds a little confusing, we will be seeing `nn.Module` \n", 690 | "in action very soon! \n", 691 | "\n" 692 | ] 693 | }, 694 | { 695 | "cell_type": "markdown", 696 | "metadata": { 697 | "id": "y0MkS6OadYRQ", 698 | "colab_type": "text" 699 | }, 700 | "source": [ 701 | "####4.2.6 The Optim Package \n", 702 | "The optim package in PyTorch contains the optimization algorithms that help \n", 703 | "to train your network. \n", 704 | "\n", 705 | "A simple example, \n", 706 | "`optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)` \n", 707 | "\n", 708 | "
[My all time favorite 'Intro to PyTorch' tutorial](https://towardsdatascience.com/understanding-pytorch-with-an-example-a-step-by-step-tutorial-81fc5f8c4e8e \"Understanding PyTorch with an example: a step-by-step tutorial\") \n", 709 | "[Great PyTorch Tutorial Part 1](https://blog.paperspace.com/pytorch-101-understanding-graphs-and-automatic-differentiation/ \"PyTorch 101, Part 1: Understanding Graphs, Automatic Differentiation and Autograd\") \n", 710 | "[Great PyTorch Tutorial Part 2](https://blog.paperspace.com/pytorch-101-building-neural-networks/ \"PyTorch 101, Part 2: Building Your First Neural Network\") \n", 711 | "[Great PyTorch Tutorial Part 3](https://blog.paperspace.com/pytorch-101-advanced/ \"PyTorch 101, Part 3: Going Deep with PyTorch\") \n", 712 | "[Great PyTorch Tutorial Part 4](https://blog.paperspace.com/pytorch-memory-multi-gpu-debugging/ \"PyTorch 101, Part 4: Memory Management and Using Multiple GPUs\") \n", 713 | "[Great PyTorch Tutorial Part 5](https://blog.paperspace.com/pytorch-hooks-gradient-clipping-debugging/ \"PyTorch 101, Part 5: Understanding Hooks\") \n", 714 | "[Stunning Insight into the Internals of PyTorch](http://blog.ezyang.com/2019/05/pytorch-internals/ \"PyTorch Internals\")" 715 | ] 716 | }, 717 | { 718 | "cell_type": "markdown", 719 | "metadata": { 720 | "id": "wYblCGur448H", 721 | "colab_type": "text" 722 | }, 723 | "source": [ 724 | "####4.3 Time to define our model! \n", 725 | "But before we start off with the model definition, let's have a look at what \n", 726 | "the PyTorch Documentation says about the Convolutional NNs. \n", 727 | "\n", 728 | "![Conv2d Layer](https://drive.google.com/uc?id=1odHZIXURYjogjUcyCQ56RYfQPqgzO7DX \"Conv2D\") \n", 729 | "\n", 730 | "
We also need to check out about the MaxPool, Dropout and Linear Layers. \n", 731 | "\n", 732 | "![MaxPool 2D](https://drive.google.com/uc?id=1r1EeLHrV5oAG4OUyRSm0lw2OcZyJQ7ne \"MaxPool2D Layer\") \n", 733 | "\n", 734 | "![Dropout Layer](https://drive.google.com/uc?id=1kYgb4wDrGEBEF5WB169N7Q5hTrwDDO1d \"Dropout Layer\") \n", 735 | "\n", 736 | "![Linear Layer](https://drive.google.com/uc?id=1rrIEqPtun_8Td1js76B2hv22Xm5tjY6m \"Linear Layer\") " 737 | ] 738 | }, 739 | { 740 | "cell_type": "markdown", 741 | "metadata": { 742 | "id": "q0zv4wi7BNRL", 743 | "colab_type": "text" 744 | }, 745 | "source": [ 746 | "Did you guys notice a weird anomaly in the Conv2d and Linear layers? \n", 747 | "```\n", 748 | "torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros')\n", 749 | "``` \n", 750 | "```\n", 751 | "torch.nn.Linear(in_features, out_features, bias=True)\n", 752 | "```" 753 | ] 754 | }, 755 | { 756 | "cell_type": "markdown", 757 | "metadata": { 758 | "id": "jrhOYd8JTQ-B", 759 | "colab_type": "text" 760 | }, 761 | "source": [ 762 | "Conv2d layer expects `in_channels` while the Linear layer expects `in_features`. \n", 763 | "\n", 764 | "Bottom line is, that PyTorch expects different things from a tensor dimension. \n", 765 | "Specifically, \n", 766 | "```\n", 767 | "\"\"\"Example tensor size outputs, how PyTorch reads them, and where you encounter them in the wild. \n", 768 | "Note: the values below are only examples. Focus on the rank of the tensor (how many dimensions it has).\"\"\"\n", 769 | ">>> torch.Size([32])\n", 770 | " # 1d: [batch_size] \n", 771 | " # use for target labels or predictions.\n", 772 | ">>> torch.Size([12, 256])\n", 773 | " # 2d: [batch_size, num_features (aka: C * H * W)]\n", 774 | " # use for as nn.Linear() input.\n", 775 | ">>> torch.Size([10, 1, 2048])\n", 776 | " # 3d: [batch_size, channels, num_features (aka: H * W)]\n", 777 | " # when used as nn.Conv1d() input.\n", 778 | " # (but [seq_len, batch_size, num_features]\n", 779 | " # if feeding an RNN).\n", 780 | ">>> torch.Size([16, 3, 28, 28])\n", 781 | " # 4d: [batch_size, channels, height, width]\n", 782 | " # use for as nn.Conv2d() input.\n", 783 | ">>> torch.Size([32, 1, 5, 15, 15])\n", 784 | " # 5D: [batch_size, channels, depth, height, width]\n", 785 | " # use for as nn.Conv3d() input.\n", 786 | "``` \n", 787 | "\n", 788 | "A neat method to make your tensors ready for the linear layer, \n", 789 | "```\n", 790 | "Use view() to change your tensor’s dimensions.\n", 791 | "\n", 792 | "image = image.view(batch_size, -1)\n", 793 | "\n", 794 | "You supply your batch_size as the first number, and then “-1” basically tells Pytorch, “you figure out this other number for me… please.” \n", 795 | "Your tensor will now feed properly into any linear layer.\n", 796 | "```\n", 797 | "\n", 798 | "[Incredible Tutorial on PyTorch Layer Dimensions.](https://towardsdatascience.com/pytorch-layer-dimensions-what-sizes-should-they-be-and-why-4265a41e01fd \"Eye opener of an article, must read!\")" 799 | ] 800 | }, 801 | { 802 | "cell_type": "markdown", 803 | "metadata": { 804 | "id": "ouQTXZetn53I", 805 | "colab_type": "text" 806 | }, 807 | "source": [ 808 | "Before we start, \n", 809 | "1. What is the shape (dimensions) of our images? \n", 810 | "2. What is the size of our batch? \n", 811 | "3. How many _'channels'_ are there in our images? 
\n", 812 | "\n", 813 | "![Batch Visualization](https://drive.google.com/uc?id=193KFtV2hr-7VkhQUJxd36oCF6Ms12xwD \"Batch Visualization\")" 814 | ] 815 | }, 816 | { 817 | "cell_type": "code", 818 | "metadata": { 819 | "id": "I-McCK6u3WnN", 820 | "colab_type": "code", 821 | "colab": {} 822 | }, 823 | "source": [ 824 | "import torch.nn as nn # nn module contains all the layers \n", 825 | "import torch.nn.functional as F # same as nn, but a little different " 826 | ], 827 | "execution_count": 0, 828 | "outputs": [] 829 | }, 830 | { 831 | "cell_type": "code", 832 | "metadata": { 833 | "id": "AtUDongrrJlj", 834 | "colab_type": "code", 835 | "colab": {} 836 | }, 837 | "source": [ 838 | "# Our CNN based neural architecture \n", 839 | "# Let's build a simple one with only Convolutional, Linear \n", 840 | "# and dropout layers\n", 841 | "class MNISTModel1(nn.Module):\n", 842 | " # Here we define the neural architecture \n", 843 | " def __init__(self):\n", 844 | " super(MNISTModel1, self).__init__() # Initialize the nn module \n", 845 | " \n", 846 | " # Convolutional Layers\n", 847 | " # What shape/dimensions the first layer is going to see? \n", 848 | " # Do we need to have some padding for a kernel_size = 3? \n", 849 | " # Input Features = 1 x 28 x 28\n", 850 | " # Output Features = ???\n", 851 | " # Shape of a Convolutional Layer = (W - K + 2P)\n", 852 | " # ------------ + 1\n", 853 | " # S\n", 854 | " # where, \n", 855 | " # W = Width/Height of previous layer = 28\n", 856 | " # K = Filter Size = 3\n", 857 | " # P = Padding = 0\n", 858 | " # S = Stride = 1(default)\n", 859 | " # Therefore, \n", 860 | " # if padding = 0\n", 861 | " # Output Shape = ((28 - 3 + 2*0)/1)+1 = 26 \n", 862 | " # We want the dimensions to stay the same so that there is no \n", 863 | " # loss of information when performing the convolution. \n", 864 | " # Hence, \n", 865 | " # if padding = 1\n", 866 | " # Output Shape = ((28 - 3 + 2*1)/1)+1 = 28\n", 867 | " # ---- FILL IN ----\n", 868 | "\n", 869 | " # Linear Layers \n", 870 | " # What shape the first linear layer is going see?\n", 871 | " # What are the total number of features given out by conv2?\n", 872 | " # Features = 16 x 28 x 28 = 12544\n", 873 | " # Therefore, \n", 874 | " # ---- FILL IN ----\n", 875 | "\n", 876 | " # Last linear layer should output 10 features as we are \n", 877 | " # Classifying the images in 10 categories \n", 878 | " # ---- FILL IN ----\n", 879 | "\n", 880 | " # Dropout \n", 881 | " # ---- FILL IN ----\n", 882 | "\n", 883 | " # Here we define the 'forward behaviour' of our neural architecture \n", 884 | " def forward(self, image_batch):\n", 885 | " # This is also the place where we add ACTIVATION functions \n", 886 | " # ---- FILL IN ----\n", 887 | "\n", 888 | " # Remember that when passing image_batch through the Linear layers, \n", 889 | " # PyTorch expects: \n", 890 | " # >>> torch.Size([12, 256]) -> example values \n", 891 | " # 2d: [batch_size, num_features (aka: C * H * W)]\n", 892 | " # use for nn.Linear() input. 
\n", 893 | " # Therefore, we need to 'flatten' image_batch\n", 894 | " # image_batch = image_batch.view(batch_size, -1) --> batch size ???\n", 895 | " # ---- FILL IN ----\n", 896 | "\n", 897 | " # Let's add the dropout too \n", 898 | " # ---- FILL IN ----\n", 899 | "\n", 900 | " # Final Layer of the network \n", 901 | " # ---- FILL IN ----\n", 902 | "\n", 903 | " # The output from the final layer is a tensor with 10 'logits'\n", 904 | " return None " 905 | ], 906 | "execution_count": 0, 907 | "outputs": [] 908 | }, 909 | { 910 | "cell_type": "markdown", 911 | "metadata": { 912 | "id": "HEPNIgRiS9FN", 913 | "colab_type": "text" 914 | }, 915 | "source": [ 916 | "Now that we have defined our model, is there a way we can peep inside to see \n", 917 | "what is going on and that if everything is alright? \n", 918 | "\n", 919 | "Say hello to _**[torchsummary !!!](https://github.com/sksq96/pytorch-summary \"Click to visit GitHub Page\")**_ " 920 | ] 921 | }, 922 | { 923 | "cell_type": "code", 924 | "metadata": { 925 | "id": "hHTtCrNxQ_X7", 926 | "colab_type": "code", 927 | "colab": {} 928 | }, 929 | "source": [ 930 | "# Let's install torchsummary and do some cool stuff \n", 931 | "!pip install torchsummary # https://github.com/sksq96/pytorch-summary " 932 | ], 933 | "execution_count": 0, 934 | "outputs": [] 935 | }, 936 | { 937 | "cell_type": "code", 938 | "metadata": { 939 | "id": "NylWMGTf1QbN", 940 | "colab_type": "code", 941 | "colab": {} 942 | }, 943 | "source": [ 944 | "from torchsummary import summary " 945 | ], 946 | "execution_count": 0, 947 | "outputs": [] 948 | }, 949 | { 950 | "cell_type": "code", 951 | "metadata": { 952 | "id": "DtUFlNAd5lhN", 953 | "colab_type": "code", 954 | "colab": {} 955 | }, 956 | "source": [ 957 | "# We can make the use of torchsummary library here to figure \n", 958 | "# if we have done something wrong \n", 959 | "\n", 960 | "# But first we need to tell PyTorch where to 'keep' the model \n", 961 | "# On GPU or on CPU \n", 962 | "device = # ---- FILL IN ----\n", 963 | "print('The model will run on', device)\n", 964 | "\n", 965 | "# Initialize the model \n", 966 | "mnist1 = # ---- FILL IN ----\n", 967 | "summary(# ---- FILL IN ----) # Summarize" 968 | ], 969 | "execution_count": 0, 970 | "outputs": [] 971 | }, 972 | { 973 | "cell_type": "markdown", 974 | "metadata": { 975 | "id": "kyTvkdq3gqHM", 976 | "colab_type": "text" 977 | }, 978 | "source": [ 979 | "That was a lot of work.....Whew! \n", 980 | "\n", 981 | "_***Q: Isn't there an 'easier' way to define the Model in 2020?***_ \n", 982 | "A: _**Yes, absolutely!**_ \n", 983 | "Say hello to the _**[torchlayers library !!!](https://github.com/szymonmaszke/torchlayers \"The best thing to have happened to PyTorch in recent times, shame it won't run on Colab!\")**_ \n", 984 | "With torchlayers, the above code will be reduced to about 7-8 lines! \n", 985 | "But unfortunately, [torchlayers requires Python 3.7](https://github.com/szymonmaszke/torchlayers/issues/5 \"I tried my best!\") and above. Colab only \n", 986 | "supports Python 3.6.x. 
\n", 987 | "```\n", 988 | "# A slightly different example from ours\n", 989 | "import torchlayers as tl\n", 990 | "\n", 991 | "# torch.nn and torchlayers can be mixed easily\n", 992 | "model = torch.nn.Sequential(\n", 993 | " tl.Conv(64), # specify ONLY out_channels\n", 994 | " torch.nn.ReLU(), # use torch.nn wherever you wish\n", 995 | " tl.BatchNorm(), # BatchNormNd inferred from input\n", 996 | " tl.Conv(128), # Default kernel_size equal to 3\n", 997 | " tl.ReLU(),\n", 998 | " tl.Conv(256, kernel_size=11), # \"same\" padding as default\n", 999 | " tl.GlobalMaxPool(), # Known from Keras\n", 1000 | " tl.Linear(10), # Output for 10 classes\n", 1001 | ")\n", 1002 | "\n", 1003 | "print(model)\n", 1004 | "```\n", 1005 | "\n" 1006 | ] 1007 | }, 1008 | { 1009 | "cell_type": "markdown", 1010 | "metadata": { 1011 | "id": "YljS2tiRlHql", 1012 | "colab_type": "text" 1013 | }, 1014 | "source": [ 1015 | "With our model definition complete, it is time to train! " 1016 | ] 1017 | }, 1018 | { 1019 | "cell_type": "markdown", 1020 | "metadata": { 1021 | "id": "f08VsPbPn5bO", 1022 | "colab_type": "text" 1023 | }, 1024 | "source": [ 1025 | "### 5. Train a PyTorch Model in 2020 \n", 1026 | "\n", 1027 | "PyTorch is infamous among newcomers for it's _'training loops'_. \n", 1028 | "They can be long and at times a little confusing too. However, most of them \n", 1029 | "are similar and writing training loops simply turns out to be a boring and repetitive \n", 1030 | "exercise. \n", 1031 | "\n", 1032 | "Q: _**This is 2020.**_ Is there a better way? \n", 1033 | "A: _**Yes, absolutely!**_ \n", 1034 | "Say hello to _**[Poutyne !!!](https://poutyne.org/index.html \"How do you pronounce this?\")**_ \n", 1035 | "\n", 1036 | "\n", 1037 | "Thanks to _Poutyne_, writing training loops in PyTorch is _**FUN !!!**_ \n", 1038 | "\n", 1039 | "PS - Poutyne is pronounced as Poutine or Pu-tin. 
\n", 1040 | "![Poutyne](https://drive.google.com/uc?id=142xYy_mJoPSk97SDicvn9zRNxHMpXDxz \"You think loops are boring?!?!\")" 1041 | ] 1042 | }, 1043 | { 1044 | "cell_type": "code", 1045 | "metadata": { 1046 | "id": "9gpy_1cRZ6gV", 1047 | "colab_type": "code", 1048 | "colab": {} 1049 | }, 1050 | "source": [ 1051 | "# Install Poutyne \n", 1052 | "!pip install poutyne " 1053 | ], 1054 | "execution_count": 0, 1055 | "outputs": [] 1056 | }, 1057 | { 1058 | "cell_type": "code", 1059 | "metadata": { 1060 | "id": "LnHdjqkhdzLb", 1061 | "colab_type": "code", 1062 | "colab": {} 1063 | }, 1064 | "source": [ 1065 | "from poutyne.framework import Model # The core datastructure of poutyne \n", 1066 | " # https://poutyne.org/model.html" 1067 | ], 1068 | "execution_count": 0, 1069 | "outputs": [] 1070 | }, 1071 | { 1072 | "cell_type": "code", 1073 | "metadata": { 1074 | "id": "GHVhp0yEiFfs", 1075 | "colab_type": "code", 1076 | "colab": {} 1077 | }, 1078 | "source": [ 1079 | "from torch import optim # Optimizer: we need it to train our network" 1080 | ], 1081 | "execution_count": 0, 1082 | "outputs": [] 1083 | }, 1084 | { 1085 | "cell_type": "code", 1086 | "metadata": { 1087 | "id": "-EwMBpBtKE7w", 1088 | "colab_type": "code", 1089 | "colab": {} 1090 | }, 1091 | "source": [ 1092 | "# A pouytne training loop\n", 1093 | "\n", 1094 | "# A few hyperparamters for the training loop \n", 1095 | "learning_rate = 0.1\n", 1096 | "epochs = 3\n", 1097 | "\n", 1098 | "def poutyne_train(pytorch_model):\n", 1099 | " \n", 1100 | " # ---- FILL IN ----\n", 1101 | " \n", 1102 | " print(f'Test:\\n\\tLoss: {test_loss: .3f}\\n\\tAccuracy: {test_acc: .3f}')\n", 1103 | "\n", 1104 | " return None" 1105 | ], 1106 | "execution_count": 0, 1107 | "outputs": [] 1108 | }, 1109 | { 1110 | "cell_type": "code", 1111 | "metadata": { 1112 | "id": "_znXT5djwzeY", 1113 | "colab_type": "code", 1114 | "colab": {} 1115 | }, 1116 | "source": [ 1117 | "# Let's start the training people!!! \n", 1118 | "poutyne_train(mnist1)" 1119 | ], 1120 | "execution_count": 0, 1121 | "outputs": [] 1122 | }, 1123 | { 1124 | "cell_type": "markdown", 1125 | "metadata": { 1126 | "id": "wqforBpLr0dk", 1127 | "colab_type": "text" 1128 | }, 1129 | "source": [ 1130 | "_**CONGRATULATIONS !!! You just trained your first(?) CNN Model!**_ \n", 1131 | "The accuracy looks pretty decent as well! \n" 1132 | ] 1133 | }, 1134 | { 1135 | "cell_type": "markdown", 1136 | "metadata": { 1137 | "id": "hakTX49k5wnW", 1138 | "colab_type": "text" 1139 | }, 1140 | "source": [ 1141 | "### 6. Understanding Overfitting \n", 1142 | "Before we move any further we need to understand the concept of _**Overfitting**_ \n", 1143 | "in Machine Learning models. \n", 1144 | "What is _**overfitting?**_ \n", 1145 | "![Fitting Examples](https://drive.google.com/uc?id=1gFOa5I24S7XDDep4WaVMIoivh9JvIp1Y \"https://www.curiousily.com/posts/hackers-guide-to-fixing-underfitting-and-overfitting-models/\")" 1146 | ] 1147 | }, 1148 | { 1149 | "cell_type": "markdown", 1150 | "metadata": { 1151 | "id": "gnE0htQzVRAs", 1152 | "colab_type": "text" 1153 | }, 1154 | "source": [ 1155 | "We are always looking to ensure that our models have a low bias with a low \n", 1156 | "variance. \n", 1157 | "However, if we train for too long (too many epochs) our model will start to \n", 1158 | "overfit. \n", 1159 | "\n", 1160 | "How do we identify that the model has started overfitting? 
\n", 1161 | "![Overfitting](https://drive.google.com/uc?id=1q02q0ge0jldHJm8P_Hq6ECeIvaSlcc36 \"https://mlexplained.com/2018/04/24/overfitting-isnt-simple-overfitting-re-explained-with-priors-biases-and-no-free-lunch/\") \n", 1162 | "Dotted vertical line is where we should either stop training the model (also \n", 1163 | "known as _**Early Stopping**_) or we should have some logic in the training \n", 1164 | "loop that _'saves'_ the model around that time while the training still \n", 1165 | "continues. " 1166 | ] 1167 | }, 1168 | { 1169 | "cell_type": "markdown", 1170 | "metadata": { 1171 | "id": "p6qwm4NIZQNf", 1172 | "colab_type": "text" 1173 | }, 1174 | "source": [ 1175 | "Our model will also overfit if it is too complex. \n", 1176 | "For example (y-axis is different), \n", 1177 | "![Complex Model](https://drive.google.com/uc?id=1vWHgknPrbXEQczdKMliB0RyVgYld3cGa \"https://medium.com/@george.drakos62/cross-validation-70289113a072\") \n", 1178 | "\n", 1179 | "
Early stopping can save the day! \n", 1180 | "![alt text](https://drive.google.com/uc?id=1HnBFMWZGHy0UFMUICKU25qBiB-4_G0Xn \"https://www.jeremyjordan.me/deep-neural-networks-preventing-overfitting/\")" 1181 | ] 1182 | }, 1183 | { 1184 | "cell_type": "markdown", 1185 | "metadata": { 1186 | "id": "nhvAC7xGapnU", 1187 | "colab_type": "text" 1188 | }, 1189 | "source": [ 1190 | "A really nice matrix to identify whether we are overfitting: \n", 1191 | "![Overfitting Matrix](https://drive.google.com/uc?id=19bFHepjNgQ9kpqmQdMEW4lDJ-qXi5WF8 \"https://hackernoon.com/memorizing-is-not-learning-6-tricks-to-prevent-overfitting-in-machine-learning-820b091dc42\") \n", 1192 | "While preparing the above matrix, the author has considered only two sets, \n", 1193 | "training and testing (kind of a Deep Learning _faux pas_!). Since we have a \n", 1194 | "validation set as well, replace the word _**'Testing'**_ with \n", 1195 | "_**'Validation'**_. " 1196 | ] 1197 | }, 1198 | { 1199 | "cell_type": "markdown", 1200 | "metadata": { 1201 | "id": "5qlOIi5RaeVI", 1202 | "colab_type": "text" 1203 | }, 1204 | "source": [ 1205 | "How can we avoid _**Overfitting**_? \n", 1206 | "1. Use a simpler model (fewer layers) \n", 1207 | "2. Use _Dropout_ \n", 1208 | "3. Get more training data (if possible) \n", 1209 | "4. Augment the data and add noise \n", 1210 | "5. Early Stopping \n", 1211 | "\n", 1212 | "PS - Not an exhaustive list AT ALL. \n", 1213 | "\n", 1214 | "
Can we write a better training loop now? " 1215 | ] 1216 | }, 1217 | { 1218 | "cell_type": "markdown", 1219 | "metadata": { 1220 | "id": "10dxUCy3iR0A", 1221 | "colab_type": "text" 1222 | }, 1223 | "source": [ 1224 | "### 8. A Better Training Loop \n", 1225 | "We combine our knowledge of Early Stopping and saving the model at the right \n", 1226 | "time during training to write a better _Training Loop_. \n", 1227 | "\n", 1228 | "We use [Callbacks](https://poutyne.org/callbacks.html# \"Click to visit link\") in Poutyne to incorporate Early Stopping and saving the model after every epoch." 1229 | ] 1230 | }, 1231 | { 1232 | "cell_type": "code", 1233 | "metadata": { 1234 | "id": "hLajWBECiXsI", 1235 | "colab_type": "code", 1236 | "colab": {} 1237 | }, 1238 | "source": [ 1239 | "from poutyne.framework import ModelCheckpoint # Saves the trained model during training\n", 1240 | " # https://poutyne.org/callbacks.html#checkpointing\n", 1241 | "from poutyne.framework import EarlyStopping # You know what it does! ;) \n", 1242 | " # https://poutyne.org/callbacks.html#poutyne.framework.callbacks.EarlyStopping" 1243 | ], 1244 | "execution_count": 0, 1245 | "outputs": [] 1246 | }, 1247 | { 1248 | "cell_type": "code", 1249 | "metadata": { 1250 | "id": "icK1WbNHr-ud", 1251 | "colab_type": "code", 1252 | "colab": {} 1253 | }, 1254 | "source": [ 1255 | "# A better poutyne training loop\n", 1256 | "# Turn the GPU ON\n", 1257 | "\n", 1258 | "# A few hyperparameters for the training loop \n", 1259 | "learning_rate = 0.1\n", 1260 | "epochs = 10 # let's train for more epochs to see the callbacks in action\n", 1261 | "\n", 1262 | "def better_poutyne_train(model_name, pytorch_model):\n", 1263 | " \n", 1264 | " callbacks = [\n", 1265 | " # ---- FILL IN ----\n", 1266 | " ]\n", 1267 | " \n", 1268 | " # Select the optimizer and the loss function \n", 1269 | " optimizer = optim.SGD(pytorch_model.parameters(), lr=learning_rate)\n", 1270 | " loss_function = nn.CrossEntropyLoss()\n", 1271 | " # Poutyne Model\n", 1272 | " model = Model(pytorch_model, optimizer, loss_function, batch_metrics=['accuracy'])\n", 1273 | " # Send the 'Poutyne model' to the GPU/CPU, whichever is available \n", 1274 | " model.to(device)\n", 1275 | " # Train\n", 1276 | " model.fit_generator(train_loader, valid_loader, epochs=epochs, callbacks=callbacks)\n", 1277 | " # Test\n", 1278 | " test_loss, test_acc = model.evaluate_generator(test_loader)\n", 1279 | " print(f'Test:\\n\\tLoss: {test_loss: .3f}\\n\\tAccuracy: {test_acc: .3f}')\n", 1280 | "\n", 1281 | " return None " 1282 | ], 1283 | "execution_count": 0, 1284 | "outputs": [] 1285 | }, 1286 | { 1287 | "cell_type": "code", 1288 | "metadata": { 1289 | "id": "QrxuV06YE2QV", 1290 | "colab_type": "code", 1291 | "colab": {} 1292 | }, 1293 | "source": [ 1294 | "# COLAB TIP\n", 1295 | "!nvidia-smi # Use this command to figure out the GPU assigned by Google " 1296 | ], 1297 | "execution_count": 0, 1298 | "outputs": [] 1299 | }, 1300 | { 1301 | "cell_type": "code", 1302 | "metadata": { 1303 | "id": "ffcjuFHa5v75", 1304 | "colab_type": "code", 1305 | "colab": {} 1306 | }, 1307 | "source": [ 1308 | "# Let's initialize a new CNN Model just like before \n", 1309 | "# ---- FILL IN ----" 1310 | ], 1311 | "execution_count": 0, 1312 | "outputs": [] 1313 | }, 1314 | { 1315 | "cell_type": "code", 1316 | "metadata": { 1317 | "id": "UTWPxL4Y6Avb", 1318 | "colab_type": "code", 1319 | "colab": {} 1320 | }, 1321 | "source": [ 1322 | "# Time to train in a better way!.....takes about 120 seconds on GPU \n", 1323 | "# Make sure to Turn 
the GPU ON\n", 1324 | "# ---- FILL IN ----" 1325 | ], 1326 | "execution_count": 0, 1327 | "outputs": [] 1328 | }, 1329 | { 1330 | "cell_type": "markdown", 1331 | "metadata": { 1332 | "id": "us0ZQ7nqBGUk", 1333 | "colab_type": "text" 1334 | }, 1335 | "source": [ 1336 | "Nice! But you showed us those fancy graphs while talking about early stopping. \n", 1337 | "_**Where are those graphs now?!**_ " 1338 | ] 1339 | }, 1340 | { 1341 | "cell_type": "markdown", 1342 | "metadata": { 1343 | "id": "DYsQWympCOuv", 1344 | "colab_type": "text" 1345 | }, 1346 | "source": [ 1347 | "###9. A Fancy Training Loop \n", 1348 | "\n", 1349 | "When it comes to productivity enhancing libraries in the PyTorch ecosystem \n", 1350 | "Poutyne is not alone. \n", 1351 | "Say hello to [LiveLossPlot !!!](https://github.com/stared/livelossplot \"Click to visit Github Repository\") \n", 1352 | "\n", 1353 | "Best part is the fact that LiveLossPlot and Poutyne are compatible with each \n", 1354 | "other! " 1355 | ] 1356 | }, 1357 | { 1358 | "cell_type": "code", 1359 | "metadata": { 1360 | "id": "Uiu2yYMU6hYX", 1361 | "colab_type": "code", 1362 | "colab": {} 1363 | }, 1364 | "source": [ 1365 | "!pip install livelossplot " 1366 | ], 1367 | "execution_count": 0, 1368 | "outputs": [] 1369 | }, 1370 | { 1371 | "cell_type": "code", 1372 | "metadata": { 1373 | "id": "SXlPkpKsTsfn", 1374 | "colab_type": "code", 1375 | "colab": {} 1376 | }, 1377 | "source": [ 1378 | "from livelossplot import PlotLossesPoutyne # This module talks with Poutyne" 1379 | ], 1380 | "execution_count": 0, 1381 | "outputs": [] 1382 | }, 1383 | { 1384 | "cell_type": "code", 1385 | "metadata": { 1386 | "id": "dLpRKgNOGWFK", 1387 | "colab_type": "code", 1388 | "colab": {} 1389 | }, 1390 | "source": [ 1391 | "# A fancy pouytne training loop\n", 1392 | "# Turn the GPU ON\n", 1393 | "\n", 1394 | "# A few hyperparamters for the training loop \n", 1395 | "learning_rate = 0.1\n", 1396 | "epochs = 10 # let's train for more epochs to see the callbacks in action\n", 1397 | "\n", 1398 | "def fancy_poutyne_train(model_name, pytorch_model):\n", 1399 | " \n", 1400 | " # setting up the livelossplot callback\n", 1401 | " # ---- FILL IN ----\n", 1402 | "\n", 1403 | " callbacks = [\n", 1404 | " # Save the latest weights \n", 1405 | " ModelCheckpoint(model_name + '_last_epoch.ckpt', \\\n", 1406 | " temporary_filename='last_epoch.ckpt.tmp'),\n", 1407 | " # EarlyStopping\n", 1408 | " EarlyStopping(monitor='val_acc', patience=0, verbose=True, mode='max'),\n", 1409 | " # ---- FILL IN ----\n", 1410 | " ]\n", 1411 | " \n", 1412 | " # Select the optimizer and the loss function \n", 1413 | " optimizer = optim.SGD(pytorch_model.parameters(), lr=learning_rate)\n", 1414 | " loss_function = nn.CrossEntropyLoss()\n", 1415 | " # Poutyne Model\n", 1416 | " model = Model(pytorch_model, optimizer, loss_function, batch_metrics=['accuracy'])\n", 1417 | " # Send the 'Poutyne model' on GPU/CPU whichever is available \n", 1418 | " model.to(device)\n", 1419 | " # Train\n", 1420 | " model.fit_generator(train_loader, valid_loader, epochs=epochs, callbacks=callbacks)\n", 1421 | " # Test\n", 1422 | " test_loss, test_acc = model.evaluate_generator(test_loader)\n", 1423 | " print(f'Test:\\n\\tLoss: {test_loss: .3f}\\n\\tAccuracy: {test_acc: .3f}')\n", 1424 | "\n", 1425 | " return None " 1426 | ], 1427 | "execution_count": 0, 1428 | "outputs": [] 1429 | }, 1430 | { 1431 | "cell_type": "code", 1432 | "metadata": { 1433 | "id": "Bj9sG_AUUShG", 1434 | "colab_type": "code", 1435 | "colab": {} 1436 | }, 1437 | 
"source": [ 1438 | "# Yet another CNN Model \n", 1439 | "mnist3 = MNISTModel1().to(device)\n", 1440 | "summary(model=mnist3, input_size=(1, 28, 28), batch_size=20) # Summarize" 1441 | ], 1442 | "execution_count": 0, 1443 | "outputs": [] 1444 | }, 1445 | { 1446 | "cell_type": "code", 1447 | "metadata": { 1448 | "id": "zwA8L0XWUbVI", 1449 | "colab_type": "code", 1450 | "colab": {} 1451 | }, 1452 | "source": [ 1453 | "# Let the Fancy training loop start! \n", 1454 | "# Make sure to Turn the GPU ON\n", 1455 | "# ---- FILL IN ----" 1456 | ], 1457 | "execution_count": 0, 1458 | "outputs": [] 1459 | }, 1460 | { 1461 | "cell_type": "markdown", 1462 | "metadata": { 1463 | "id": "cxpV5o0Z-b3G", 1464 | "colab_type": "text" 1465 | }, 1466 | "source": [ 1467 | "Thanks to LiveLossPlot, we can see all the fancy graphs now! \n", 1468 | "\n", 1469 | "_**You can now do a little experiment and see for yourself how the model starts \n", 1470 | "to overfit. Simply remove the Early Stopping callback from the training loop.**_\n", 1471 | "\n", 1472 | "
You can see your model overfit even on such a simple dataset! " 1473 | ] 1474 | }, 1475 | { 1476 | "cell_type": "markdown", 1477 | "metadata": { 1478 | "id": "DaXagq-rA8vL", 1479 | "colab_type": "text" 1480 | }, 1481 | "source": [ 1482 | "### 10. Inference \n", 1483 | "We have a trained model in our hands now. \n", 1484 | "We would now like to write a simple inference routine where we can enjoy \n", 1485 | "the predictions of our PyTorch Model! " 1486 | ] 1487 | }, 1488 | { 1489 | "cell_type": "code", 1490 | "metadata": { 1491 | "id": "fpqrJYFZUpig", 1492 | "colab_type": "code", 1493 | "colab": {} 1494 | }, 1495 | "source": [ 1496 | "# A rather straightforward inference routine \n", 1497 | "\n", 1498 | "def inference():\n", 1499 | " PATH = 'mnist3_last_epoch.ckpt' # Path to the saved model checkpoint\n", 1500 | "\n", 1501 | " # ---- FILL IN ----\n", 1502 | " \n", 1503 | " # Our model outputs 'logits'; we need to transform them into class probabilities. \n", 1504 | " # https://discuss.pytorch.org/t/how-to-extract-probabilities/2720/12\n", 1505 | " # To transform logits, we need to use the 'Softmax' function\n", 1506 | " # https://medium.com/data-science-bootcamp/understand-the-softmax-function-in-minutes-f3a59641e86d\n", 1507 | " # Therefore, \n", 1508 | " class_probabilities = F.softmax(output, dim=1).numpy().squeeze()\n", 1509 | " print('\\nClass Probabilities ==>', class_probabilities)\n", 1510 | " for i, proba in enumerate(class_probabilities):\n", 1511 | " print(f'Class \\t{i}\\t Probability \\t{100*proba:.2f}%')\n", 1512 | "\n", 1513 | " # A very Fancy way to showcase the results \n", 1514 | " # Create a figure with two axes, ax1 and ax2\n", 1515 | " fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2) # Subplot with 2 columns\n", 1516 | " # ax1 holds the image from the test dataset\n", 1517 | " ax1.imshow(image.resize_(1, 28, 28).numpy().squeeze())\n", 1518 | " ax1.set_title('Ground Truth ' + str(label.numpy()))\n", 1519 | " # ax2 holds a horizontal bar chart containing class_probabilities \n", 1520 | " ax2.barh(np.arange(10), class_probabilities)\n", 1521 | " ax2.set_aspect(0.1) # aspect ratio of ax2, else it will get too big\n", 1522 | " ax2.set_yticks(np.arange(10)) # 10 ticks on the y-axis for 10 classes\n", 1523 | " ax2.set_yticklabels(np.arange(10)) # set the ticklabels from 0 to 9\n", 1524 | " ax2.set_title('Class Probability')\n", 1525 | " ax2.set_xlim(0, 1.1) # probability can't be over 1, hence set limit to 1.1\n", 1526 | "\n", 1527 | " return None" 1528 | ], 1529 | "execution_count": 0, 1530 | "outputs": [] 1531 | }, 1532 | { 1533 | "cell_type": "code", 1534 | "metadata": { 1535 | "id": "enklrq5uH3qi", 1536 | "colab_type": "code", 1537 | "colab": {} 1538 | }, 1539 | "source": [ 1540 | "inference() " 1541 | ], 1542 | "execution_count": 0, 1543 | "outputs": [] 1544 | }, 1545 | { 1546 | "cell_type": "markdown", 1547 | "metadata": { 1548 | "id": "AAttJPwQ8O4t", 1549 | "colab_type": "text" 1550 | }, 1551 | "source": [ 1552 | "_**CONGRATULATIONS!!!**_ \n", 1553 | "You just wrote a complete inference loop for your Deep Learning Model! \n", 1554 | "\n", 1555 | "![Congratulations](https://media.giphy.com/media/F22UTGzxpASWc/source.gif \"Can you identify the guitarist? ;) \")" 1556 | ] 1557 | }, 1558 | { 1559 | "cell_type": "markdown", 1560 | "metadata": { 1561 | "id": "iKN3ZR_Gh22S", 1562 | "colab_type": "text" 1563 | }, 1564 | "source": [ 1565 | "### 11. Thank You! \n", 1566 | "Guys, thank you so much for being with me through this webinar. 
\n", 1567 | "I would like to thank each and every one of you out there! \n", 1568 | "\n", 1569 | "I will be really happy to connect with you all on LinkedIN, feel free to drop \n", 1570 | "in a connection request. \n", 1571 | " \n", 1572 | "https://www.linkedin.com/in/pranjall/" 1573 | ] 1574 | }, 1575 | { 1576 | "cell_type": "markdown", 1577 | "metadata": { 1578 | "id": "J2lXpTfRk7JO", 1579 | "colab_type": "text" 1580 | }, 1581 | "source": [ 1582 | "###12. BONUS! \n", 1583 | "\n", 1584 | "I will be adding some bonus content in this Notebook in the coming week. \n", 1585 | "Namely, \n", 1586 | "1. Transfer Learning \n", 1587 | "2. Visualizing computation graphs of your custom model \n", 1588 | "3. Visualizing what your CNN layer is looking at (it is amazing, trust me!) \n", 1589 | "\n", 1590 | "Consider _**starring**_ this repository on Github if you liked this webinar \n", 1591 | "and want to get the bonus material coming later in this week. \n", 1592 | "A lot of my peers at Udacity have loved the content in this repo, checkout the \n", 1593 | "`Introduction to Neural Networks` folder. \n", 1594 | "\n", 1595 | "I plan to keep uploading/updating the content of all of my future webinars in \n", 1596 | "this repository. \n", 1597 | "\n", 1598 | "https://github.com/pranjalchaubey/Deep-Learning-Notes " 1599 | ] 1600 | }, 1601 | { 1602 | "cell_type": "markdown", 1603 | "metadata": { 1604 | "id": "2KbNEwlOmLq1", 1605 | "colab_type": "text" 1606 | }, 1607 | "source": [ 1608 | "_**Wishing you guys all the best in your Deep Learning journey with PyTorch!**_ " 1609 | ] 1610 | } 1611 | ] 1612 | } -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/README.md: -------------------------------------------------------------------------------- 1 | # Python Image Classification in 2020 2 | 3 | This webinar was conducted on 12th April 2020 in association with [Facebook Developer Circles Sweden](https://www.linkedin.com/company/developercirclesfromfacebook/ "Facebook Developer Circles Sweden") and [People in Data](https://www.linkedin.com/company/people-in-data/ "People in Data"). 4 | 5 | The `Image_Classification_master.ipynb` notebook contains the complete code along with the comments and explanations. 6 | The `Image_Classification_practice.ipynb` is the notebook I used to code live during the webinar. 7 | 8 | For bonus content, please refer `Image_Classification_master.ipynb` only (content will be added soon). 9 | 10 | _**Webinar recording: https://youtu.be/eAVWos2-qp0**_ 11 | 12 | ------------ 13 | This work has been translated to Turkish as well, thanks to [Merve Ayyüce KIZRAK](https://www.linkedin.com/in/merve-ayyuce-kizrak/). 
14 | Translated Notebook Repository: https://github.com/ayyucekizrak/PyTorch-ile-Goruntu-Siniflandirma 15 | -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/01_torchvision_mnist.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/01_torchvision_mnist.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/02_dataloader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/02_dataloader.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/03_dataloader_analogy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/03_dataloader_analogy.jpg -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/04_matplotlib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/04_matplotlib.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/05_conv2d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/05_conv2d.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/06_maxpool2d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/06_maxpool2d.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/07_dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/07_dropout.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/08_linear .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/08_linear .png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/09_batch_visualization.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/09_batch_visualization.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/10_poutyne.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/10_poutyne.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/11_fitting examples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/11_fitting examples.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/12_overfitting_matrix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/12_overfitting_matrix.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/13_overfitting_validation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/13_overfitting_validation.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/14_overfitting_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/14_overfitting_model.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/15_early_stopping .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/15_early_stopping .png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/16_early_stopping .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/16_early_stopping .png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/17_1_vgg_architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/17_1_vgg_architecture.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 
2020/img/17_cnn_architecture.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/17_cnn_architecture.jpeg -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/18_mnist_cnn.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/18_mnist_cnn.jpeg -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/19_3channel_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/19_3channel_image.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/20_single_channel_convolution.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/20_single_channel_convolution.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/21_3channel_convolution.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/21_3channel_convolution.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/22_3channel_convolution_summation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/22_3channel_convolution_summation.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/23_cnn_stride_1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/23_cnn_stride_1.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/24_cnn_stride_2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/24_cnn_stride_2.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/25_padding_1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/25_padding_1.gif 
-------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/26_max_pooling.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/26_max_pooling.gif -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/27_pooling_types.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/27_pooling_types.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/28_fc_layer.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/28_fc_layer.jpeg -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/28_tensors.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/28_tensors.png -------------------------------------------------------------------------------- /PyTorch Image Classification in 2020/img/29_forward_backward.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/PyTorch Image Classification in 2020/img/29_forward_backward.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deep-Learning-Notes 2 | 3 | Update 29th April 2020: Added Kevin Markham's Data School's new Data Science course notes in the `Kevin's Data Science Class Notes` folder. 4 | 5 | 6 | Update 22nd April 2020: My Jupyter Notebook on PyTorch for the Webinar conducted for Facebook Developer Circles, Sweden, has been translated into Turkish. All credit goes to [Merve Ayyüce KIZRAK](https://www.linkedin.com/in/merve-ayyuce-kizrak/). 7 | Translated Notebook Repository: https://github.com/ayyucekizrak/PyTorch-ile-Goruntu-Siniflandirma 8 | 9 | 10 | Update 16th April 2020: For _the webinar conducted on 12th April 2020_ for Facebook Developer Circles, Sweden in association with People in Data, the Jupyter Notebooks with full source code along with the link of the webinar recording are present in the `PyTorch Image Classification in 2020` folder. 11 | 12 | ------------ 13 | My handwritten notes from Udacity's Deep Learning Course. 14 | 15 | 16 |
The PDF file, `Udacity-Deep-Learning-Notes.pdf`, contains the notes. 17 | There's also a presentation on Neural Networks inside the `Introduction to Neural Networks` folder. 18 | 19 |
[![Introduction to Neural Networks ](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Introduction%20to%20Neural%20Networks/img/repo1.png "Introduction to Neural Networks ")](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Introduction%20to%20Neural%20Networks/img/repo1.png "Introduction to Neural Networks ") 20 | 21 |
[![Hand written notes](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Introduction%20to%20Neural%20Networks/img/repo2.png "Hand written notes")](https://github.com/pranjalchaubey/Deep-Learning-Notes/blob/master/Introduction%20to%20Neural%20Networks/img/repo2.png "Hand written notes") 22 | -------------------------------------------------------------------------------- /Transformers/doc/Rough Sheet - Keys Values Queries.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/doc/Rough Sheet - Keys Values Queries.docx -------------------------------------------------------------------------------- /Transformers/doc/Rough Sheet - Self Attention.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/doc/Rough Sheet - Self Attention.docx -------------------------------------------------------------------------------- /Transformers/doc/~$ugh Sheet - Self Attention.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/doc/~$ugh Sheet - Self Attention.docx -------------------------------------------------------------------------------- /Transformers/img/rasa/01 Attention for Time Series.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/01 Attention for Time Series.png -------------------------------------------------------------------------------- /Transformers/img/rasa/02 Basic Text Reweighing Idea.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/02 Basic Text Reweighing Idea.png -------------------------------------------------------------------------------- /Transformers/img/rasa/03 Word Embeddings King Queen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/03 Word Embeddings King Queen.png -------------------------------------------------------------------------------- /Transformers/img/rasa/04 Self Attention 01 .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/04 Self Attention 01 .png -------------------------------------------------------------------------------- /Transformers/img/rasa/04 Self Attention 02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/04 Self Attention 02.png -------------------------------------------------------------------------------- /Transformers/img/rasa/04 Self Attention 03.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/04 Self Attention 03.png -------------------------------------------------------------------------------- /Transformers/img/rasa/05 Basic Self Attention.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/05 Basic Self Attention.png -------------------------------------------------------------------------------- /Transformers/img/rasa/06 Calculating Scores .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/06 Calculating Scores .png -------------------------------------------------------------------------------- /Transformers/img/rasa/07 Calculating Weights.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/07 Calculating Weights.png -------------------------------------------------------------------------------- /Transformers/img/rasa/08 Calculating All Outputs .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/08 Calculating All Outputs .png -------------------------------------------------------------------------------- /Transformers/img/rasa/08 Calculating Outputs .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/08 Calculating Outputs .png -------------------------------------------------------------------------------- /Transformers/img/rasa/09 Database Analogy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/09 Database Analogy.png -------------------------------------------------------------------------------- /Transformers/img/rasa/10 Matrices for Keys Queries Values .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/10 Matrices for Keys Queries Values .png -------------------------------------------------------------------------------- /Transformers/img/rasa/11 Neural Self Attention Block .png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Transformers/img/rasa/11 Neural Self Attention Block .png -------------------------------------------------------------------------------- /Udacity Deep Learning Nano Degree Notes/Udacity-Deep-Learning-Notes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pranjalchaubey/Deep-Learning-Notes/0dbdf240e4ebc554912346b5b057cb325d0705b5/Udacity Deep Learning 
Nano Degree Notes/Udacity-Deep-Learning-Notes.pdf --------------------------------------------------------------------------------