├── Data_set ├── Readme └── exp_vs_salary_Data.csv ├── README.md ├── first_neural_network.py ├── Linear_regression_.ipynb ├── resnet9_cifar10.py ├── Pytorch_basics.ipynb └── Linear_regression_example.ipynb /Data_set/Readme: -------------------------------------------------------------------------------- 1 | All the data sets used on deep learning neural networks 2 | -------------------------------------------------------------------------------- /Data_set/exp_vs_salary_Data.csv: -------------------------------------------------------------------------------- 1 | YearsExperience,Salary 2 | 1.2,39342 3 | 1.2,46205 4 | 1.4,37730 5 | 2,43524 6 | 2.2,39890 7 | 2.9,56642 8 | 3,60150 9 | 3.2,54445 10 | 3.2,64445 11 | 3.6,57188 12 | 3.9,63218 13 | 4,55794 14 | 4,56957 15 | 4.1,57081 16 | 4.6,61110 17 | 4.9,67938 18 | 5.1,66028 19 | 5.3,83088 20 | 5.8,81362 21 | 6,93940 22 | 6.8,91738 23 | 7.1,98272 24 | 7.9,101302 25 | 8.2,113812 26 | 8.6,109430 27 | 9,105582 28 | 9.4,116969 29 | 9.7,112635 30 | 10.3,122390 31 | 10.5,121870 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deep_Learning_Models 2 | Python Deep learning models with deployment 3 | 4 | Pytorch Zero to GANS 5 | 6 | My Medium Blog posts : 7 | 8 | 1.Deep Learning — Artificial Neural Network(ANN) - https://medium.com/analytics-vidhya/deep-learning-artificial-neural-network-ann-13b54c3f370f 9 | 10 | 2.Tensors — Basics of pytorch programming - https://medium.com/@arun.purakkatt/tensors-basics-of-pytorch-programming-5de82ea45ebf 11 | 12 | 3.Linear Regression with PyTorch - https://medium.com/analytics-vidhya/linear-regression-with-pytorch-147fed55f138 13 | 14 | 4.Image classification with PyTorch - https://medium.com/analytics-vidhya/image-classification-with-pytorch-184e76c2cf3b 15 | 16 | 5.Training Deep Neural Networks on a GPU with PyTorch - https://medium.com/analytics-vidhya/training-deep-neural-networks-on-a-gpu-with-pytorch-2851ccfb6066 17 | 18 | 6. Image classification with CNN - https://medium.com/swlh/image-classification-with-cnn-4f2a501faadb 19 | 20 | 7. ResNet Residual Neural network on CIFAR10 - https://medium.com/analytics-vidhya/resnet-10f4ef1b9d4c 21 | 22 | -------------------------------------------------------------------------------- /first_neural_network.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """First_neural_network.ipynb 3 | 4 | Automatically generated by Colaboratory. 
5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1mJX4LlGa18GS6LARmLI5KBwXjM2PzJqK 8 | """ 9 | 10 | #importing modules from Keras 11 | from keras.datasets import mnist 12 | from keras import models 13 | from keras import layers 14 | from keras.utils import to_categorical 15 | 16 | (train_images, train_labels), (test_images, test_labels) = mnist.load_data() 17 | 18 | network = models.Sequential() 19 | network.add(layers.Dense(784, activation='relu', input_shape=(28 * 28,))) 20 | network.add(layers.Dense(784, activation='relu', input_shape=(28 * 28,))) 21 | network.add(layers.Dense(10, activation='softmax')) 22 | network.compile(optimizer='adam', 23 | loss='categorical_crossentropy', 24 | metrics=['accuracy']) 25 | 26 | train_images = train_images.reshape((60000, 28 * 28)) 27 | train_images = train_images.astype('float32') / 255 28 | test_images = test_images.reshape((10000, 28 * 28)) 29 | test_images = test_images.astype('float32') / 255 30 | 31 | train_labels = to_categorical(train_labels) 32 | test_labels = to_categorical(test_labels) 33 | 34 | network.fit(train_images, train_labels, epochs=5, batch_size=128) 35 | 36 | test_loss, test_acc = network.evaluate(test_images, test_labels) 37 | print('test_acc:', test_acc, 'test_loss', test_loss) 38 | 39 | -------------------------------------------------------------------------------- /Linear_regression_.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Linear_regression_.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | } 13 | }, 14 | "cells": [ 15 | { 16 | "cell_type": "code", 17 | "metadata": { 18 | "id": "Ywna6VCTuf6-", 19 | "colab_type": "code", 20 | "colab": {} 21 | }, 22 | "source": [ 23 | "#Import Libraries\n", 24 | "import torch\n", 25 | "import torch.nn as nn\n", 26 | "import pandas as pd\n", 27 | "import numpy as np" 28 | ], 29 | "execution_count": 96, 30 | "outputs": [] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "metadata": { 35 | "id": "eQGKlHwC-BHV", 36 | "colab_type": "code", 37 | "colab": {} 38 | }, 39 | "source": [ 40 | "# Input (temp, rainfall, humidity)\n", 41 | "inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], \n", 42 | " [102, 43, 37], [69, 96, 70], [73, 67, 43], \n", 43 | " [91, 88, 64], [87, 134, 58], [102, 43, 37], \n", 44 | " [69, 96, 70], [73, 67, 43], [91, 88, 64], \n", 45 | " [87, 134, 58], [102, 43, 37], [69, 96, 70]], \n", 46 | " dtype='float32')\n", 47 | "\n", 48 | "# Targets (apples, oranges)\n", 49 | "targets = np.array([[56, 70], [81, 101], [119, 133], \n", 50 | " [22, 37], [103, 119], [56, 70], \n", 51 | " [81, 101], [119, 133], [22, 37], \n", 52 | " [103, 119], [56, 70], [81, 101], \n", 53 | " [119, 133], [22, 37], [103, 119]], \n", 54 | " dtype='float32')\n", 55 | "\n", 56 | "inputs = torch.from_numpy(inputs)\n", 57 | "targets = torch.from_numpy(targets)" 58 | ], 59 | "execution_count": 97, 60 | "outputs": [] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "metadata": { 65 | "id": "9uPZut6k8H22", 66 | "colab_type": "code", 67 | "colab": { 68 | "base_uri": "https://localhost:8080/", 69 | "height": 272 70 | }, 71 | "outputId": "5d71d3cb-459c-402d-e4d8-50d82ed39a81" 72 | }, 73 | "source": [ 74 | "inputs" 75 | ], 76 | "execution_count": 98, 77 | "outputs": [ 78 | { 79 | "output_type": "execute_result", 80 | "data": { 81 | "text/plain": [ 82 | "tensor([[ 73., 67., 
43.],\n", 83 | " [ 91., 88., 64.],\n", 84 | " [ 87., 134., 58.],\n", 85 | " [102., 43., 37.],\n", 86 | " [ 69., 96., 70.],\n", 87 | " [ 73., 67., 43.],\n", 88 | " [ 91., 88., 64.],\n", 89 | " [ 87., 134., 58.],\n", 90 | " [102., 43., 37.],\n", 91 | " [ 69., 96., 70.],\n", 92 | " [ 73., 67., 43.],\n", 93 | " [ 91., 88., 64.],\n", 94 | " [ 87., 134., 58.],\n", 95 | " [102., 43., 37.],\n", 96 | " [ 69., 96., 70.]])" 97 | ] 98 | }, 99 | "metadata": { 100 | "tags": [] 101 | }, 102 | "execution_count": 98 103 | } 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "metadata": { 109 | "id": "S7CM-uopDE_g", 110 | "colab_type": "code", 111 | "colab": { 112 | "base_uri": "https://localhost:8080/", 113 | "height": 272 114 | }, 115 | "outputId": "83033f4d-b781-4505-cc97-f5c57ccf3738" 116 | }, 117 | "source": [ 118 | "targets" 119 | ], 120 | "execution_count": 99, 121 | "outputs": [ 122 | { 123 | "output_type": "execute_result", 124 | "data": { 125 | "text/plain": [ 126 | "tensor([[ 56., 70.],\n", 127 | " [ 81., 101.],\n", 128 | " [119., 133.],\n", 129 | " [ 22., 37.],\n", 130 | " [103., 119.],\n", 131 | " [ 56., 70.],\n", 132 | " [ 81., 101.],\n", 133 | " [119., 133.],\n", 134 | " [ 22., 37.],\n", 135 | " [103., 119.],\n", 136 | " [ 56., 70.],\n", 137 | " [ 81., 101.],\n", 138 | " [119., 133.],\n", 139 | " [ 22., 37.],\n", 140 | " [103., 119.]])" 141 | ] 142 | }, 143 | "metadata": { 144 | "tags": [] 145 | }, 146 | "execution_count": 99 147 | } 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "metadata": { 153 | "id": "8Xk2mWJ5DGc6", 154 | "colab_type": "code", 155 | "colab": {} 156 | }, 157 | "source": [ 158 | "# Data Set & Data loader" 159 | ], 160 | "execution_count": 100, 161 | "outputs": [] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "metadata": { 166 | "id": "5WQHUH2PTy7m", 167 | "colab_type": "code", 168 | "colab": {} 169 | }, 170 | "source": [ 171 | "from torch.utils.data import TensorDataset" 172 | ], 173 | "execution_count": 101, 174 | "outputs": [] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "metadata": { 179 | "id": "NBWed0iZT5Sr", 180 | "colab_type": "code", 181 | "colab": { 182 | "base_uri": "https://localhost:8080/", 183 | "height": 102 184 | }, 185 | "outputId": "41c31107-4d70-43b7-cdbe-e3feff553f06" 186 | }, 187 | "source": [ 188 | "# Define dataset\n", 189 | "train_ds = TensorDataset(inputs, targets)\n", 190 | "train_ds[0:3]" 191 | ], 192 | "execution_count": 102, 193 | "outputs": [ 194 | { 195 | "output_type": "execute_result", 196 | "data": { 197 | "text/plain": [ 198 | "(tensor([[ 73., 67., 43.],\n", 199 | " [ 91., 88., 64.],\n", 200 | " [ 87., 134., 58.]]), tensor([[ 56., 70.],\n", 201 | " [ 81., 101.],\n", 202 | " [119., 133.]]))" 203 | ] 204 | }, 205 | "metadata": { 206 | "tags": [] 207 | }, 208 | "execution_count": 102 209 | } 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "metadata": { 215 | "id": "D-N7Qbk0g8oj", 216 | "colab_type": "code", 217 | "colab": {} 218 | }, 219 | "source": [ 220 | "from torch.utils.data import DataLoader" 221 | ], 222 | "execution_count": 103, 223 | "outputs": [] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "metadata": { 228 | "id": "IskevY2PiV-B", 229 | "colab_type": "code", 230 | "colab": {} 231 | }, 232 | "source": [ 233 | "# Define data loader\n", 234 | "batch_size = 5\n", 235 | "train_dl = DataLoader(train_ds, batch_size, shuffle=True)" 236 | ], 237 | "execution_count": 104, 238 | "outputs": [] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "metadata": { 243 | "id": "DRx62QoDiccC", 244 | 
"colab_type": "code", 245 | "colab": { 246 | "base_uri": "https://localhost:8080/", 247 | "height": 102 248 | }, 249 | "outputId": "f00cc610-8b60-4e24-90b2-7535d723549d" 250 | }, 251 | "source": [ 252 | "# Define model\n", 253 | "model = nn.Linear(3, 2)\n", 254 | "print(model.weight)\n", 255 | "print(model.bias)" 256 | ], 257 | "execution_count": 105, 258 | "outputs": [ 259 | { 260 | "output_type": "stream", 261 | "text": [ 262 | "Parameter containing:\n", 263 | "tensor([[-0.2901, 0.3793, 0.3939],\n", 264 | " [-0.1927, -0.4132, 0.1687]], requires_grad=True)\n", 265 | "Parameter containing:\n", 266 | "tensor([ 0.2594, -0.1166], requires_grad=True)\n" 267 | ], 268 | "name": "stdout" 269 | } 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "metadata": { 275 | "id": "pmUXsCU1DZN9", 276 | "colab_type": "code", 277 | "colab": {} 278 | }, 279 | "source": [ 280 | "" 281 | ], 282 | "execution_count": 87, 283 | "outputs": [] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "metadata": { 288 | "id": "TnYjHNk0ZY9_", 289 | "colab_type": "code", 290 | "colab": { 291 | "base_uri": "https://localhost:8080/", 292 | "height": 34 293 | }, 294 | "outputId": "c3b3003c-e9ae-40a3-fe20-9d858079bde0" 295 | }, 296 | "source": [ 297 | "print(model.parameters())" 298 | ], 299 | "execution_count": 106, 300 | "outputs": [ 301 | { 302 | "output_type": "stream", 303 | "text": [ 304 | "\n" 305 | ], 306 | "name": "stdout" 307 | } 308 | ] 309 | }, 310 | { 311 | "cell_type": "code", 312 | "metadata": { 313 | "id": "Hkq-aBCaOTeQ", 314 | "colab_type": "code", 315 | "colab": {} 316 | }, 317 | "source": [ 318 | "# Instantiate loss class \n", 319 | "\n", 320 | "# Import nn.functional\n", 321 | "import torch.nn.functional as F\n", 322 | "\n", 323 | "# Define loss function\n", 324 | "loss_fn = F.mse_loss" 325 | ], 326 | "execution_count": 107, 327 | "outputs": [] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "metadata": { 332 | "id": "EcbTz5WgPV5Z", 333 | "colab_type": "code", 334 | "colab": {} 335 | }, 336 | "source": [ 337 | "" 338 | ], 339 | "execution_count": 92, 340 | "outputs": [] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "metadata": { 345 | "id": "NvE51j6yWn4f", 346 | "colab_type": "code", 347 | "colab": {} 348 | }, 349 | "source": [ 350 | "# Define optimizer\n", 351 | "opt = torch.optim.SGD(model.parameters(), lr=1e-5)" 352 | ], 353 | "execution_count": 108, 354 | "outputs": [] 355 | }, 356 | { 357 | "cell_type": "code", 358 | "metadata": { 359 | "id": "JIJfQVxQP57c", 360 | "colab_type": "code", 361 | "colab": {} 362 | }, 363 | "source": [ 364 | "# Utility function to train the model\n", 365 | "def fit(num_epochs, model, loss_fn, opt, train_dl):\n", 366 | " \n", 367 | " # Repeat for given number of epochs\n", 368 | " for epoch in range(num_epochs):\n", 369 | " \n", 370 | " # Train with batches of data\n", 371 | " for xb,yb in train_dl:\n", 372 | " \n", 373 | " # 1. Generate predictions\n", 374 | " pred = model(xb)\n", 375 | " \n", 376 | " # 2. Calculate loss\n", 377 | " loss = loss_fn(pred, yb)\n", 378 | " \n", 379 | " # 3. Compute gradients\n", 380 | " loss.backward()\n", 381 | " \n", 382 | " # 4. Update parameters using gradients\n", 383 | " opt.step()\n", 384 | " \n", 385 | " # 5. 
Reset the gradients to zero\n", 386 | " opt.zero_grad()\n", 387 | " \n", 388 | " # Print the progress\n", 389 | " if (epoch+1) % 10 == 0:\n", 390 | " print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))" 391 | ], 392 | "execution_count": 109, 393 | "outputs": [] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "metadata": { 398 | "id": "y4PjiyIxQe67", 399 | "colab_type": "code", 400 | "colab": { 401 | "base_uri": "https://localhost:8080/", 402 | "height": 187 403 | }, 404 | "outputId": "d884ae19-ad5b-43b9-ef0b-20841262904b" 405 | }, 406 | "source": [ 407 | "fit(100, model, criterion , optimizer ,train_dl)" 408 | ], 409 | "execution_count": 110, 410 | "outputs": [ 411 | { 412 | "output_type": "stream", 413 | "text": [ 414 | "Epoch [10/100], Loss: 14966.5293\n", 415 | "Epoch [20/100], Loss: 9267.5000\n", 416 | "Epoch [30/100], Loss: 12967.0986\n", 417 | "Epoch [40/100], Loss: 8861.1406\n", 418 | "Epoch [50/100], Loss: 7353.0430\n", 419 | "Epoch [60/100], Loss: 11972.5566\n", 420 | "Epoch [70/100], Loss: 8753.9951\n", 421 | "Epoch [80/100], Loss: 14966.5283\n", 422 | "Epoch [90/100], Loss: 10967.6660\n", 423 | "Epoch [100/100], Loss: 6754.5640\n" 424 | ], 425 | "name": "stdout" 426 | } 427 | ] 428 | }, 429 | { 430 | "cell_type": "code", 431 | "metadata": { 432 | "id": "iq15YFJ5QwFa", 433 | "colab_type": "code", 434 | "colab": {} 435 | }, 436 | "source": [ 437 | "" 438 | ], 439 | "execution_count": null, 440 | "outputs": [] 441 | } 442 | ] 443 | } -------------------------------------------------------------------------------- /resnet9_cifar10.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ResNet9_CIFAR10.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1YW0m-X6ERjVkEi5m22b5VlAPJdTBseTW 8 | 9 | **ResNet architechture on CIFAR10 Data Set** 10 | 11 | *An implementation of https://arxiv.org/pdf/1512.03385.pdf* 12 | """ 13 | 14 | # Commented out IPython magic to ensure Python compatibility. 15 | import os 16 | import torch 17 | import torchvision 18 | import tarfile 19 | import torch.nn as nn 20 | import numpy as np 21 | import torch.nn.functional as F 22 | from torchvision.datasets.utils import download_url 23 | from torchvision.datasets import ImageFolder 24 | from torch.utils.data import DataLoader 25 | import torchvision.transforms as tt 26 | from torch.utils.data import random_split 27 | from torchvision.utils import make_grid 28 | import matplotlib.pyplot as plt 29 | # %matplotlib inline 30 | 31 | # Device configuration 32 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 33 | 34 | #1.Use test set for validation 35 | #2.Channel-wise data normalization 36 | #3.Randomized data augmentations 37 | 38 | """**Use test set for validation**: Instead of setting aside a fraction (e.g. 10%) of the data from the training set for validation, we'll simply use the test set as our validation set. This just gives a little more data to train with. In general, once you have picked the best model architecture & hypeparameters using a fixed validation set, it is a good idea to retrain the same model on the entire dataset just to give it a small final boost in performance. 39 | 40 | **Channel-wise data normalization**: We will normalize the image tensors by subtracting the mean and dividing by the standard deviation across each channel. 
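The statistics passed to tt.Normalize below are the values commonly quoted for CIFAR-10. As a rough, illustrative sketch (not part of the original script, and assuming the same dataset root used below), per-channel statistics can be estimated from the raw training images like this:

    # Sketch: estimate per-channel mean/std from the un-normalized training images.
    # Stacks the whole training set in memory, so it is only for a one-off check.
    raw_ds = torchvision.datasets.CIFAR10(root='../../data/', train=True,
                                          download=True, transform=tt.ToTensor())
    imgs = torch.stack([img for img, _ in raw_ds])   # shape (50000, 3, 32, 32)
    print(imgs.mean(dim=(0, 2, 3)))                  # per-channel means, close to the tuple below
    print(imgs.std(dim=(0, 2, 3)))                   # per-channel standard deviations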
As a result, the mean of the data across each channel is 0, and standard deviation is 1. Normalizing the data prevents the values from any one channel from disproportionately affecting the losses and gradients while training, simply by having a higher or wider range of values that others. 41 | 42 | **Randomized data augmentations**: We will apply randomly chosen transformations while loading images from the training dataset. Specifically, we will pad each image by 4 pixels, and then take a random crop of size 32 x 32 pixels, and then flip the image horizontally with a 50% probability. Since the transformation will be applied randomly and dynamically each time a particular image is loaded, the model sees slightly different images in each epoch of training, which allows it generalize better. 43 | """ 44 | 45 | 46 | 47 | # Data transforms (normalization & data augmentation) 48 | stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) 49 | train_tfms = tt.Compose([tt.RandomCrop(32, padding=4, padding_mode='reflect'), 50 | tt.RandomHorizontalFlip(), 51 | tt.ToTensor(), 52 | tt.Normalize(*stats,inplace=True)]) 53 | valid_tfms = tt.Compose([tt.ToTensor(), tt.Normalize(*stats)]) 54 | 55 | # CIFAR-10 dataset 56 | train_dataset = torchvision.datasets.CIFAR10(root='../../data/', 57 | train=True, 58 | transform=train_tfms, 59 | download=True) 60 | 61 | valid_dataset = torchvision.datasets.CIFAR10(root='../../data/', 62 | train=False, 63 | transform=valid_tfms) 64 | 65 | batch_size = 400 66 | 67 | # PyTorch data loaders 68 | train_dl = DataLoader(train_dataset, batch_size, shuffle=True, num_workers=3, pin_memory=True) 69 | valid_dl = DataLoader(valid_dataset, batch_size*2, num_workers=3, pin_memory=True) 70 | 71 | """Let's take a look at some sample images from the training dataloader.""" 72 | 73 | def show_batch(dl): 74 | for images, labels in dl: 75 | fig, ax = plt.subplots(figsize=(12, 12)) 76 | ax.set_xticks([]); ax.set_yticks([]) 77 | ax.imshow(make_grid(images[:64], nrow=8).permute(1, 2, 0)) 78 | break 79 | 80 | show_batch(train_dl) 81 | 82 | """The colors seem out of place because of the normalization. Note that normalization is also applied during inference. If you look closely, you can see the cropping and reflection padding in some of the images. Horizontal flip is a bit difficult to detect from visual inspection. 
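To view a batch in its original colours, the channel-wise normalization can be undone before plotting. A small illustrative helper (not part of the original script) that reuses the stats tuple defined above:

    def denormalize(images, means, stds):
        # Undo tt.Normalize: scale by the per-channel std and add back the mean.
        means = torch.tensor(means).reshape(1, 3, 1, 1)
        stds = torch.tensor(stds).reshape(1, 3, 1, 1)
        return images * stds + means

    # e.g. in show_batch:
    # ax.imshow(make_grid(denormalize(images[:64], *stats), nrow=8).permute(1, 2, 0))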
83 | 84 | **Using a GPU** 85 | 86 | To seamlessly use a GPU, if one is available, we define a couple of helper functions (get_default_device & to_device) and a helper class DeviceDataLoader to move our model & data to the GPU as required 87 | """ 88 | 89 | def get_default_device(): 90 | """Pick GPU if available, else CPU""" 91 | if torch.cuda.is_available(): 92 | return torch.device('cuda') 93 | else: 94 | return torch.device('cpu') 95 | 96 | def to_device(data, device): 97 | """Move tensor(s) to chosen device""" 98 | if isinstance(data, (list,tuple)): 99 | return [to_device(x, device) for x in data] 100 | return data.to(device, non_blocking=True) 101 | 102 | class DeviceDataLoader(): 103 | """Wrap a dataloader to move data to a device""" 104 | def __init__(self, dl, device): 105 | self.dl = dl 106 | self.device = device 107 | 108 | def __iter__(self): 109 | """Yield a batch of data after moving it to device""" 110 | for b in self.dl: 111 | yield to_device(b, self.device) 112 | 113 | def __len__(self): 114 | """Number of batches""" 115 | return len(self.dl) 116 | 117 | device = get_default_device() 118 | device 119 | 120 | """We can now wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches of data to the GPU (if available).""" 121 | 122 | train_dl = DeviceDataLoader(train_dl, device) 123 | valid_dl = DeviceDataLoader(valid_dl, device) 124 | 125 | """**Model with Residual Blocks and Batch Normalization** 126 | 127 | One of the key changes to our CNN model this time is the addition of the resudial block, which adds the original input back to the output feature map obtained by passing the input through one or more convolutional layers. 128 | """ 129 | 130 | #simple residual block 131 | 132 | class SimpleResidualBlock(nn.Module): 133 | def __init__(self): 134 | super().__init__() 135 | self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1) 136 | self.relu1 = nn.ReLU() 137 | self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3, stride=1, padding=1) 138 | self.relu2 = nn.ReLU() 139 | 140 | def forward(self, x): 141 | out = self.conv1(x) 142 | out = self.relu1(out) 143 | out = self.conv2(out) 144 | return self.relu2(out) + x # ReLU can be applied before or after adding the input 145 | 146 | simple_resnet = to_device(SimpleResidualBlock(), device) 147 | 148 | for images, labels in train_dl: 149 | out = simple_resnet(images) 150 | print(out.shape) 151 | break 152 | 153 | del simple_resnet, images, labels 154 | torch.cuda.empty_cache() 155 | 156 | """This seeming small change produces a drastic improvement in the performance of the model. Also, after each convolutional layer, we'll add a batch normalization layer, which normalizes the outputs of the previous layer. 
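Purely for illustration (the final model below builds the same pattern out of conv_block), the simple residual block above looks like this once a batch normalization layer follows each convolution:

    class SimpleResidualBlockBN(nn.Module):
        def __init__(self, channels=3):
            super().__init__()
            self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
            self.bn1 = nn.BatchNorm2d(channels)
            self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
            self.bn2 = nn.BatchNorm2d(channels)

        def forward(self, x):
            out = F.relu(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
            return F.relu(out + x)  # skip connection added before the final ReLU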
157 | 158 | Go through the following blog posts to learn more: 159 | 160 | Why and how residual blocks work: https://towardsdatascience.com/residual-blocks-building-blocks-of-resnet-fd90ca15d6ec 161 | 162 | Batch normalization and dropout explained: https://towardsdatascience.com/batch-normalization-and-dropout-in-neural-networks-explained-with-pytorch-47d7a8459bcd 163 | 164 | We will use the ResNet9 architecture, as described in this blog series : 165 | 166 | resnet-9 167 | """ 168 | 169 | def accuracy(outputs, labels): 170 | _, preds = torch.max(outputs, dim=1) 171 | return torch.tensor(torch.sum(preds == labels).item() / len(preds)) 172 | 173 | class ImageClassificationBase(nn.Module): 174 | def training_step(self, batch): 175 | images, labels = batch 176 | out = self(images) # Generate predictions 177 | loss = F.cross_entropy(out, labels) # Calculate loss 178 | return loss 179 | 180 | def validation_step(self, batch): 181 | images, labels = batch 182 | out = self(images) # Generate predictions 183 | loss = F.cross_entropy(out, labels) # Calculate loss 184 | acc = accuracy(out, labels) # Calculate accuracy 185 | return {'val_loss': loss.detach(), 'val_acc': acc} 186 | 187 | def validation_epoch_end(self, outputs): 188 | batch_losses = [x['val_loss'] for x in outputs] 189 | epoch_loss = torch.stack(batch_losses).mean() # Combine losses 190 | batch_accs = [x['val_acc'] for x in outputs] 191 | epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies 192 | return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()} 193 | 194 | def epoch_end(self, epoch, result): 195 | print("Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format( 196 | epoch, result['lrs'][-1], result['train_loss'], result['val_loss'], result['val_acc'])) 197 | 198 | def conv_block(in_channels, out_channels, pool=False): 199 | layers = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), 200 | nn.BatchNorm2d(out_channels), 201 | nn.ReLU(inplace=True)] 202 | if pool: layers.append(nn.MaxPool2d(2)) 203 | return nn.Sequential(*layers) 204 | 205 | class ResNet9(ImageClassificationBase): 206 | def __init__(self, in_channels, num_classes): 207 | super().__init__() 208 | 209 | self.conv1 = conv_block(in_channels, 64) 210 | self.conv2 = conv_block(64, 128, pool=True) 211 | self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128)) 212 | 213 | self.conv3 = conv_block(128, 256, pool=True) 214 | self.conv4 = conv_block(256, 512, pool=True) 215 | self.res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512)) 216 | 217 | self.classifier = nn.Sequential(nn.MaxPool2d(4), 218 | nn.Flatten(), 219 | nn.Linear(512, num_classes)) 220 | 221 | def forward(self, xb): 222 | out = self.conv1(xb) 223 | out = self.conv2(out) 224 | out = self.res1(out) + out 225 | out = self.conv3(out) 226 | out = self.conv4(out) 227 | out = self.res2(out) + out 228 | out = self.classifier(out) 229 | return out 230 | 231 | model = to_device(ResNet9(3, 10), device) 232 | model 233 | 234 | """**Training the model** 235 | 236 | Before we train the model, we're going to make a bunch of small but important improvements to our fit function: 237 | 238 | Learning rate scheduling: Instead of using a fixed learning rate, we will use a learning rate scheduler, which will change the learning rate after every batch of training. 
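In code terms this simply means stepping the scheduler once per batch, right after the optimizer update. A minimal self-contained sketch with hypothetical dummy data (the real training loop is in the fit_one_cycle function further down):

    dummy_model = nn.Linear(10, 2)
    dummy_batches = [(torch.randn(8, 10), torch.randint(0, 2, (8,))) for _ in range(5)]
    optimizer = torch.optim.SGD(dummy_model.parameters(), lr=0.01)
    sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01,
                                                epochs=2, steps_per_epoch=len(dummy_batches))
    for epoch in range(2):
        for xb, yb in dummy_batches:
            loss = F.cross_entropy(dummy_model(xb), yb)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            sched.step()   # learning rate changes after every batch, not every epoch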
There are many strategies for varying the learning rate during training, and the one we'll use is called the "One Cycle Learning Rate Policy", which involves starting with a low learning rate, gradually increasing it batch-by-batch to a high learning rate for about 30% of epochs, then gradually decreasing it to a very low value for the remaining epochs. Learn more: https://sgugger.github.io/the-1cycle-policy.html 239 | 240 | Weight decay: We also use weight decay, which is yet another regularization technique which prevents the weights from becoming too large by adding an additional term to the loss function.Learn more: https://towardsdatascience.com/this-thing-called-weight-decay-a7cd4bcfccab 241 | 242 | Gradient clipping: Apart from the layer weights and outputs, it also helpful to limit the values of gradients to a small range to prevent undesirable changes in parameters due to large gradient values. This simple yet effective technique is called gradient clipping. Learn more: https://towardsdatascience.com/what-is-gradient-clipping-b8e815cdfb48 243 | 244 | Let's define a fit_one_cycle function to incorporate these changes. We'll also record the learning rate used for each batch. 245 | """ 246 | 247 | @torch.no_grad() 248 | def evaluate(model, val_loader): 249 | model.eval() 250 | outputs = [model.validation_step(batch) for batch in val_loader] 251 | return model.validation_epoch_end(outputs) 252 | 253 | def get_lr(optimizer): 254 | for param_group in optimizer.param_groups: 255 | return param_group['lr'] 256 | 257 | def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader, 258 | weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD): 259 | torch.cuda.empty_cache() 260 | history = [] 261 | 262 | # Set up cutom optimizer with weight decay 263 | optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay) 264 | # Set up one-cycle learning rate scheduler 265 | sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs, 266 | steps_per_epoch=len(train_loader)) 267 | 268 | for epoch in range(epochs): 269 | # Training Phase 270 | model.train() 271 | train_losses = [] 272 | lrs = [] 273 | for batch in train_loader: 274 | loss = model.training_step(batch) 275 | train_losses.append(loss) 276 | loss.backward() 277 | 278 | # Gradient clipping 279 | if grad_clip: 280 | nn.utils.clip_grad_value_(model.parameters(), grad_clip) 281 | 282 | optimizer.step() 283 | optimizer.zero_grad() 284 | 285 | # Record & update learning rate 286 | lrs.append(get_lr(optimizer)) 287 | sched.step() 288 | 289 | # Validation phase 290 | result = evaluate(model, val_loader) 291 | result['train_loss'] = torch.stack(train_losses).mean().item() 292 | result['lrs'] = lrs 293 | model.epoch_end(epoch, result) 294 | history.append(result) 295 | return history 296 | 297 | history = [evaluate(model, valid_dl)] 298 | history 299 | 300 | """We're now ready to train our model. Instead of SGD (stochastic gradient descent), we'll use the Adam optimizer which uses techniques like momentum and adaptive learning rates for faster training. You can learn more about optimizers here: https://ruder.io/optimizing-gradient-descent/index.html""" 301 | 302 | epochs = 8 303 | max_lr = 0.01 304 | grad_clip = 0.1 305 | weight_decay = 1e-4 306 | opt_func = torch.optim.Adam 307 | 308 | # Commented out IPython magic to ensure Python compatibility. 
309 | # %%time 310 | # history += fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl, 311 | # grad_clip=grad_clip, 312 | # weight_decay=weight_decay, 313 | # opt_func=opt_func) 314 | 315 | """Our model trained to over 90% accuracy in just 4 minutes! Try playing around with the data augmentations, network architecture & hyperparameters to achive the following results: 316 | 317 | 94% accuracy in under 10 minutes (easy) 318 | 90% accuracy in under 2.5 minutes (intermediate) 319 | 94% accuracy in under 5 minutes (hard) 320 | 321 | Let's plot the valdation set accuracies to study how the model improves over time. 322 | """ 323 | 324 | def plot_accuracies(history): 325 | accuracies = [x['val_acc'] for x in history] 326 | plt.plot(accuracies, '-x') 327 | plt.xlabel('epoch') 328 | plt.ylabel('accuracy') 329 | plt.title('Accuracy vs. No. of epochs'); 330 | 331 | plot_accuracies(history) 332 | 333 | def plot_losses(history): 334 | train_losses = [x.get('train_loss') for x in history] 335 | val_losses = [x['val_loss'] for x in history] 336 | plt.plot(train_losses, '-bx') 337 | plt.plot(val_losses, '-rx') 338 | plt.xlabel('epoch') 339 | plt.ylabel('loss') 340 | plt.legend(['Training', 'Validation']) 341 | plt.title('Loss vs. No. of epochs'); 342 | 343 | plot_losses(history) 344 | 345 | """It's clear from the trend that our model isn't overfitting to the training data just yet. Finally, let's visualize how the learning rate changed over time, batch-by-batch over all the epochs.""" 346 | 347 | def plot_lrs(history): 348 | lrs = np.concatenate([x.get('lrs', []) for x in history]) 349 | plt.plot(lrs) 350 | plt.xlabel('Batch no.') 351 | plt.ylabel('Learning rate') 352 | plt.title('Learning Rate vs. Batch no.'); 353 | 354 | plot_lrs(history) 355 | 356 | """As expected, the learning rate starts at a low value, and gradually increases for 30% of the iterations to a maximum value of 0.01, and then gradually decreases to a very small value.""" 357 | 358 | -------------------------------------------------------------------------------- /Pytorch_basics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Pytorch_basics.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [] 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | } 14 | }, 15 | "cells": [ 16 | { 17 | "cell_type": "code", 18 | "metadata": { 19 | "id": "7lK0nr0zAkit", 20 | "colab_type": "code", 21 | "colab": {} 22 | }, 23 | "source": [ 24 | "# Pytorch Basics " 25 | ], 26 | "execution_count": 1, 27 | "outputs": [] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "metadata": { 32 | "id": "mdNjN0t-rBu5", 33 | "colab_type": "code", 34 | "colab": {} 35 | }, 36 | "source": [ 37 | "import torch" 38 | ], 39 | "execution_count": null, 40 | "outputs": [] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "metadata": { 45 | "id": "5Xg6jj_0AsiJ", 46 | "colab_type": "code", 47 | "colab": { 48 | "base_uri": "https://localhost:8080/", 49 | "height": 34 50 | }, 51 | "outputId": "72178783-246f-4c01-eb41-425a0ffb9e4a" 52 | }, 53 | "source": [ 54 | "# Number\n", 55 | "t1 = torch.tensor(2.)\n", 56 | "t1\n" 57 | ], 58 | "execution_count": 6, 59 | "outputs": [ 60 | { 61 | "output_type": "execute_result", 62 | "data": { 63 | "text/plain": [ 64 | "tensor(2.)" 65 | ] 66 | }, 67 | "metadata": { 68 | "tags": [] 69 | }, 70 | "execution_count": 6 71 | } 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | 
"metadata": { 77 | "id": "4DP-hpxSE-zr", 78 | "colab_type": "code", 79 | "colab": { 80 | "base_uri": "https://localhost:8080/", 81 | "height": 34 82 | }, 83 | "outputId": "e4320e6c-d65a-44cf-b8fe-3d2808c9c8bd" 84 | }, 85 | "source": [ 86 | "t1.dtype" 87 | ], 88 | "execution_count": 5, 89 | "outputs": [ 90 | { 91 | "output_type": "execute_result", 92 | "data": { 93 | "text/plain": [ 94 | "torch.float32" 95 | ] 96 | }, 97 | "metadata": { 98 | "tags": [] 99 | }, 100 | "execution_count": 5 101 | } 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "metadata": { 107 | "id": "_LSpG8sBFD-8", 108 | "colab_type": "code", 109 | "colab": { 110 | "base_uri": "https://localhost:8080/", 111 | "height": 34 112 | }, 113 | "outputId": "f0065a20-df2b-48e7-d29b-510f0bde51b5" 114 | }, 115 | "source": [ 116 | "# Vector\n", 117 | "t2 = torch.tensor([1., 2., 3, 4])\n", 118 | "t2" 119 | ], 120 | "execution_count": 7, 121 | "outputs": [ 122 | { 123 | "output_type": "execute_result", 124 | "data": { 125 | "text/plain": [ 126 | "tensor([1., 2., 3., 4.])" 127 | ] 128 | }, 129 | "metadata": { 130 | "tags": [] 131 | }, 132 | "execution_count": 7 133 | } 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "metadata": { 139 | "id": "f2H8_J5lFVzV", 140 | "colab_type": "code", 141 | "colab": { 142 | "base_uri": "https://localhost:8080/", 143 | "height": 68 144 | }, 145 | "outputId": "3cd368f5-f32c-473b-977c-4de459bb42a8" 146 | }, 147 | "source": [ 148 | "# Matrix\n", 149 | "t3 = torch.tensor([[5., 6], \n", 150 | " [7, 8], \n", 151 | " [9, 10]])\n", 152 | "t3" 153 | ], 154 | "execution_count": 8, 155 | "outputs": [ 156 | { 157 | "output_type": "execute_result", 158 | "data": { 159 | "text/plain": [ 160 | "tensor([[ 5., 6.],\n", 161 | " [ 7., 8.],\n", 162 | " [ 9., 10.]])" 163 | ] 164 | }, 165 | "metadata": { 166 | "tags": [] 167 | }, 168 | "execution_count": 8 169 | } 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "metadata": { 175 | "id": "rjHt14cgFaok", 176 | "colab_type": "code", 177 | "colab": { 178 | "base_uri": "https://localhost:8080/", 179 | "height": 102 180 | }, 181 | "outputId": "596c9a9b-9e2e-445f-a116-f38a6449f491" 182 | }, 183 | "source": [ 184 | "# 3-dimensional array\n", 185 | "t4 = torch.tensor([\n", 186 | " [[21, 22, 23], \n", 187 | " [23, 24, 25]], \n", 188 | " [[25, 26, 27], \n", 189 | " [27, 28, 29.]]])\n", 190 | "t4" 191 | ], 192 | "execution_count": 10, 193 | "outputs": [ 194 | { 195 | "output_type": "execute_result", 196 | "data": { 197 | "text/plain": [ 198 | "tensor([[[21., 22., 23.],\n", 199 | " [23., 24., 25.]],\n", 200 | "\n", 201 | " [[25., 26., 27.],\n", 202 | " [27., 28., 29.]]])" 203 | ] 204 | }, 205 | "metadata": { 206 | "tags": [] 207 | }, 208 | "execution_count": 10 209 | } 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "metadata": { 215 | "id": "nAGOrMvvFi98", 216 | "colab_type": "code", 217 | "colab": { 218 | "base_uri": "https://localhost:8080/", 219 | "height": 34 220 | }, 221 | "outputId": "9e30c68e-e0f2-4203-e231-be403523b7cb" 222 | }, 223 | "source": [ 224 | "t4.shape" 225 | ], 226 | "execution_count": 11, 227 | "outputs": [ 228 | { 229 | "output_type": "execute_result", 230 | "data": { 231 | "text/plain": [ 232 | "torch.Size([2, 2, 3])" 233 | ] 234 | }, 235 | "metadata": { 236 | "tags": [] 237 | }, 238 | "execution_count": 11 239 | } 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "metadata": { 245 | "id": "KAQNDcw8piHf", 246 | "colab_type": "code", 247 | "colab": {} 248 | }, 249 | "source": [ 250 | "#Basic Auto grad example 1" 251 | ], 
252 | "execution_count": null, 253 | "outputs": [] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "metadata": { 258 | "id": "vuB0YLhPM2vf", 259 | "colab_type": "code", 260 | "colab": {} 261 | }, 262 | "source": [ 263 | "#Basic Autograd Example\n", 264 | "# Create tensors.\n", 265 | "x = torch.tensor(1., requires_grad=True)\n", 266 | "w = torch.tensor(2., requires_grad=True)\n", 267 | "b = torch.tensor(3., requires_grad=True)" 268 | ], 269 | "execution_count": 17, 270 | "outputs": [] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "metadata": { 275 | "id": "7mxb4CfoSrQp", 276 | "colab_type": "code", 277 | "colab": { 278 | "base_uri": "https://localhost:8080/", 279 | "height": 68 280 | }, 281 | "outputId": "1db24dc6-1794-45fb-84c7-d84ac15199c3" 282 | }, 283 | "source": [ 284 | "# Build a computational graph.\n", 285 | "y = w * x + b # y = 2 * x + 3\n", 286 | "\n", 287 | "# Compute gradients.\n", 288 | "y.backward()\n", 289 | "\n", 290 | "# Print out the gradients.\n", 291 | "print(x.grad) # x.grad = 2 \n", 292 | "print(w.grad) # w.grad = 1 \n", 293 | "print(b.grad) # b.grad = 1 \n" 294 | ], 295 | "execution_count": 18, 296 | "outputs": [ 297 | { 298 | "output_type": "stream", 299 | "text": [ 300 | "tensor(2.)\n", 301 | "tensor(1.)\n", 302 | "tensor(1.)\n" 303 | ], 304 | "name": "stdout" 305 | } 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "metadata": { 311 | "id": "oNVSvMpQSxTJ", 312 | "colab_type": "code", 313 | "colab": { 314 | "base_uri": "https://localhost:8080/", 315 | "height": 68 316 | }, 317 | "outputId": "98085f6a-5454-45a4-dfb3-3251711650c1" 318 | }, 319 | "source": [ 320 | "# Display gradients\n", 321 | "print('dy/dx:', x.grad)\n", 322 | "print('dy/dw:', w.grad)\n", 323 | "print('dy/db:', b.grad)" 324 | ], 325 | "execution_count": 19, 326 | "outputs": [ 327 | { 328 | "output_type": "stream", 329 | "text": [ 330 | "dy/dx: tensor(2.)\n", 331 | "dy/dw: tensor(1.)\n", 332 | "dy/db: tensor(1.)\n" 333 | ], 334 | "name": "stdout" 335 | } 336 | ] 337 | }, 338 | { 339 | "cell_type": "code", 340 | "metadata": { 341 | "id": "ztFEnO3Mpqpb", 342 | "colab_type": "code", 343 | "colab": {} 344 | }, 345 | "source": [ 346 | "#Basic Auto grad example 2" 347 | ], 348 | "execution_count": 23, 349 | "outputs": [] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "metadata": { 354 | "id": "w7KaiInupu0e", 355 | "colab_type": "code", 356 | "colab": {} 357 | }, 358 | "source": [ 359 | "# Create tensors of shape (10, 3) and (10, 2).\n", 360 | "x = torch.randn(10, 3)\n", 361 | "y = torch.randn(10, 2)" 362 | ], 363 | "execution_count": 24, 364 | "outputs": [] 365 | }, 366 | { 367 | "cell_type": "code", 368 | "metadata": { 369 | "id": "glR0ho9ap2ms", 370 | "colab_type": "code", 371 | "colab": { 372 | "base_uri": "https://localhost:8080/", 373 | "height": 102 374 | }, 375 | "outputId": "2ff993ac-53fa-4cc9-f7ca-3d67b0e4faef" 376 | }, 377 | "source": [ 378 | "# Build a fully connected layer.\n", 379 | "import torch.nn as nn \n", 380 | "linear = nn.Linear(3, 2)\n", 381 | "print ('w: ', linear.weight)\n", 382 | "print ('b: ', linear.bias)" 383 | ], 384 | "execution_count": 26, 385 | "outputs": [ 386 | { 387 | "output_type": "stream", 388 | "text": [ 389 | "w: Parameter containing:\n", 390 | "tensor([[ 0.2952, -0.1433, 0.3021],\n", 391 | " [-0.5628, -0.3223, 0.5685]], requires_grad=True)\n", 392 | "b: Parameter containing:\n", 393 | "tensor([ 0.0969, -0.3935], requires_grad=True)\n" 394 | ], 395 | "name": "stdout" 396 | } 397 | ] 398 | }, 399 | { 400 | "cell_type": "code", 401 | "metadata": { 402 | 
"id": "qoAVeEp6qPzZ", 403 | "colab_type": "code", 404 | "colab": {} 405 | }, 406 | "source": [ 407 | "# Build loss function and optimizer.\n", 408 | "criterion = nn.MSELoss()\n", 409 | "optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)" 410 | ], 411 | "execution_count": 27, 412 | "outputs": [] 413 | }, 414 | { 415 | "cell_type": "code", 416 | "metadata": { 417 | "id": "-mInVtOaqUpJ", 418 | "colab_type": "code", 419 | "colab": {} 420 | }, 421 | "source": [ 422 | "# Forward pass.\n", 423 | "pred = linear(x)" 424 | ], 425 | "execution_count": 28, 426 | "outputs": [] 427 | }, 428 | { 429 | "cell_type": "code", 430 | "metadata": { 431 | "id": "Hbi-hCiEqYvT", 432 | "colab_type": "code", 433 | "colab": { 434 | "base_uri": "https://localhost:8080/", 435 | "height": 34 436 | }, 437 | "outputId": "6a60d172-9ed8-44ba-fbd0-2ccedec15848" 438 | }, 439 | "source": [ 440 | "# Compute loss.\n", 441 | "loss = criterion(pred, y)\n", 442 | "print('loss: ', loss.item())" 443 | ], 444 | "execution_count": 29, 445 | "outputs": [ 446 | { 447 | "output_type": "stream", 448 | "text": [ 449 | "loss: 0.9510029554367065\n" 450 | ], 451 | "name": "stdout" 452 | } 453 | ] 454 | }, 455 | { 456 | "cell_type": "code", 457 | "metadata": { 458 | "id": "trdTY-xiqee7", 459 | "colab_type": "code", 460 | "colab": {} 461 | }, 462 | "source": [ 463 | "# Backward pass.\n", 464 | "loss.backward()" 465 | ], 466 | "execution_count": 30, 467 | "outputs": [] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "metadata": { 472 | "id": "unuJJcTBqj13", 473 | "colab_type": "code", 474 | "colab": { 475 | "base_uri": "https://localhost:8080/", 476 | "height": 68 477 | }, 478 | "outputId": "ef7fc233-e651-44d4-d6a5-90430f6ab9f3" 479 | }, 480 | "source": [ 481 | "# Print out the gradients.\n", 482 | "print ('dL/dw: ', linear.weight.grad) \n", 483 | "print ('dL/db: ', linear.bias.grad)" 484 | ], 485 | "execution_count": 31, 486 | "outputs": [ 487 | { 488 | "output_type": "stream", 489 | "text": [ 490 | "dL/dw: tensor([[ 0.2379, 0.8574, 0.7902],\n", 491 | " [-0.2352, 0.3811, 0.3544]])\n", 492 | "dL/db: tensor([0.6118, 0.0326])\n" 493 | ], 494 | "name": "stdout" 495 | } 496 | ] 497 | }, 498 | { 499 | "cell_type": "code", 500 | "metadata": { 501 | "id": "849wkCBlqn1-", 502 | "colab_type": "code", 503 | "colab": {} 504 | }, 505 | "source": [ 506 | "# 1-step gradient descent.\n", 507 | "optimizer.step()" 508 | ], 509 | "execution_count": 32, 510 | "outputs": [] 511 | }, 512 | { 513 | "cell_type": "code", 514 | "metadata": { 515 | "id": "Cw37Qk5Aqoeg", 516 | "colab_type": "code", 517 | "colab": { 518 | "base_uri": "https://localhost:8080/", 519 | "height": 34 520 | }, 521 | "outputId": "ddfb8937-b367-47b3-ca72-5a4642e81225" 522 | }, 523 | "source": [ 524 | "# You can also perform gradient descent at the low level.\n", 525 | "# linear.weight.data.sub_(0.01 * linear.weight.grad.data)\n", 526 | "# linear.bias.data.sub_(0.01 * linear.bias.grad.data)\n", 527 | "\n", 528 | "# Print out the loss after 1-step gradient descent.\n", 529 | "pred = linear(x)\n", 530 | "loss = criterion(pred, y)\n", 531 | "print('loss after 1 step optimization: ', loss.item())" 532 | ], 533 | "execution_count": 33, 534 | "outputs": [ 535 | { 536 | "output_type": "stream", 537 | "text": [ 538 | "loss after 1 step optimization: 0.9300593137741089\n" 539 | ], 540 | "name": "stdout" 541 | } 542 | ] 543 | }, 544 | { 545 | "cell_type": "code", 546 | "metadata": { 547 | "id": "sQhkcGodq0rV", 548 | "colab_type": "code", 549 | "colab": {} 550 | }, 551 | "source": [ 552 | "#Pytorch 
interopretability with numpy" 553 | ], 554 | "execution_count": null, 555 | "outputs": [] 556 | }, 557 | { 558 | "cell_type": "code", 559 | "metadata": { 560 | "id": "pK5F4ACeik0s", 561 | "colab_type": "code", 562 | "colab": { 563 | "base_uri": "https://localhost:8080/", 564 | "height": 51 565 | }, 566 | "outputId": "a3019059-79ab-40d4-d71a-9e5b2e89ecb5" 567 | }, 568 | "source": [ 569 | "import numpy as np\n", 570 | "\n", 571 | "x = np.array([[1, 2], [3, 4.]])\n", 572 | "x" 573 | ], 574 | "execution_count": 20, 575 | "outputs": [ 576 | { 577 | "output_type": "execute_result", 578 | "data": { 579 | "text/plain": [ 580 | "array([[1., 2.],\n", 581 | " [3., 4.]])" 582 | ] 583 | }, 584 | "metadata": { 585 | "tags": [] 586 | }, 587 | "execution_count": 20 588 | } 589 | ] 590 | }, 591 | { 592 | "cell_type": "code", 593 | "metadata": { 594 | "id": "G0_Lt2YJkKk1", 595 | "colab_type": "code", 596 | "colab": { 597 | "base_uri": "https://localhost:8080/", 598 | "height": 51 599 | }, 600 | "outputId": "55e07eff-4bf0-4f9e-8e14-a57eebf31318" 601 | }, 602 | "source": [ 603 | "# Convert the numpy array to a torch tensor.\n", 604 | "y = torch.from_numpy(x)\n", 605 | "y" 606 | ], 607 | "execution_count": 21, 608 | "outputs": [ 609 | { 610 | "output_type": "execute_result", 611 | "data": { 612 | "text/plain": [ 613 | "tensor([[1., 2.],\n", 614 | " [3., 4.]], dtype=torch.float64)" 615 | ] 616 | }, 617 | "metadata": { 618 | "tags": [] 619 | }, 620 | "execution_count": 21 621 | } 622 | ] 623 | }, 624 | { 625 | "cell_type": "code", 626 | "metadata": { 627 | "id": "LPLYK9fTkPbk", 628 | "colab_type": "code", 629 | "colab": { 630 | "base_uri": "https://localhost:8080/", 631 | "height": 51 632 | }, 633 | "outputId": "0e4345c1-0c7e-4740-eb9e-ad14b387550d" 634 | }, 635 | "source": [ 636 | "# Convert a torch tensor to a numpy array\n", 637 | "z = y.numpy()\n", 638 | "z" 639 | ], 640 | "execution_count": 22, 641 | "outputs": [ 642 | { 643 | "output_type": "execute_result", 644 | "data": { 645 | "text/plain": [ 646 | "array([[1., 2.],\n", 647 | " [3., 4.]])" 648 | ] 649 | }, 650 | "metadata": { 651 | "tags": [] 652 | }, 653 | "execution_count": 22 654 | } 655 | ] 656 | }, 657 | { 658 | "cell_type": "code", 659 | "metadata": { 660 | "id": "NUhGnfeJkWzU", 661 | "colab_type": "code", 662 | "colab": {} 663 | }, 664 | "source": [ 665 | "" 666 | ], 667 | "execution_count": null, 668 | "outputs": [] 669 | } 670 | ] 671 | } -------------------------------------------------------------------------------- /Linear_regression_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "Linear_regression_example.ipynb", 7 | "provenance": [] 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | } 13 | }, 14 | "cells": [ 15 | { 16 | "cell_type": "code", 17 | "metadata": { 18 | "id": "P-sHhT87oXfd", 19 | "colab_type": "code", 20 | "colab": {} 21 | }, 22 | "source": [ 23 | "#Import libraries\n", 24 | "import pandas as pd\n", 25 | "import numpy as np\n", 26 | "import matplotlib.pyplot as plt\n", 27 | "from sklearn.model_selection import train_test_split\n", 28 | "import torch\n", 29 | "import torch.nn as nn" 30 | ], 31 | "execution_count": null, 32 | "outputs": [] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "metadata": { 37 | "id": "Qn5Wss2MoyWN", 38 | "colab_type": "code", 39 | "colab": {} 40 | }, 41 | "source": [ 42 | "#Loading the data set\n", 43 | 
"data=pd.read_csv('/content/exp_vs_salary_Data.csv')" 44 | ], 45 | "execution_count": null, 46 | "outputs": [] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "metadata": { 51 | "id": "I4k6F7g2qhP_", 52 | "colab_type": "code", 53 | "colab": { 54 | "base_uri": "https://localhost:8080/", 55 | "height": 204 56 | }, 57 | "outputId": "f500e6bf-e779-45fd-863e-4233b6a4493f" 58 | }, 59 | "source": [ 60 | "#let us try to visualize our data \n", 61 | "data.head()" 62 | ], 63 | "execution_count": null, 64 | "outputs": [ 65 | { 66 | "output_type": "execute_result", 67 | "data": { 68 | "text/html": [ 69 | "
\n", 70 | "\n", 83 | "\n", 84 | " \n", 85 | " \n", 86 | " \n", 87 | " \n", 88 | " \n", 89 | " \n", 90 | " \n", 91 | " \n", 92 | " \n", 93 | " \n", 94 | " \n", 95 | " \n", 96 | " \n", 97 | " \n", 98 | " \n", 99 | " \n", 100 | " \n", 101 | " \n", 102 | " \n", 103 | " \n", 104 | " \n", 105 | " \n", 106 | " \n", 107 | " \n", 108 | " \n", 109 | " \n", 110 | " \n", 111 | " \n", 112 | " \n", 113 | " \n", 114 | " \n", 115 | " \n", 116 | " \n", 117 | " \n", 118 | "
" 120 | ], 121 | "text/plain": [ 122 | " YearsExperience Salary\n", 123 | "0 1.2 39342\n", 124 | "1 1.2 46205\n", 125 | "2 1.4 37730\n", 126 | "3 2.0 43524\n", 127 | "4 2.2 39890" 128 | ] 129 | }, 130 | "metadata": { 131 | "tags": [] 132 | }, 133 | "execution_count": 24 134 | } 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "metadata": { 140 | "id": "K6TNBAgMq-Ta", 141 | "colab_type": "code", 142 | "colab": { 143 | "base_uri": "https://localhost:8080/", 144 | "height": 295 145 | }, 146 | "outputId": "71ebb30e-4781-4d70-edef-98ad727e35fa" 147 | }, 148 | "source": [ 149 | "# lets visualize our data\n", 150 | "import matplotlib.pyplot as plt\n", 151 | "plt.scatter(data.YearsExperience,data.Salary)\n", 152 | "plt.xlabel(\"YearsExperience\")\n", 153 | "plt.ylabel(\"Salary\")\n", 154 | "plt.title(\"Years Experience VS Salary\")\n", 155 | "plt.show()" 156 | ], 157 | "execution_count": null, 158 | "outputs": [ 159 | { 160 | "output_type": "display_data", 161 | "data": { 162 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAEWCAYAAACqitpwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3deZxcVZ338c/XJECDQIMgQzpIMhJRFiXYLBpFBpQEUJPBBdCRoIw4LrghEsbXMziKQ5zogzgqioCA8oAIGDKABIbAqGiAQJA9EkFImi0QOqBEIPh7/rinzE2lq9dbdauqv+/Xq15965y7nKpO7q/PehURmJmZFellZRfAzMzaj4OLmZkVzsHFzMwK5+BiZmaFc3AxM7PCObiYmVnhHFzMSiDpg5KuKbsczUjSuZJOKbscNjIOLlY3kn4i6UdVaW+T9JSk7UsqU0j6s6Q/5V5fbHQ5IuKCiDio0dfti6QuSWslvbqPvJ9L+kbaniHpdknPSHpS0kJJk2qcc4KkS9N+qyXdJenoOn8UayJjyy6AtbXPAHdLekdEXCtpE+CHwPER8WgRF5A0NiLWDvGwN0TEsiKuPxzDLHPdRESPpOuADwFfrqRL2ho4BOiWtBNwPnAYsBB4OXAQ8FKN0/4Y+B2wI/A8sDvwd3X6CJXyClBE/LWe17HBcc3F6iYingKOA86UtBlwMvCHiDhX0r6SfiOpV9LvJO1fOU7ShyXdK+lZSQ9I+lgub39JKySdKOkx4EeStpF0RTrXKkm/kjTkf9uSrpL0zdz7iySdk7aPlnSjpO+kv8Tvk3Rgbt8tJZ0t6VFJPZJOkTSm6tjTJD0FfDml/Tp3/GslXZvKv1TS+3N550r6rqQr03dyU76WIWnX3LGPS/rXlP4ySbMl/SHVFi9OAaMv55EFl7wjgHsi4k5gD+DBiLguMs9GxKUR8XCN8+0FnBsRf46ItRGxJCJ+kSvzzyQ9lr7LX0ratcbvZKv0u10p6em0PSGXf4Okr0m6EXgOOF7SrVXn+Lyky2uU0+olIvzyq64v4FJgPvAUsAPQlbYPIfsD5x3p/bZp/0OBVwMC3kZ209gz5e0PrAW+DmwMdACnAt8HxqXXW8n+gu2rLAHsVCPv74AngAOADwIPAJunvKPTdT+XrnE4sBrYOuX/HPgBsBnwSuBm4GNVxx5H1lrQkdJ+nfI3A5YDH075U4AngV1S/rnp+9k75V8AXJTyNgceBY4HNknv90l5nwEWARPSd/UD4MIan70jfZ635NJ+C3w2bf898BfgNOAfgJcP8Dv/H+BGsgD1qj7yP5LKujHwLeD2XN65wClp+xXAe4BN0/4/A+bl9r0BeBjYNX03GwOrgNfl9lkCvKfs/wej7VV6Afxq/xewHfAn4DPp/YnAj6v2WQDMqnH8vNyx+wMvAJvk8r8CXE6NoFF1rgCeAXpzr2m5/PekG/2TVTfao4FHyAUtsgDyofT5ngc6cnlHAtfnjn24qhxHsy64HA78qir/B8DJaftc4Kxc3iHAfbnrLKnxWe8FDsy93x54ERhbY/+zgDPT9uT0Pb8yl78vcDGwMgWac2sFGWArYA5wN1nT2e3AXjX27Uy/ly1zn/eUGvvuATyde38D8JWqfc4Avpa2dwWeBjYu+//BaHu5WczqLiIeJ7tZ352SdgTel5qxeiX1Am8hu/kh6WBJi1IzTy/ZzXSb3ClXRsRfcu/nAsuAa1Iz2uwBirRnRHTmXgtyef8NjAGWRsSvq47riXTHSh4CxqfPMw54NPd5fkBWg6lY3k95dgT2qfo+Psj6fRSP5bafI+vzgKwm+Id+zvvz3DnvJbvRb1dj//PIfi+bkAXNBRHxRCUzIhZFxPsjYluy2uF+wJf6OlFEPB0RsyNi13S924F5yoyRNCc11z0D/DEdtk31eSRtKukHkh5K+/4S6Kw0OSbV3+15wAdSH8yHgIsj4vkan9nqxMHFyrCcrOaSv8FvFhFzJG1M1oz2DWC7iOgEriJrIqtYbynvyNr/j4+IvwfeDXw+3x8yRF8juwlvL+nIqryudMOqeBVZbWY5Wc1lm9zn2SLdWPssc5XlwP9WfR8vj4iPD6K8y8marGrlHVx13k0ioqfG/r8ma1KaAfwT2U26TxFxC3AZsNtABYyIJ8l+n+OBrYEPpGu8HdgSmJh2VR+HHw/sTNbUtwVZQKvet/rfwyKyWtdb07V+PFAZrXgOLlaGnwDvkjQt/RW7SeqonwBsRNZuvhJYK+lgslFJNUl6p6Sd0o1/Ndlf50MeMSRpP7J+j6OAWcB/SerK7fJK4NOSxkl6H/A64KrIRr5dA3xT0hapI/3Vkt42yEtfAbxG0ofSucdJ2kvS6wZ57PaSPitpY0mbS9on5X0f+JqkHdPn21bSjFonSrWy88n6szrJanGkY98i6aOSXpnev5YskC/q61ySvi5pN0ljJW0OfBxYFtkgj83JgvFTZH0p/9HP59scWAP0psEIJw/0hSTnA98BXuyjBmoN4OBiDRcRy8n+cv1XsiCyHDgBeFl
EPAt8mqxt/2myvzznD3DKyWQdyH8i64T+XkRc38/+v9P681y+JWkLshvSpyKiJyJ+BZxNNhqt8lfyTelaT5LVcN6bbpaQBaSNgHtSuS8hNfMN4vt4liyAHkFWE3qMdQMWBnPsO4B3pePuJ+twBzid7Lu7RtKzZIFgn77Ok3M+WY3sp1VNSb1kweROSX8CriYbxPCfNc6zacrvJRsYsWM6vnKNh4Aesu+rzwCVfItssMGTab+rByh/xY/JalU/GeT+VjCt34RsZn1RNgHwnyPiLWWXxQYmqYNs5N+eEXF/2eUZjVxzMbN29HHgFgeW8niGvpm1FUl/JOvwn1lyUUY1N4uZmVnh3CxmZmaFc7NYss0228TEiRPLLoaZWUu59dZbn0wTa9fj4JJMnDiRxYsXl10MM7OWIumhvtLdLGZmZoVzcDEzs8I5uJiZWeEcXMzMrHAOLmZmVjiPFjMza2PzlvQwd8FSHuldw/jODk6YtjMzp3QNfOAIObiYmbWpeUt6OOmyO1nz4ksA9PSu4aTL7gRg5pSuugYeBxczszY1d8HSvwWWijUvvsTcBUsB+g08I+U+FzOzNvVI75qa6QMFnpFycDEza1PjOztqpvcXeIrg4GJm1qZOmLYzHePGrJfWMW4MJ0zbud/AU4S6BRdJ50h6QtJdubS5ku6TdIekn0vqzOWdJGmZpKWSpuXSp6e0ZZJm59InSboppf9U0kYpfeP0flnKn1ivz2hm1sxmTuni1MN2p6uzAwFdnR2cetjuzJzS1W/gKULdnuciaT+yZ5qfHxG7pbSDgIURsVbS1wEi4kRJuwAXAnsD48meh/6adKrfkz0jfAVwC3BkRNwj6WLgsoi4SNL3gd9FxBmSPgG8PiL+RdIRwD9GxOEDlbe7uzu8cKWZjSZFjBaTdGtEdFen1220WET8srrWEBHX5N4uAt6btmcAF0XE88CDkpaRBRqAZRHxAICki4AZku4FDgA+kPY5D/gycEY615dT+iXAdyQp/FQ0M7P1zJzSVbc5L2X2uXwE+EXa7gKW5/JWpLRa6a8AeiNibVX6eudK+avT/huQdKykxZIWr1y5csQfyMzMMqUEF0lfAtYCF5Rx/YqIODMiuiOie9ttN3jWjZmZDVPDJ1FKOhp4J3BgrqmqB9ght9uElEaN9KeATkljU+0kv3/lXCskjQW2TPubmVmDNLTmImk68EXg3RHxXC5rPnBEGuk1CZgM3EzWgT85jQzbCDgCmJ+C0vWs67OZBVyeO9estP1esgEE7m8xM2ugutVcJF0I7A9sI2kFcDJwErAxcK0kgEUR8S8RcXca/XUPWXPZJyPipXSeTwELgDHAORFxd7rEicBFkk4BlgBnp/SzgR+nQQGryAKSmVnpylpEsgx1G4rcajwU2czqqXoRScjmlVTmnbSqWkORPUPfzKwB6r2WV7NxcDEza4B6r+XVbBxczMwaoN5reTUbBxczswao91pezcYPCzMza4BKp/1oGS3m4GJmbaPZh/rWcy2vZuPgYmZtYaDnxVtjuc/FzNrCaBvq2+xcczGzttAuQ32bvWlvsFxzMbO20A5DfStNez29awjWNe3NW9Iz4LHNxsHFzNpCOwz1baemPTeLmVlbaIehvu3StAcOLmbWRlp9qO/4zg56+ggkrdS0V+FmMTOzJtEOTXsVrrmYmTWJdmjaq3BwMTNrIq3etFfh4GJmo1q7zCtpNg4uZjZqecmY+nGHvpmNWu00r6TZOLiY2ajVTvNKmo2Di5mNWu2wZEyzcnAxs1GrneaVNBt36JvZqNVO80qajYOLmY1q7TKvpNk4uJiZDZHnxgzMwcXMbAg8N2Zw3KFvZjYEnhszOA4uZmZD4Lkxg+PgYmY2BJ4bMzgOLmZmQ+C5MYPjDn0zsyHw3JjBcXAxMxsiz40ZmJvFzMyscA4uZmZWODeLmVnT84z41uPgYmZNzTPiW5ObxcysqXlGfGuqW3CRdI6kJyTdlUvbWtK1ku5PP7dK6ZL0bUnLJN0hac/cMbPS/vdLmpVLf6OkO9Mx35ak/q5hZq3JM+JbUz1rLucC06vSZgPXRcRk4Lr0HuBgYHJ6HQucAVmgAE4G9gH2Bk7OBYszgI/mjps+wDXMrAV5RnxrqltwiYhfAquqkmcA56Xt84CZufTzI7MI6JS0PTANuDYiVkXE08C1wPSUt0VELIqIAM6vOldf1zCzFuQZ8a2p0R3620XEo2n7MWC7tN0FLM/ttyKl9Ze+oo/0/q5hZi3IM+JbU2mjxSIiJEWZ15B0LFkzHK961avqWRQzGwHPiG89jR4t9nhq0iL9fCKl9wA75PabkNL6S5/QR3p/19hARJwZEd0R0b3tttsO+0OZjVbzlvQwdc5CJs2+kqlzFjJvSc/AB9mo0OjgMh+ojPiaBVyeSz8qjRrbF1idmrYWAAdJ2ip15B8ELEh5z0jaN40SO6rqXH1dw8wKVJl/0tO7hmDd/BMHGIP6DkW+EPgtsLOkFZKOAeYA75B0P/D29B7gKuABYBnwQ+ATABGxCvgqcEt6fSWlkfY5Kx3zB+AXKb3WNcysQJ5/Yv2pW59LRBxZI+vAPvYN4JM1znMOcE4f6YuB3fpIf6qva5hZsTz/xPrjGfpmNiyef2L9cXAxs2Hx/BPrjxeuNLNh8fwT64+Di5kNm+efWC1uFjMzs8I5uJiZWeHcLGbWQvxERmsVDi5mLcJPZLRW4mYxsxbhGfHWShxczFqEZ8RbK3FwMWsRnhFvrcTBxaxFNHJGvJfSt5Fyh75Zi2jUjHgPHLAiOLiYtZBGzIjvb+CAg4sNlpvFzGw9HjhgRXBwMbP1eOCAFcHBxczW46X0rQjuczGz9XgpfSuCg4uZbcBL6dtIuVnMzMwK5+BiZmaFc3AxM7PCObiYmVnhHFzMzKxwDi5mZlY4BxczMyucg4uZmRXOwcXMzAo3qOAiaczAe5mZmWUGW3O5X9JcSbvUtTRmZtYWBhtc3gD8HjhL0iJJx0raoo7lMjOzFjaohSsj4lngh8APJb0N+H/AaZIuAb4aEcvqWEazljJvSY9XFLZRb1DBJfW5HAp8GJgIfBO4AHgrcBXwmjqVz6yl+PnzZpnBLrl/P3A9MDcifpNLv0TSfsUXy6w1+fnzZpkBg0uqtZwbEV/pKz8iPl14qcxalJ8/b5YZsEM/Il4C3tmAspi1vKKePz9vSQ9T5yxk0uwrmTpnIfOW9BRRPLOGGexosRslfUfSWyXtWXnVtWRmLaiI589X+m16etcQrOu3cYCxVjLYPpc90s9801gABxRbHLPWVsTz591vY+1gsEOR/6HIi0r6HPDPZAHqTrJRaNsDFwGvAG4FPhQRL0jaGDgfeCPwFHB4RPwxneck4BjgJeDTEbEgpU8HTgfGAGdFxJwiy2/Wn5E+f979NtYOBr22mKRDJX1R0r9VXsO5oKQu4NNAd0TsRhYAjgC+DpwWETsBT5MFDdLPp1P6aWk/0moBRwC7AtOB70kakwYgfBc4GNgFONIrC1grKarfxqxMg11b7PvA4cBxgID3ATuO4LpjgQ5JY4FNgU
[… remainder of the base64-encoded PNG data for the notebook's figure output omitted …]\n", 163 |             "text/plain": [ 164 |               "
" 165 | ] 166 | }, 167 | "metadata": { 168 | "tags": [], 169 | "needs_background": "light" 170 | } 171 | } 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "metadata": { 177 | "id": "H7ummj4IrTFo", 178 | "colab_type": "code", 179 | "colab": {} 180 | }, 181 | "source": [ 182 | "#Splitting the dataset into training and testing dataset\n", 183 | "train, test = train_test_split(data, test_size = 0.2)" 184 | ], 185 | "execution_count": null, 186 | "outputs": [] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "metadata": { 191 | "id": "TY92B0w4xspy", 192 | "colab_type": "code", 193 | "colab": {} 194 | }, 195 | "source": [ 196 | "#Converting training data into tensors for Pytorch\n", 197 | "X_train = torch.Tensor([[x] for x in list(train.YearsExperience)])\n", 198 | "y_train = torch.torch.FloatTensor([[x] for x in list(train.Salary)])\n", 199 | "#Converting test data into tensors for Pytorch\n", 200 | "X_test = torch.Tensor([[x] for x in list(test.YearsExperience)])\n", 201 | "y_test = torch.torch.FloatTensor([[x] for x in list(test.Salary)])\n", 202 | "\n" 203 | ], 204 | "execution_count": null, 205 | "outputs": [] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "metadata": { 210 | "id": "945XnSQ9Ok3k", 211 | "colab_type": "code", 212 | "colab": {} 213 | }, 214 | "source": [ 215 | "class LinearRegression(nn.Module):\n", 216 | " def __init__(self, in_size, out_size):\n", 217 | " super().__init__()\n", 218 | " self.lin = nn.Linear(in_features = in_size, out_features = out_size)\n", 219 | " def forward(self, X):\n", 220 | " pred = self.lin(X)\n", 221 | " return(pred)" 222 | ], 223 | "execution_count": null, 224 | "outputs": [] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "metadata": { 229 | "id": "-o0PHI9uO1G8", 230 | "colab_type": "code", 231 | "colab": { 232 | "base_uri": "https://localhost:8080/", 233 | "height": 170 234 | }, 235 | "outputId": "c2023877-3af3-4f86-8073-be575c3bd19a" 236 | }, 237 | "source": [ 238 | "#Data set & Data set Loader\n", 239 | "from torch.utils.data import TensorDataset\n", 240 | "train_data=TensorDataset(X_train, y_train)\n", 241 | "train_data[0:5]" 242 | ], 243 | "execution_count": null, 244 | "outputs": [ 245 | { 246 | "output_type": "execute_result", 247 | "data": { 248 | "text/plain": [ 249 | "(tensor([[ 8.6000],\n", 250 | " [ 2.0000],\n", 251 | " [10.3000],\n", 252 | " [ 5.8000],\n", 253 | " [ 8.2000]]), tensor([[109430.],\n", 254 | " [ 43524.],\n", 255 | " [122390.],\n", 256 | " [ 81362.],\n", 257 | " [113812.]]))" 258 | ] 259 | }, 260 | "metadata": { 261 | "tags": [] 262 | }, 263 | "execution_count": 8 264 | } 265 | ] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "metadata": { 270 | "id": "q5ADurmGO5oM", 271 | "colab_type": "code", 272 | "colab": {} 273 | }, 274 | "source": [ 275 | "# Define data loader\n", 276 | "from torch.utils.data import DataLoader\n", 277 | "batch_size = 5\n", 278 | "train_dl = DataLoader(train_data, batch_size, shuffle=True)\n" 279 | ], 280 | "execution_count": null, 281 | "outputs": [] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "metadata": { 286 | "id": "8d4fbAy0PAvb", 287 | "colab_type": "code", 288 | "colab": { 289 | "base_uri": "https://localhost:8080/", 290 | "height": 85 291 | }, 292 | "outputId": "9423589c-0cef-44ae-dc2c-2d58af7489e5" 293 | }, 294 | "source": [ 295 | "# Define model\n", 296 | "model = nn.Linear(1, 1) # nn.Linear(in_features,out_features)\n", 297 | "print(model.weight)\n", 298 | "print(model.bias)" 299 | ], 300 | "execution_count": null, 301 | "outputs": [ 302 | { 303 | "output_type": 
"stream", 304 | "text": [ 305 | "Parameter containing:\n", 306 | "tensor([[-0.0885]], requires_grad=True)\n", 307 | "Parameter containing:\n", 308 | "tensor([0.4374], requires_grad=True)\n" 309 | ], 310 | "name": "stdout" 311 | } 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "metadata": { 317 | "id": "nhB3gSXhw6kA", 318 | "colab_type": "code", 319 | "colab": { 320 | "base_uri": "https://localhost:8080/", 321 | "height": 68 322 | }, 323 | "outputId": "7db58a69-198a-4bfe-aa78-27f80309d2dc" 324 | }, 325 | "source": [ 326 | "# printing the model Parameters\n", 327 | "print(list(model.parameters()))" 328 | ], 329 | "execution_count": null, 330 | "outputs": [ 331 | { 332 | "output_type": "stream", 333 | "text": [ 334 | "[Parameter containing:\n", 335 | "tensor([[-0.0885]], requires_grad=True), Parameter containing:\n", 336 | "tensor([0.4374], requires_grad=True)]\n" 337 | ], 338 | "name": "stdout" 339 | } 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "metadata": { 345 | "id": "zX3bYtA4y3uM", 346 | "colab_type": "code", 347 | "colab": { 348 | "base_uri": "https://localhost:8080/", 349 | "height": 425 350 | }, 351 | "outputId": "1ef1edf6-cc3a-474f-9b4c-a60f3958352e" 352 | }, 353 | "source": [ 354 | "# Generate predictions\n", 355 | "preds = model(X_train)\n", 356 | "preds" 357 | ], 358 | "execution_count": null, 359 | "outputs": [ 360 | { 361 | "output_type": "execute_result", 362 | "data": { 363 | "text/plain": [ 364 | "tensor([[-0.3235],\n", 365 | " [ 0.2605],\n", 366 | " [-0.4739],\n", 367 | " [-0.0757],\n", 368 | " [-0.2881],\n", 369 | " [-0.0934],\n", 370 | " [-0.4208],\n", 371 | " [-0.2615],\n", 372 | " [ 0.0747],\n", 373 | " [-0.3942],\n", 374 | " [ 0.3313],\n", 375 | " [-0.0138],\n", 376 | " [-0.1642],\n", 377 | " [ 0.3313],\n", 378 | " [ 0.1189],\n", 379 | " [ 0.0835],\n", 380 | " [ 0.1543],\n", 381 | " [-0.3588],\n", 382 | " [ 0.1808],\n", 383 | " [ 0.0835],\n", 384 | " [-0.4916],\n", 385 | " [ 0.0924],\n", 386 | " [-0.0315],\n", 387 | " [ 0.2428]], grad_fn=)" 388 | ] 389 | }, 390 | "metadata": { 391 | "tags": [] 392 | }, 393 | "execution_count": 12 394 | } 395 | ] 396 | }, 397 | { 398 | "cell_type": "code", 399 | "metadata": { 400 | "id": "W_WMQ6VzzQND", 401 | "colab_type": "code", 402 | "colab": {} 403 | }, 404 | "source": [ 405 | "#Define the loss function\n", 406 | "loss_fun = nn.MSELoss()" 407 | ], 408 | "execution_count": null, 409 | "outputs": [] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "metadata": { 414 | "id": "cYd8ffZC1Vjv", 415 | "colab_type": "code", 416 | "colab": { 417 | "base_uri": "https://localhost:8080/", 418 | "height": 34 419 | }, 420 | "outputId": "a5a57f0c-4d33-4b55-c250-33ddd6a34900" 421 | }, 422 | "source": [ 423 | "#compute loss for current prediction\n", 424 | "loss=loss_fun(model(X_train),y_train)\n", 425 | "print(loss)" 426 | ], 427 | "execution_count": null, 428 | "outputs": [ 429 | { 430 | "output_type": "stream", 431 | "text": [ 432 | "tensor(7.0447e+09, grad_fn=)\n" 433 | ], 434 | "name": "stdout" 435 | } 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "metadata": { 441 | "id": "hMDoPTq71vEl", 442 | "colab_type": "code", 443 | "colab": {} 444 | }, 445 | "source": [ 446 | "# Define SGD optimizer with learning rate 0.01\n", 447 | "optimizer = torch.optim.SGD(model.parameters(), lr=0.01)" 448 | ], 449 | "execution_count": null, 450 | "outputs": [] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": { 455 | "id": "ownUi5332RHP", 456 | "colab_type": "text" 457 | }, 458 | "source": [ 459 | "Train the 
model\n", 460 | "\n", 461 | "We are now ready to train the model. We'll follow the exact same process to implement gradient descent:\n", 462 | "\n", 463 | "Generate predictions\n", 464 | "\n", 465 | "Calculate the loss\n", 466 | "\n", 467 | "Compute gradients w.r.t the weights and biases\n", 468 | "\n", 469 | "Adjust the weights by subtracting a small quantity proportional to the gradient\n", 470 | "\n", 471 | "Reset the gradients to zero" 472 | ] 473 | }, 474 | { 475 | "cell_type": "code", 476 | "metadata": { 477 | "id": "8rvo05ug2LyP", 478 | "colab_type": "code", 479 | "colab": {} 480 | }, 481 | "source": [ 482 | "# Utility function to train the model\n", 483 | "def lrmodel(num_epochs, model, loss_fun, optimizer, train_dl):\n", 484 | " \n", 485 | " # Repeat for given number of epochs\n", 486 | " for epoch in range(num_epochs):\n", 487 | " \n", 488 | " # Train with batches of data\n", 489 | " for xb,yb in train_dl:\n", 490 | " \n", 491 | " # 1. Generate predictions\n", 492 | " pred = model(xb)\n", 493 | " \n", 494 | " # 2. Calculate loss\n", 495 | " loss = loss_fun(pred, yb)\n", 496 | " \n", 497 | " # 3. Compute gradients\n", 498 | " loss.backward()\n", 499 | " \n", 500 | " # 4. Update parameters using gradients\n", 501 | " optimizer.step()\n", 502 | " \n", 503 | " # 5. Reset the gradients to zero\n", 504 | " optimizer.zero_grad()\n", 505 | " \n", 506 | " # Print the progress\n", 507 | " if (epoch+1) % 10 == 0:\n", 508 | " print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))" 509 | ], 510 | "execution_count": null, 511 | "outputs": [] 512 | }, 513 | { 514 | "cell_type": "code", 515 | "metadata": { 516 | "id": "6fozH3iN2zQs", 517 | "colab_type": "code", 518 | "colab": { 519 | "base_uri": "https://localhost:8080/", 520 | "height": 187 521 | }, 522 | "outputId": "883fabce-8345-4118-f46d-89073a89e80b" 523 | }, 524 | "source": [ 525 | "#Training for 100 epochs\n", 526 | "num_epochs=100\n", 527 | "lrmodel(num_epochs, model, loss_fun, optimizer, train_dl)\n", 528 | "\n" 529 | ], 530 | "execution_count": null, 531 | "outputs": [ 532 | { 533 | "output_type": "stream", 534 | "text": [ 535 | "Epoch [10/100], Loss: 16100727.0000\n", 536 | "Epoch [20/100], Loss: 58836576.0000\n", 537 | "Epoch [30/100], Loss: 24312672.0000\n", 538 | "Epoch [40/100], Loss: 30125348.0000\n", 539 | "Epoch [50/100], Loss: 59348696.0000\n", 540 | "Epoch [60/100], Loss: 31803738.0000\n", 541 | "Epoch [70/100], Loss: 24751144.0000\n", 542 | "Epoch [80/100], Loss: 36488976.0000\n", 543 | "Epoch [90/100], Loss: 14319942.0000\n", 544 | "Epoch [100/100], Loss: 64535912.0000\n" 545 | ], 546 | "name": "stdout" 547 | } 548 | ] 549 | }, 550 | { 551 | "cell_type": "code", 552 | "metadata": { 553 | "id": "RHHEL3vt3Db0", 554 | "colab_type": "code", 555 | "colab": { 556 | "base_uri": "https://localhost:8080/", 557 | "height": 425 558 | }, 559 | "outputId": "b3e71f97-e94b-4d24-9e79-af49b24debe1" 560 | }, 561 | "source": [ 562 | "# Generate predictions\n", 563 | "preds = model(X_train)\n", 564 | "preds" 565 | ], 566 | "execution_count": null, 567 | "outputs": [ 568 | { 569 | "output_type": "execute_result", 570 | "data": { 571 | "text/plain": [ 572 | "tensor([[109632.5938],\n", 573 | " [ 45824.9609],\n", 574 | " [126067.8906],\n", 575 | " [ 82562.6875],\n", 576 | " [105765.4609],\n", 577 | " [ 84496.2500],\n", 578 | " [120267.1953],\n", 579 | " [102865.1172],\n", 580 | " [ 66127.3906],\n", 581 | " [117366.8438],\n", 582 | " [ 38090.7031],\n", 583 | " [ 75795.2109],\n", 584 | " [ 92230.5156],\n", 585 | " [ 
38090.7031],\n", 586 | " [ 61293.4766],\n", 587 | " [ 65160.6094],\n", 588 | " [ 57426.3516],\n", 589 | " [113499.7188],\n", 590 | " [ 54526.0000],\n", 591 | " [ 65160.6094],\n", 592 | " [128001.4531],\n", 593 | " [ 64193.8281],\n", 594 | " [ 77728.7812],\n", 595 | " [ 47758.5234]], grad_fn=)" 596 | ] 597 | }, 598 | "metadata": { 599 | "tags": [] 600 | }, 601 | "execution_count": 21 602 | } 603 | ] 604 | }, 605 | { 606 | "cell_type": "code", 607 | "metadata": { 608 | "id": "JQ0gv5ey3ze6", 609 | "colab_type": "code", 610 | "colab": { 611 | "base_uri": "https://localhost:8080/", 612 | "height": 425 613 | }, 614 | "outputId": "ef53a097-57e9-47f1-d483-fd56fe855d89" 615 | }, 616 | "source": [ 617 | "# Compare with targets\n", 618 | "y_train" 619 | ], 620 | "execution_count": null, 621 | "outputs": [ 622 | { 623 | "output_type": "execute_result", 624 | "data": { 625 | "text/plain": [ 626 | "tensor([[109430.],\n", 627 | " [ 43524.],\n", 628 | " [122390.],\n", 629 | " [ 81362.],\n", 630 | " [113812.],\n", 631 | " [ 93940.],\n", 632 | " [112635.],\n", 633 | " [101302.],\n", 634 | " [ 57081.],\n", 635 | " [116969.],\n", 636 | " [ 46205.],\n", 637 | " [ 66028.],\n", 638 | " [ 91738.],\n", 639 | " [ 39342.],\n", 640 | " [ 57188.],\n", 641 | " [ 56957.],\n", 642 | " [ 64445.],\n", 643 | " [105582.],\n", 644 | " [ 56642.],\n", 645 | " [ 55794.],\n", 646 | " [121870.],\n", 647 | " [ 63218.],\n", 648 | " [ 83088.],\n", 649 | " [ 39890.]])" 650 | ] 651 | }, 652 | "metadata": { 653 | "tags": [] 654 | }, 655 | "execution_count": 22 656 | } 657 | ] 658 | }, 659 | { 660 | "cell_type": "code", 661 | "metadata": { 662 | "id": "XFpKpUfc4Ao6", 663 | "colab_type": "code", 664 | "colab": {} 665 | }, 666 | "source": [ 667 | "#Predicting for X_test\n", 668 | "y_pred_test = model(X_test)" 669 | ], 670 | "execution_count": null, 671 | "outputs": [] 672 | }, 673 | { 674 | "cell_type": "code", 675 | "metadata": { 676 | "id": "tqc2Wx6o4Z-g", 677 | "colab_type": "code", 678 | "colab": { 679 | "base_uri": "https://localhost:8080/", 680 | "height": 136 681 | }, 682 | "outputId": "f77a4ab1-331d-40f8-cf74-7f9e1749453a" 683 | }, 684 | "source": [ 685 | "\n", 686 | "#Converting predictions from tensor objects into a list\n", 687 | "\n", 688 | "y_pred_test = [y_pred_test[x].item() for x in range(len(y_pred_test))]\n", 689 | "# Comparing Actual and predicted values\n", 690 | "df = {}\n", 691 | "df['Actual Observation'] = y_test\n", 692 | "df['Predicted Salary'] = y_pred_test\n", 693 | "df = pd.DataFrame(df)\n", 694 | "print(df)" 695 | ], 696 | "execution_count": null, 697 | "outputs": [ 698 | { 699 | "output_type": "stream", 700 | "text": [ 701 | " Actual Observation Predicted Salary\n", 702 | "0 (tensor(67938.),) 73861.648438\n", 703 | "1 (tensor(61110.),) 70961.296875\n", 704 | "2 (tensor(98272.),) 95130.859375\n", 705 | "3 (tensor(37730.),) 40024.265625\n", 706 | "4 (tensor(60150.),) 55492.781250\n", 707 | "5 (tensor(54445.),) 57426.351562\n" 708 | ], 709 | "name": "stdout" 710 | } 711 | ] 712 | }, 713 | { 714 | "cell_type": "code", 715 | "metadata": { 716 | "id": "H9Nvxa6Y8KjI", 717 | "colab_type": "code", 718 | "colab": {} 719 | }, 720 | "source": [ 721 | "" 722 | ], 723 | "execution_count": null, 724 | "outputs": [] 725 | } 726 | ] 727 | } --------------------------------------------------------------------------------
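
Note on Linear_regression_.ipynb: the notebook above loads the experience-vs-salary CSV, splits it, wraps the tensors in a DataLoader, and trains nn.Linear(1, 1) with MSELoss and SGD. The sketch below condenses those same steps into one standalone script for readers who want to run the workflow outside Colab. It is illustrative only: it assumes the CSV path Data_set/exp_vs_salary_Data.csv from this repository, scikit-learn's train_test_split (which the notebook also uses), and an arbitrary fixed random_state added here for reproducibility.

# Illustrative sketch of the notebook's workflow as a standalone script.
# Assumptions: Data_set/exp_vs_salary_Data.csv exists and scikit-learn is installed.
import pandas as pd
import torch
import torch.nn as nn
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset

# Load the data and split 80/20 into train and test sets.
data = pd.read_csv("Data_set/exp_vs_salary_Data.csv")
train, test = train_test_split(data, test_size=0.2, random_state=42)  # random_state is an added assumption

# Convert the columns to float32 tensors of shape (N, 1).
X_train = torch.tensor(train[["YearsExperience"]].values, dtype=torch.float32)
y_train = torch.tensor(train[["Salary"]].values, dtype=torch.float32)
X_test = torch.tensor(test[["YearsExperience"]].values, dtype=torch.float32)
y_test = torch.tensor(test[["Salary"]].values, dtype=torch.float32)

# Wrap the training tensors in a DataLoader that yields shuffled mini-batches of 5.
train_dl = DataLoader(TensorDataset(X_train, y_train), batch_size=5, shuffle=True)

# One input feature (years of experience), one output (salary).
model = nn.Linear(1, 1)
loss_fun = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

def fit(num_epochs, model, loss_fun, optimizer, train_dl):
    # Standard loop: predict, compute loss, backpropagate, step, reset gradients.
    for epoch in range(num_epochs):
        for xb, yb in train_dl:
            pred = model(xb)
            loss = loss_fun(pred, yb)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        if (epoch + 1) % 10 == 0:
            print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

fit(100, model, loss_fun, optimizer, train_dl)

# Evaluate on the held-out test set.
with torch.no_grad():
    test_loss = loss_fun(model(X_test), y_test)
print("Test MSE:", test_loss.item())

Because the salary targets are in the tens of thousands, the mean-squared-error values printed during training land in the millions even when the fit is reasonable, as the notebook's own epoch logs show; comparing predictions against the actual targets, as the notebook does in its final cells, is the clearer check of model quality.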