├── AI_Driver.ipynb
├── README.md
├── ai_driver.py
├── drive.py
├── images
│   ├── center.jpg
│   ├── left.jpg
│   ├── loss.png
│   ├── menu.png
│   ├── model_achitecture.png
│   ├── recording.png
│   ├── right.jpg
│   ├── select_dir.png
│   └── training.png
├── model.py
└── model
    └── model.pth

/AI_Driver.ipynb:
--------------------------------------------------------------------------------
1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "AI_Driver.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [] 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | }, 14 | "accelerator": "GPU" 15 | }, 16 | "cells": [ 17 | { 18 | "cell_type": "code", 19 | "metadata": { 20 | "id": "NqdtXFe5n5KT", 21 | "colab_type": "code", 22 | "outputId": "8bcd2308-b1df-4314-a5a0-0f61a6a008e6", 23 | "colab": { 24 | "base_uri": "https://localhost:8080/", 25 | "height": 986 26 | } 27 | }, 28 | "source": [ 29 | "!git clone https://github.com/LiyuanLucasLiu/RAdam.git\n", 30 | "!python RAdam/setup.py install" 31 | ], 32 | "execution_count": 1, 33 | "outputs": [ 34 | { 35 | "output_type": "stream", 36 | "text": [ 37 | "Cloning into 'RAdam'...\n", 38 | "remote: Enumerating objects: 24, done.\n", 39 | "remote: Counting objects: 100% (24/24), done.\n", 40 | "remote: Compressing objects: 100% (22/22), done.\n", 41 | "remote: Total 298 (delta 9), reused 10 (delta 2), pack-reused 274\n", 42 | "Receiving objects: 100% (298/298), 948.24 KiB | 3.89 MiB/s, done.\n", 43 | "Resolving deltas: 100% (138/138), done.\n", 44 | "running install\n", 45 | "running bdist_egg\n", 46 | "running egg_info\n", 47 | "creating RAdam.egg-info\n", 48 | "writing RAdam.egg-info/PKG-INFO\n", 49 | "writing dependency_links to RAdam.egg-info/dependency_links.txt\n", 50 | "writing requirements to RAdam.egg-info/requires.txt\n", 51 | "writing top-level names to RAdam.egg-info/top_level.txt\n", 52 | "writing manifest file 'RAdam.egg-info/SOURCES.txt'\n", 53 | "reading manifest file 'RAdam.egg-info/SOURCES.txt'\n", 54 | "writing manifest file 'RAdam.egg-info/SOURCES.txt'\n", 55 | "installing library code to build/bdist.linux-x86_64/egg\n", 56 | "running install_lib\n", 57 | "warning: install_lib: 'build/lib' does not exist -- no Python modules to install\n", 58 | "\n", 59 | "creating build\n", 60 | "creating build/bdist.linux-x86_64\n", 61 | "creating build/bdist.linux-x86_64/egg\n", 62 | "creating build/bdist.linux-x86_64/egg/EGG-INFO\n", 63 | "copying RAdam.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\n", 64 | "copying RAdam.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", 65 | "copying RAdam.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", 66 | "copying RAdam.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", 67 | "copying RAdam.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", 68 | "zip_safe flag not set; analyzing archive contents...\n", 69 | "creating dist\n", 70 | "creating 'dist/RAdam-0.0.1-py3.6.egg' and adding 'build/bdist.linux-x86_64/egg' to it\n", 71 | "removing 'build/bdist.linux-x86_64/egg' (and everything under it)\n", 72 | "Processing RAdam-0.0.1-py3.6.egg\n", 73 | "Copying RAdam-0.0.1-py3.6.egg to /usr/local/lib/python3.6/dist-packages\n", 74 | "Adding RAdam 0.0.1 to easy-install.pth file\n", 75 | "\n", 76 | "Installed /usr/local/lib/python3.6/dist-packages/RAdam-0.0.1-py3.6.egg\n", 77 | "Processing dependencies for RAdam==0.0.1\n", 78 | "Searching for torch==1.3.1\n", 79 | "Best match: torch 1.3.1\n", 80 | "Adding torch 1.3.1 to easy-install.pth file\n", 81 | "Installing convert-caffe2-to-onnx script to /usr/local/bin\n", 82 | "Installing convert-onnx-to-caffe2 script to
/usr/local/bin\n", 83 | "\n", 84 | "Using /usr/local/lib/python3.6/dist-packages\n", 85 | "Searching for numpy==1.17.5\n", 86 | "Best match: numpy 1.17.5\n", 87 | "Adding numpy 1.17.5 to easy-install.pth file\n", 88 | "Installing f2py script to /usr/local/bin\n", 89 | "Installing f2py3 script to /usr/local/bin\n", 90 | "Installing f2py3.6 script to /usr/local/bin\n", 91 | "\n", 92 | "Using /usr/local/lib/python3.6/dist-packages\n", 93 | "Finished processing dependencies for RAdam==0.0.1\n" 94 | ], 95 | "name": "stdout" 96 | } 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "metadata": { 102 | "id": "jm8XsBAGEngR", 103 | "colab_type": "code", 104 | "colab": {} 105 | }, 106 | "source": [ 107 | "# imports\n", 108 | "import os\n", 109 | "import sys\n", 110 | "\n", 111 | "import torch\n", 112 | "import torch.nn as nn\n", 113 | "import torch.optim as optim\n", 114 | "from torch.utils import data\n", 115 | "from torch.utils.data import DataLoader\n", 116 | "import torchvision.transforms as transforms\n", 117 | "from torch.utils.data.sampler import SubsetRandomSampler\n", 118 | "from RAdam import radam\n", 119 | "\n", 120 | "import cv2\n", 121 | "import matplotlib.image as mpimg\n", 122 | "import numpy as np\n", 123 | "import csv\n", 124 | "import requests\n", 125 | "import zipfile\n", 126 | "import time\n", 127 | "import pandas as pd" 128 | ], 129 | "execution_count": 0, 130 | "outputs": [] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "metadata": { 135 | "id": "EzlbrzoVEpil", 136 | "colab_type": "code", 137 | "colab": {} 138 | }, 139 | "source": [ 140 | "# class for download\n", 141 | "class DataDownloader:\n", 142 | "\n", 143 | " def __init__(self, file_id, destination, download = True):\n", 144 | " self.file_id = file_id\n", 145 | " self.destination = destination\n", 146 | "\n", 147 | " if download:\n", 148 | " self.download_dataset()\n", 149 | " self.extract_zip()\n", 150 | "\n", 151 | " def download_dataset(self):\n", 152 | " def get_confirm_token(response):\n", 153 | " for key, value in response.cookies.items():\n", 154 | " if key.startswith('download_warning'):\n", 155 | " return value\n", 156 | "\n", 157 | " return None\n", 158 | "\n", 159 | " def save_response_content(response):\n", 160 | " CHUNK_SIZE = 32768\n", 161 | "\n", 162 | " with open(self.destination, \"wb\") as f:\n", 163 | " for chunk in response.iter_content(CHUNK_SIZE):\n", 164 | " if chunk: # filter out keep-alive new chunks\n", 165 | " f.write(chunk)\n", 166 | "\n", 167 | " URL = \"https://docs.google.com/uc?export=download\"\n", 168 | "\n", 169 | " session = requests.Session()\n", 170 | "\n", 171 | " response = session.get(URL, params = { 'id' : self.file_id }, stream = True)\n", 172 | " token = get_confirm_token(response)\n", 173 | "\n", 174 | " if token:\n", 175 | " params = { 'id' : self.file_id, 'confirm' : token }\n", 176 | " response = session.get(URL, params = params, stream = True)\n", 177 | "\n", 178 | " save_response_content(response) \n", 179 | "\n", 180 | " def extract_zip(self):\n", 181 | " if not os.path.exists('input'):\n", 182 | " os.makedirs('input')\n", 183 | "\n", 184 | " if not os.path.exists('output'):\n", 185 | " os.makedirs('output')\n", 186 | "\n", 187 | " with zipfile.ZipFile(self.destination, 'r') as zip_ref:\n", 188 | " zip_ref.extractall('./input/')" 189 | ], 190 | "execution_count": 0, 191 | "outputs": [] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "metadata": { 196 | "id": "r6e2HTjgJGKP", 197 | "colab_type": "code", 198 | "colab": {} 199 | }, 200 | "source": [ 201 | "FILE_ID 
= '1VaYonsJUovGO1AamMQuC2LN47AZ4pkTm'\n", 202 | "DST_LOC = './self_driving_dataset.zip'\n", 203 | "DATA_CSV_FILE_PATH = './input/driving_log.csv'\n", 204 | "DATA_IMAGES_DIR = './input/IMG'\n", 205 | "MODEL_SAVE_PATH = './output/ai_driver_cnn.pth'\n", 206 | "IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3\n", 207 | "\n", 208 | "SAVE_DIR = './output/'\n", 209 | "\n", 210 | "data_download = DataDownloader(FILE_ID, DST_LOC, True)" 211 | ], 212 | "execution_count": 0, 213 | "outputs": [] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "metadata": { 218 | "id": "HKFjzNBSOAE6", 219 | "colab_type": "code", 220 | "colab": {} 221 | }, 222 | "source": [ 223 | "# Helper defs\n", 224 | "\n", 225 | "def load_image(data_dir, image_file):\n", 226 | " \"\"\"\n", 227 | " Load RGB images from a file\n", 228 | " \"\"\"\n", 229 | " name = image_file.split('/')[-1]\n", 230 | " return mpimg.imread(os.path.join(data_dir, name))\n", 231 | "\n", 232 | "def crop(image):\n", 233 | " \"\"\"\n", 234 | " Crop the image (removing the sky at the top and the car front at the bottom)\n", 235 | " \"\"\"\n", 236 | " return image[60:-25, :, :] # remove the sky and the car front\n", 237 | "\n", 238 | "\n", 239 | "def resize(image):\n", 240 | " \"\"\"\n", 241 | " Resize the image to the input shape used by the network model\n", 242 | " \"\"\"\n", 243 | " return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)\n", 244 | "\n", 245 | "\n", 246 | "def rgb2yuv(image):\n", 247 | " \"\"\"\n", 248 | " Convert the image from RGB to YUV (This is what the NVIDIA model does)\n", 249 | " \"\"\"\n", 250 | " return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n", 251 | "\n", 252 | "\n", 253 | "def preprocess(image):\n", 254 | " \"\"\"\n", 255 | " Combine all preprocess functions into one\n", 256 | " \"\"\"\n", 257 | " image = crop(image)\n", 258 | " image = resize(image)\n", 259 | " image = rgb2yuv(image)\n", 260 | " return image\n", 261 | "\n", 262 | "\n", 263 | "def choose_image(data_dir, center, left, right, steering_angle):\n", 264 | " \"\"\"\n", 265 | " Randomly choose an image from the center, left or right, and adjust\n", 266 | " the steering angle.\n", 267 | " \"\"\"\n", 268 | " choice = np.random.choice(3)\n", 269 | " if choice == 0:\n", 270 | " return load_image(data_dir, left), steering_angle + 0.2\n", 271 | " elif choice == 1:\n", 272 | " return load_image(data_dir, right), steering_angle - 0.2\n", 273 | " return load_image(data_dir, center), steering_angle\n", 274 | "\n", 275 | "\n", 276 | "def random_flip(image, steering_angle):\n", 277 | " \"\"\"\n", 278 | " Randomly flipt the image left <-> right, and adjust the steering angle.\n", 279 | " \"\"\"\n", 280 | " if np.random.rand() < 0.5:\n", 281 | " image = cv2.flip(image, 1)\n", 282 | " steering_angle = -steering_angle\n", 283 | " return image, steering_angle\n", 284 | "\n", 285 | "\n", 286 | "def random_translate(image, steering_angle, range_x, range_y):\n", 287 | " \"\"\"\n", 288 | " Randomly shift the image virtially and horizontally (translation).\n", 289 | " \"\"\"\n", 290 | " trans_x = range_x * (np.random.rand() - 0.5)\n", 291 | " trans_y = range_y * (np.random.rand() - 0.5)\n", 292 | " steering_angle += trans_x * 0.002\n", 293 | " trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])\n", 294 | " height, width = image.shape[:2]\n", 295 | " image = cv2.warpAffine(image, trans_m, (width, height))\n", 296 | " return image, steering_angle\n", 297 | "\n", 298 | "\n", 299 | "def random_shadow(image):\n", 300 | " \"\"\"\n", 301 | " Generates and adds random 
shadow\n", 302 | " \"\"\"\n", 303 | " print(image.shape)\n", 304 | " # (x1, y1) and (x2, y2) forms a line\n", 305 | " # xm, ym gives all the locations of the image\n", 306 | " x1, y1 = IMAGE_WIDTH * np.random.rand(), 0\n", 307 | " x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT\n", 308 | " xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]\n", 309 | "\n", 310 | " # mathematically speaking, we want to set 1 below the line and zero otherwise\n", 311 | " # Our coordinate is up side down. So, the above the line: \n", 312 | " # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)\n", 313 | " # as x2 == x1 causes zero-division problem, we'll write it in the below form:\n", 314 | " # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0\n", 315 | " mask = np.zeros_like(image[:, :, 1])\n", 316 | " mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1\n", 317 | "\n", 318 | " # choose which side should have shadow and adjust saturation\n", 319 | " cond = mask == np.random.randint(2)\n", 320 | " s_ratio = np.random.uniform(low=0.2, high=0.5)\n", 321 | "\n", 322 | " # adjust Saturation in HLS(Hue, Light, Saturation)\n", 323 | " hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n", 324 | " hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio\n", 325 | " return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)\n", 326 | "\n", 327 | "\n", 328 | "def random_brightness(image):\n", 329 | " \"\"\"\n", 330 | " Randomly adjust brightness of the image.\n", 331 | " \"\"\"\n", 332 | " # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).\n", 333 | " hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n", 334 | " ratio = 1.0 + 0.4 * (np.random.rand() - 0.5)\n", 335 | " hsv[:,:,2] = hsv[:,:,2] * ratio\n", 336 | " return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n", 337 | "\n", 338 | "\n", 339 | "def augument(data_dir, center, left, right, steering_angle, range_x=100, range_y=10):\n", 340 | " \"\"\"\n", 341 | " Generate an augumented image and adjust steering angle.\n", 342 | " (The steering angle is associated with the center image)\n", 343 | " \"\"\"\n", 344 | " image, steering_angle = choose_image(data_dir, center, left, right, steering_angle)\n", 345 | " image, steering_angle = random_flip(image, steering_angle)\n", 346 | " image, steering_angle = random_translate(image, steering_angle, range_x, range_y)\n", 347 | " # image = random_shadow(image)\n", 348 | " image = random_brightness(image)\n", 349 | " return image, steering_angle" 350 | ], 351 | "execution_count": 0, 352 | "outputs": [] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "metadata": { 357 | "id": "iAr84GNvJMYP", 358 | "colab_type": "code", 359 | "colab": {} 360 | }, 361 | "source": [ 362 | "class CustomDataset(data.Dataset):\n", 363 | "\n", 364 | " def __init__(self, csv_file_path, image_dir, transform = None):\n", 365 | " self.csv_file_path = csv_file_path\n", 366 | " self.image_dir = image_dir\n", 367 | " self.transform = transform\n", 368 | "\n", 369 | " self.examples = []\n", 370 | "\n", 371 | " with open(self.csv_file_path) as csvfile:\n", 372 | " reader = csv.reader(csvfile)\n", 373 | " next(reader, None)\n", 374 | " for line in reader:\n", 375 | " self.examples.append(line)\n", 376 | "\n", 377 | "\n", 378 | " def __getitem__(self, index):\n", 379 | " example = self.examples[index]\n", 380 | " center, left, right = example[0], example[1], example[2]\n", 381 | " steering_angle = float(example[3])\n", 382 | "\n", 383 | " if np.random.rand() < 0.6:\n", 384 | " image, steering_angle = augument(self.image_dir, center, left, right, steering_angle)\n", 385 | " else:\n", 386 | " image = 
load_image(self.image_dir, center) \n", 387 | " \n", 388 | " image = preprocess(image)\n", 389 | " \n", 390 | " if self.transform is not None:\n", 391 | " image = self.transform(image)\n", 392 | " \n", 393 | " return image, steering_angle\n", 394 | "\n", 395 | " def __len__(self):\n", 396 | " return len(self.examples)" 397 | ], 398 | "execution_count": 0, 399 | "outputs": [] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "metadata": { 404 | "id": "PwDQ2JIgMbqE", 405 | "colab_type": "code", 406 | "colab": {} 407 | }, 408 | "source": [ 409 | "batch_size = 128\n", 410 | "num_epochs = 40\n", 411 | "\n", 412 | "validation_split = 0.25\n", 413 | "shuffle_dataset = True\n", 414 | "random_seed = 42\n", 415 | "num_workers = 4" 416 | ], 417 | "execution_count": 0, 418 | "outputs": [] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "metadata": { 423 | "id": "Xl0_MCEMMhBi", 424 | "colab_type": "code", 425 | "outputId": "3164e237-c5d6-486d-d2f9-14ea95b0c75a", 426 | "colab": { 427 | "base_uri": "https://localhost:8080/", 428 | "height": 34 429 | } 430 | }, 431 | "source": [ 432 | "print(\"Initializing Datasets and Dataloaders...\")\n", 433 | "\n", 434 | "# Creating data indices for training and validation splits:\n", 435 | "#Create a dataset object\n", 436 | "transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)])\n", 437 | "\n", 438 | "dataset = CustomDataset(DATA_CSV_FILE_PATH, DATA_IMAGES_DIR, transformations)\n", 439 | "dataset_size = len(dataset)\n", 440 | "# dataset_size = 3000\n", 441 | "indices = list(range(dataset_size))\n", 442 | "split = int(np.floor(validation_split * dataset_size))\n", 443 | "\n", 444 | "if shuffle_dataset :\n", 445 | " np.random.seed(random_seed)\n", 446 | " np.random.shuffle(indices)\n", 447 | "\n", 448 | "train_indices, val_indices = indices[split:], indices[:split]\n", 449 | "\n", 450 | "# Creating PT data samplers and loaders:\n", 451 | "train_sampler = SubsetRandomSampler(train_indices)\n", 452 | "valid_sampler = SubsetRandomSampler(val_indices)\n", 453 | "\n", 454 | "train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, \n", 455 | " sampler=train_sampler, num_workers=num_workers)\n", 456 | "validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n", 457 | " sampler=valid_sampler, num_workers=num_workers)\n", 458 | "\n", 459 | "test_loader = torch.utils.data.DataLoader(dataset, batch_size=1,\n", 460 | " sampler=valid_sampler, num_workers=num_workers)\n", 461 | "\n", 462 | "\n", 463 | "data_loader_dict = {\n", 464 | " 'train': train_loader,\n", 465 | " 'val': validation_loader \n", 466 | "}\n", 467 | "\n", 468 | "# Detect if we have a GPU available\n", 469 | "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")" 470 | ], 471 | "execution_count": 9, 472 | "outputs": [ 473 | { 474 | "output_type": "stream", 475 | "text": [ 476 | "Initializing Datasets and Dataloaders...\n" 477 | ], 478 | "name": "stdout" 479 | } 480 | ] 481 | }, 482 | { 483 | "cell_type": "code", 484 | "metadata": { 485 | "id": "4YlDs4IhMrhS", 486 | "colab_type": "code", 487 | "colab": {} 488 | }, 489 | "source": [ 490 | "class DriverNet(nn.Module):\n", 491 | "\n", 492 | " def __init__(self):\n", 493 | " super(DriverNet, self).__init__()\n", 494 | "\n", 495 | " self.conv_layers = nn.Sequential(\n", 496 | " nn.Conv2d(3, 24, kernel_size=5, stride=2),\n", 497 | " nn.ELU(),\n", 498 | " nn.Conv2d(24, 36, kernel_size=5, stride=2),\n", 499 | " nn.ELU(),\n", 500 | " nn.Conv2d(36, 48, kernel_size=5, 
stride=2),\n", 501 | " nn.ELU(),\n", 502 | " nn.Conv2d(48, 64, kernel_size=3, stride=1),\n", 503 | " nn.ELU(),\n", 504 | " nn.Conv2d(64, 64, kernel_size=3, stride=1),\n", 505 | " nn.ELU(),\n", 506 | " nn.Dropout(p=0.5)\n", 507 | " )\n", 508 | " self.linear_layers = nn.Sequential(\n", 509 | " nn.Linear(in_features=64*1*18, out_features=100),\n", 510 | " nn.ELU(),\n", 511 | " nn.Dropout(p=0.5),\n", 512 | " nn.Linear(in_features=100, out_features=64),\n", 513 | " nn.ELU(),\n", 514 | " nn.Linear(in_features=64, out_features=10),\n", 515 | " nn.ELU(),\n", 516 | " nn.Linear(in_features=10, out_features=1)\n", 517 | " )\n", 518 | " \n", 519 | "\n", 520 | " def forward(self, input):\n", 521 | " input = input.view(input.size(0), 3, 66, 200)\n", 522 | " output = self.conv_layers(input)\n", 523 | " output = output.view(output.size(0), -1)\n", 524 | " output = self.linear_layers(output)\n", 525 | " return output" 526 | ], 527 | "execution_count": 0, 528 | "outputs": [] 529 | }, 530 | { 531 | "cell_type": "code", 532 | "metadata": { 533 | "id": "Ja2-iF3jMuyI", 534 | "colab_type": "code", 535 | "outputId": "35f605a3-be24-4870-d4f3-6f7e6e3a7cd7", 536 | "colab": { 537 | "base_uri": "https://localhost:8080/", 538 | "height": 340 539 | } 540 | }, 541 | "source": [ 542 | "model_ft = DriverNet()\n", 543 | "\n", 544 | "# Send the model to GPU\n", 545 | "model_ft = model_ft.to(device)\n", 546 | "\n", 547 | "# Gather the parameters to be optimized/updated in this run. If we are\n", 548 | "# finetuning we will be updating all parameters. However, if we are\n", 549 | "# doing feature extract method, we will only update the parameters\n", 550 | "# that we have just initialized, i.e. the parameters with requires_grad\n", 551 | "# is True.\n", 552 | "params_to_update = model_ft.parameters()\n", 553 | "print(\"Params to learn:\")\n", 554 | "\n", 555 | "for name,param in model_ft.named_parameters():\n", 556 | " if param.requires_grad == True:\n", 557 | " print(\"\\t\",name)\n", 558 | "\n", 559 | "# Observe that all parameters are being optimized\n", 560 | "optimizer_ft = radam.RAdam(params_to_update)\n", 561 | "# optimizer_ft = optim.SGD(params_to_update, lr = 0.00008)\n", 562 | "# optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)" 563 | ], 564 | "execution_count": 11, 565 | "outputs": [ 566 | { 567 | "output_type": "stream", 568 | "text": [ 569 | "Params to learn:\n", 570 | "\t conv_layers.0.weight\n", 571 | "\t conv_layers.0.bias\n", 572 | "\t conv_layers.2.weight\n", 573 | "\t conv_layers.2.bias\n", 574 | "\t conv_layers.4.weight\n", 575 | "\t conv_layers.4.bias\n", 576 | "\t conv_layers.6.weight\n", 577 | "\t conv_layers.6.bias\n", 578 | "\t conv_layers.8.weight\n", 579 | "\t conv_layers.8.bias\n", 580 | "\t linear_layers.0.weight\n", 581 | "\t linear_layers.0.bias\n", 582 | "\t linear_layers.3.weight\n", 583 | "\t linear_layers.3.bias\n", 584 | "\t linear_layers.5.weight\n", 585 | "\t linear_layers.5.bias\n", 586 | "\t linear_layers.7.weight\n", 587 | "\t linear_layers.7.bias\n" 588 | ], 589 | "name": "stdout" 590 | } 591 | ] 592 | }, 593 | { 594 | "cell_type": "code", 595 | "metadata": { 596 | "id": "yy5DFQjoVO_E", 597 | "colab_type": "code", 598 | "colab": {} 599 | }, 600 | "source": [ 601 | "def toDevice(data, device):\n", 602 | " \n", 603 | " return data.float().to(device)" 604 | ], 605 | "execution_count": 0, 606 | "outputs": [] 607 | }, 608 | { 609 | "cell_type": "code", 610 | "metadata": { 611 | "id": "_ErcaBWrMx-L", 612 | "colab_type": "code", 613 | "colab": {} 614 | }, 615 | "source": [ 616 | "def 
train_model(model, dataloaders, criterion, optimizer, num_epochs=25):\n", 617 | " since = time.time()\n", 618 | "\n", 619 | " epoch_number, train_losses, val_losses, = [], [], []\n", 620 | " best_loss = 10000.0\n", 621 | "\n", 622 | " for epoch in range(num_epochs):\n", 623 | " print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n", 624 | " print('-' * 10)\n", 625 | " epoch_number.append(epoch) \n", 626 | " # Each epoch has a training and validation phase\n", 627 | " # Training loop\n", 628 | " train_loss = 0.0\n", 629 | " val_loss = 0.0\n", 630 | "\n", 631 | " # Training\n", 632 | " model.train()\n", 633 | " for inputs, labels in dataloaders['train']:\n", 634 | " inputs = toDevice(inputs, device)\n", 635 | " labels = toDevice(labels, device)\n", 636 | "\n", 637 | " optimizer.zero_grad()\n", 638 | " # Generate predictions\n", 639 | " out = model(inputs)\n", 640 | " # Calculate loss\n", 641 | " loss = criterion(out, labels.unsqueeze(1))\n", 642 | " # Backpropagation\n", 643 | " loss.backward()\n", 644 | " # Update model parameters\n", 645 | " optimizer.step()\n", 646 | "\n", 647 | " train_loss += loss.item()\n", 648 | "\n", 649 | " # Validation \n", 650 | " model.eval()\n", 651 | " with torch.no_grad():\n", 652 | " for inputs, labels in dataloaders['val']:\n", 653 | " inputs = toDevice(inputs, device)\n", 654 | " labels = toDevice(labels, device)\n", 655 | " # Generate predictions \n", 656 | " out = model(inputs)\n", 657 | " # Calculate loss\n", 658 | " loss = criterion(out, labels.unsqueeze(1))\n", 659 | " \n", 660 | " val_loss += loss.item()\n", 661 | "\n", 662 | " # Average validation loss\n", 663 | " train_loss = train_loss / len(dataloaders['train'])\n", 664 | " val_loss = val_loss / len(dataloaders['val'])\n", 665 | "\n", 666 | " train_losses.append(train_loss)\n", 667 | " val_losses.append(val_loss)\n", 668 | "\n", 669 | " print('Train Loss: {:.4f}'.format(train_loss))\n", 670 | " print('Val Loss: {:.4f}'.format(val_loss))\n", 671 | "\n", 672 | " # If the validation loss is at a minimum\n", 673 | " if val_loss < best_loss:\n", 674 | " # Save the model\n", 675 | " torch.save(model, MODEL_SAVE_PATH)\n", 676 | " best_loss = val_loss\n", 677 | "\n", 678 | " time_elapsed = time.time() - since\n", 679 | " print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n", 680 | " print('Lead val Loss: {:4f}'.format(best_loss))\n", 681 | "\n", 682 | " #creating dataframe and record all the losses and accuracies at each epoch\n", 683 | " log_frame = pd.DataFrame(columns = [\"Epoch\", \"Train Loss\", \"Test Loss\"])\n", 684 | " log_frame[\"Epoch\"] = epoch_number\n", 685 | " log_frame[\"Train Loss\"] = train_losses\n", 686 | " log_frame[\"Test Loss\"] = val_losses\n", 687 | " log_frame.to_csv(os.path.join(SAVE_DIR, \"log2.csv\"), index = False)\n", 688 | "\n", 689 | " # load best model weights\n", 690 | " # model.load_state_dict(best_model_wts)\n", 691 | " return model" 692 | ], 693 | "execution_count": 0, 694 | "outputs": [] 695 | }, 696 | { 697 | "cell_type": "code", 698 | "metadata": { 699 | "id": "gxvfwfW9M0Kx", 700 | "colab_type": "code", 701 | "colab": {} 702 | }, 703 | "source": [ 704 | "criterion = nn.MSELoss()\n", 705 | "\n", 706 | "# Train and evaluate\n", 707 | "model_ft = train_model(model_ft, data_loader_dict, criterion, optimizer_ft, num_epochs=num_epochs)" 708 | ], 709 | "execution_count": 0, 710 | "outputs": [] 711 | }, 712 | { 713 | "cell_type": "code", 714 | "metadata": { 715 | "id": "gOTlk_BaM2uu", 716 | "colab_type": "code", 717 | "colab": {} 718 
| }, 719 | "source": [ 720 | "frame = pd.read_csv(os.path.join(SAVE_DIR, \"log2.csv\"))\n", 721 | "frame" 722 | ], 723 | "execution_count": 0, 724 | "outputs": [] 725 | }, 726 | { 727 | "cell_type": "code", 728 | "metadata": { 729 | "id": "IgCRiZ4lod8E", 730 | "colab_type": "code", 731 | "outputId": "bf21aa67-fe53-45cb-ed76-91f7ceadb85b", 732 | "colab": { 733 | "base_uri": "https://localhost:8080/", 734 | "height": 295 735 | } 736 | }, 737 | "source": [ 738 | "from matplotlib import pyplot as plt\n", 739 | "from matplotlib import style\n", 740 | "\n", 741 | "from numpy import genfromtxt\n", 742 | "\n", 743 | "data = genfromtxt(os.path.join(SAVE_DIR, \"log2.csv\"),delimiter=',', names=['Epoch', 'Train Loss', 'Test Loss'])\n", 744 | "epoch_list = []\n", 745 | "train_loss_list = []\n", 746 | "test_loss_list = []\n", 747 | "for row in data:\n", 748 | "    if not np.isnan(row[0]):\n", 749 | "        epoch_list.append(row[0])\n", 750 | "        train_loss_list.append(row[1])\n", 751 | "        test_loss_list.append(row[2])\n", 752 | "    \n", 753 | "\n", 754 | "plt.plot(epoch_list, train_loss_list, label = \"Training Loss\")\n", 755 | "plt.plot(epoch_list, test_loss_list, label = \"Testing Loss\")\n", 756 | "\n", 757 | "plt.title('MSE Loss Vs Epoch')\n", 758 | "plt.ylabel('Loss')\n", 759 | "plt.xlabel('Epoch')\n", 760 | "\n", 761 | "plt.show()" 762 | ], 763 | "execution_count": 17, 764 | "outputs": [ 765 | { 766 | "output_type": "display_data", 767 | "data": { 768 | "image/png": "<base64-encoded PNG of the MSE loss vs. epoch plot omitted>", 769 | "text/plain": [ 770 | "
" 771 | ] 772 | }, 773 | "metadata": { 774 | "tags": [] 775 | } 776 | } 777 | ] 778 | } 779 | ] 780 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Self-Driving Vehicle Simulation using Deep Learning(CNN) 2 | 3 | This repo contains code for predicting steering angles of self driving car. The inspiraion is taken from Udacity Self driving car module as well End to End Learning for Self-Driving Cars module from NVIDIA 4 | 5 | The End to End Learning for Self-Driving Cars research paper can be found at (https://arxiv.org/abs/1604.07316) This repository is built on PyTorch library. 6 | 7 | ## Abstract 8 | 9 | Data contains 3 camera(left, centre, right) output images as inputs along with steering angle as labels. Task is a supervised machine learning problem where objective is to predict steering angle based on camera inputs. 10 | 11 | Here, I have used CNN based architecture, which comprises of 5 Convolutions layers followed by a fully connected deep neural network with 3 hidden layers for predicting steering angles. Loss function used here is Mean Squared Error. 12 | 13 | 14 | ## Prerequisites 15 | 16 | We will use Python as the primary programming language and PyTorch as the Deep Learning framework. Other resources / software / library could be found as follows. 17 | 18 | 1. Self-driving car simulator developed by [Udacity](https://www.udacity.com/course/self-driving-car-engineer-nanodegree--nd013) with Unity. Download [here](https://github.com/udacity/self-driving-car-sim) 19 | 20 | 2. Install PyTorch environment (latest version the best) in your local machine. 21 | 22 | 3. Log in Google Colab (if you do not have GPU and would love to utilize the power of GPU, please try this and be sure to enable GPU as accelerator) 23 | 24 | 25 | ## Usage 26 | 27 | ``` 28 | git clone https://github.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch.git 29 | python3 drive.py model/model.pth 30 | ``` 31 | 32 | ## Dataset 33 | 34 | The Udacity provided dataset works well but it is not enough to get the car running in difficult terrain (like the second track in Udacity simulator). To gather the data from track 2, we would first need to create a folder in our project directory. Let’s call this folder "data". Now, start the simulator. Select the second track from the menu and go to the training mode option. 35 | 36 | ![Menu](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/menu.png) 37 | 38 | Click "RECORD" button on the right corner and select a directory as the folder to save your training image and driving log information. 39 | 40 | ![Record](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/recording.png) 41 | 42 | ![SelectDir](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/select_dir.png) 43 | 44 | Click "RECORD" again and move your car slowly and carefully. After you have completed recording your move, the training data will be stored in the folder you selected. Here I suggest you record at least 3 laps of the race. Please try best to stay at the center of the road. Also, record laps in reverse direction as it will give more data and thus would help avoid overfitting. 
45 | 46 | ### Data 47 | 48 | 49 | | Left | Center | Right | 50 | |:-------------------:|:---------------------:|:--------------------:| 51 | | ![](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/left.jpg) | ![](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/center.jpg) | ![](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/right.jpg) | 52 | 53 | 54 | * /IMG/ - recorded images from the center, left and right cameras. 55 | * driving_log.csv - stores the image paths and associated information such as steering angle, current speed, throttle and brake. 56 | 57 | ## Training Network 58 | 59 | The figure below shows the architecture used in the project. 60 | 61 | ![Network](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/training.png) 62 | 63 | 64 | ## Results 65 | 66 | ### Training loss vs Validation loss 67 | 68 |
69 | ![Loss](https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/master/images/loss.png) 70 | 71 | Training loss vs Validation loss (generalized)
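During training, the notebook checkpoints the entire model object whenever the validation loss improves (`torch.save(model, MODEL_SAVE_PATH)` inside `train_model`), so the shipped `model/model.pth` is presumably such a best-validation checkpoint. The snippet below is a minimal sketch of reloading a checkpoint of that kind for a single prediction. The example image path is only an illustration, the inline preprocessing simply mirrors the notebook's `preprocess` function and normalization transform (crop sky/hood, resize to 200x66, RGB to YUV, scale to [-1, 1]), and the `DriverNet` class definition must be importable because the full model object, not just a state dict, was pickled.

```
import cv2
import numpy as np
import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# The notebook saves the whole model object, so torch.load restores it directly,
# provided the DriverNet class (model.py / AI_Driver.ipynb) is importable here.
model = torch.load('model/model.pth', map_location=device)
model.eval()

def preprocess(image):
    # Same steps as the notebook: drop sky and hood, resize, convert to YUV,
    # then scale pixel values to [-1, 1].
    image = image[60:-25, :, :]
    image = cv2.resize(image, (200, 66), interpolation=cv2.INTER_AREA)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
    return (image / 127.5) - 1.0

# Example frame from this repo's images folder; any simulator frame works.
frame = cv2.cvtColor(cv2.imread('images/center.jpg'), cv2.COLOR_BGR2RGB)
x = torch.from_numpy(preprocess(frame)).float().unsqueeze(0).to(device)

with torch.no_grad():
    steering_angle = model(x).item()
print(steering_angle)
```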
72 | 73 | ## Demo 74 | [![Watch the video](http://i3.ytimg.com/vi/7VmIJRY-JtY/maxresdefault.jpg)](https://www.youtube.com/watch?v=7VmIJRY-JtY) -------------------------------------------------------------------------------- /ai_driver.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """AI_Driver.ipynb 3 | 4 | Automatically generated by Colaboratory. 5 | 6 | Original file is located at 7 | https://colab.research.google.com/drive/1duHGNGqfOBkiaLVnnrFDt-FHLskcOgm5 8 | """ 9 | 10 | !git clone https://github.com/LiyuanLucasLiu/RAdam.git 11 | !python RAdam/setup.py install 12 | 13 | # imports 14 | import os 15 | import sys 16 | 17 | import torch 18 | import torch.nn as nn 19 | import torch.optim as optim 20 | from torch.utils import data 21 | from torch.utils.data import DataLoader 22 | import torchvision.transforms as transforms 23 | from torch.utils.data.sampler import SubsetRandomSampler 24 | from RAdam import radam 25 | 26 | import cv2 27 | import matplotlib.image as mpimg 28 | import numpy as np 29 | import csv 30 | import requests 31 | import zipfile 32 | import time 33 | import pandas as pd 34 | 35 | # class for download 36 | class DataDownloader: 37 | 38 | def __init__(self, file_id, destination, download = True): 39 | self.file_id = file_id 40 | self.destination = destination 41 | 42 | if download: 43 | self.download_dataset() 44 | self.extract_zip() 45 | 46 | def download_dataset(self): 47 | def get_confirm_token(response): 48 | for key, value in response.cookies.items(): 49 | if key.startswith('download_warning'): 50 | return value 51 | 52 | return None 53 | 54 | def save_response_content(response): 55 | CHUNK_SIZE = 32768 56 | 57 | with open(self.destination, "wb") as f: 58 | for chunk in response.iter_content(CHUNK_SIZE): 59 | if chunk: # filter out keep-alive new chunks 60 | f.write(chunk) 61 | 62 | URL = "https://docs.google.com/uc?export=download" 63 | 64 | session = requests.Session() 65 | 66 | response = session.get(URL, params = { 'id' : self.file_id }, stream = True) 67 | token = get_confirm_token(response) 68 | 69 | if token: 70 | params = { 'id' : self.file_id, 'confirm' : token } 71 | response = session.get(URL, params = params, stream = True) 72 | 73 | save_response_content(response) 74 | 75 | def extract_zip(self): 76 | if not os.path.exists('input'): 77 | os.makedirs('input') 78 | 79 | if not os.path.exists('output'): 80 | os.makedirs('output') 81 | 82 | with zipfile.ZipFile(self.destination, 'r') as zip_ref: 83 | zip_ref.extractall('./input/') 84 | 85 | FILE_ID = '1VaYonsJUovGO1AamMQuC2LN47AZ4pkTm' 86 | DST_LOC = './self_driving_dataset.zip' 87 | DATA_CSV_FILE_PATH = './input/driving_log.csv' 88 | DATA_IMAGES_DIR = './input/IMG' 89 | MODEL_SAVE_PATH = './output/ai_driver_cnn.pth' 90 | IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3 91 | 92 | SAVE_DIR = './output/' 93 | 94 | data_download = DataDownloader(FILE_ID, DST_LOC, True) 95 | 96 | # Helper defs 97 | 98 | def load_image(data_dir, image_file): 99 | """ 100 | Load RGB images from a file 101 | """ 102 | name = image_file.split('/')[-1] 103 | return mpimg.imread(os.path.join(data_dir, name)) 104 | 105 | def crop(image): 106 | """ 107 | Crop the image (removing the sky at the top and the car front at the bottom) 108 | """ 109 | return image[60:-25, :, :] # remove the sky and the car front 110 | 111 | 112 | def resize(image): 113 | """ 114 | Resize the image to the input shape used by the network model 115 | """ 116 | return cv2.resize(image, 
(IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA) 117 | 118 | 119 | def rgb2yuv(image): 120 | """ 121 | Convert the image from RGB to YUV (This is what the NVIDIA model does) 122 | """ 123 | return cv2.cvtColor(image, cv2.COLOR_RGB2YUV) 124 | 125 | 126 | def preprocess(image): 127 | """ 128 | Combine all preprocess functions into one 129 | """ 130 | image = crop(image) 131 | image = resize(image) 132 | image = rgb2yuv(image) 133 | return image 134 | 135 | 136 | def choose_image(data_dir, center, left, right, steering_angle): 137 | """ 138 | Randomly choose an image from the center, left or right, and adjust 139 | the steering angle. 140 | """ 141 | choice = np.random.choice(3) 142 | if choice == 0: 143 | return load_image(data_dir, left), steering_angle + 0.2 144 | elif choice == 1: 145 | return load_image(data_dir, right), steering_angle - 0.2 146 | return load_image(data_dir, center), steering_angle 147 | 148 | 149 | def random_flip(image, steering_angle): 150 | """ 151 | Randomly flip the image left <-> right, and adjust the steering angle. 152 | """ 153 | if np.random.rand() < 0.5: 154 | image = cv2.flip(image, 1) 155 | steering_angle = -steering_angle 156 | return image, steering_angle 157 | 158 | 159 | def random_translate(image, steering_angle, range_x, range_y): 160 | """ 161 | Randomly shift the image vertically and horizontally (translation). 162 | """ 163 | trans_x = range_x * (np.random.rand() - 0.5) 164 | trans_y = range_y * (np.random.rand() - 0.5) 165 | steering_angle += trans_x * 0.002 166 | trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]]) 167 | height, width = image.shape[:2] 168 | image = cv2.warpAffine(image, trans_m, (width, height)) 169 | return image, steering_angle 170 | 171 | 172 | def random_shadow(image): 173 | """ 174 | Generates and adds a random shadow 175 | """ 176 | print(image.shape) 177 | # (x1, y1) and (x2, y2) form a line 178 | # xm, ym give all the locations of the image 179 | x1, y1 = IMAGE_WIDTH * np.random.rand(), 0 180 | x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT 181 | xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH] 182 | 183 | # mathematically speaking, we want to set 1 below the line and zero otherwise 184 | # our coordinates are upside down, so above the line: 185 | # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1) 186 | # as x2 == x1 causes a zero-division problem, we'll write it in the form below: 187 | # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0 188 | mask = np.zeros_like(image[:, :, 1]) 189 | mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1 190 | 191 | # choose which side should have shadow and adjust saturation 192 | cond = mask == np.random.randint(2) 193 | s_ratio = np.random.uniform(low=0.2, high=0.5) 194 | 195 | # adjust Saturation in HLS(Hue, Light, Saturation) 196 | hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) 197 | hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio 198 | return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) 199 | 200 | 201 | def random_brightness(image): 202 | """ 203 | Randomly adjust brightness of the image. 204 | """ 205 | # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness). 206 | hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) 207 | ratio = 1.0 + 0.4 * (np.random.rand() - 0.5) 208 | hsv[:,:,2] = hsv[:,:,2] * ratio 209 | return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) 210 | 211 | 212 | def augument(data_dir, center, left, right, steering_angle, range_x=100, range_y=10): 213 | """ 214 | Generate an augmented image and adjust steering angle.
215 | (The steering angle is associated with the center image) 216 | """ 217 | image, steering_angle = choose_image(data_dir, center, left, right, steering_angle) 218 | image, steering_angle = random_flip(image, steering_angle) 219 | image, steering_angle = random_translate(image, steering_angle, range_x, range_y) 220 | # image = random_shadow(image) 221 | image = random_brightness(image) 222 | return image, steering_angle 223 | 224 | class CustomDataset(data.Dataset): 225 | 226 | def __init__(self, csv_file_path, image_dir, transform = None): 227 | self.csv_file_path = csv_file_path 228 | self.image_dir = image_dir 229 | self.transform = transform 230 | 231 | self.examples = [] 232 | 233 | with open(self.csv_file_path) as csvfile: 234 | reader = csv.reader(csvfile) 235 | next(reader, None) 236 | for line in reader: 237 | self.examples.append(line) 238 | 239 | 240 | def __getitem__(self, index): 241 | example = self.examples[index] 242 | center, left, right = example[0], example[1], example[2] 243 | steering_angle = float(example[3]) 244 | 245 | if np.random.rand() < 0.6: 246 | image, steering_angle = augument(self.image_dir, center, left, right, steering_angle) 247 | else: 248 | image = load_image(self.image_dir, center) 249 | 250 | image = preprocess(image) 251 | 252 | if self.transform is not None: 253 | image = self.transform(image) 254 | 255 | return image, steering_angle 256 | 257 | def __len__(self): 258 | return len(self.examples) 259 | 260 | batch_size = 128 261 | num_epochs = 40 262 | 263 | validation_split = 0.25 264 | shuffle_dataset = True 265 | random_seed = 42 266 | num_workers = 4 267 | 268 | print("Initializing Datasets and Dataloaders...") 269 | 270 | # Creating data indices for training and validation splits: 271 | #Create a dataset object 272 | transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)]) 273 | 274 | dataset = CustomDataset(DATA_CSV_FILE_PATH, DATA_IMAGES_DIR, transformations) 275 | dataset_size = len(dataset) 276 | # dataset_size = 3000 277 | indices = list(range(dataset_size)) 278 | split = int(np.floor(validation_split * dataset_size)) 279 | 280 | if shuffle_dataset : 281 | np.random.seed(random_seed) 282 | np.random.shuffle(indices) 283 | 284 | train_indices, val_indices = indices[split:], indices[:split] 285 | 286 | # Creating PT data samplers and loaders: 287 | train_sampler = SubsetRandomSampler(train_indices) 288 | valid_sampler = SubsetRandomSampler(val_indices) 289 | 290 | train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, 291 | sampler=train_sampler, num_workers=num_workers) 292 | validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, 293 | sampler=valid_sampler, num_workers=num_workers) 294 | 295 | test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, 296 | sampler=valid_sampler, num_workers=num_workers) 297 | 298 | 299 | data_loader_dict = { 300 | 'train': train_loader, 301 | 'val': validation_loader 302 | } 303 | 304 | # Detect if we have a GPU available 305 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 306 | 307 | class DriverNet(nn.Module): 308 | 309 | def __init__(self): 310 | super(DriverNet, self).__init__() 311 | 312 | self.conv_layers = nn.Sequential( 313 | nn.Conv2d(3, 24, kernel_size=5, stride=2), 314 | nn.ELU(), 315 | nn.Conv2d(24, 36, kernel_size=5, stride=2), 316 | nn.ELU(), 317 | nn.Conv2d(36, 48, kernel_size=5, stride=2), 318 | nn.ELU(), 319 | nn.Conv2d(48, 64, kernel_size=3, stride=1), 320 | nn.ELU(), 321 | 
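            # A final 3x3, stride-1 convolution keeps 64 channels. With the 3x66x200
            # input, this convolutional stack ends at a 64x1x18 feature map, which is
            # why the first linear layer below uses in_features=64*1*18.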
nn.Conv2d(64, 64, kernel_size=3, stride=1), 322 | nn.ELU(), 323 | nn.Dropout(p=0.5) 324 | ) 325 | self.linear_layers = nn.Sequential( 326 | nn.Linear(in_features=64*1*18, out_features=100), 327 | nn.ELU(), 328 | nn.Dropout(p=0.5), 329 | nn.Linear(in_features=100, out_features=64), 330 | nn.ELU(), 331 | nn.Linear(in_features=64, out_features=10), 332 | nn.ELU(), 333 | nn.Linear(in_features=10, out_features=1) 334 | ) 335 | 336 | 337 | def forward(self, input): 338 | input = input.view(input.size(0), 3, 66, 200) 339 | output = self.conv_layers(input) 340 | output = output.view(output.size(0), -1) 341 | output = self.linear_layers(output) 342 | return output 343 | 344 | model_ft = DriverNet() 345 | 346 | # Send the model to GPU 347 | model_ft = model_ft.to(device) 348 | 349 | # Gather the parameters to be optimized/updated in this run. If we are 350 | # finetuning we will be updating all parameters. However, if we are 351 | # doing feature extract method, we will only update the parameters 352 | # that we have just initialized, i.e. the parameters with requires_grad 353 | # is True. 354 | params_to_update = model_ft.parameters() 355 | print("Params to learn:") 356 | 357 | for name,param in model_ft.named_parameters(): 358 | if param.requires_grad == True: 359 | print("\t",name) 360 | 361 | # Observe that all parameters are being optimized 362 | optimizer_ft = radam.RAdam(params_to_update) 363 | # optimizer_ft = optim.SGD(params_to_update, lr = 0.00008) 364 | # optimizer_ft = optim.Adam(params_to_update, lr = 0.0001) 365 | 366 | def toDevice(data, device): 367 | 368 | return data.float().to(device) 369 | 370 | def train_model(model, dataloaders, criterion, optimizer, num_epochs=25): 371 | since = time.time() 372 | 373 | epoch_number, train_losses, val_losses, = [], [], [] 374 | best_loss = 10000.0 375 | 376 | for epoch in range(num_epochs): 377 | print('Epoch {}/{}'.format(epoch, num_epochs - 1)) 378 | print('-' * 10) 379 | epoch_number.append(epoch) 380 | # Each epoch has a training and validation phase 381 | # Training loop 382 | train_loss = 0.0 383 | val_loss = 0.0 384 | 385 | # Training 386 | model.train() 387 | for inputs, labels in dataloaders['train']: 388 | inputs = toDevice(inputs, device) 389 | labels = toDevice(labels, device) 390 | 391 | optimizer.zero_grad() 392 | # Generate predictions 393 | out = model(inputs) 394 | # Calculate loss 395 | loss = criterion(out, labels.unsqueeze(1)) 396 | # Backpropagation 397 | loss.backward() 398 | # Update model parameters 399 | optimizer.step() 400 | 401 | train_loss += loss.item() 402 | 403 | # Validation 404 | model.eval() 405 | with torch.no_grad(): 406 | for inputs, labels in dataloaders['val']: 407 | inputs = toDevice(inputs, device) 408 | labels = toDevice(labels, device) 409 | # Generate predictions 410 | out = model(inputs) 411 | # Calculate loss 412 | loss = criterion(out, labels.unsqueeze(1)) 413 | 414 | val_loss += loss.item() 415 | 416 | # Average validation loss 417 | train_loss = train_loss / len(dataloaders['train']) 418 | val_loss = val_loss / len(dataloaders['val']) 419 | 420 | train_losses.append(train_loss) 421 | val_losses.append(val_loss) 422 | 423 | print('Train Loss: {:.4f}'.format(train_loss)) 424 | print('Val Loss: {:.4f}'.format(val_loss)) 425 | 426 | # If the validation loss is at a minimum 427 | if val_loss < best_loss: 428 | # Save the model 429 | torch.save(model, MODEL_SAVE_PATH) 430 | best_loss = val_loss 431 | 432 | time_elapsed = time.time() - since 433 | print('Training complete in {:.0f}m 
{:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) 434 | print('Best val loss: {:.4f}'.format(best_loss)) 435 | 436 | # create a dataframe and record the training and validation losses at each epoch 437 | log_frame = pd.DataFrame(columns = ["Epoch", "Train Loss", "Test Loss"]) 438 | log_frame["Epoch"] = epoch_number 439 | log_frame["Train Loss"] = train_losses 440 | log_frame["Test Loss"] = val_losses 441 | log_frame.to_csv(os.path.join(SAVE_DIR, "log2.csv"), index = False) 442 | 443 | # load best model weights 444 | # model.load_state_dict(best_model_wts) 445 | return model 446 | 447 | criterion = nn.MSELoss() 448 | 449 | # Train and evaluate 450 | model_ft = train_model(model_ft, data_loader_dict, criterion, optimizer_ft, num_epochs=num_epochs) 451 | 452 | frame = pd.read_csv(os.path.join(SAVE_DIR, "log2.csv")) 453 | frame 454 | 455 | from matplotlib import pyplot as plt 456 | from matplotlib import style 457 | 458 | from numpy import genfromtxt 459 | 460 | data = genfromtxt(os.path.join(SAVE_DIR, "log2.csv"),delimiter=',', names=['Epoch', 'Train Loss', 'Test Loss']) 461 | epoch_list = [] 462 | train_loss_list = [] 463 | test_loss_list = [] 464 | for row in data: 465 | if not np.isnan(row[0]): 466 | epoch_list.append(row[0]) 467 | train_loss_list.append(row[1]) 468 | test_loss_list.append(row[2]) 469 | 470 | 471 | plt.plot(epoch_list, train_loss_list, label = "Training Loss") 472 | plt.plot(epoch_list, test_loss_list, label = "Testing Loss") 473 | 474 | plt.title('MSE Loss Vs Epoch') 475 | plt.ylabel('Loss') 476 | plt.xlabel('Epoch') 477 | 478 | plt.show() -------------------------------------------------------------------------------- /drive.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import base64 3 | from datetime import datetime 4 | import os 5 | import shutil 6 | 7 | import numpy as np 8 | import socketio 9 | import eventlet 10 | import eventlet.wsgi 11 | from PIL import Image 12 | from flask import Flask 13 | from io import BytesIO 14 | 15 | import torch 16 | from torch.autograd import Variable 17 | import torchvision.transforms as transforms 18 | from model import * 19 | import cv2 20 | import traceback 21 | 22 | sio = socketio.Server() 23 | app = Flask(__name__) 24 | model = None 25 | prev_image_array = None 26 | 27 | transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)]) 28 | 29 | IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3 30 | 31 | def crop(image): 32 | """ 33 | Crop the image (removing the sky at the top and the car front at the bottom) 34 | """ 35 | return image[60:-25, :, :] # remove the sky and the car front 36 | 37 | 38 | def resize(image): 39 | """ 40 | Resize the image to the input shape used by the network model 41 | """ 42 | return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA) 43 | 44 | 45 | def rgb2yuv(image): 46 | """ 47 | Convert the image from RGB to YUV (This is what the NVIDIA model does) 48 | """ 49 | return cv2.cvtColor(image, cv2.COLOR_RGB2YUV) 50 | 51 | 52 | def preprocess(image): 53 | """ 54 | Combine all preprocess functions into one 55 | """ 56 | image = crop(image) 57 | image = resize(image) 58 | image = rgb2yuv(image) 59 | return image 60 | 61 | class SimplePIController: 62 | def __init__(self, Kp, Ki): 63 | self.Kp = Kp 64 | self.Ki = Ki 65 | self.set_point = 0. 66 | self.error = 0. 67 | self.integral = 0.
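    # Note: SimplePIController is a basic proportional-integral (PI) speed controller.
    # update(measurement) returns Kp * error + Ki * accumulated_error for the current
    # speed error against set_point, which is configured below via set_desired(set_speed).
    # In this script the telemetry handler ends up computing throttle directly from the
    # predicted steering angle and the current speed rather than calling update().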
68 | 69 | def set_desired(self, desired): 70 | self.set_point = desired 71 | 72 | def update(self, measurement): 73 | # proportional error 74 | self.error = self.set_point - measurement 75 | 76 | # integral error 77 | self.integral += self.error 78 | 79 | return self.Kp * self.error + self.Ki * self.integral 80 | 81 | 82 | controller = SimplePIController(0.1, 0.002) 83 | set_speed = 10 84 | controller.set_desired(set_speed) 85 | 86 | # set min/max speed for our autonomous car 87 | MAX_SPEED = 25 88 | MIN_SPEED = 10 89 | 90 | # and a speed limit 91 | speed_limit = MAX_SPEED 92 | 93 | @sio.on('telemetry') 94 | def telemetry(sid, data): 95 | if data: 96 | # The current steering angle of the car 97 | steering_angle = float(data["steering_angle"]) 98 | # The current throttle of the car, how hard to push the pedal 99 | throttle = float(data["throttle"]) 100 | # The current speed of the car 101 | speed = float(data["speed"]) 102 | # The current image from the center camera of the car 103 | image = Image.open(BytesIO(base64.b64decode(data["image"]))) 104 | try: 105 | image = np.asarray(image) # from PIL image to numpy array 106 | image = preprocess(image) # apply the preprocessing 107 | image = np.array([image]) # the model expects a 4D array 108 | image = transformations(image) 109 | image = torch.Tensor(image) 110 | 111 | # predict the steering angle for the image 112 | steering_angle = model(image).view(-1).data.numpy()[0] 113 | 114 | global speed_limit 115 | if speed > speed_limit: 116 | speed_limit = MIN_SPEED # slow down 117 | else: 118 | speed_limit = MAX_SPEED 119 | throttle = 1.0 - steering_angle**2 - (speed/speed_limit)**2 120 | 121 | print('{} {} {}'.format(steering_angle, throttle, speed)) 122 | send_control(steering_angle, throttle) 123 | 124 | except Exception as e: 125 | print(traceback.format_exc()) 126 | 127 | else: 128 | 129 | sio.emit('manual', data={}, skip_sid=True) 130 | 131 | 132 | @sio.on('connect') 133 | def connect(sid, environ): 134 | print("connect ", sid) 135 | send_control(0, 0) 136 | 137 | 138 | def send_control(steering_angle, throttle): 139 | sio.emit( 140 | "steer", 141 | data={ 142 | 'steering_angle': steering_angle.__str__(), 143 | 'throttle': throttle.__str__() 144 | }, 145 | skip_sid=True) 146 | 147 | 148 | if __name__ == '__main__': 149 | parser = argparse.ArgumentParser(description='Remote Driving') 150 | parser.add_argument( 151 | 'model', 152 | type=str, 153 | help='Path to the trained PyTorch model (.pth) file.' 154 | ) 155 | parser.add_argument( 156 | 'image_folder', 157 | type=str, 158 | nargs='?', 159 | default='', 160 | help='Path to image folder. This is where the images from the run will be saved.'
161 | ) 162 | args = parser.parse_args() 163 | 164 | # load the trained PyTorch checkpoint (saved with torch.save) onto the CPU 165 | # checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage) 166 | model = torch.load(args.model, map_location=lambda storage, loc: storage) 167 | # model = checkpoint['model'] 168 | 169 | if args.image_folder != '': 170 | print("Creating image folder at {}".format(args.image_folder)) 171 | if not os.path.exists(args.image_folder): 172 | os.makedirs(args.image_folder) 173 | else: 174 | shutil.rmtree(args.image_folder) 175 | os.makedirs(args.image_folder) 176 | print("RECORDING THIS RUN ...") 177 | else: 178 | print("NOT RECORDING THIS RUN ...") 179 | 180 | # wrap Flask application with engineio's middleware 181 | app = socketio.Middleware(sio, app) 182 | 183 | # deploy as an eventlet WSGI server 184 | eventlet.wsgi.server(eventlet.listen(('', 4567)), app) -------------------------------------------------------------------------------- /images/center.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/center.jpg -------------------------------------------------------------------------------- /images/left.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/left.jpg -------------------------------------------------------------------------------- /images/loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/loss.png -------------------------------------------------------------------------------- /images/menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/menu.png -------------------------------------------------------------------------------- /images/model_achitecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/model_achitecture.png -------------------------------------------------------------------------------- /images/recording.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/recording.png -------------------------------------------------------------------------------- /images/right.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/right.jpg -------------------------------------------------------------------------------- /images/select_dir.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/select_dir.png --------------------------------------------------------------------------------
/images/training.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/images/training.png -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | class DriverNet(nn.Module): 4 | 5 | def __init__(self): 6 | super(DriverNet, self).__init__() 7 | 8 | self.conv_layers = nn.Sequential( 9 | nn.Conv2d(3, 24, kernel_size=5, stride=2), 10 | nn.ELU(), 11 | nn.Conv2d(24, 36, kernel_size=5, stride=2), 12 | nn.ELU(), 13 | nn.Conv2d(36, 48, kernel_size=5, stride=2), 14 | nn.ELU(), 15 | nn.Conv2d(48, 64, kernel_size=3, stride=1), 16 | nn.ELU(), 17 | nn.Conv2d(64, 64, kernel_size=3, stride=1), 18 | nn.ELU(), 19 | nn.Dropout(p=0.5) 20 | ) 21 | self.linear_layers = nn.Sequential( 22 | nn.Linear(in_features=64*1*18, out_features=100), 23 | nn.ELU(), 24 | nn.Dropout(p=0.4), 25 | nn.Linear(in_features=100, out_features=64), 26 | nn.ELU(), 27 | nn.Linear(in_features=64, out_features=10), 28 | nn.ELU(), 29 | nn.Linear(in_features=10, out_features=1) 30 | ) 31 | 32 | 33 | def forward(self, x): 34 | x = x.view(x.size(0), 3, 66, 200) 35 | output = self.conv_layers(x) 36 | output = output.view(output.size(0), -1) 37 | output = self.linear_layers(output) 38 | return output -------------------------------------------------------------------------------- /model/model.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/milsun/AI-Driver-CNN-DeepLearning-PyTorch/3737d439058f8021e939d3e33b169eed4b54b0c4/model/model.pth --------------------------------------------------------------------------------
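
For reference, below is a minimal, untested sketch (not a file in this repository) of how the shipped checkpoint model/model.pth might be used offline to predict a steering angle for a single frame. The file name inference_example.py is hypothetical; images/center.jpg and model/model.pth are real files in the repo. The preprocessing and normalization mirror drive.py, and DriverNet from model.py has to be importable because the checkpoint is a fully pickled module.

```python
# inference_example.py -- a hypothetical helper, NOT part of this repository.
# Sketches how model/model.pth could be used to predict a steering angle for one
# frame, mirroring the preprocessing done in drive.py.
import cv2
import numpy as np
import torch
import matplotlib.image as mpimg

# DriverNet must be importable so the fully pickled checkpoint can be unpickled
# (drive.py achieves the same thing with `from model import *`).
from model import DriverNet  # noqa: F401

IMAGE_HEIGHT, IMAGE_WIDTH = 66, 200


def preprocess(image):
    """Crop sky/hood, resize to 200x66 and convert RGB -> YUV, as in drive.py."""
    image = image[60:-25, :, :]
    image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)
    return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)


if __name__ == "__main__":
    # The checkpoint is a whole pickled module (saved with torch.save(model, ...)).
    # On newer PyTorch (>= 2.6) you may need torch.load(..., weights_only=False).
    model = torch.load("model/model.pth", map_location="cpu")
    model.eval()

    image = mpimg.imread("images/center.jpg")     # sample frame shipped with the repo
    image = preprocess(image).astype(np.float32)
    image = image / 127.5 - 1.0                   # same normalization as the training Lambda transform
    batch = torch.from_numpy(image[np.newaxis])   # shape (1, 66, 200, 3)

    with torch.no_grad():
        steering_angle = model(batch).view(-1)[0].item()
    print("predicted steering angle: {:.4f}".format(steering_angle))
```

The batch is deliberately left in (1, 66, 200, 3) layout, since DriverNet.forward reshapes its input with view(batch, 3, 66, 200) rather than permuting the axes, exactly as the training script and drive.py do.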