├── examples
│   ├── placeholder.png
│   └── placeholder_small.png
├── video.py
├── drive.py
├── writeup_template.md
└── README.md
--------------------------------------------------------------------------------
/examples/placeholder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thibo73800/CarND-Behavioral-Cloning-P3/master/examples/placeholder.png
--------------------------------------------------------------------------------
/examples/placeholder_small.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thibo73800/CarND-Behavioral-Cloning-P3/master/examples/placeholder_small.png
--------------------------------------------------------------------------------
/video.py:
--------------------------------------------------------------------------------
from moviepy.editor import ImageSequenceClip
import argparse


def main():
    parser = argparse.ArgumentParser(description='Create driving video.')
    parser.add_argument(
        'image_folder',
        type=str,
        default='',
        help='Path to image folder. The video will be created from these images.'
    )
    parser.add_argument(
        '--fps',
        type=int,
        default=60,
        help='FPS (Frames per second) setting for the video.')
    args = parser.parse_args()

    video_file = args.image_folder + '.mp4'
    print("Creating video {}, FPS={}".format(video_file, args.fps))
    clip = ImageSequenceClip(args.image_folder, fps=args.fps)
    clip.write_videofile(video_file)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/drive.py:
--------------------------------------------------------------------------------
import argparse
import base64
from datetime import datetime
import os
import shutil

import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO

from keras.models import load_model
import h5py
from keras import __version__ as keras_version

sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None


class SimplePIController:
    def __init__(self, Kp, Ki):
        self.Kp = Kp
        self.Ki = Ki
        self.set_point = 0.
        self.error = 0.
        self.integral = 0.

    def set_desired(self, desired):
        self.set_point = desired

    def update(self, measurement):
        # proportional error
        self.error = self.set_point - measurement

        # integral error
        self.integral += self.error

        return self.Kp * self.error + self.Ki * self.integral
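
# A note on SimplePIController: update() implements a standard discrete PI
# control law, u[k] = Kp * e[k] + Ki * sum(e[0..k]), where e is the error
# between the desired speed (set_point) and the measured speed. The
# proportional term pushes the throttle toward the target speed and the
# integral term removes steady-state error.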


controller = SimplePIController(0.1, 0.002)
set_speed = 9
controller.set_desired(set_speed)


@sio.on('telemetry')
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))

        throttle = controller.update(float(speed))

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
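
# Flow of the handler above: decode the base64 center-camera frame, run the
# model to predict a steering angle, compute a throttle from the PI
# controller, then send both values back to the simulator via send_control().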


@sio.on('connect')
def connect(sid, environ):
    print("connect ", sid)
    send_control(0, 0)


def send_control(steering_angle, throttle):
    sio.emit(
        "steer",
        data={
            'steering_angle': steering_angle.__str__(),
            'throttle': throttle.__str__()
        },
        skip_sid=True)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument(
        'model',
        type=str,
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    # check that model Keras version is same as local Keras version
    f = h5py.File(args.model, mode='r')
    model_version = f.attrs.get('keras_version')
    keras_version = str(keras_version).encode('utf8')

    if model_version != keras_version:
        print('You are using Keras version ', keras_version,
              ', but the model was built using ', model_version)

    model = load_model(args.model)

    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
--------------------------------------------------------------------------------
/writeup_template.md:
--------------------------------------------------------------------------------
# **Behavioral Cloning**

## Writeup Template

### You can use this file as a template for your writeup if you want to submit it as a markdown file, but feel free to use some other method and submit a pdf if you prefer.

---

**Behavioral Cloning Project**

The goals / steps of this project are the following:
* Use the simulator to collect data of good driving behavior
* Build a convolutional neural network in Keras that predicts steering angles from images
* Train and validate the model with a training and validation set
* Test that the model successfully drives around track one without leaving the road
* Summarize the results with a written report


[//]: # (Image References)

[image1]: ./examples/placeholder.png "Model Visualization"
[image2]: ./examples/placeholder.png "Grayscaling"
[image3]: ./examples/placeholder_small.png "Recovery Image"
[image4]: ./examples/placeholder_small.png "Recovery Image"
[image5]: ./examples/placeholder_small.png "Recovery Image"
[image6]: ./examples/placeholder_small.png "Normal Image"
[image7]: ./examples/placeholder_small.png "Flipped Image"

## Rubric Points
### Here I will consider the [rubric points](https://review.udacity.com/#!/rubrics/432/view) individually and describe how I addressed each point in my implementation.

---
### Files Submitted & Code Quality

#### 1. Submission includes all required files and can be used to run the simulator in autonomous mode

My project includes the following files:
* model.py containing the script to create and train the model
* drive.py for driving the car in autonomous mode
* model.h5 containing a trained convolutional neural network
* writeup_report.md or writeup_report.pdf summarizing the results

#### 2. Submission includes functional code
Using the Udacity provided simulator and my drive.py file, the car can be driven autonomously around the track by executing
```sh
python drive.py model.h5
```

#### 3. Submission code is usable and readable

The model.py file contains the code for training and saving the convolutional neural network. The file shows the pipeline I used for training and validating the model, and it contains comments to explain how the code works.

### Model Architecture and Training Strategy

#### 1. An appropriate model architecture has been employed

My model consists of a convolutional neural network with 3x3 filter sizes and depths between 32 and 128 (model.py lines 18-24).

The model includes RELU layers to introduce nonlinearity (code line 20), and the data is normalized in the model using a Keras lambda layer (code line 18).
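As an illustration only — model.py is not reproduced in this repository snapshot, so the layer sizes below are placeholders consistent with the description above (assuming the Keras 2 API), not the submitted model — such a pipeline might look like:

```python
from keras.models import Sequential
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Flatten, Dense

model = Sequential()
# normalize pixels to roughly [-0.5, 0.5] inside the model (Keras lambda layer)
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Dropout(0.5))  # reduce overfitting
model.add(Flatten())
model.add(Dense(1))  # single output: the steering angle
model.compile(loss='mse', optimizer='adam')
```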
#### 2. Attempts to reduce overfitting in the model

The model contains dropout layers in order to reduce overfitting (model.py line 21).

The model was trained and validated on different data sets to ensure that the model was not overfitting (code lines 10-16). The model was tested by running it through the simulator and ensuring that the vehicle could stay on the track.

#### 3. Model parameter tuning

The model used an Adam optimizer, so the learning rate was not tuned manually (model.py line 25).

#### 4. Appropriate training data

Training data was chosen to keep the vehicle driving on the road. I used a combination of center lane driving, recovering from the left and right sides of the road ...

For details about how I created the training data, see the next section.

### Model Architecture and Training Strategy

#### 1. Solution Design Approach

The overall strategy for deriving a model architecture was to ...

My first step was to use a convolutional neural network model similar to the ... I thought this model might be appropriate because ...

In order to gauge how well the model was working, I split my image and steering angle data into a training and validation set. I found that my first model had a low mean squared error on the training set but a high mean squared error on the validation set. This implied that the model was overfitting.

To combat the overfitting, I modified the model so that ...

Then I ...

The final step was to run the simulator to see how well the car was driving around track one. There were a few spots where the vehicle fell off the track ... To improve the driving behavior in these cases, I ...

At the end of the process, the vehicle is able to drive autonomously around the track without leaving the road.

#### 2. Final Model Architecture

The final model architecture (model.py lines 18-24) consisted of a convolutional neural network with the following layers and layer sizes ...

Here is a visualization of the architecture (note: visualizing the architecture is optional according to the project rubric).

![alt text][image1]

#### 3. Creation of the Training Set & Training Process

To capture good driving behavior, I first recorded two laps on track one using center lane driving. Here is an example image of center lane driving:

![alt text][image2]

I then recorded the vehicle recovering from the left and right sides of the road back to center so that the vehicle would learn to ... These images show what a recovery looks like starting from ... :

![alt text][image3]
![alt text][image4]
![alt text][image5]

Then I repeated this process on track two in order to get more data points.

To augment the data set, I also flipped images and angles, thinking that this would ... For example, here is a normal image followed by its flipped counterpart:

![alt text][image6]
![alt text][image7]
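A flip of this kind is typically a one-liner at data-loading time. As a minimal sketch (the function name is illustrative, assuming images are numpy arrays):

```python
import numpy as np

def augment_flip(image, steering_angle):
    """Mirror the camera frame left-right and negate the steering angle."""
    return np.fliplr(image), -steering_angle
```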

Etc ....

After the collection process, I had X number of data points. I then preprocessed this data by ...

I finally randomly shuffled the data set and put Y% of the data into a validation set.

I used this training data for training the model. The validation set helped determine whether the model was overfitting or underfitting. The ideal number of epochs was Z, as evidenced by ... I used an Adam optimizer so that manually tuning the learning rate wasn't necessary.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Behavioral Cloning Project

[![Udacity - Self-Driving Car NanoDegree](https://s3.amazonaws.com/udacity-sdc/github/shield-carnd.svg)](http://www.udacity.com/drive)

Overview
---
This repository contains starting files for the Behavioral Cloning Project.

In this project, you will use what you've learned about deep neural networks and convolutional neural networks to clone driving behavior. You will train, validate and test a model using Keras. The model will output a steering angle to an autonomous vehicle.

We have provided a simulator where you can steer a car around a track for data collection. You'll use image data and steering angles to train a neural network and then use this model to drive the car autonomously around the track.

We also want you to create a detailed writeup of the project. Check out the [writeup template](https://github.com/udacity/CarND-Behavioral-Cloning-P3/blob/master/writeup_template.md) for this project and use it as a starting point for creating your own writeup. The writeup can be either a markdown file or a pdf document.

To meet specifications, the project will require submitting five files:
* model.py (script used to create and train the model)
* drive.py (script to drive the car - feel free to modify this file)
* model.h5 (a trained Keras model)
* a report writeup file (either markdown or pdf)
* video.mp4 (a video recording of your vehicle driving autonomously around the track for at least one full lap)

This README file describes how to output the video in the "Details About Files In This Directory" section.

Creating a Great Writeup
---
A great writeup should include the [rubric points](https://review.udacity.com/#!/rubrics/432/view) as well as your description of how you addressed each point. You should include a detailed description of the code used (with line-number references and code snippets where necessary), and links to other supporting documents or external references. You should include images in your writeup to demonstrate how your code works with examples.

All that said, please be concise! We're not looking for you to write a book here, just a brief description of how you passed each rubric point, and references to the relevant code :).

You're not required to use markdown for your writeup. If you use another method, please just submit a pdf of your writeup.

The Project
---
The goals / steps of this project are the following:
* Use the simulator to collect data of good driving behavior
* Design, train and validate a model that predicts a steering angle from image data
* Use the model to drive the vehicle autonomously around the first track in the simulator. The vehicle should remain on the road for an entire loop around the track.
* Summarize the results with a written report

### Dependencies
This lab requires:

* [CarND Term1 Starter Kit](https://github.com/udacity/CarND-Term1-Starter-Kit)

The lab environment can be created with the CarND Term1 Starter Kit. Click [here](https://github.com/udacity/CarND-Term1-Starter-Kit/blob/master/README.md) for the details.

The following resources can be found in this github repository:
* drive.py
* video.py
* writeup_template.md

The simulator can be downloaded from the classroom. In the classroom, we have also provided sample data that you can optionally use to help train your model.

## Details About Files In This Directory

### `drive.py`

Usage of `drive.py` requires you to have saved the trained model as an h5 file, e.g. `model.h5`. See the [Keras documentation](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) for how to create this file using the following command:
```python
model.save(filepath)
```
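For instance, a minimal train-and-save flow might look like the following sketch (`X_train` and `y_train` are hypothetical stand-ins for the arrays your model.py builds; this is not the project's actual training code):

```python
# `model` is a Keras network such as the one built in model.py
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, y_train, validation_split=0.2, shuffle=True)
model.save('model.h5')  # writes architecture, weights and optimizer state to one HDF5 file
```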

Once the model has been saved, it can be used with drive.py using this command:

```sh
python drive.py model.h5
```

The above command will load the trained model, use it to make predictions on individual images in real time, and send the predicted angle back to the server via a websocket connection.

Note: there is a known issue on some systems where the locale setting swaps "." and "," in decimal output when using drive.py. When this happens, the predicted steering values can end up clipped to their max/min values. A known fix is to add `export LANG=en_US.utf8` to your bashrc file.

#### Saving a video of the autonomous agent

```sh
python drive.py model.h5 run1
```

The second argument to `drive.py`, `run1`, is the directory in which to save the images seen by the agent. If the directory already exists, it'll be overwritten.

```sh
ls run1

[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_424.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_451.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_477.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_528.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_573.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_618.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_697.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_723.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_749.jpg
[2017-01-09 16:10:23 EST]  12KiB  2017_01_09_21_10_23_817.jpg
...
```

The image file name is a timestamp of when the image was seen. This information is used by `video.py` to create a chronological video of the agent driving.

### `video.py`

```sh
python video.py run1
```

Creates a video based on images found in the `run1` directory. The name of the video will be the name of the directory followed by `.mp4`, so in this case the video will be `run1.mp4`.

Optionally, one can specify the FPS (frames per second) of the video:

```sh
python video.py run1 --fps 48
```

This will run the video at 48 FPS. The default FPS is 60.

#### Why create a video

1. It's been noted that the simulator might perform differently depending on the hardware, so even if your model drives successfully on your machine, it might not on another machine (your reviewer's). Saving a video is a solid backup in case this happens.
2. You could slightly alter the code in `drive.py` and/or `video.py` to create a video of what your model sees after the image is processed (may be helpful for debugging).

### Tips
- Please keep in mind that training images are loaded in BGR colorspace when using cv2, while drive.py loads images in RGB to predict the steering angles (see the sketch below).
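One minimal way to keep the two color orders consistent is to convert at load time; a sketch (the image path is hypothetical, assuming OpenCV is installed as `cv2`):

```python
import cv2

# cv2.imread returns BGR; convert to RGB so training matches what
# drive.py feeds the model at inference time
image_bgr = cv2.imread('IMG/center_2017_01_09_21_10_23_424.jpg')
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
```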

## How to write a README
A well-written README file can enhance your project and portfolio. Develop your abilities to create professional README files by completing [this free course](https://www.udacity.com/course/writing-readmes--ud777).

--------------------------------------------------------------------------------