├── .coveragerc ├── .dockerignore ├── .gitignore ├── .gitmodules ├── .travis.yml ├── LICENSE ├── README.md ├── arduino ├── Makefile ├── order.h ├── parameters.h ├── slave.cpp └── slave.h ├── codecov.yml ├── constants.py ├── custom_model.pth ├── datasets ├── .gitkeep └── test_dataset │ ├── 160.jpg │ ├── 161.jpg │ ├── 162.jpg │ ├── 163.jpg │ ├── 164.jpg │ ├── 165.jpg │ ├── 166.jpg │ ├── 167.jpg │ ├── 168.jpg │ ├── 169.jpg │ ├── 170.jpg │ ├── 171.jpg │ ├── 172.jpg │ ├── 173.jpg │ ├── 174.jpg │ ├── 175.jpg │ ├── 176.jpg │ ├── 177.jpg │ ├── 178.jpg │ ├── 179.jpg │ ├── 180.jpg │ └── labels.json ├── debug └── .gitkeep ├── docker ├── Dockerfile.cpu ├── Dockerfile.rpi3 ├── entrypoint.sh └── qemu-arm-static ├── image_processing ├── __init__.py ├── image_processing.py ├── picamera_threads.py └── warp_image.py ├── logs └── .gitkeep ├── main.py ├── path_planning ├── __init__.py ├── bezier_curve.py └── stanley_controller.py ├── requirements.txt ├── ros_nodes ├── __init__.py ├── camera_node.py ├── keyboard_node.py └── serial_adapter.py ├── run_docker_cpu.sh ├── run_docker_rpi3.sh ├── run_tests.sh ├── teleop ├── __init__.py ├── teleop_client.py └── teleop_server.py ├── tests ├── __init__.py ├── common.py ├── test_image_processing.py ├── test_path_planning.py ├── test_training.py └── test_utils.py └── train ├── __init__.py ├── benchmark.py ├── convert_video.py ├── models.py ├── split_video.py ├── test.py ├── train.py └── utils.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = False 3 | omit = 4 | tests/* 5 | # ROS nodes are python 2 only 6 | ros_nodes/* 7 | # Files that require the picamera package 8 | # Only available on the Raspberry Pi 9 | main.py 10 | teleop/teleop_server.py 11 | image_processing/picamera_threads.py 12 | # Teleop client requires a X-server 13 | teleop/teleop_client.py 14 | # Needs an input video 15 | train/split_video.py 16 | train/convert_video.py 17 | 18 | [report] 19 | exclude_lines = 20 | pragma: no cover 21 | 22 | [html] 23 | directory = debug/coverage/ 24 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | datasets/ 2 | logs/ 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | debug/ 2 | bin/ 3 | lib/ 4 | .idea/ 5 | __pycache__/ 6 | .pytest_cache/ 7 | *.x 8 | build/ 9 | tmp/ 10 | *.png 11 | *.jpg 12 | *.pyc 13 | *.jpeg 14 | render/ 15 | *.blend1 16 | *.log 17 | *.npz 18 | *.zip 19 | *.h264 20 | *.mp4 21 | *.pkl 22 | *.pth 23 | *.so 24 | .coverage* 25 | coverage.xml 26 | datasets/ 27 | docker/opencv/ 28 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "teleop/cpp_arduino_serial"] 2 | path = teleop/cpp_arduino_serial 3 | url = https://github.com/araffin/cpp-arduino-serial.git 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | os: 3 | - linux 4 | python: 5 | - "2.7" 6 | - "3.5" 7 | - "3.6" 8 | 9 | # command to install dependencies 10 | install: 11 | - pip install opencv-python 12 | - if [[ $TRAVIS_PYTHON_VERSION == '2.7' ]]; then pip install 
http://download.pytorch.org/whl/cpu/torch-0.4.0-cp27-cp27mu-linux_x86_64.whl; fi 13 | - if [[ $TRAVIS_PYTHON_VERSION == '3.5' ]]; then pip install http://download.pytorch.org/whl/cpu/torch-0.4.0-cp35-cp35m-linux_x86_64.whl; fi 14 | - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then pip install http://download.pytorch.org/whl/cpu/torch-0.4.0-cp36-cp36m-linux_x86_64.whl; fi 15 | - pip install -r requirements.txt 16 | - pip install codecov 17 | # command to run tests 18 | script: 19 | - ./run_tests.sh # or py.test for Python versions 3.5 and below 20 | 21 | after_success: 22 | - codecov 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Sergio Nicolas RODRIGUEZ RODRIGUEZ and Antonin RAFFIN 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Autonomous Racing Robot With an Arduino, a Raspberry Pi and a Pi Camera 2 | 3 | [![Build Status](https://travis-ci.com/sergionr2/RacingRobot.svg?branch=master)](https://travis-ci.com/sergionr2/RacingRobot) 4 | [![codecov](https://codecov.io/gh/sergionr2/RacingRobot/branch/master/graph/badge.svg)](https://codecov.io/gh/sergionr2/RacingRobot) 5 | 6 | 7 | Autonomous toy racing car. CAMaleon team at the Toulouse Robot Race 2017. Humbavision team at IronCar. 
8 | Medium article: [https://medium.com/@araffin/autonomous-racing-robot-with-an-arduino-a-raspberry-pi-and-a-pi-camera-3e72819e1e63](https://medium.com/@araffin/autonomous-racing-robot-with-an-arduino-a-raspberry-pi-and-a-pi-camera-3e72819e1e63) 9 | 10 | **Video of the car**: [https://www.youtube.com/watch?v=xhI71ZdSh6k](https://www.youtube.com/watch?v=xhI71ZdSh6k) 11 | 12 | [![The racing robot](https://cdn-images-1.medium.com/max/2000/1*UsmiJ4IzXi6U9svKjB22zw.jpeg)](https://www.youtube.com/watch?v=xhI71ZdSh6k) 13 | 14 | Table of Contents 15 | ================= 16 | * [Detailed Presentation](#detailed-presentation) 17 | * [3D Models and Training Data](#3d-models-and-training-data) 18 | * [3D Models](#3d-models) 19 | * [Training Data](#training-data) 20 | * [IronCar and Toulouse Robot Race Datasets](#ironcar-and-toulouse-robot-race-datasets) 21 | * [How to run everything?](#how-to-run-everything) 22 | * [Autonomous mode](#autonomous-mode) 23 | * [Remote Control Mode](#remote-control-mode) 24 | * [How to train the line detector?](#how-to-train-the-line-detector) 25 | * [Installation](#installation) 26 | * [Recommended: Use an image with everything already installed](#recommended-use-an-image-with-everything-already-installed) 27 | * [From Scratch](#from-scratch) 28 | * [Python Packages](#python-packages) 29 | * [Contributors](#contributors) 30 | 31 | ## Detailed Presentation 32 | 33 | We wrote an article on Medium that details our approach. You can read it [here](https://medium.com/@araffin/autonomous-racing-robot-with-an-arduino-a-raspberry-pi-and-a-pi-camera-3e72819e1e63) 34 | 35 | En français: [http://enstar.ensta-paristech.fr/blog/public/racing_car/](http://enstar.ensta-paristech.fr/blog/public/racing_car/) 36 | 37 | ## 3D Models and Training Data 38 | 39 | ### 3D Models 40 | 41 | 3D models (we used onshape.com): 42 | - [Battery Holder](https://cad.onshape.com/documents/a94876919eca38f49abb5ed6/w/b7e86a4005e4b951f0e3d31a/e/5d33c78dde85b96e033f5c9b) 43 | - [Camera Holder](https://cad.onshape.com/documents/dfd040c2f0a4f5410fdcb118/w/d7cc8bc352d92670e7416569/e/0f263a0dde0b04a2688fd3c9) 44 | - [Additional Camera Piece](https://cad.onshape.com/documents/1c4a51d839f2a5989e78ef1f/w/1af5b4b508310461911ecd97/e/a35856fc588eb371f0bac58b) 45 | - [Raspberry Pi Holder](https://cad.onshape.com/documents/621b6943711d60790ddc2b9f/w/c29ba5f453ce625afc8128f6/e/1aa39940e0bdabd3303d76c4) 46 | 47 | Note: the Battery Holder was designed for this [External Battery](https://www.amazon.fr/gp/product/B00Y7S4JRQ/ref=oh_aui_detailpage_o00_s01?ie=UTF8&psc=1) 48 | 49 | 50 | ### Training Data 51 | 52 | #### IronCar and Toulouse Robot Race Datasets 53 | 54 | 55 | We release the different videos taken with the on-board camera, along with the labeled data (the labels are in a pickle file) for IronCar and the Toulouse Robot Race: 56 | 57 | - [Videos](https://drive.google.com/open?id=1VJ46uBZUfxwUVPHGw1p8d6cgHDGervcL) 58 | - (outdated) [Toulouse Dataset](https://drive.google.com/open?id=1vj7N0aE-eyKg7OY0ZdlkwW2rl51UzwEW) 59 | - (outdated) [IronCar Dataset](https://drive.google.com/open?id=1FZdXnrO7WAo4A4-repE_dglCc2ZoAJwa) 60 | 61 | 62 | ## How to run everything? 63 | 64 | For installation, see section **[Installation](#installation)**. 65 | 66 | ## Autonomous mode 67 | 68 | 0. Compile and upload the code on the Arduino 69 | ``` 70 | cd arduino/ 71 | make 72 | make upload 73 | ``` 74 | 75 | 1. Launch the main script on the Raspberry Pi; it will try to follow a line. 76 | All useful constants can be found in `constants.py`. 77 | ``` 78 | python main.py 79 | ``` 80 |
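The control loop in `main.py` is a PID on the lateral error of the target point returned by the image processing thread, using the gains defined in `constants.py`. As a rough sketch of one update step (a simplified illustration, not the exact code of `main.py`: the error normalization and the turn/straight gain switch threshold are assumptions):

```
# Simplified sketch of one PID update (assumptions: the error is the
# lateral offset of the target point, normalized by half the image width;
# the sharp-turn threshold on turn_percent is illustrative)
from constants import THETA_MIN, THETA_MAX, Kp_turn, Kp_line, Kd, Ki

def pid_steering(x_target, turn_percent, last_error, error_i, x_center, max_error_px):
    error = (x_center - x_target) / max_error_px  # normalized lateral error
    error_d = error - last_error  # derivative term (per frame)
    error_i += error  # integral term
    # Use a stronger gain in sharp turns than on straight lines
    kp = Kp_turn if turn_percent > 50 else Kp_line
    u = kp * error + Kd * error_d + Ki * error_i
    # Steering order centered on the neutral angle, clipped to the servo limits
    theta = (THETA_MIN + THETA_MAX) / 2 + u
    return int(min(max(theta, THETA_MIN), THETA_MAX)), error, error_i
```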
81 | ## Remote Control Mode 82 | 83 | 0. You need a computer in addition to the Raspberry Pi 84 | 1. Create a local wifi network (e.g. using [create ap](https://github.com/oblique/create_ap)) 85 | 2. Connect the Raspberry Pi to this network ([Wifi on RPI](https://www.raspberrypi.org/documentation/configuration/wireless/wireless-cli.md)) 86 | 3. Launch the teleoperation server (it will use the port 5556) 87 | ``` 88 | python -m teleop.teleop_server 89 | ``` 90 | 4. Launch the teleoperation client on your computer (you have to edit the Raspberry Pi `IP` in `constants.py`) 91 | ``` 92 | python -m teleop.teleop_client 93 | ``` 94 | 5. Enjoy! You can now control the car with the keyboard. 95 | 96 | ## How to train the line detector? 97 | 98 | 1. Record a video in the teleoperation mode: 99 | ``` 100 | python -m teleop.teleop_server -v my_video 101 | ``` 102 | 2. Convert the recorded video from h264 to mp4 using ffmpeg or [MP4Box](https://gpac.wp.imt.fr/mp4box/) 103 | ``` 104 | MP4Box -add video.h264 video.mp4 105 | ``` 106 | 107 | 3. Split the video into a sequence of images 108 | ``` 109 | python -m train.split_video -i video.mp4 -o path/to/dataset/folder 110 | ``` 111 | 112 | 4. Label the data using the labeling tool: [https://github.com/araffin/graph-annotation-tool](https://github.com/araffin/graph-annotation-tool) 113 | 114 | 115 | 5. Rename the json file that contains the labels to `labels.json` and put it in the same folder as the dataset (the folder with the images) 116 | 117 | 6. Train the neural network (again, please change the paths in the script) 118 | ``` 119 | python -m train.train -f path/to/dataset/folder 120 | ``` 121 | The best model (lowest error on the validation data) will be saved as *cnn_model_tmp.pth*. 122 | 123 | 124 | 7. Test the trained neural network (you can use the `-i` option to test it on a video) 125 | 126 | ``` 127 | python -m train.test -f path/to/dataset/folder -w cnn_model_tmp.pth 128 | ``` 129 |
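The labels produced by the annotation tool (step 4) map each image name to three (x, y) points on the line, as shown in `datasets/test_dataset/labels.json`. As a minimal sketch of reading such a dataset (the project's own loading code lives in the `train` module; the flattening to 6 values matches `NUM_OUTPUT` in `constants.py`):

```
# Minimal sketch: load images and their 3-point labels
import json
import os

import cv2
import numpy as np

def loadLabeledDataset(folder):
    with open(os.path.join(folder, 'labels.json')) as f:
        labels = json.load(f)
    images, points = [], []
    for name, pts in sorted(labels.items()):
        image = cv2.imread(os.path.join(folder, name))
        if image is None:
            continue
        images.append(image)
        # 3 (x, y) points -> 6 outputs (cf. NUM_OUTPUT in constants.py)
        points.append(np.array(pts, dtype=np.float32).flatten())
    return images, np.array(points)

images, points = loadLabeledDataset('datasets/test_dataset')
print(len(images), points.shape)  # 21 images, (21, 6)
```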
130 | ## Installation 131 | 132 | #### Recommended: Use an image with everything already installed 133 | 134 | 0. You need a 16GB micro sd card (warning, all data on that card will be overwritten) 135 | WARNING: for a smaller sd card, you need to resize the image before writing it (this [link](https://softwarebakery.com/shrinking-images-on-linux) and [repo](https://github.com/billw2/rpi-clone) may help) 136 | 137 | 1. Download the image [here](https://drive.google.com/open?id=1CUmSKOQ7i_XTrsLCRntypK9KcVaVwM4h) 138 | 139 | Info about the Linux image: 140 | OS: [Ubuntu MATE 16.04](https://ubuntu-mate.org/raspberry-pi/) for Raspberry Pi 141 | 142 | **Username**: enstar 143 | 144 | **Password**: enstar 145 | 146 | 147 | Installed software: 148 | - all the dependencies for that project (OpenCV >= 3, PyTorch, ...) 149 | - the current project (in the folder RacingRobot/) 150 | - ROS Kinetic 151 | 152 | Camera and SSH are enabled. 153 | 154 | 155 | 2. Identify the name of your sd card using: 156 | ``` 157 | fdisk -l 158 | ``` 159 | For instance, it gives: 160 | ``` 161 | /dev/mmcblk0p1 2048 131071 129024 63M c W95 FAT32 (LBA) 162 | /dev/mmcblk0p2 131072 30449663 30318592 14,5G 83 Linux 163 | ``` 164 | In that case, your sd card is named */dev/mmcblk0* (p1 and p2 stand for partition). 165 | 166 | 3. Write the downloaded image to the sd card. 167 | ``` 168 | gunzip --stdout ubuntu_ros_racing_robot.img.gz | sudo dd bs=4M of=/dev/mmcblk0 169 | ``` 170 | 171 | 4. Enjoy! 172 | The current project is located in `RacingRobot/`. 173 | 174 | 175 | If you want to back up an image of a Raspberry Pi: 176 | ``` 177 | sudo dd bs=4M if=/dev/mmcblk0 | gzip > ubuntu_ros_racing_robot.img.gz 178 | ``` 179 | 180 | #### From Scratch 181 | 182 | Update your pi 183 | ``` 184 | sudo apt-get update 185 | sudo apt-get upgrade 186 | sudo rpi-update 187 | ``` 188 | 189 | Arduino + Arduino Makefile + rlwrap + screen + MP4Box 190 | ``` 191 | sudo apt-get install arduino-core arduino-mk rlwrap screen gpac 192 | ``` 193 | 194 | - Arduino 1.0.5 195 | - [Arduino-Makefile](https://github.com/sudar/Arduino-Makefile) 196 | - OpenCV >= 3 197 | - libserial-dev (apt-get) 198 | - Python 2 or 3 199 | 200 | OpenCV 201 | - [PreCompiled](https://github.com/jabelone/OpenCV-for-Pi) This is the **recommended method** 202 | Compile from source: 203 | - [Docs](https://docs.opencv.org/3.4.1/d7/d9f/tutorial_linux_install.html) 204 | - [Tutorial](https://www.life2coding.com/install-opencv-3-4-0-python-3-raspberry-pi-3/) 205 | - [Raspbian Tuto](http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/) 206 | 207 | 208 | #### Python Packages 209 | All the required packages (except pytorch and torchvision) can be found in `requirements.txt`; install them using: 210 | 211 | ``` 212 | pip install -r requirements.txt 213 | ``` 214 | 215 | In short: 216 | - PySerial 217 | - TQDM (for progressbar) 218 | - [PyGame](http://www.pygame.org/wiki/CompileUbuntu#Installing%20pygame%20with%20pip) (for teleoperation) 219 | - Enum support (for Python 2) 220 | - ZeroMQ (for teleoperation) 221 | - PyTorch (you have to compile it from source for the RPI) 222 | - scikit-learn 223 | - scipy 224 | 225 | ``` 226 | pip install pyserial tqdm pygame enum34 scikit-learn scipy 227 | ``` 228 | 229 | ZeroMQ (Message Passing with sockets) for remote control mode 230 | ``` 231 | sudo apt-get install libzmq3-dev 232 | pip install pyzmq 233 | ``` 234 | 235 | Note: for using the serial port, you need to change the current user's permissions: 236 | ``` 237 | # Add user to dialout group to have the right to write on the serial port 238 | sudo usermod -a -G dialout $USER 239 | # You need to logout/login again for that change to be taken into account 240 | ``` 241 | 242 | 243 | Additional Python dev-dependencies for training the neural network: 244 | On your laptop: 245 | ``` 246 | pip install torch 247 | pip install torchvision 248 | ``` 249 | 250 | On the Raspberry Pi: 251 | 252 | - You can try the Python 2 wheel (not tested) that was created for this project: 253 | 254 | 0. Download the Python wheel [here](https://drive.google.com/open?id=1vFV73nZDbZ1GDRzz4YeBGDbc4S5Ddxwr) 255 | 256 | And then: 257 | ``` 258 | pip install torch-0.4.0a0+b23fa21-cp27-cp27mu-linux_armv7l.whl 259 | ``` 260 | 261 | Or follow this tutorial: 262 | [PyTorch on the Raspberry Pi](http://book.duckietown.org/fall2017/duckiebook/pytorch_install.html) 263 | 264 | 0. Make sure you have at least 3 GB of swap (see link above) 265 | 266 | ``` 267 | sudo dd if=/dev/zero of=/swap1 bs=1M count=3072 status=progress 268 | sudo mkswap /swap1 269 | sudo swapon /swap1 270 | ``` 271 | 272 | 1. (optional) Install a recent version of cmake + scikit-build + ninja 273 | 274 | 2. Install PyTorch 275 | 276 | See [https://github.com/pytorch/pytorch](https://github.com/pytorch/pytorch) for dependencies.
277 | Additional dependencies: 278 | 279 | ``` 280 | sudo apt-get install libopenblas-dev libeigen3-dev libffi-dev 281 | ``` 282 | 283 | ``` 284 | # don't forget to set the env variables: 285 | export NO_CUDA=1 286 | export NO_DISTRIBUTED=1 287 | git clone --recursive https://github.com/pytorch/pytorch 288 | python setup.py install --user 289 | # torchvision is not used yet 290 | pip install torchvision --user 291 | ``` 292 | 293 | 294 | [Wifi on RPI](https://www.raspberrypi.org/documentation/configuration/wireless/wireless-cli.md) 295 | 296 | OpenCV with Python 2 (RPI), compiling from source (cf. [Docs](https://docs.opencv.org/3.4.1/d7/d9f/tutorial_linux_install.html) and [tuto](https://www.life2coding.com/install-opencv-3-4-0-python-3-raspberry-pi-3/)): 297 | Additional dependencies: 298 | 299 | ``` 300 | sudo apt-get install libtbb-dev opencl-headers libomp-dev libopenblas-dev libeigen3-dev 301 | sudo apt-get install libatlas-base-dev gfortran -y 302 | ``` 303 | 304 | With [Gstreamer](https://stackoverflow.com/questions/37678324/compiling-opencv-with-gstreamer-cmake-not-finding-gstreamer) 305 | Then: 306 | 307 | ``` 308 | cmake -D CMAKE_INSTALL_PREFIX=/usr/local \ 309 | -D BUILD_opencv_java=OFF \ 310 | -D BUILD_opencv_python2=ON \ 311 | -D BUILD_opencv_python3=OFF \ 312 | -D PYTHON_DEFAULT_EXECUTABLE=$(which python) \ 313 | -D INSTALL_C_EXAMPLES=OFF \ 314 | -DINSTALL_PYTHON_EXAMPLES=ON -DBUILD_TIFF=ON -DWITH_CUDA=OFF -DWITH_OPENGL=ON -DWITH_OPENCL=ON -DWITH_IPP=ON -DWITH_TBB=ON -DWITH_EIGEN=ON -DWITH_V4L=ON -DWITH_VTK=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DCMAKE_BUILD_TYPE=RELEASE .. 315 | ``` 316 | 317 | 318 | OpenCV with Anaconda, compiling from source: 319 | ``` 320 | cmake -DPYTHON_EXECUTABLE=/home/your_name/anaconda3/bin/python3 \ 321 | -DPYTHON_INCLUDE=/home/your_name/anaconda3/include \ 322 | -DPYTHON_LIBRARY=/home/your_name/anaconda3/lib/libpython3.6m.so \ 323 | -DPYTHON_PACKAGES_PATH=/home/your_name/anaconda3/lib/python3.6/site-packages \ 324 | -DPYTHON_NUMPY_INCLUDE_DIR=/home/your_name/anaconda3/lib/python3.6/site-packages/core/include -DINSTALL_PYTHON_EXAMPLES=ON -DBUILD_TIFF=ON -DBUILD_opencv_java=OFF -DWITH_CUDA=OFF -DWITH_OPENGL=ON -DWITH_OPENCL=ON -DWITH_IPP=ON -DWITH_TBB=ON -DWITH_EIGEN=ON -DWITH_V4L=ON -DWITH_VTK=OFF -DBUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF -DCMAKE_BUILD_TYPE=RELEASE .. 325 | ``` 326 | 327 | ## Docker Support (Experimental) 328 | 329 | Info: [http://www.hotblackrobotics.com/en/blog/2018/01/22/docker-images-arm/](http://www.hotblackrobotics.com/en/blog/2018/01/22/docker-images-arm/) 330 | 331 | Build docker image (laptop image): 332 | ``` 333 | docker build . -f docker/Dockerfile.cpu -t racing-robot-cpu 334 | ``` 335 | 336 | Build docker image (raspberry pi image): 337 | ``` 338 | docker build . -f docker/DockerfileBase.rpi3 -t racing-robot-rpi3-base 339 | ``` 340 | 341 | ``` 342 | docker build . -f docker/Dockerfile.rpi3 -t racing-robot-rpi3 343 | ``` 344 | 345 | 346 | ### Contributors 347 | - Sergio Nicolas Rodriguez Rodriguez 348 | - Antonin Raffin -------------------------------------------------------------------------------- /arduino/Makefile: -------------------------------------------------------------------------------- 1 | ### DISCLAIMER 2 | ### This is an example Makefile and it MUST be configured to suit your needs.
3 | ### For detailed explanations about all the available options, 4 | ### please refer to https://github.com/sudar/Arduino-Makefile/blob/master/arduino-mk-vars.md 5 | ### Original project where this Makefile comes from: https://github.com/WeAreLeka/Bare-Arduino-Project 6 | 7 | ### PROJECT_DIR 8 | ### This is the path to where you have created/cloned your project 9 | PROJECT_DIR = $(shell pwd) 10 | 11 | ### ARDMK_DIR 12 | ### Path to the Arduino-Makefile directory. 13 | ARDMK_DIR = /usr/share/arduino 14 | 15 | ### ARDUINO_DIR 16 | ### Path to the Arduino application and resources directory. 17 | 18 | ### or on Linux: (remove the one you don't want) 19 | ARDUINO_DIR = /usr/share/arduino 20 | 21 | ### USER_LIB_PATH 22 | ### Path to where your project's libraries are stored. 23 | USER_LIB_PATH := $(PROJECT_DIR)/lib 24 | 25 | ### BOARD_TAG 26 | ### It must be set to the board you are currently using. (e.g. uno, mega2560, etc.) 27 | BOARD_TAG = uno 28 | 29 | ### MONITOR_BAUDRATE 30 | ### It must be set to the serial baudrate value you are using. 31 | MONITOR_BAUDRATE = 115200 32 | 33 | ### AVR_TOOLS_DIR 34 | ### Path to the AVR tools directory such as avr-gcc, avr-g++, etc. 35 | ### On OS X with `homebrew`: 36 | AVR_TOOLS_DIR = /usr/local 37 | ### or on Linux: (remove the one you don't want) 38 | AVR_TOOLS_DIR = /usr 39 | 40 | ### AVRDUDE 41 | ### Path to the avrdude executable. 42 | ### On OS X with `homebrew`: 43 | AVRDUDE = /usr/local/bin/avrdude 44 | ### or on Linux: (remove the one you don't want) 45 | AVRDUDE = /usr/bin/avrdude 46 | 47 | ### CFLAGS_STD 48 | ### Set the C standard to be used during compilation. Documentation (https://github.com/WeAreLeka/Arduino-Makefile/blob/std-flags/arduino-mk-vars.md#cflags_std) 49 | CFLAGS_STD = -std=gnu11 50 | 51 | ### CXXFLAGS_STD 52 | ### Set the C++ standard to be used during compilation. Documentation (https://github.com/WeAreLeka/Arduino-Makefile/blob/std-flags/arduino-mk-vars.md#cxxflags_std) 53 | CXXFLAGS_STD = -std=gnu++11 54 | 55 | ### CXXFLAGS 56 | ### Flags you might want to set for debugging purposes. Comment to stop. 57 | CXXFLAGS += -pedantic -Wall -Wextra 58 | 59 | ### MONITOR_PORT 60 | ### The port your board is connected to. Using an '*' tries all the ports and finds the right one. 61 | MONITOR_PORT = /dev/ttyACM* 62 | 63 | ### CURRENT_DIR 64 | ### Do not touch - used for binaries path 65 | CURRENT_DIR = $(shell basename $(CURDIR)) 66 | 67 | ### OBJDIR 68 | ### This is where the binaries you compile using 'make' are put 69 | OBJDIR = $(PROJECT_DIR)/bin/$(BOARD_TAG)/$(CURRENT_DIR) 70 | 71 | ### path to Arduino.mk, inside the ARDMK_DIR, don't touch.
72 | include $(ARDMK_DIR)/Arduino.mk -------------------------------------------------------------------------------- /arduino/order.h: -------------------------------------------------------------------------------- 1 | // Code using Robust Arduino Serial Protocol: https://github.com/araffin/arduino-robust-serial 2 | 3 | #ifndef ORDER_H 4 | #define ORDER_H 5 | 6 | // Define the orders that can be sent and received 7 | enum Order { 8 | HELLO = 0, 9 | SERVO = 1, 10 | MOTOR = 2, 11 | ALREADY_CONNECTED = 3, 12 | ERROR = 4, 13 | RECEIVED = 5, 14 | STOP = 6, 15 | }; 16 | 17 | typedef enum Order Order; 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /arduino/parameters.h: -------------------------------------------------------------------------------- 1 | #ifndef PARAMETERS_H 2 | #define PARAMETERS_H 3 | 4 | #define SERIAL_BAUD 115200 5 | #define MOTOR_PIN 3 6 | #define DIRECTION_PIN 4 7 | #define SERVOMOTOR_PIN 6 8 | // Initial angle of the servomotor 9 | #define INITIAL_THETA 110 10 | // Min and max values for motors 11 | #define THETA_MIN 60 12 | #define THETA_MAX 150 13 | #define SPEED_MAX 100 14 | // If DEBUG is set to true, the arduino will send back all the received messages 15 | #define DEBUG false 16 | // Set speed and angle to zero if no messages were received 17 | // after 500ms 18 | #define TIMEOUT 500 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /arduino/slave.cpp: -------------------------------------------------------------------------------- 1 | // Code using Robust Arduino Serial Protocol: https://github.com/araffin/arduino-robust-serial 2 | #include <Arduino.h> 3 | #include <Servo.h> 4 | 5 | #include "order.h" 6 | #include "slave.h" 7 | #include "parameters.h" 8 | 9 | bool is_connected = false; ///< True if the connection with the master is available 10 | int8_t motor_speed = 0; 11 | int16_t servo_angle = INITIAL_THETA; 12 | Servo servomotor; 13 | unsigned long last_time_received; 14 | 15 | void setup() 16 | { 17 | // Init Serial 18 | Serial.begin(SERIAL_BAUD); 19 | 20 | // Init Motor 21 | pinMode(MOTOR_PIN, OUTPUT); 22 | pinMode(DIRECTION_PIN, OUTPUT); 23 | // Stop the car 24 | stop(); 25 | 26 | // Init Servo 27 | servomotor.attach(SERVOMOTOR_PIN); 28 | // Order between 0 and 180 29 | servomotor.write(INITIAL_THETA); 30 | 31 | // Wait until the arduino is connected to master 32 | while(!is_connected) 33 | { 34 | write_order(HELLO); 35 | wait_for_bytes(1, 1000); 36 | get_messages_from_serial(); 37 | } 38 | last_time_received = millis(); 39 | 40 | } 41 | 42 | void loop() 43 | { 44 | get_messages_from_serial(); 45 | // Check for timeout 46 | // stop if no messages were received for a while 47 | if(millis() - last_time_received > TIMEOUT){ 48 | motor_speed = 0; 49 | servo_angle = INITIAL_THETA; 50 | } 51 | 52 | update_motors_orders(); 53 | } 54 | 55 | void update_motors_orders() 56 | { 57 | servomotor.write(constrain(servo_angle, THETA_MIN, THETA_MAX)); 58 | motor_speed = constrain(motor_speed, -SPEED_MAX, SPEED_MAX); 59 | // Send motor speed order 60 | if (motor_speed > 0) 61 | { 62 | digitalWrite(DIRECTION_PIN, LOW); 63 | } 64 | else 65 | { 66 | digitalWrite(DIRECTION_PIN, HIGH); 67 | } 68 | analogWrite(MOTOR_PIN, convert_to_pwm(float(motor_speed))); 69 | } 70 | 71 | void stop() 72 | { 73 | analogWrite(MOTOR_PIN, 0); 74 | digitalWrite(DIRECTION_PIN, LOW); 75 | } 76 | 77 | int convert_to_pwm(float motor_speed) 78 | {
79 | // TODO: compensate the non-linear dependency speed = f(PWM_Value) 80 | return (int) round(abs(motor_speed)*(255./100.)); 81 | } 82 | 83 | void get_messages_from_serial() 84 | { 85 | if(Serial.available() > 0) 86 | { 87 | // The first byte received is the instruction 88 | Order order_received = read_order(); 89 | 90 | if(order_received == HELLO) 91 | { 92 | // If the card hasn't said hello yet, check the connection 93 | if(!is_connected) 94 | { 95 | is_connected = true; 96 | write_order(HELLO); 97 | } 98 | else 99 | { 100 | // If we are already connected do not send "hello" to avoid infinite loop 101 | write_order(ALREADY_CONNECTED); 102 | } 103 | } 104 | else if(order_received == ALREADY_CONNECTED) 105 | { 106 | is_connected = true; 107 | } 108 | else 109 | { 110 | switch(order_received) 111 | { 112 | case STOP: 113 | { 114 | motor_speed = 0; 115 | stop(); 116 | if(DEBUG) 117 | { 118 | write_order(STOP); 119 | } 120 | break; 121 | } 122 | case SERVO: 123 | { 124 | servo_angle = read_i16(); 125 | if(DEBUG) 126 | { 127 | write_order(SERVO); 128 | write_i16(servo_angle); 129 | } 130 | break; 131 | } 132 | case MOTOR: 133 | { 134 | // between -100 and 100 135 | motor_speed = read_i8(); 136 | if(DEBUG) 137 | { 138 | write_order(MOTOR); 139 | write_i8(motor_speed); 140 | } 141 | break; 142 | } 143 | // Unknown order 144 | default: 145 | write_order(ERROR); 146 | write_i16(404); 147 | return; 148 | } 149 | } 150 | write_order(RECEIVED); // Confirm the reception 151 | last_time_received = millis(); 152 | } 153 | } 154 | 155 | 156 | Order read_order() 157 | { 158 | return (Order) Serial.read(); 159 | } 160 | 161 | void wait_for_bytes(int num_bytes, unsigned long timeout) 162 | { 163 | unsigned long startTime = millis(); 164 | // Wait for incoming bytes or exit if timeout 165 | while ((Serial.available() < num_bytes) && (millis() - startTime < timeout)){} 166 | } 167 | 168 | // NOTE: Serial.readBytes is SLOW 169 | // this one is much faster, but has no timeout 170 | void read_signed_bytes(int8_t* buffer, size_t n) 171 | { 172 | size_t i = 0; 173 | int c; 174 | while (i < n) 175 | { 176 | c = Serial.read(); 177 | if (c < 0) break; 178 | *buffer++ = (int8_t) c; // buffer[i] = (int8_t)c; 179 | i++; 180 | } 181 | } 182 | 183 | int8_t read_i8() 184 | { 185 | wait_for_bytes(1, 100); // Wait for 1 byte with a timeout of 100 ms 186 | return (int8_t) Serial.read(); 187 | } 188 | 189 | int16_t read_i16() 190 | { 191 | int8_t buffer[2]; 192 | wait_for_bytes(2, 100); // Wait for 2 bytes with a timeout of 100 ms 193 | read_signed_bytes(buffer, 2); 194 | return (((int16_t) buffer[0]) & 0xff) | (((int16_t) buffer[1]) << 8 & 0xff00); 195 | } 196 | 197 | int32_t read_i32() 198 | { 199 | int8_t buffer[4]; 200 | wait_for_bytes(4, 200); // Wait for 4 bytes with a timeout of 200 ms 201 | read_signed_bytes(buffer, 4); 202 | return (((int32_t) buffer[0]) & 0xff) | (((int32_t) buffer[1]) << 8 & 0xff00) | (((int32_t) buffer[2]) << 16 & 0xff0000) | (((int32_t) buffer[3]) << 24 & 0xff000000); 203 | } 204 | 205 | void write_order(enum Order myOrder) 206 | { 207 | uint8_t* Order = (uint8_t*) &myOrder; 208 | Serial.write(Order, sizeof(uint8_t)); 209 | } 210 | 211 | void write_i8(int8_t num) 212 | { 213 | Serial.write(num); 214 | } 215 | 216 | void write_i16(int16_t num) 217 | { 218 | int8_t buffer[2] = {(int8_t) (num & 0xff), (int8_t) (num >> 8)}; 219 | Serial.write((uint8_t*)&buffer, 2*sizeof(int8_t)); 220 | } 221 | 222 | void write_i32(int32_t num) 223 | { 224 | int8_t buffer[4] = {(int8_t) (num & 0xff), (int8_t) (num >> 8 & 0xff), (int8_t) (num >> 16 & 0xff), (int8_t) (num >> 24 & 0xff)}; 225 | Serial.write((uint8_t*)&buffer, 4*sizeof(int8_t)); 226 | } 227 |
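For reference, the framing decoded by `read_order()`, `read_i8()` and `read_i16()` above is plain little-endian bytes: one order byte, optionally followed by a payload. A hedged Python sketch of the master side using pyserial (the project actually drives this through the `robust_serial` package, as in `main.py`; the port name is an example and the handshake is reduced to a single HELLO byte):

```
# Minimal sketch of the master side of the protocol implemented above
import struct
import serial  # pyserial

HELLO, SERVO, MOTOR = 0, 1, 2  # values from arduino/order.h

with serial.Serial('/dev/ttyACM0', baudrate=115200, timeout=1) as ser:
    ser.write(struct.pack('<B', HELLO))  # answer the HELLO sent by setup()
    # SERVO is one order byte followed by a little-endian int16 (cf. read_i16)
    ser.write(struct.pack('<Bh', SERVO, 110))  # angle, constrained to [THETA_MIN, THETA_MAX]
    # MOTOR is one order byte followed by a signed int8 (cf. read_i8)
    ser.write(struct.pack('<Bb', MOTOR, 20))  # speed in [-100, 100]
    # The Arduino's replies (HELLO/RECEIVED bytes) are left unread in this sketch
```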
-------------------------------------------------------------------------------- /arduino/slave.h: -------------------------------------------------------------------------------- 1 | // Code using Robust Arduino Serial Protocol: https://github.com/araffin/arduino-robust-serial 2 | 3 | #ifndef ARDUINO_SLAVE_H 4 | #define ARDUINO_SLAVE_H 5 | 6 | /*! 7 | * \brief Send updated motors orders to the two motors (servomotor + motor) 8 | */ 9 | void update_motors_orders(); 10 | 11 | /*! 12 | * Stop the car (set the speed to 0) 13 | */ 14 | void stop(); 15 | 16 | /*! 17 | * \brief Convert a speed order (in percentage of max speed) 18 | * into a pwm order (between 0 and 255) 19 | * \param motor_speed speed order in percentage of max speed 20 | * \return the speed order in pwm 21 | */ 22 | int convert_to_pwm(float motor_speed); 23 | 24 | /*! 25 | * \brief Read one byte from the serial and cast it to an Order 26 | * \return the order received 27 | */ 28 | Order read_order(); 29 | 30 | /*! 31 | * \brief Wait until there are enough bytes in the buffer 32 | * \param num_bytes the number of bytes 33 | * \param timeout (ms) The timeout, time after which we release the lock 34 | * even if there are not enough bytes 35 | */ 36 | void wait_for_bytes(int num_bytes, unsigned long timeout); 37 | 38 | /*! 39 | * \brief Read signed bytes and put them in a buffer 40 | * \param buffer an array of bytes 41 | * \param n number of bytes to read 42 | */ 43 | void read_signed_bytes(int8_t* buffer, size_t n); 44 | 45 | /*! 46 | * \brief Read one byte from a serial port and convert it to an 8-bit int 47 | * \return the decoded number 48 | */ 49 | int8_t read_i8(); 50 | 51 | /*! 52 | * \brief Read two bytes from a serial port and convert them to a 16-bit int 53 | * \return the decoded number 54 | */ 55 | int16_t read_i16(); 56 | 57 | 58 | /*! 59 | * \brief Read four bytes from a serial port and convert them to a 32-bit int 60 | * \return the decoded number 61 | */ 62 | int32_t read_i32(); 63 | 64 | /*! 65 | * \brief Send one order (one byte) 66 | * \param order type of order 67 | */ 68 | void write_order(enum Order order); 69 | 70 | /*! 71 | * \brief Write a one-byte int to the serial port (between -128 and 127) 72 | * \param num an int of one byte 73 | */ 74 | void write_i8(int8_t num); 75 | 76 | /*! 77 | * \brief Send a two bytes signed int via the serial 78 | * \param num the number to send (max: (2**16/2 -1) = 32767) 79 | */ 80 | void write_i16(int16_t num); 81 | 82 | /*! 83 | * \brief Send a four bytes signed int (long) via the serial 84 | * \param num the number to send (range: [-2,147,483,648, 2,147,483,647]) 85 | */ 86 | void write_i32(int32_t num); 87 | 88 | /*! 89 | * \brief Listen to the serial port and decode the received message 90 | */ 91 | void get_messages_from_serial(); 92 | 93 | #endif 94 |
-------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: 2 | behavior: default -------------------------------------------------------------------------------- /constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | File containing all the constants used in the different files 3 | """ 4 | from __future__ import print_function, division, absolute_import 5 | 6 | import numpy as np 7 | 8 | # Camera Constants 9 | CAMERA_RESOLUTION = (640 // 2, 480 // 2) 10 | # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes 11 | # mode 4, larger FoV but FPS <= 40 12 | CAMERA_MODE = 4 13 | # Regions of interest 14 | MAX_WIDTH = CAMERA_RESOLUTION[0] 15 | MAX_HEIGHT = CAMERA_RESOLUTION[1] 16 | # r = [margin_left, margin_top, width, height] 17 | ROI = [0, 75, MAX_WIDTH, MAX_HEIGHT - 75] 18 | 19 | # Image Analyser 20 | RECORD_VIDEO = True # Record video to debug folder 21 | 22 | # Training 23 | FACTOR = 4 # Resize factor 24 | INPUT_WIDTH = ROI[2] // FACTOR 25 | INPUT_HEIGHT = ROI[3] // FACTOR 26 | N_CHANNELS = 3 27 | SPLIT_SEED = 42 # For train/val/test split 28 | MODEL_TYPE = "custom" # Network architecture {cnn or custom} 29 | WEIGHTS_PTH = MODEL_TYPE + "_model.pth" # Path to the trained model 30 | NUM_OUTPUT = 6 # Predict 3 points -> 6 outputs 31 | 32 | # Direction and speed 33 | THETA_MIN = 70 # value in [0, 255] sent to the servo 34 | THETA_MAX = 150 35 | MAX_SPEED_STRAIGHT_LINE = 30 # order between 0 and 100 36 | MAX_SPEED_SHARP_TURN = 25 37 | MIN_SPEED = 25 38 | 39 | # Path Planning 40 | TARGET_POINT = 0.5 # between [0, 1] position on the bezier curve 41 | 42 | # Stanley Steering Control params 43 | # Control gain (compromise between smooth control 44 | # and precise track following) 45 | K_STANLEY_CONTROL = 1 46 | CAR_LENGTH = 300 # Wheel base of vehicle (in the warped pixel space) 47 | MAX_STEERING_ANGLE = np.radians(30.0) # [rad] 48 | # Radius of curvature 49 | MIN_RADIUS = 5 # Below this value, we consider the car to be in a sharp turn 50 | MAX_RADIUS = 40 # Above this value, we consider the car to be on a straight line 51 | 52 | # PID Control 53 | Kp_turn = 65 54 | Kp_line = 65 55 | Kd = 4 56 | Ki = 0 57 | ALPHA = 1 # alpha of the moving mean for the turn coefficient 58 | # Main Program 59 | FPS = 40 60 | N_SECONDS = 3000 # number of seconds before exiting the program 61 | BAUDRATE = 115200 # Communication with the Arduino 62 | # Number of messages we can send to the Arduino without receiving an acknowledgment 63 | N_MESSAGES_ALLOWED = 3 64 | COMMAND_QUEUE_SIZE = 2 65 | 66 | # Image Processing 67 | # Straight line angle 68 | REF_ANGLE = - np.pi / 2 69 | # Max turn angle 70 | MAX_ANGLE = np.pi / 4 # 2 * np.pi / 3 71 | 72 | # Teleoperation 73 | TELEOP_PORT = "5556" 74 | RASPBERRY_IP = "192.168.12.252" 75 | 76 | 77 | # Arrow keys 78 | UP_KEY = 82 79 | DOWN_KEY = 84 80 | RIGHT_KEY = 83 81 | LEFT_KEY = 81 82 | ENTER_KEY = 10 83 | SPACE_KEY = 32 84 | EXIT_KEYS = [113, 27] # Escape and q 85 | S_KEY = 115 # S key -------------------------------------------------------------------------------- /custom_model.pth: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/custom_model.pth -------------------------------------------------------------------------------- /datasets/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/.gitkeep -------------------------------------------------------------------------------- /datasets/test_dataset/160.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/160.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/161.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/161.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/162.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/162.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/163.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/163.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/164.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/164.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/165.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/165.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/166.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/166.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/167.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/167.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/168.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/168.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/169.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/169.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/170.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/170.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/171.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/171.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/172.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/172.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/173.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/173.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/174.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/174.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/175.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/175.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/176.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/176.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/177.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/177.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/178.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/178.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/179.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/179.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/180.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/datasets/test_dataset/180.jpg -------------------------------------------------------------------------------- /datasets/test_dataset/labels.json: -------------------------------------------------------------------------------- 1 | {"172.jpg": [[260, 237], [196, 148], [161, 100]], "167.jpg": [[265, 223], [202, 145], [161, 99]], "180.jpg": [[228, 232], [172, 132], [143, 91]], "178.jpg": [[253, 234], [185, 144], [152, 100]], "171.jpg": [[263, 229], [199, 139], [161, 92]], "177.jpg": [[251, 236], [178, 137], [150, 100]], "165.jpg": [[266, 235], [192, 132], [163, 94]], "162.jpg": [[315, 229], [191, 126], [163, 96]], "179.jpg": [[249, 234], [181, 135], [149, 91]], "160.jpg": [[317, 223], [204, 128], [168, 90]], "175.jpg": [[252, 238], [181, 140], [155, 100]], "170.jpg": [[265, 232], [191, 144], [164, 112]], "169.jpg": [[248, 229], [192, 139], [171, 109]], "164.jpg": [[266, 235], [196, 136], [163, 96]], "166.jpg": [[266, 235], [192, 132], [163, 94]], "168.jpg": [[269, 224], [197, 130], [165, 96]], "163.jpg": [[308, 232], [196, 136], [163, 96]], "174.jpg": [[256, 231], [193, 143], [156, 95]], "173.jpg": [[259, 235], [196, 149], [157, 101]], "176.jpg": [[245, 233], [179, 135], [150, 96]], "161.jpg": [[310, 221], [201, 132], [170, 94]]} -------------------------------------------------------------------------------- /debug/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/debug/.gitkeep -------------------------------------------------------------------------------- /docker/Dockerfile.cpu: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # $ docker build . -t continuumio/miniconda3:latest -t continuumio/miniconda3:4.5.4 4 | # $ docker run --rm -it continuumio/miniconda3:latest /bin/bash 5 | # $ docker push continuumio/miniconda3:latest 6 | # $ docker push continuumio/miniconda3:4.5.4 7 | 8 | ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 9 | ENV PATH /opt/conda/bin:$PATH 10 | 11 | RUN apt-get update --fix-missing && \ 12 | apt-get install -y wget bzip2 ca-certificates curl git nano htop 13 | 14 | # OpenCV dependencies (libsm, libglib, ...) 15 | RUN apt-get -y install cmake build-essential arduino-mk zlib1g-dev \ 16 | libsm6 libxext6 libfontconfig1 libxrender1 libglib2.0-0 17 | 18 | RUN apt-get clean && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh -O ~/miniconda.sh && \ 22 | /bin/bash ~/miniconda.sh -b -p /opt/conda && \ 23 | rm ~/miniconda.sh && \ 24 | /opt/conda/bin/conda clean -tipsy && \ 25 | ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \ 26 | echo ". 
/opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc 27 | 28 | COPY requirements.txt /tmp/ 29 | RUN conda install -y pytorch-cpu torchvision-cpu -c pytorch 30 | RUN pip install opencv-python 31 | RUN pip install cython 32 | RUN pip install -r /tmp/requirements.txt 33 | RUN conda clean -ya 34 | 35 | 36 | ENV TINI_VERSION v0.16.1 37 | ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /usr/bin/tini 38 | RUN chmod +x /usr/bin/tini 39 | 40 | ENTRYPOINT [ "/usr/bin/tini", "--" ] 41 | CMD [ "/bin/bash" ] 42 | -------------------------------------------------------------------------------- /docker/Dockerfile.rpi3: -------------------------------------------------------------------------------- 1 | FROM armv7/armhf-ubuntu:16.04 2 | 3 | COPY docker/qemu-arm-static /usr/bin/qemu-arm-static 4 | 5 | RUN locale-gen en_US.UTF-8 6 | ENV LANG en_US.UTF-8 7 | ENV VENV /root/venv 8 | 9 | RUN apt-get -y update && \ 10 | apt-get -y install git wget htop \ 11 | nano python-dev python3-dev python-pip \ 12 | pkg-config apt-utils 13 | 14 | # OpenCV + PyGame dependencies 15 | RUN apt-get -y install cmake build-essential arduino-mk zlib1g-dev \ 16 | libsm6 libxext6 libfontconfig1 libxrender1 libglib2.0-0 \ 17 | libpng12-dev libfreetype6-dev \ 18 | libtiff5-dev libjasper-dev libpng12-dev \ 19 | libjpeg-dev libavcodec-dev libavformat-dev \ 20 | libswscale-dev libv4l-dev libgtk2.0-dev \ 21 | libatlas-base-dev gfortran \ 22 | libsdl-image1.2-dev libsdl-mixer1.2-dev libsdl-ttf2.0-dev libsmpeg-dev \ 23 | libsdl1.2-dev libportmidi-dev libswscale-dev libavformat-dev libavcodec-dev libfreetype6-dev \ 24 | libzmq3-dev libopenblas-dev libeigen3-dev libffi-dev 25 | 26 | RUN pip install virtualenv && \ 27 | virtualenv $VENV --python=python3 && \ 28 | . $VENV/bin/activate && \ 29 | pip install --upgrade pip && \ 30 | pip install enum34==1.1.6 && \ 31 | pip install numpy==1.14.4 && \ 32 | pip install pytest==3.4.2 && \ 33 | pip install pytest-cov==2.5.1 && \ 34 | pip install pyserial==3.4 && \ 35 | pip install pyzmq==16.0.2 && \ 36 | pip install robust-serial==0.1 && \ 37 | pip install six==1.11.0 && \ 38 | pip install tqdm==4.19.5 && \ 39 | pip install ipython && \ 40 | pip install matplotlib && \ 41 | pip install pyyaml setuptools cffi typing && \ 42 | pip install scipy==0.19.1 && \ 43 | pip install scikit-learn==0.19.0 && \ 44 | pip install pygame==1.9.3 45 | 46 | ENV PATH=$VENV/bin:$PATH 47 | 48 | RUN apt-get -y install unzip 49 | 50 | # Compile OpenCV from source 51 | RUN wget https://github.com/opencv/opencv/archive/3.4.3.zip && \ 52 | unzip 3.4.3.zip && \ 53 | rm 3.4.3.zip 54 | 55 | ENV OPENCV_DIR opencv-3.4.3 56 | 57 | RUN . $VENV/bin/activate && \ 58 | cd $OPENCV_DIR && \ 59 | mkdir build && \ 60 | cd build && \ 61 | cmake -D CMAKE_BUILD_TYPE=RELEASE \ 62 | -D CMAKE_INSTALL_PREFIX=/usr/local \ 63 | -D INSTALL_PYTHON_EXAMPLES=ON \ 64 | -D BUILD_EXAMPLES=ON \ 65 | -D WITH_CUDA=OFF \ 66 | -D BUILD_TESTS=OFF -DBUILD_PERF_TESTS=OFF \ 67 | -D BUILD_opencv_java=OFF \ 68 | -D WITH_EIGEN=ON .. 69 | 70 | RUN . $VENV/bin/activate && \ 71 | cd $OPENCV_DIR/build/ && \ 72 | make -j8 && \ 73 | make install && \ 74 | ldconfig 75 | 76 | RUN cp /usr/local/lib/python3.5/site-packages/cv2.cpython-35m-arm-linux-gnueabihf.so \ 77 | $VENV/lib/python3.5/site-packages/cv2.so 78 | 79 | # Compile PyTorch From Source 80 | RUN git clone --recursive https://github.com/pytorch/pytorch 81 | 82 | RUN . 
$VENV/bin/activate && \ 83 | cd pytorch && \ 84 | MAX_JOBS=8 NO_DISTRIBUTED=1 NO_CAFFE2_OPS=1 NO_CUDA=1 python setup.py install 85 | 86 | # Clean up 87 | RUN apt-get clean && \ 88 | rm -rf /var/lib/apt/lists/* && \ 89 | rm -rf pytorch && \ 90 | rm -rf $OPENCV_DIR 91 | 92 | COPY docker/entrypoint.sh / 93 | RUN chmod +x /entrypoint.sh 94 | 95 | ENTRYPOINT ["/entrypoint.sh"] 96 | CMD ["/bin/bash"] 97 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # setup environment 5 | # source "" 6 | exec "$@" 7 | -------------------------------------------------------------------------------- /docker/qemu-arm-static: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/docker/qemu-arm-static -------------------------------------------------------------------------------- /image_processing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/image_processing/__init__.py -------------------------------------------------------------------------------- /image_processing/image_processing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main script for processing an image: 3 | it preprocesses the image, detects the line, estimates the line curve 4 | and does the path planning 5 | """ 6 | from __future__ import print_function, with_statement, division, absolute_import 7 | 8 | import argparse 9 | 10 | import cv2 11 | import numpy as np 12 | 13 | from constants import REF_ANGLE, MAX_ANGLE, EXIT_KEYS, WEIGHTS_PTH, NUM_OUTPUT, MODEL_TYPE, TARGET_POINT 14 | from train import loadNetwork, predict 15 | from path_planning.bezier_curve import computeControlPoints, bezier 16 | 17 | # Load trained model 18 | model = loadNetwork(WEIGHTS_PTH, NUM_OUTPUT, MODEL_TYPE) 19 | 20 | 21 | def processImage(image, debug=False): 22 | """ 23 | :param image: (bgr image) 24 | :param debug: (bool) 25 | :return: (float, float) 26 | """ 27 | x, y = predict(model, image) 28 | if debug: 29 | return x, y 30 | 31 | # Compute bezier path and target point 32 | control_points = computeControlPoints(x, y, add_current_pos=True) 33 | target = bezier(TARGET_POINT, control_points) 34 | 35 | # Linear Regression to fit a line 36 | # It estimates the line curve 37 | 38 | # Case x = cst, m = 0 39 | if len(np.unique(x)) == 1: # pragma: no cover 40 | turn_percent = 0 41 | else: 42 | # Linear regression using least squares method 43 | # x = m*y + b -> y = 1/m * x - b/m if m != 0 44 | A = np.vstack([y, np.ones(len(y))]).T 45 | m, b = np.linalg.lstsq(A, x, rcond=-1)[0] 46 | 47 | # Compute the angle between the reference and the fitted line 48 | track_angle = np.arctan(1 / m) 49 | diff_angle = abs(REF_ANGLE) - abs(track_angle) 50 | # Estimation of the line curvature 51 | turn_percent = (diff_angle / MAX_ANGLE) * 100 52 | return turn_percent, target[0] 53 |
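A minimal usage sketch of `processImage` on one frame of the test dataset (it assumes the pretrained `custom_model.pth` from the repository root, which is loaded at import time; the image path is an example):

```
# Run the line detector + path planning on a single image
import cv2

from image_processing.image_processing import processImage

image = cv2.imread('datasets/test_dataset/160.jpg')
assert image is not None, "Could not read image"
turn_percent, x_target = processImage(image)
print("Turn: {:.1f}% - target x: {:.1f}".format(turn_percent, x_target))
```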
-------------------------------------------------------------------------------- /image_processing/picamera_threads.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function, absolute_import 2 | 3 | import threading 4 | import time 5 | 6 | try: 7 | import queue 8 | except ImportError: 9 | import Queue as queue 10 | 11 | import picamera.array 12 | import cv2 13 | 14 | from image_processing.image_processing import processImage 15 | from constants import CAMERA_RESOLUTION, RECORD_VIDEO, CAMERA_MODE 16 | 17 | emptyException = queue.Empty 18 | fullException = queue.Full 19 | 20 | experiment_time = int(time.time()) 21 | 22 | 23 | class ImageProcessingThread(threading.Thread): 24 | """ 25 | Thread used to retrieve image and do the image processing 26 | :param viewer: (Viewer object) 27 | :param exit_condition: (Condition object) 28 | """ 29 | 30 | def __init__(self, viewer, exit_condition): 31 | super(ImageProcessingThread, self).__init__() 32 | self.daemon = True 33 | self.v = viewer 34 | self.exit_condition = exit_condition 35 | 36 | def run(self): 37 | v = self.v 38 | start_time = time.time() 39 | v.start() 40 | 41 | # Wait until the thread is notified to exit 42 | with self.exit_condition: 43 | self.exit_condition.wait() 44 | v.stop() 45 | 46 | print('FPS: {:.2f}'.format(v.analyser.frame_num / (time.time() - start_time))) 47 | 48 | 49 | class RGBAnalyser(picamera.array.PiRGBAnalysis): 50 | """ 51 | Class used to retrieve an image from the picamera 52 | and process it 53 | :param camera: (PiCamera object) 54 | :param out_queue: (Queue) queue used for output of image processing 55 | :param debug: (bool) set to true, queue will be filled with raw images 56 | """ 57 | 58 | def __init__(self, camera, out_queue, debug=False): 59 | super(RGBAnalyser, self).__init__(camera) 60 | self.frame_num = 0 61 | self.frame_queue = queue.Queue(maxsize=1) 62 | self.exit = False 63 | self.out_queue = out_queue 64 | self.debug = debug 65 | self.thread = None 66 | self.start() 67 | 68 | def analyse(self, frame): 69 | """ 70 | Override base function 71 | :param frame: BGR image 72 | """ 73 | self.frame_queue.put(item=frame, block=True) 74 | 75 | def extractInfo(self): 76 | # times = [] 77 | try: 78 | while not self.exit: 79 | try: 80 | frame = self.frame_queue.get(block=True, timeout=1) 81 | except queue.Empty: 82 | print("Frame queue empty") 83 | continue 84 | # 1 ms per loop 85 | # TODO: check that this conversion is not needed 86 | # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) 87 | if self.debug: 88 | self.out_queue.put(item=frame, block=False) 89 | else: 90 | try: 91 | # start_time = time.time() 92 | turn_percent, centroids = processImage(frame) 93 | # times.append(time.time() - start_time) 94 | self.out_queue.put(item=(turn_percent, centroids), block=False) 95 | except Exception as e: 96 | print("Exception in RGBAnalyser processing image: {}".format(e)) 97 | self.frame_num += 1 98 | except Exception as e: 99 | print("Exception in RGBAnalyser after loop: {}".format(e)) 100 | # s_per_loop_image = np.mean(times) 101 | # print("Image processing: {:.2f}ms per loop | {} fps".format(s_per_loop_image * 1000, int(1 / s_per_loop_image))) 102 | 103 | def start(self): 104 | t = threading.Thread(target=self.extractInfo) 105 | self.thread = t 106 | t.daemon = True 107 | t.start() 108 | 109 | def stop(self): 110 | self.exit = True 111 | self.thread.join() 112 | self.frame_queue.queue.clear() 113 | 114 | 115 | class Viewer(object): 116 | """ 117 | Class that initializes the camera and starts the PiCamera thread 118 | :param out_queue: (Queue) 119 | :param resolution: (int, int) 120 | :param debug: (bool) 121 | :param fps: (int) 122 | """ 123 | 124 | def __init__(self, out_queue, resolution, debug=False, fps=90): 125 | self.camera = picamera.PiCamera(resolution=CAMERA_RESOLUTION, sensor_mode=CAMERA_MODE, 126 | framerate=fps) 127 | self.out_queue = out_queue 128 | # self.camera.zoom = (0.0, 0.0, 1.0, 1.0) 129 | # self.camera.awb_gains = 1.5 130 | self.camera.awb_mode = 'auto' 131 | self.camera.exposure_mode = 'auto' 132 | self.debug = debug 133 | self.analyser = None 134 | print(self.camera.resolution) 135 | 136 | def start(self): 137 | self.analyser = RGBAnalyser(self.camera, self.out_queue, debug=self.debug) 138 | self.camera.start_recording(self.analyser, format='bgr') 139 | if RECORD_VIDEO: 140 | self.camera.start_recording('debug/{}.h264'.format(experiment_time), 141 | splitter_port=2, resize=CAMERA_RESOLUTION) 142 | 143 | def stop(self): 144 | self.camera.wait_recording() 145 | if RECORD_VIDEO: 146 | self.camera.stop_recording(splitter_port=2) 147 | self.camera.stop_recording() 148 | self.analyser.stop() 149 |
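For reference, a sketch of how these classes are wired together, following the docstrings above and the usage in `main.py` (Raspberry Pi only, since it needs the `picamera` package; the queue size and timeout are examples):

```
# Start the camera + image processing threads and read one result
import threading

try:
    import queue
except ImportError:  # Python 2
    import Queue as queue

from constants import CAMERA_RESOLUTION, FPS
from image_processing.picamera_threads import ImageProcessingThread, Viewer

out_queue = queue.Queue(maxsize=2)
exit_condition = threading.Condition()

viewer = Viewer(out_queue, CAMERA_RESOLUTION, debug=False, fps=FPS)
image_thread = ImageProcessingThread(viewer, exit_condition)
image_thread.start()

# Each item is the (turn_percent, x_target) tuple put by RGBAnalyser
turn_percent, x_target = out_queue.get(block=True, timeout=2)

# Notify the thread so it stops the camera and exits
with exit_condition:
    exit_condition.notify_all()
image_thread.join()
```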
-------------------------------------------------------------------------------- /image_processing/warp_image.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import cv2 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | from constants import MAX_WIDTH, MAX_HEIGHT 8 | 9 | # Transform Parameters 10 | height, width = 1200, 800 11 | # Original and transformed keypoints 12 | pts1 = np.float32( 13 | [[103, 93], 14 | [222, 95], 15 | [6, 130], 16 | [310, 128]]) 17 | 18 | pts2 = np.float32( 19 | [[0, 0], 20 | [width, 0], 21 | [0, height], 22 | [width, height]]) 23 | 24 | # Translation Matrix 25 | tx, ty = 0, 0 26 | translation_matrix = np.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) 27 | 28 | new_height, new_width = int(height * 1) + ty + 600, int(width * 1.2) + tx 29 | 30 | # calculate the perspective transform matrix 31 | transform_matrix = cv2.getPerspectiveTransform(pts1, pts2) 32 | complete_transform = np.dot(translation_matrix, transform_matrix) 33 | 34 | def imshow(im, name=""): # pragma: no cover 35 | plt.figure(name) 36 | # BGR to RGB 37 | plt.imshow(im[:, :, ::-1]) 38 | plt.grid(True) 39 | 40 | 41 | def showTransform(image): # pragma: no cover 42 | im = image.copy() 43 | for (cx, cy) in pts1: 44 | cv2.circle(im, (int(cx), int(cy)), 8, (0, 255, 0), -1) 45 | imshow(im, name="transform") 46 | 47 | 48 | def transformPoints(x, y): 49 | points = [] 50 | for i in range(len(x)): 51 | point = transformPoint(np.array([x[i], y[i], 1])) 52 | scale = point[2] 53 | point = point / scale 54 | points.append(point[:2]) 55 | return np.array(points) 56 | 57 | 58 | def transformPoint(point): 59 | """ 60 | :param point: (numpy array) 61 | :return: (numpy array) 62 | """ 63 | return np.matmul(complete_transform, point) 64 | 65 | 66 | def warpImage(image): 67 | # TODO: transform only points 68 | return cv2.warpPerspective(image, complete_transform, (new_width, new_height)) 69 | 70 | if __name__ == '__main__': # pragma: no cover 71 | parser = argparse.ArgumentParser(description='Transform image to have a top down view') 72 | parser.add_argument('-i', '--input_image', help='Input image', type=str, required=True) 73 | parser.add_argument('--no-display', action='store_true', default=False, help='Do not display plots (for tests)') 74 | args = parser.parse_args() 75 | 76 | image = cv2.imread(args.input_image) 77 | assert image is not None, "Could not read image" 78 | original_image = image.copy() 79 | warp = cv2.warpPerspective(original_image, np.dot(translation_matrix, transform_matrix), (new_width, new_height)) 80 | if not args.no_display: 81 | imshow(image, name="original") 82 | showTransform(image) 83 | imshow(warp, name="warped") 84 | plt.show() 85 |
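A short usage sketch of the functions above: project detected line points into the top-down (warped) pixel space, the space in which `CAR_LENGTH` in `constants.py` is expressed. The example coordinates are the labeled points of 172.jpg from `datasets/test_dataset/labels.json`:

```
# Map image points to the top-down space used for path planning
import numpy as np

from image_processing.warp_image import transformPoints

# x and y coordinates of the 3 labeled points of 172.jpg
x = np.array([260., 196., 161.])
y = np.array([237., 148., 100.])

warped_points = transformPoints(x, y)
print(warped_points.shape)  # (3, 2), in warped pixel coordinates
```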
83 | imshow(warp, name="warped") 84 | plt.show() 85 | -------------------------------------------------------------------------------- /logs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/logs/.gitkeep -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Main script for autonomous mode 3 | It launches all the thread and does the PID control 4 | use export OMP_NUM_THREADS=2 to improve performances 5 | """ 6 | from __future__ import division, print_function 7 | 8 | import logging 9 | import signal 10 | import threading 11 | import time 12 | from datetime import datetime 13 | 14 | # Python 2/3 support 15 | try: 16 | import queue 17 | except ImportError: 18 | import Queue as queue 19 | 20 | import numpy as np 21 | from tqdm import tqdm 22 | from robust_serial import write_order, Order 23 | from robust_serial.threads import CommandThread, ListenerThread 24 | from robust_serial.utils import open_serial_port, CustomQueue 25 | 26 | from image_processing.picamera_threads import ImageProcessingThread, Viewer 27 | from constants import THETA_MIN, THETA_MAX, MAX_SPEED_SHARP_TURN, MAX_SPEED_STRAIGHT_LINE, \ 28 | MIN_SPEED, Kp_turn, Kp_line, Kd, Ki, FPS, N_SECONDS, ALPHA, CAMERA_RESOLUTION, \ 29 | BAUDRATE, N_MESSAGES_ALLOWED, COMMAND_QUEUE_SIZE 30 | 31 | emptyException = queue.Empty 32 | fullException = queue.Full 33 | 34 | # Logging 35 | log = logging.getLogger('racing_robot') 36 | log.setLevel(logging.DEBUG) 37 | 38 | # Formatter for logger 39 | formatter = logging.Formatter('%(asctime)s -- %(levelname)s - %(message)s') 40 | 41 | # Create file handler which logs even debug messages 42 | fh = logging.FileHandler('logs/{}.log'.format(datetime.now().strftime("%y-%m-%d_%Hh%M_%S"))) 43 | fh.setLevel(logging.INFO) 44 | fh.setFormatter(formatter) 45 | log.addHandler(fh) 46 | 47 | # Create console handler 48 | ch = logging.StreamHandler() 49 | ch.setLevel(logging.INFO) 50 | ch.setFormatter(formatter) 51 | log.addHandler(ch) 52 | 53 | 54 | def forceStop(command_queue, n_received_semaphore): 55 | """ 56 | Stop The car 57 | :param command_queue: (CustomQueue) Queue for sending orders to the Arduino 58 | :param n_received_semaphore: (threading.Semaphore) Semaphore to regulate orders sent to the Arduino 59 | """ 60 | command_queue.clear() 61 | n_received_semaphore.release() 62 | n_received_semaphore.release() 63 | command_queue.put((Order.MOTOR, 0)) 64 | command_queue.put((Order.SERVO, int((THETA_MIN + THETA_MAX) / 2))) 65 | 66 | 67 | def mainControl(command_queue, n_received_semaphore, out_queue, resolution, n_seconds=5): 68 | """ 69 | :param command_queue: (CustomQueue) Queue for sending orders to the Arduino 70 | :param n_received_semaphore: (threading.Semaphore) Semaphore to regulate orders sent to the Arduino 71 | :param out_queue: (Queue) Output of image processing 72 | :param resolution: (int, int) Camera Resolution 73 | :param n_seconds: (int) number of seconds to keep this script alive 74 | """ 75 | # Moving mean for line curve estimation 76 | mean_h = 0 77 | start_time = time.time() 78 | error, error_d, error_i = 0, 0, 0 79 | last_error = 0 80 | # Neutral Angle 81 | theta_init = (THETA_MAX + THETA_MIN) / 2 82 | # Middle of the image 83 | x_center = resolution[0] // 2 84 | max_error_px = resolution[0] // 2 # max error in pixel 85 | # Use 
mutable to be modified by signal handler 86 | should_exit = [False] 87 | 88 | # Stop the robot on ctrl+c and exit the script 89 | def ctrl_c(signum, frame): 90 | log.info("STOP") 91 | should_exit[0] = True 92 | 93 | signal.signal(signal.SIGINT, ctrl_c) 94 | last_time = time.time() 95 | last_time_update = time.time() 96 | pbar = tqdm(total=n_seconds) 97 | # Number of time the command queue was full 98 | n_full = 0 99 | n_total = 0 100 | 101 | log.debug("Entering in the control loop") 102 | 103 | while time.time() - start_time < n_seconds and not should_exit[0]: 104 | # Display progress bar 105 | if time.time() - last_time_update > 1: 106 | pbar.update(int(time.time() - last_time_update)) 107 | last_time_update = time.time() 108 | 109 | # Output of image processing 110 | turn_percent, x_target = out_queue.get() 111 | 112 | # Compute the error to the center of the line 113 | # We want the line to be in the middle of the image 114 | error = (x_center - x_target) / max_error_px 115 | 116 | # Represent line curve as a number in [0, 1] 117 | # h = 0 -> straight line 118 | # h = 1 -> sharp turn 119 | h = np.clip(turn_percent / 100.0, 0, 1) 120 | # Update moving mean 121 | mean_h += ALPHA * (h - mean_h) 122 | 123 | # We are using the moving mean (which is less noisy) 124 | # for line curve estimation 125 | h = mean_h 126 | # Reduce max speed if it is a sharp turn 127 | v_max = h * MAX_SPEED_SHARP_TURN + (1 - h) * MAX_SPEED_STRAIGHT_LINE 128 | # Different Kp depending on the line curve 129 | Kp = h * Kp_turn + (1 - h) * Kp_line 130 | 131 | # Represent error as a number in [0, 1] 132 | # t = 0 -> no error, we are perfectly on the line 133 | # t = 1 -> maximal error 134 | t = np.clip(error, 0, 1) 135 | # Reduce speed if we have a high error 136 | speed_order = t * MIN_SPEED + (1 - t) * v_max 137 | 138 | error_d = error - last_error 139 | # Update derivative error 140 | last_error = error 141 | 142 | # PID Control 143 | dt = time.time() - last_time 144 | u_angle = Kp * error + Kd * (error_d / dt) + Ki * (error_i * dt) 145 | # Update integral error 146 | error_i += error 147 | last_time = time.time() 148 | 149 | angle_order = theta_init - u_angle 150 | angle_order = np.clip(angle_order, THETA_MIN, THETA_MAX).astype(int) 151 | 152 | # Send orders to Arduino 153 | try: 154 | command_queue.put_nowait((Order.MOTOR, int(speed_order))) 155 | command_queue.put_nowait((Order.SERVO, angle_order)) 156 | except fullException: 157 | n_full += 1 158 | # print("Command queue is full") 159 | n_total += 1 160 | 161 | # Logging 162 | log.debug("Error={:.2f} error_d={:.2f} error_i={:.2f}".format(error, error_d, error_i)) 163 | log.debug("Turn percent={:.2f} x_target={:.2f}".format(turn_percent, x_target)) 164 | log.debug("v_max={:.2f} mean_h={:.2f}".format(v_max, mean_h)) 165 | log.debug("speed={:.2f} angle={:.2f}".format(speed_order, angle_order)) 166 | 167 | # SEND STOP ORDER at the end 168 | forceStop(command_queue, n_received_semaphore) 169 | # Make sure STOP order is sent 170 | time.sleep(0.2) 171 | pbar.close() 172 | log.info("{:.2f}% of time the command queue was full".format(100 * n_full / n_total)) 173 | log.info("Main loop: {:.2f} Hz".format((n_total - n_full) / (time.time() - start_time))) 174 | 175 | 176 | if __name__ == '__main__': 177 | serial_file = None 178 | try: 179 | # Open serial port (for communication with Arduino) 180 | serial_file = open_serial_port(baudrate=BAUDRATE) 181 | except Exception as e: 182 | raise e 183 | 184 | is_connected = False 185 | # Initialize communication with Arduino 186 | 
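    # Handshake sketch (assumed semantics of the robust_serial orders): the Pi
    # keeps sending HELLO until the board answers. A freshly reset Arduino
    # replies HELLO, one that already completed the handshake replies
    # ALREADY_CONNECTED; either byte means the link is up. serial_file.read(1)
    # returns an empty buffer on timeout, hence the 2-second sleep and retry:
    #
    #     Pi -> HELLO ... (no answer, retry) ... Pi -> HELLO, Arduino -> HELLO => connected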
while not is_connected: 187 | log.info("Waiting for arduino...") 188 | write_order(serial_file, Order.HELLO) 189 | bytes_array = bytearray(serial_file.read(1)) 190 | if not bytes_array: 191 | time.sleep(2) 192 | continue 193 | byte = bytes_array[0] 194 | if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]: 195 | is_connected = True 196 | 197 | log.info("Connected to Arduino") 198 | 199 | # Image processing queue, output centroids 200 | out_queue = queue.Queue() 201 | condition_lock = threading.Lock() 202 | exit_condition = threading.Condition(condition_lock) 203 | 204 | log.info("Starting Image Processing Thread") 205 | # It starts 2 threads: 206 | # - one for retrieving images from camera 207 | # - one for processing the images 208 | image_thread = ImageProcessingThread(Viewer(out_queue, CAMERA_RESOLUTION, debug=False, fps=FPS), exit_condition) 209 | # Wait for camera warmup 210 | time.sleep(1) 211 | 212 | # Event to notify threads that they should terminate 213 | exit_event = threading.Event() 214 | 215 | # Communication with the Arduino 216 | # Create Command queue for sending orders 217 | command_queue = CustomQueue(COMMAND_QUEUE_SIZE) 218 | n_received_semaphore = threading.Semaphore(N_MESSAGES_ALLOWED) 219 | # Lock for accessing serial file (to avoid reading and writing at the same time) 220 | serial_lock = threading.Lock() 221 | 222 | log.info("Starting Communication Threads") 223 | # Threads for arduino communication 224 | threads = [CommandThread(serial_file, command_queue, exit_event, n_received_semaphore, serial_lock), 225 | ListenerThread(serial_file, exit_event, n_received_semaphore, serial_lock), image_thread] 226 | for thread in threads: 227 | thread.start() 228 | 229 | log.info("Starting Control Thread") 230 | mainControl(command_queue, n_received_semaphore, out_queue, 231 | resolution=CAMERA_RESOLUTION, n_seconds=N_SECONDS) 232 | 233 | # End the threads 234 | exit_event.set() 235 | n_received_semaphore.release() 236 | 237 | log.info("Exiting...") 238 | with exit_condition: 239 | exit_condition.notify_all() 240 | 241 | for thread in threads: 242 | thread.join() 243 | -------------------------------------------------------------------------------- /path_planning/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/path_planning/__init__.py -------------------------------------------------------------------------------- /path_planning/bezier_curve.py: -------------------------------------------------------------------------------- 1 | """ 2 | Path Planning with Bezier curve 3 | 4 | Original Author: Atsushi Sakai(@Atsushi_twi) 5 | From https://github.com/AtsushiSakai/PythonRobotics 6 | """ 7 | from __future__ import division, print_function 8 | 9 | import argparse 10 | 11 | import scipy.special 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | 15 | from constants import MAX_WIDTH, MAX_HEIGHT 16 | 17 | demo_cp = np.array([[5., 1.], [-2.78, 1.], [-11.5, -4.5], [-6., -8.]]) 18 | 19 | 20 | def computeControlPoints(x, y, add_current_pos=False): 21 | """ 22 | Compute the control points for creating bezier path 23 | The image processing predicts (x, y) points belonging to the line 24 | :param x: (numpy array) 25 | :param y: (numpy array) 26 | :param add_current_pos: (bool) 27 | :return: (numpy array) 28 | """ 29 | control_points = np.concatenate((x[None].T, y[None].T), axis=1) 30 | 31 | if add_current_pos: 32 | 
current_position = np.array([MAX_WIDTH // 2, MAX_HEIGHT]).reshape(1, -1) 33 | control_points = np.concatenate((current_position, control_points)) 34 | 35 | return control_points 36 | 37 | 38 | def calcBezierPath(control_points, n_points=100): 39 | """ 40 | Compute the Bezier path (trajectory) given the control points 41 | :param control_points: (numpy array) 42 | :param n_points: (int) number of points in the trajectory 43 | :return: (numpy array) 44 | """ 45 | traj = [] 46 | for t in np.linspace(0, 1, n_points): 47 | traj.append(bezier(t, control_points)) 48 | 49 | return np.array(traj) 50 | 51 | 52 | def bernsteinPoly(n, i, t): 53 | """ 54 | Bernstein polynomial 55 | :param n: (int) polynomial degree 56 | :param i: (int) 57 | :param t: (float) 58 | :return: (float) 59 | """ 60 | return scipy.special.comb(n, i) * t ** i * (1 - t) ** (n - i) 61 | 62 | 63 | def bezier(t, control_points): 64 | """ 65 | Return one point on the Bezier curve 66 | :param t: (float) 67 | :param control_points: (numpy array) 68 | :return: (numpy array) 69 | """ 70 | n = len(control_points) - 1 71 | return np.sum([bernsteinPoly(n, i, t) * control_points[i] for i in range(n + 1)], axis=0) 72 | 73 | 74 | def bezierDerivativesControlPoints(control_points, n_derivatives): 75 | """ 76 | Compute the control points of the successive derivatives 77 | of a given Bezier curve 78 | https://pomax.github.io/bezierinfo/#derivatives 79 | :param control_points: (numpy array) 80 | :param n_derivatives: (int) 81 | ex: n_derivatives=2 -> compute control_points for first and second derivatives 82 | :return: ([numpy array]) 83 | """ 84 | W = {0: control_points} 85 | for i in range(n_derivatives): 86 | n = len(W[i]) 87 | W[i + 1] = np.array([(n - 1) * (W[i][j + 1] - W[i][j]) for j in range(n - 1)]) 88 | return W 89 | 90 | 91 | def curvature(dx, dy, ddx, ddy): 92 | """ 93 | Compute the curvature at one point, 94 | given the first and second derivatives 95 | :param dx: (float) First derivative along x axis 96 | :param dy: (float) First derivative along y axis 97 | :param ddx: (float) Second derivative along x axis 98 | :param ddy: (float) Second derivative along y axis 99 | :return: (float) 100 | """ 101 | return (dx * ddy - dy * ddx) / (dx ** 2 + dy ** 2) ** (3 / 2) 102 | 103 | 104 | def calcYaw(dx, dy): 105 | """ 106 | Compute the yaw angle given the derivative 107 | at one point on the curve 108 | :param dx: (float) 109 | :param dy: (float) 110 | :return: (float) 111 | """ 112 | return np.arctan2(dy, dx) 113 | 114 | 115 | def calcTrajectory(control_points, n_points=100): 116 | """ 117 | Compute the Bezier path along with its yaw and curvature 118 | :param control_points: (numpy array) 119 | :param n_points: (int) 120 | :return: ([float], [float], [float], [float]) 121 | """ 122 | cp = control_points 123 | derivatives_cp = bezierDerivativesControlPoints(cp, 2) 124 | 125 | rx, ry, ryaw, rk = [], [], [], [] 126 | for t in np.linspace(0, 1, n_points): 127 | ix, iy = bezier(t, cp) 128 | dx, dy = bezier(t, derivatives_cp[1]) 129 | ddx, ddy = bezier(t, derivatives_cp[2]) 130 | rx.append(ix) 131 | ry.append(iy) 132 | ryaw.append(calcYaw(dx, dy)) 133 | rk.append(curvature(dx, dy, ddx, ddy)) 134 | 135 | return rx, ry, ryaw, rk 136 | 137 | 138 | def main(show_animation): 139 | cp = demo_cp 140 | rx, ry, ryaw, rk = calcTrajectory(cp, 100) 141 | 142 | t = 0.8 143 | x_target, y_target = bezier(t, cp) 144 | derivatives_cp = bezierDerivativesControlPoints(cp, 2) 145 | point = bezier(t, cp) 146 | dt = bezier(t, derivatives_cp[1]) 147 | ddt = bezier(t, derivatives_cp[2]) 148 | cu = curvature(dt[0], dt[1], ddt[0], ddt[1]) 149 | # Normalize the derivative (unit tangent) 150 | dt /= np.linalg.norm(dt, 2) 151 | tangent = np.array([point, point + dt]) 152 | normal = np.array([point, point + [- dt[1], dt[0]]]) 153 | # Radius of curvature 154 | r = 1 / cu 155 | 156 | curvature_center = point + np.array([- dt[1], dt[0]]) * r 157 | circle = plt.Circle(tuple(curvature_center), r, color=(0, 0.8, 0.8), fill=False, linewidth=1) 158 | 159 | 160 | if show_animation: # pragma: no cover 161 | fig, ax = plt.subplots() 162 | ax.plot(rx, ry, label="Bezier Path") 163 | ax.plot(cp.T[0], cp.T[1], '--o', label="Control Points") 164 | ax.plot(x_target, y_target, '--o', label="Target Point") 165 | ax.plot(tangent[:, 0], tangent[:, 1], label="Tangent") 166 | ax.plot(normal[:, 0], normal[:, 1], label="Normal") 167 | ax.add_artist(circle) 168 | ax.legend() 169 | ax.axis("equal") 170 | ax.grid(True) 171 | plt.show() 172 | 173 | 174 | if __name__ == '__main__': 175 | parser = argparse.ArgumentParser(description='Path planning with a Bezier curve (demo)') 176 | parser.add_argument('--no-display', action='store_true', default=False, help='Do not display plots (for tests)') 177 | args = parser.parse_args() 178 | main(not args.no_display) 179 | -------------------------------------------------------------------------------- /path_planning/stanley_controller.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Path tracking simulation with Stanley steering control and PID speed control. 4 | https://www.ri.cmu.edu/pub_files/2009/2/Automatic_Steering_Methods_for_Autonomous_Automobile_Path_Tracking.pdf 5 | 6 | Original Author: Atsushi Sakai(@Atsushi_twi) 7 | From https://github.com/AtsushiSakai/PythonRobotics 8 | """ 9 | from __future__ import division, print_function 10 | 11 | import argparse 12 | 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | 16 | from .bezier_curve import calcTrajectory, demo_cp 17 | from constants import K_STANLEY_CONTROL, CAR_LENGTH, MAX_STEERING_ANGLE,\ 18 | MAX_SPEED_STRAIGHT_LINE, MIN_SPEED, MIN_RADIUS, MAX_RADIUS 19 | 20 | Kp_speed = 0.1 # speed proportional gain 21 | dt = 1 # [s] time difference 22 | 23 | 24 | class State(object): 25 | def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0): 26 | super(State, self).__init__() 27 | self.x = x 28 | self.y = y 29 | self.yaw = yaw 30 | self.v = v 31 | 32 | def update(self, acceleration, delta): 33 | """ 34 | :param acceleration: (float) Speed control 35 | :param delta: (float) Steering control 36 | """ 37 | delta = np.clip(delta, -MAX_STEERING_ANGLE, MAX_STEERING_ANGLE) 38 | 39 | self.x += self.v * np.cos(self.yaw) * dt 40 | self.y += self.v * np.sin(self.yaw) * dt 41 | self.yaw += self.v / CAR_LENGTH * np.tan(delta) * dt 42 | self.yaw = normalizeAngle(self.yaw) 43 | self.v += acceleration * dt 44 | 45 | 46 | def stanleyControl(state, cx, cy, cyaw, last_target_idx): 47 | """ 48 | :param state: (State object) 49 | :param cx: ([float]) 50 | :param cy: ([float]) 51 | :param cyaw: ([float]) 52 | :param last_target_idx: (int) 53 | :return: (float, int, float) 54 | """ 55 | # Cross track error 56 | current_target_idx, error_front_axle = calcTargetIndex(state, cx, cy) 57 | 58 | if last_target_idx >= current_target_idx: 59 | current_target_idx = last_target_idx 60 | 61 | # theta_e corrects the heading error 62 | theta_e = normalizeAngle(cyaw[current_target_idx] - state.yaw) 63 | # theta_d corrects the cross track error 64 | theta_d = np.arctan2(K_STANLEY_CONTROL * error_front_axle, state.v) 65 | # Steering control 66 | delta = theta_e + theta_d 67 | 68 | return delta,
current_target_idx, error_front_axle 69 | 70 | 71 | def normalizeAngle(angle): 72 | """ 73 | :param angle: (float) 74 | :return: (float) Angle in radian in [-pi, pi] 75 | """ 76 | while angle > np.pi: 77 | angle -= 2.0 * np.pi 78 | 79 | while angle < -np.pi: 80 | angle += 2.0 * np.pi 81 | 82 | return angle 83 | 84 | 85 | def calcTargetIndex(state, cx, cy): 86 | """ 87 | :param state: (State object) 88 | :param cx: [float] 89 | :param cy: [float] 90 | :return: (int, float) 91 | """ 92 | # Calc front axle position 93 | fx = state.x + CAR_LENGTH * np.cos(state.yaw) 94 | fy = state.y + CAR_LENGTH * np.sin(state.yaw) 95 | 96 | # Search nearest point index 97 | dx = [fx - icx for icx in cx] 98 | dy = [fy - icy for icy in cy] 99 | d = [np.sqrt(idx ** 2 + idy ** 2) for (idx, idy) in zip(dx, dy)] 100 | error_front_axle = min(d) 101 | target_idx = d.index(error_front_axle) 102 | 103 | target_yaw = normalizeAngle(np.arctan2(fy - cy[target_idx], fx - cx[target_idx]) - state.yaw) 104 | if target_yaw > 0.0: 105 | error_front_axle = - error_front_axle 106 | 107 | return target_idx, error_front_axle 108 | 109 | 110 | def main(show_animation): 111 | cp = demo_cp * 100 112 | cx, cy, cyaw, ck = calcTrajectory(cp, n_points=200) 113 | 114 | target_speed = MAX_SPEED_STRAIGHT_LINE 115 | max_simulation_time = 100.0 116 | 117 | # initial state 118 | state = State(x=100, y=50, yaw=np.radians(-180.0), v=0) 119 | 120 | last_idx = len(cx) - 1 121 | current_t = 0.0 122 | x = [state.x] 123 | y = [state.y] 124 | yaw = [state.yaw] 125 | v = [state.v] 126 | t = [0.0] 127 | cross_track_errors = [] 128 | curvatures = [] 129 | target_idx, _ = calcTargetIndex(state, cx, cy) 130 | max_speed = target_speed 131 | max_radius = 40 132 | min_radius = 5 133 | 134 | while max_simulation_time >= current_t and last_idx > target_idx: 135 | # Compute Acceleration 136 | acceleration = Kp_speed * (target_speed - state.v) 137 | delta, target_idx, cross_track_error = stanleyControl(state, cx, cy, cyaw, target_idx) 138 | state.update(acceleration, delta) 139 | cross_track_errors.append(cross_track_error) 140 | current_radius = 1 / ck[target_idx] 141 | 142 | h = 1 - (np.clip(current_radius, MIN_RADIUS, MAX_RADIUS) - MIN_RADIUS) / (MAX_RADIUS - MIN_RADIUS) 143 | target_speed = h * MIN_SPEED + (1 - h) * MAX_SPEED_STRAIGHT_LINE 144 | 145 | curvatures.append(current_radius) 146 | 147 | current_t += dt 148 | 149 | x.append(state.x) 150 | y.append(state.y) 151 | yaw.append(state.yaw) 152 | v.append(state.v) 153 | t.append(current_t) 154 | 155 | if show_animation: # pragma: no cover 156 | plt.cla() 157 | plt.plot(cx, cy, ".r", label="course") 158 | plt.plot(x, y, "-b", label="trajectory") 159 | plt.arrow(state.x, state.y, np.cos(state.yaw), np.sin(state.yaw), 160 | fc="r", ec="k", head_width=0.5, head_length=0.5) 161 | plt.plot(cx[target_idx], cy[target_idx], "xg", label="target") 162 | plt.axis("equal") 163 | plt.grid(True) 164 | plt.title("Speed[km/h]:" + str(state.v)[:4]) 165 | plt.pause(0.001) 166 | 167 | if show_animation: # pragma: no cover 168 | plt.plot(cx, cy, ".r", label="course") 169 | plt.plot(x, y, "-b", label="trajectory") 170 | plt.legend() 171 | plt.xlabel("x[m]") 172 | plt.ylabel("y[m]") 173 | plt.axis("equal") 174 | plt.grid(True) 175 | 176 | _, ax1 = plt.subplots(1) 177 | plt.plot(t, [iv for iv in v], "-r") 178 | plt.xlabel("Time[s]") 179 | plt.ylabel("Speed[km/h]") 180 | plt.grid(True) 181 | 182 | fig, ax2 = plt.subplots(1) 183 | plt.plot(t[1:], cross_track_errors, "-r", label="cross_track_error") 184 | plt.plot(t[1:], curvatures, 
"-b", label="curvature radius") 185 | plt.xlabel("Time[s]") 186 | plt.grid(True) 187 | plt.legend() 188 | plt.show() 189 | 190 | 191 | if __name__ == '__main__': 192 | parser = argparse.ArgumentParser(description='Test a line detector') 193 | parser.add_argument('--no-display', action='store_true', default=False, help='Do not display plots (for tests)') 194 | args = parser.parse_args() 195 | main(not args.no_display) 196 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | enum34==1.1.6 2 | matplotlib==2.2.2 3 | numpy==1.14.4 4 | pygame==1.9.3 5 | pytest==3.4.2 6 | pytest-cov==2.5.1 7 | pyserial==3.4 8 | pyzmq==16.0.2 9 | robust-serial==0.1 10 | scikit-learn==0.19.0 11 | scipy==0.19.1 12 | six==1.11.0 13 | tqdm==4.19.5 14 | -------------------------------------------------------------------------------- /ros_nodes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/ros_nodes/__init__.py -------------------------------------------------------------------------------- /ros_nodes/camera_node.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | 3 | import threading 4 | import time 5 | 6 | try: 7 | import queue 8 | except ImportError: 9 | import Queue as queue 10 | 11 | import picamera.array 12 | import cv2 13 | 14 | import rospy 15 | from cv_bridge import CvBridge, CvBridgeError 16 | from sensor_msgs.msg import Image, CompressedImage 17 | 18 | 19 | class RGBAnalyser(picamera.array.PiRGBAnalysis): 20 | """ 21 | Class used to retrieve an image from the picamera 22 | and process it 23 | :param camera: (PiCamera object) 24 | :param image_publisher: (ROS Publisher) 25 | """ 26 | 27 | def __init__(self, camera, image_publisher): 28 | super(RGBAnalyser, self).__init__(camera) 29 | self.frame_num = 0 30 | self.referenceFrame = None 31 | self.frame_queue = queue.Queue(maxsize=1) 32 | self.exit = False 33 | self.bridge = CvBridge() 34 | self.image_publisher = image_publisher 35 | self.thread = None 36 | self.start() 37 | 38 | def analyse(self, frame): 39 | """ 40 | :param frame: BGR image object 41 | """ 42 | self.frame_queue.put(item=frame, block=True) 43 | 44 | def extractInfo(self): 45 | try: 46 | while not self.exit: 47 | try: 48 | frame = self.frame_queue.get(block=True, timeout=1) 49 | except queue.Empty: 50 | print("Queue empty") 51 | continue 52 | try: 53 | # Publish new image 54 | msg = self.bridge.cv2_to_imgmsg(frame, 'rgb8') 55 | if not self.exit: 56 | self.image_publisher.publish(msg) 57 | except CvBridgeError as e: 58 | print("Error Converting cv image: {}".format(e.message)) 59 | self.frame_num += 1 60 | except Exception as e: 61 | print("Exception after loop: {}".format(e)) 62 | raise 63 | 64 | def start(self): 65 | t = threading.Thread(target=self.extractInfo) 66 | self.thread = t 67 | t.deamon = True 68 | t.start() 69 | 70 | def stop(self): 71 | self.exit = True 72 | self.thread.join() 73 | self.frame_queue.queue.clear() 74 | 75 | 76 | class Viewer(object): 77 | """ 78 | Class that initialize the camera and start the PiCamera Thread 79 | :param image_publisher: (ROS publisher) 80 | :param resolution: (int, int) 81 | :param fps: (int) 82 | """ 83 | 84 | def __init__(self, image_publisher, resolution, fps=90): 85 | self.camera = 
picamera.PiCamera() 86 | # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes 87 | self.camera.sensor_mode = 7 88 | self.camera.resolution = resolution 89 | print(self.camera.resolution) 90 | self.camera.framerate = fps 91 | self.image_publisher = image_publisher 92 | # self.camera.zoom = (0.0, 0.0, 1.0, 1.0) 93 | # self.camera.awb_gains = 1.5 94 | self.camera.awb_mode = 'auto' 95 | self.camera.exposure_mode = 'auto' 96 | 97 | def start(self): 98 | self.analyser = RGBAnalyser(self.camera, self.image_publisher) 99 | self.camera.start_recording(self.analyser, format='rgb') 100 | 101 | def stop(self): 102 | self.camera.wait_recording() 103 | self.camera.stop_recording() 104 | self.analyser.stop() 105 | 106 | 107 | if __name__ == '__main__': 108 | image_publisher = rospy.Publisher('/picamera/image', Image, queue_size=10) 109 | resolution = (640 // 4, 480 // 4) 110 | fps = 30 111 | print("Starting PiCameraNode") 112 | rospy.init_node('PiCameraNode', anonymous=True) 113 | 114 | v = Viewer(image_publisher, resolution, fps) 115 | start_time = time.time() 116 | v.start() 117 | 118 | try: 119 | rospy.spin() 120 | except KeyboardInterrupt: 121 | print("Shutting down") 122 | v.stop() 123 | 124 | print('FPS: {:.2f}'.format(v.analyser.frame_num / (time.time() - start_time))) 125 | -------------------------------------------------------------------------------- /ros_nodes/keyboard_node.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, with_statement, division 2 | 3 | import pygame 4 | import rospy 5 | from pygame.locals import * 6 | from std_msgs.msg import Int16, Int8 7 | from robust_serial import Order 8 | 9 | from constants import THETA_MIN, THETA_MAX 10 | 11 | UP = (1, 0) 12 | LEFT = (0, 1) 13 | RIGHT = (0, -1) 14 | DOWN = (-1, 0) 15 | STOP = (0, 0) 16 | KEY_CODE_SPACE = 32 17 | 18 | MAX_SPEED = 50 19 | MAX_TURN = 45 20 | STEP_SPEED = 10 21 | STEP_TURN = 30 22 | 23 | TELEOP_RATE = 1 / 60 # 60 fps 24 | 25 | moveBindingsGame = { 26 | K_UP: UP, 27 | K_LEFT: LEFT, 28 | K_RIGHT: RIGHT, 29 | K_DOWN: DOWN 30 | } 31 | 32 | 33 | def control(x, theta, control_speed, control_turn): 34 | target_speed = MAX_SPEED * x 35 | target_turn = MAX_TURN * theta 36 | if target_speed > control_speed: 37 | control_speed = min(target_speed, control_speed + STEP_SPEED) 38 | elif target_speed < control_speed: 39 | control_speed = max(target_speed, control_speed - STEP_SPEED) 40 | else: 41 | control_speed = target_speed 42 | 43 | if target_turn > control_turn: 44 | control_turn = min(target_turn, control_turn + STEP_TURN) 45 | elif target_turn < control_turn: 46 | control_turn = max(target_turn, control_turn - STEP_TURN) 47 | else: 48 | control_turn = target_turn 49 | return control_speed, control_turn 50 | 51 | 52 | def pygameMain(): 53 | # Pygame require a window 54 | pygame.init() 55 | window = pygame.display.set_mode((800, 500), RESIZABLE) 56 | pygame.font.init() 57 | font = pygame.font.SysFont('Open Sans', 25) 58 | small_font = pygame.font.SysFont('Open Sans', 20) 59 | end = False 60 | 61 | def writeText(screen, text, x, y, font, color=(62, 107, 153)): 62 | text = str(text) 63 | text = font.render(text, True, color) 64 | screen.blit(text, (x, y)) 65 | 66 | def clear(): 67 | window.fill((0, 0, 0)) 68 | 69 | def updateScreen(window, speed, turn): 70 | clear() 71 | writeText(window, 'Linear: {:.2f}, Angular: {:.2f}'.format(speed, turn), 20, 0, font, (255, 255, 255)) 72 | help_str = 'Use arrow keys to move, q or ESCAPE to exit.' 
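        # control() above rate-limits the teleop commands: on each frame the
        # current value moves at most STEP_SPEED (or STEP_TURN) toward
        # MAX_SPEED * x (or MAX_TURN * theta). Worked example with this node's
        # constants (MAX_SPEED=50, STEP_SPEED=10): holding the up arrow from
        # standstill yields control_speed = 10, 20, 30, 40, 50 on successive
        # frames, so a full-speed order ramps in over five frames instead of
        # jumping straight to 50.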
73 | writeText(window, help_str, 20, 50, small_font) 74 | help_2 = 'space key, k : force stop --- anything else : stop smoothly' 75 | writeText(window, help_2, 20, 100, small_font) 76 | 77 | control_speed, control_turn = 0, 0 78 | updateScreen(window, control_speed, control_turn) 79 | 80 | while not end: 81 | x, theta = 0, 0 82 | keys = pygame.key.get_pressed() 83 | for keycode in moveBindingsGame.keys(): 84 | if keys[keycode]: 85 | x_tmp, th_tmp = moveBindingsGame[keycode] 86 | x += x_tmp 87 | theta += th_tmp 88 | 89 | if keys[K_k] or keys[K_SPACE]: 90 | x, theta = 0, 0 91 | control_speed, control_turn = 0, 0 92 | 93 | control_speed, control_turn = control(x, theta, control_speed, control_turn) 94 | # Send Orders 95 | angle_order = sendToServer(control_speed, control_turn) 96 | 97 | updateScreen(window, control_speed, angle_order) 98 | 99 | for event in pygame.event.get(): 100 | if event.type == QUIT or event.type == KEYDOWN and event.key in [K_ESCAPE, K_q]: 101 | end = True 102 | pygame.display.flip() 103 | # Limit FPS 104 | pygame.time.Clock().tick(1 / TELEOP_RATE) 105 | 106 | 107 | pub_servo = rospy.Publisher("/arduino/servo", Int16, queue_size=2) 108 | pub_motor = rospy.Publisher("/arduino/motor", Int8, queue_size=2) 109 | 110 | 111 | def sendToServer(control_speed, control_turn): 112 | """ 113 | :param control_speed: (float) 114 | :param control_turn: (float) 115 | """ 116 | # Send Orders 117 | t = (control_turn + MAX_TURN) / (2 * MAX_TURN) 118 | angle_order = int(THETA_MIN * t + THETA_MAX * (1 - t)) 119 | pub_servo.publish(angle_order) 120 | pub_motor.publish(control_speed) 121 | return angle_order 122 | 123 | 124 | if __name__ == '__main__': 125 | print("Starting Teleop client node") 126 | rospy.init_node('teleop_client', anonymous=True) 127 | try: 128 | pygameMain() 129 | # rospy.spin() 130 | except KeyboardInterrupt: 131 | print("Shutting down") 132 | -------------------------------------------------------------------------------- /ros_nodes/serial_adapter.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import time 3 | 4 | import numpy as np 5 | import rospy 6 | from std_msgs.msg import Int16, Int8 7 | from robust_serial import write_order, Order 8 | from robust_serial.threads import CommandThread, ListenerThread 9 | from robust_serial.utils import open_serial_port, CustomQueue 10 | 11 | from constants import BAUDRATE, N_MESSAGES_ALLOWED, COMMAND_QUEUE_SIZE, THETA_MIN, THETA_MAX 12 | 13 | 14 | def servoCallback(data): 15 | servo_order = data.data 16 | servo_order = np.clip(servo_order, 0, 180) 17 | command_queue.put((Order.SERVO, servo_order)) 18 | 19 | 20 | def motorCallback(data): 21 | speed = data.data 22 | command_queue.put((Order.MOTOR, speed)) 23 | 24 | 25 | def listener(): 26 | rospy.init_node('serial_adapter', anonymous=True) 27 | # Declare the Subscriber to motors orders 28 | rospy.Subscriber("arduino/servo", Int16, servoCallback, queue_size=2) 29 | rospy.Subscriber("arduino/motor", Int8, motorCallback, queue_size=2) 30 | rospy.spin() 31 | 32 | 33 | def forceStop(): 34 | """ 35 | Stop The car 36 | """ 37 | command_queue.clear() 38 | n_received_semaphore.release() 39 | n_received_semaphore.release() 40 | command_queue.put((Order.MOTOR, 0)) 41 | command_queue.put((Order.SERVO, int((THETA_MIN + THETA_MAX) / 2))) 42 | 43 | 44 | if __name__ == '__main__': 45 | serial_file = None 46 | try: 47 | # Open serial port (for communication with Arduino) 48 | serial_file = open_serial_port(baudrate=BAUDRATE) 49 | except 
Exception as e: 50 | raise e 51 | 52 | is_connected = False 53 | # Initialize communication with Arduino 54 | while not is_connected: 55 | print("Waiting for arduino...") 56 | write_order(serial_file, Order.HELLO) 57 | bytes_array = bytearray(serial_file.read(1)) 58 | if not bytes_array: 59 | time.sleep(2) 60 | continue 61 | byte = bytes_array[0] 62 | if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]: 63 | is_connected = True 64 | 65 | print("Connected to Arduino") 66 | 67 | # Create Command queue for sending orders 68 | command_queue = CustomQueue(COMMAND_QUEUE_SIZE) 69 | n_received_semaphore = threading.Semaphore(N_MESSAGES_ALLOWED) 70 | # Lock for accessing serial file (to avoid reading and writing at the same time) 71 | serial_lock = threading.Lock() 72 | # Event to notify threads that they should terminate 73 | exit_event = threading.Event() 74 | 75 | print("Starting Communication Threads") 76 | # Threads for arduino communication 77 | threads = [CommandThread(serial_file, command_queue, exit_event, n_received_semaphore, serial_lock), 78 | ListenerThread(serial_file, exit_event, n_received_semaphore, serial_lock)] 79 | for thread in threads: 80 | thread.start() 81 | 82 | try: 83 | listener() 84 | except rospy.ROSInterruptException: 85 | pass 86 | 87 | # End the threads 88 | exit_event.set() 89 | n_received_semaphore.release() 90 | 91 | print("Exiting...") 92 | for thread in threads: 93 | thread.join() 94 | -------------------------------------------------------------------------------- /run_docker_cpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Launch an experiment using the docker cpu image 3 | 4 | cmd_line="$@" 5 | 6 | echo "Executing in the docker (cpu image):" 7 | echo $cmd_line 8 | 9 | 10 | docker run -it --rm --network host --ipc=host \ 11 | --mount src=$(pwd),target=/tmp/racing_robot,type=bind araffin/racing-robot-cpu\ 12 | bash -c "cd /tmp/racing_robot/ && $cmd_line" 13 | -------------------------------------------------------------------------------- /run_docker_rpi3.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Launch an experiment using the docker Raspberry Pi 3 image 3 | 4 | cmd_line="$@" 5 | 6 | echo "Executing in the docker (Raspberry Pi 3 image):" 7 | echo $cmd_line 8 | 9 | 10 | docker run -it --rm --network host --ipc=host \ 11 | --mount src=$(pwd),target=/tmp/racing_robot,type=bind araffin/racing-robot-rpi3\ 12 | bash -c "cd /tmp/racing_robot/ && $cmd_line" 13 | -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pytest -v --cov-config .coveragerc --cov-report html --cov-report term --cov=.
tests/ 3 | -------------------------------------------------------------------------------- /teleop/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/teleop/__init__.py -------------------------------------------------------------------------------- /teleop/teleop_client.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, with_statement, division 2 | 3 | import zmq 4 | import pygame 5 | from pygame.locals import * 6 | 7 | from constants import RASPBERRY_IP, TELEOP_PORT 8 | 9 | 10 | UP = (1, 0) 11 | LEFT = (0, 1) 12 | RIGHT = (0, -1) 13 | DOWN = (-1, 0) 14 | STOP = (0, 0) 15 | KEY_CODE_SPACE = 32 16 | 17 | MAX_SPEED = 30 18 | MAX_TURN = 45 19 | THETA_MIN = 70 20 | THETA_MAX = 150 21 | STEP_SPEED = 10 22 | STEP_TURN = 30 23 | 24 | TELEOP_RATE = 1 / 60 # 60 fps 25 | 26 | moveBindingsGame = { 27 | K_UP: UP, 28 | K_LEFT: LEFT, 29 | K_RIGHT: RIGHT, 30 | K_DOWN: DOWN 31 | } 32 | 33 | 34 | def control(x, theta, control_speed, control_turn): 35 | target_speed = MAX_SPEED * x 36 | target_turn = MAX_TURN * theta 37 | if target_speed > control_speed: 38 | control_speed = min(target_speed, control_speed + STEP_SPEED) 39 | elif target_speed < control_speed: 40 | control_speed = max(target_speed, control_speed - STEP_SPEED) 41 | else: 42 | control_speed = target_speed 43 | 44 | if target_turn > control_turn: 45 | control_turn = min(target_turn, control_turn + STEP_TURN) 46 | elif target_turn < control_turn: 47 | control_turn = max(target_turn, control_turn - STEP_TURN) 48 | else: 49 | control_turn = target_turn 50 | return control_speed, control_turn 51 | 52 | 53 | def pygameMain(): 54 | # Pygame require a window 55 | pygame.init() 56 | window = pygame.display.set_mode((800, 500), RESIZABLE) 57 | pygame.font.init() 58 | font = pygame.font.SysFont('Open Sans', 25) 59 | small_font = pygame.font.SysFont('Open Sans', 20) 60 | end = False 61 | 62 | def writeText(screen, text, x, y, font, color=(62, 107, 153)): 63 | text = str(text) 64 | text = font.render(text, True, color) 65 | screen.blit(text, (x, y)) 66 | 67 | def clear(): 68 | window.fill((0, 0, 0)) 69 | 70 | def updateScreen(window, speed, turn): 71 | clear() 72 | writeText(window, 'Linear: {:.2f}, Angular: {:.2f}'.format(speed, turn), 20, 0, font, (255, 255, 255)) 73 | help_str = 'Use arrow keys to move, q or ESCAPE to exit.' 
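        # sendToServer() below maps the symmetric turn command onto the servo
        # range: t = (control_turn + MAX_TURN) / (2 * MAX_TURN) rescales
        # [-MAX_TURN, MAX_TURN] to [0, 1], then angle_order interpolates
        # between THETA_MAX (at t=0) and THETA_MIN (at t=1). With this file's
        # constants (THETA_MIN=70, THETA_MAX=150, MAX_TURN=45):
        #
        #     control_turn = -45  ->  t = 0.0  ->  angle_order = 150
        #     control_turn =   0  ->  t = 0.5  ->  angle_order = 110
        #     control_turn = +45  ->  t = 1.0  ->  angle_order = 70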
74 | writeText(window, help_str, 20, 50, small_font) 75 | help_2 = 'space key, k : force stop --- anything else : stop smoothly' 76 | writeText(window, help_2, 20, 100, small_font) 77 | 78 | control_speed, control_turn = 0, 0 79 | updateScreen(window, control_speed, control_turn) 80 | 81 | while not end: 82 | x, theta = 0, 0 83 | keys = pygame.key.get_pressed() 84 | for keycode in moveBindingsGame.keys(): 85 | if keys[keycode]: 86 | x_tmp, th_tmp = moveBindingsGame[keycode] 87 | x += x_tmp 88 | theta += th_tmp 89 | 90 | if keys[K_k] or keys[K_SPACE]: 91 | x, theta = 0, 0 92 | control_speed, control_turn = 0, 0 93 | 94 | control_speed, control_turn = control(x, theta, control_speed, control_turn) 95 | # Send Orders 96 | angle_order = sendToServer(socket, control_speed, control_turn) 97 | 98 | updateScreen(window, control_speed, angle_order) 99 | 100 | for event in pygame.event.get(): 101 | if event.type == QUIT or event.type == KEYDOWN and event.key in [K_ESCAPE, K_q]: 102 | end = True 103 | pygame.display.flip() 104 | # Limit FPS 105 | pygame.time.Clock().tick(1 / TELEOP_RATE) 106 | 107 | 108 | def sendToServer(socket, control_speed, control_turn): 109 | """ 110 | :param socket: (zmq socket object) 111 | :param control_speed: (float) 112 | :param control_turn: (float) 113 | """ 114 | # Send Orders 115 | t = (control_turn + MAX_TURN) / (2 * MAX_TURN) 116 | angle_order = int(THETA_MIN * t + THETA_MAX * (1 - t)) 117 | socket.send_json((control_speed, angle_order)) 118 | return angle_order 119 | 120 | 121 | if __name__ == '__main__': 122 | context = zmq.Context() 123 | socket = context.socket(zmq.PAIR) 124 | print("Connecting to ... {}".format(RASPBERRY_IP)) 125 | socket.connect("tcp://{}:{}".format(RASPBERRY_IP, TELEOP_PORT)) 126 | 127 | msg = socket.recv() 128 | print("Connected To Server") 129 | try: 130 | pygameMain() 131 | except KeyboardInterrupt as e: 132 | pass 133 | finally: 134 | socket.send_json((-999, -999)) 135 | socket.close() 136 | -------------------------------------------------------------------------------- /teleop/teleop_server.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, with_statement, division 2 | 3 | import argparse 4 | import time 5 | import threading 6 | 7 | try: 8 | import queue 9 | except ImportError: 10 | import Queue as queue 11 | 12 | import zmq 13 | try: 14 | import picamera 15 | except ImportError: 16 | raise ImportError("Picamera package not found, you must run this code on the Raspberry Pi") 17 | 18 | from robust_serial import write_order, Order 19 | from robust_serial.threads import CommandThread, ListenerThread 20 | from robust_serial.utils import open_serial_port, CustomQueue 21 | 22 | from constants import CAMERA_RESOLUTION, CAMERA_MODE, TELEOP_PORT, FPS 23 | 24 | 25 | emptyException = queue.Empty 26 | fullException = queue.Full 27 | 28 | # Listen to port 5556 29 | context = zmq.Context() 30 | socket = context.socket(zmq.PAIR) 31 | socket.bind("tcp://*:{}".format(TELEOP_PORT)) 32 | 33 | parser = argparse.ArgumentParser(description='Teleoperation server') 34 | parser.add_argument('-v', '--video_file', help='Video filename', default="", type=str) 35 | args = parser.parse_args() 36 | 37 | record_video = args.video_file != "" 38 | if record_video: 39 | print("Recording a video to {}".format(args.video_file)) 40 | 41 | try: 42 | serial_file = open_serial_port(baudrate=115200) 43 | except Exception as e: 44 | raise e 45 | 46 | is_connected = False 47 | # Initialize communication 
with Arduino 48 | while not is_connected: 49 | print("Waiting for arduino...") 50 | write_order(serial_file, Order.HELLO) 51 | bytes_array = bytearray(serial_file.read(1)) 52 | if not bytes_array: 53 | time.sleep(2) 54 | continue 55 | byte = bytes_array[0] 56 | if byte in [Order.HELLO.value, Order.ALREADY_CONNECTED.value]: 57 | is_connected = True 58 | 59 | print("Connected to Arduino") 60 | 61 | # Create Command queue for sending orders 62 | command_queue = CustomQueue(2) 63 | # Number of messages we can send to the Arduino without receiving an acknowledgment 64 | n_messages_allowed = 3 65 | n_received_semaphore = threading.Semaphore(n_messages_allowed) 66 | # Lock for accessing serial file (to avoid reading and writing at the same time) 67 | serial_lock = threading.Lock() 68 | 69 | # Event to notify threads that they should terminate 70 | exit_event = threading.Event() 71 | 72 | print("Starting Communication Threads") 73 | # Threads for arduino communication 74 | threads = [CommandThread(serial_file, command_queue, exit_event, n_received_semaphore, serial_lock), 75 | ListenerThread(serial_file, exit_event, n_received_semaphore, serial_lock)] 76 | for t in threads: 77 | t.start() 78 | 79 | print("Waiting for client...") 80 | socket.send(b'1') 81 | print("Connected To Client") 82 | i = 0 83 | 84 | with picamera.PiCamera(resolution=CAMERA_RESOLUTION, sensor_mode=CAMERA_MODE, framerate=FPS) as camera: 85 | if record_video: 86 | camera.start_recording("{}.h264".format(args.video_file)) 87 | 88 | while True: 89 | control_speed, angle_order = socket.recv_json() 90 | print("({}, {})".format(control_speed, angle_order)) 91 | try: 92 | command_queue.put_nowait((Order.MOTOR, control_speed)) 93 | command_queue.put_nowait((Order.SERVO, angle_order)) 94 | except fullException: 95 | print("Queue full") 96 | 97 | if control_speed == -999: 98 | socket.close() 99 | break 100 | if record_video: 101 | camera.stop_recording() 102 | 103 | print("Sending STOP order...") 104 | # Stop the car at the end 105 | command_queue.clear() 106 | n_received_semaphore.release() 107 | command_queue.put((Order.MOTOR, 0)) 108 | # Make sure STOP order is sent 109 | time.sleep(0.2) 110 | # End the threads 111 | exit_event.set() 112 | n_received_semaphore.release() 113 | print("Exiting...") 114 | 115 | for t in threads: 116 | t.join() 117 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sergionr2/RacingRobot/6885ddb37407dff15845d29f641bc7c39279b216/tests/__init__.py -------------------------------------------------------------------------------- /tests/common.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | NUM_EPOCHS = 2 4 | DATASET = 'datasets/test_dataset/' 5 | TEST_IMAGE_PATH = DATASET + '170.jpg' 6 | SEED = 0 7 | BATCHSIZE = 4 8 | 9 | 10 | def assertEq(left, right): 11 | assert left == right, "{} != {}".format(left, right) 12 | 13 | 14 | def assertNeq(left, right): 15 | assert left != right, "{} == {}".format(left, right) 16 | -------------------------------------------------------------------------------- /tests/test_image_processing.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | import subprocess 4 | 5 | from .common import * 6 | 
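# The demo entry points in this suite are exercised end-to-end through
# subprocess (python -m <module> --no-display) so their argparse/__main__
# paths actually run, with a zero exit code as the pass criterion. A sketch of
# the pattern (hypothetical module name):
#
#     ok = subprocess.call(['python', '-m', 'image_processing.some_demo', '--no-display'])
#     assertEq(ok, 0)  # non-zero means the script raised or exited with an error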
from constants import * 7 | from image_processing.image_processing import processImage 8 | from image_processing.warp_image import transformPoints, warpImage 9 | 10 | np.random.seed(0) 11 | test_image = np.random.randint(0, 255, size=(MAX_WIDTH, MAX_HEIGHT, 3), dtype=np.uint8) 12 | 13 | def testProcessImage(): 14 | processImage(test_image) 15 | processImage(test_image, debug=True) 16 | 17 | 18 | def testWarpDemo(): 19 | args = ['-i', TEST_IMAGE_PATH, '--no-display'] 20 | ok = subprocess.call(['python', '-m', 'image_processing.warp_image'] + args) 21 | assertEq(ok, 0) 22 | 23 | 24 | def testWarping(): 25 | x, y = processImage(test_image, debug=True) 26 | transformPoints(x, y) 27 | warpImage(test_image) 28 | -------------------------------------------------------------------------------- /tests/test_path_planning.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | import subprocess 4 | 5 | import pytest 6 | import numpy as np 7 | 8 | from .common import * 9 | from constants import * 10 | from path_planning.bezier_curve import demo_cp, calcBezierPath 11 | from path_planning.stanley_controller import normalizeAngle 12 | 13 | def testStanleyControlDemo(): 14 | ok = subprocess.call(['python', '-m', 'path_planning.stanley_controller', '--no-display']) 15 | assertEq(ok, 0) 16 | 17 | def testBezierCurveDemo(): 18 | ok = subprocess.call(['python', '-m', 'path_planning.bezier_curve', '--no-display']) 19 | assertEq(ok, 0) 20 | 21 | 22 | def testCalcBezierPath(): 23 | path = calcBezierPath(demo_cp, n_points=10) 24 | assertEq(path[0, 0], demo_cp[0, 0]) 25 | assertEq(path[0, 1], demo_cp[0, 1]) 26 | 27 | assertEq(path[-1, 0], demo_cp[-1, 0]) 28 | assertEq(path[-1, 1], demo_cp[-1, 1]) 29 | 30 | 31 | def testNormalizeAngle(): 32 | angle = np.pi / 3 + 4 * (2 * np.pi) 33 | assertEq(normalizeAngle(angle), pytest.approx(np.pi / 3)) 34 | 35 | angle = np.pi / 4 - 5 * (2 * np.pi) 36 | assertEq(normalizeAngle(angle), pytest.approx(np.pi / 4)) 37 | -------------------------------------------------------------------------------- /tests/test_training.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | import subprocess 4 | 5 | import pytest 6 | import torch as th 7 | 8 | from .common import * 9 | from constants import * 10 | from train.train import main 11 | from train.utils import adjustLearningRate, loadNetwork 12 | from train.models import CustomNet 13 | 14 | 15 | training_args = ['-f', DATASET, '--num_epochs', NUM_EPOCHS, '-bs', BATCHSIZE, 16 | '--seed', SEED] 17 | training_args = list(map(str, training_args)) 18 | 19 | 20 | def testTrain(): 21 | for model_type in ['custom', 'cnn', 'mlp']: 22 | args = training_args + ['--model_type', model_type] 23 | ok = subprocess.call(['python', '-m', 'train.train'] + args) 24 | assertEq(ok, 0) 25 | weights = "{}_model_tmp.pth".format(model_type) 26 | loadNetwork(weights, model_type=model_type) 27 | 28 | 29 | def testFailIfModelNotSupported(): 30 | with pytest.raises(ValueError): 31 | main(DATASET, device="cuda", model_type="dummy_model") 32 | 33 | 34 | def testTrainFromSavedModel(): 35 | args = training_args + ['--model_type', MODEL_TYPE, '--load_model', WEIGHTS_PTH] 36 | ok = subprocess.call(['python', '-m', 'train.train'] + args) 37 | assertEq(ok, 0) 38 | 39 | 40 | def testTestScript(): 41 | args = ['-f', DATASET, '--model_type', MODEL_TYPE, '-w', 
WEIGHTS_PTH, '--no-display'] 42 | ok = subprocess.call(['python', '-m', 'train.test'] + args) 43 | assertEq(ok, 0) 44 | 45 | 46 | def testBenchmarkScript(): 47 | args = ['-n', '1', '--model_type', MODEL_TYPE, '-w', WEIGHTS_PTH] 48 | ok = subprocess.call(['python', '-m', 'train.benchmark'] + args) 49 | assertEq(ok, 0) 50 | 51 | 52 | def testAdjustLearningRate(): 53 | model = CustomNet() 54 | optimizer = th.optim.SGD(model.parameters(), lr=1) 55 | adjustLearningRate(optimizer, epoch=1, n_epochs=2, lr_init=1, batch=10, 56 | n_batch=10, method='cosine') 57 | current_lr = optimizer.param_groups[0]['lr'] 58 | assertEq(current_lr, 0) 59 | 60 | decay_rate = 0.9 61 | adjustLearningRate(optimizer, epoch=5, n_epochs=10, lr_init=1, batch=1, 62 | n_batch=10, method='multistep', decay_rate=decay_rate) 63 | current_lr = optimizer.param_groups[0]['lr'] 64 | assertEq(current_lr, decay_rate) 65 | 66 | adjustLearningRate(optimizer, epoch=8, n_epochs=10, lr_init=1, batch=1, 67 | n_batch=10, method='multistep', decay_rate=decay_rate) 68 | current_lr = optimizer.param_groups[0]['lr'] 69 | assertEq(current_lr, decay_rate ** 2) 70 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | import numpy as np 4 | 5 | from .common import * 6 | from train.utils import preprocessImage, loadNetwork, predict, loadLabels 7 | from constants import * 8 | 9 | test_image = 234 * np.ones((MAX_WIDTH, MAX_HEIGHT, 3), dtype=np.uint8) 10 | 11 | 12 | def testPreprocessing(): 13 | image = preprocessImage(test_image, INPUT_WIDTH, INPUT_HEIGHT) 14 | # Check normalization 15 | assertEq(len(np.where(np.abs(image) > 1)[0]), 0) 16 | # Check resize 17 | assertEq(image.size, 3 * INPUT_WIDTH * INPUT_HEIGHT) 18 | # Check normalization on one element 19 | assertEq(image[0, 0, 0], ((234 / 255.) 
- 0.5) * 2) 20 | 21 | 22 | def testPredict(): 23 | model = loadNetwork(WEIGHTS_PTH, NUM_OUTPUT, MODEL_TYPE) 24 | x, y = predict(model, test_image) 25 | 26 | 27 | def testLoadLabels(): 28 | loadLabels(DATASET) 29 | dataset_no_trailing_slash = DATASET[:-1] 30 | folders = [DATASET, dataset_no_trailing_slash] 31 | loadLabels(folders) 32 | -------------------------------------------------------------------------------- /train/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .utils import preprocessImage, JsonDataset, loadNetwork, predict 4 | -------------------------------------------------------------------------------- /train/benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Compute time to process an image 3 | It processes the images N_ITER times and print statistics 4 | """ 5 | from __future__ import print_function, with_statement, division 6 | 7 | import time 8 | import argparse 9 | 10 | import cv2 11 | import numpy as np 12 | import torch as th 13 | from torch import jit 14 | from torch.jit import trace 15 | 16 | from constants import NUM_OUTPUT, MAX_WIDTH, MAX_HEIGHT, INPUT_WIDTH, INPUT_HEIGHT, N_CHANNELS 17 | from .utils import loadLabels, loadNetwork, predict, preprocessImage 18 | 19 | parser = argparse.ArgumentParser(description='Benchmark line detection algorithm') 20 | parser.add_argument('-n', '--num_iterations', help='Number of iteration', default=1, type=int) 21 | parser.add_argument('-w', '--weights', help='Saved weights', default="custom_model_tmp.pth", type=str) 22 | parser.add_argument('--model_type', help='Model type: cnn', default="custom", type=str, choices=['cnn', 'custom']) 23 | parser.add_argument('--jit', action='store_true', default=False, help='Use JIT') 24 | 25 | args = parser.parse_args() 26 | 27 | N_ITER = args.num_iterations 28 | 29 | model = loadNetwork(args.weights, NUM_OUTPUT, args.model_type) 30 | model = model.to("cpu") 31 | # 1st way to compile a function 32 | # model = jit.compile(model.forward, nderivs=1, enabled=args.jit) 33 | 34 | # 2nd way to use the JIT 35 | # Give an example input to compile the model 36 | # TODO: use real image for Benchmark 37 | example_input = th.ones((1, N_CHANNELS, INPUT_HEIGHT, INPUT_WIDTH)).to(th.float) 38 | # Backward compatibility with pytorch 0.4.1 39 | try: 40 | model = trace(example_input)(model) 41 | except TypeError: 42 | model = trace(model, example_inputs=example_input) 43 | 44 | time_deltas = [] 45 | for i in range(N_ITER): 46 | image = np.random.randint(255) * np.ones((MAX_WIDTH, MAX_HEIGHT, 3), dtype=np.uint8) 47 | start_time = time.time() 48 | x, y = predict(model, image) 49 | time_deltas.append(time.time() - start_time) 50 | 51 | time_deltas = np.array(time_deltas) 52 | print("Total time: {:.6f}s".format(time_deltas.sum())) 53 | print("Mean time: {:.4f}ms".format(1000 * time_deltas.mean())) 54 | print("Std time: {:.4f}ms".format(1000 * time_deltas.std())) 55 | print("Median time: {:.4f}ms".format(1000 * np.median(time_deltas))) 56 | -------------------------------------------------------------------------------- /train/convert_video.py: -------------------------------------------------------------------------------- 1 | """ 2 | Convert h264 videos (in the current folder) to mp4 using MP4Box 3 | """ 4 | import os 5 | import subprocess 6 | 7 | files = [f for f in os.listdir(".") if f.endswith(".h264")] 8 | 9 | for f in files: 10 | print("Converting 
{}".format(f)) 11 | ok = subprocess.call(['MP4Box', '-add', f, f[:-4] + "mp4"]) 12 | -------------------------------------------------------------------------------- /train/models.py: -------------------------------------------------------------------------------- 1 | """ 2 | Different neural network architectures for detecting the line 3 | """ 4 | from __future__ import print_function, division, absolute_import 5 | 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | 9 | 10 | class MlpNetwork(nn.Module): 11 | """ 12 | MLP network for detecting the line 13 | :param input_dim: (int) 3 x H x W 14 | :param drop_p: (float) Dropout proba 15 | """ 16 | 17 | def __init__(self, input_dim, num_output=6, drop_p=0.0): 18 | super(MlpNetwork, self).__init__() 19 | self.fc1 = nn.Linear(input_dim, 20) 20 | self.fc2 = nn.Linear(20, 4) 21 | self.fc3 = nn.Linear(4, num_output) 22 | self.drop_p = drop_p 23 | self.activation_fn = F.relu 24 | self._initializeWeights() 25 | 26 | def _initializeWeights(self): 27 | for m in self.modules(): 28 | if isinstance(m, nn.Linear): 29 | pass 30 | # m.weight.data.normal_(0, 0.005) 31 | # m.weight.data.uniform_(-0.005, 0.005) 32 | # nn.init.xavier_uniform(m.weight.data, gain=nn.init.calculate_gain('relu')) 33 | # nn.init.kaiming_normal(m.weight.data) 34 | # nn.init.kaiming_uniform(m.weight.data) 35 | # m.bias.data.zero_() 36 | # m.bias.data.uniform_(-0.1, 0.1) 37 | 38 | def forward(self, x): 39 | # Flatten input 40 | x = x.view(x.size(0), -1) 41 | x = F.dropout(x, p=self.drop_p, training=self.training) 42 | x = self.activation_fn(self.fc1(x)) 43 | x = self.activation_fn(self.fc2(x)) 44 | x = self.activation_fn(self.fc3(x)) 45 | return x 46 | 47 | 48 | class ConvolutionalNetwork(nn.Module): 49 | def __init__(self, drop_p=0.0, num_output=6): 50 | super(ConvolutionalNetwork, self).__init__() 51 | self.conv_layers = nn.Sequential( 52 | nn.Conv2d(3, 20, kernel_size=7, stride=2, padding=3), 53 | nn.ReLU(inplace=True), 54 | nn.MaxPool2d(kernel_size=3, stride=2, padding=1), 55 | 56 | nn.Conv2d(20, 20, kernel_size=3, stride=1, padding=1), 57 | nn.ReLU(inplace=True), 58 | nn.MaxPool2d(kernel_size=3, stride=2) 59 | ) 60 | 61 | # Factor = 1 62 | # self.fc1 = nn.Linear(20 * 20 * 39, 16) 63 | # Factor = 2 64 | # self.fc1 = nn.Linear(20 * 10 * 19, 16) 65 | # Factor = 4 66 | self.fc1 = nn.Linear(20 * 5 * 9, 16) 67 | self.fc2 = nn.Linear(16, num_output) 68 | self.drop_p = drop_p 69 | 70 | def forward(self, x): 71 | x = F.dropout(x, p=self.drop_p, training=self.training) 72 | x = self.conv_layers(x) 73 | # print(x.shape) 74 | x = x.view(x.size(0), -1) 75 | x = F.relu(self.fc1(x)) 76 | x = self.fc2(x) 77 | return x 78 | 79 | 80 | class CustomNet(nn.Module): 81 | def __init__(self, num_output=6): 82 | super(CustomNet, self).__init__() 83 | 84 | self.model = nn.Sequential( 85 | nn.Conv2d(3, 20, kernel_size=7, stride=2, padding=1), 86 | nn.ReLU(inplace=True), 87 | nn.MaxPool2d(kernel_size=3, stride=2), 88 | 89 | nn.Conv2d(20, 20, kernel_size=3, stride=1, padding=1), 90 | nn.ReLU(inplace=True), 91 | nn.MaxPool2d(kernel_size=3, stride=2), 92 | ) 93 | self.fc = nn.Sequential( 94 | nn.Linear(20 * 4 * 8, 32), 95 | # nn.Linear(20*9*18, 32), 96 | nn.ReLU(inplace=True), 97 | nn.Linear(32, 32), 98 | nn.ReLU(inplace=True), 99 | nn.Linear(32, num_output) 100 | ) 101 | 102 | def forward(self, x): 103 | x = self.model(x) 104 | # print(x.shape) 105 | x = x.view(x.size(0), -1) 106 | x = self.fc(x) 107 | return x 108 | -------------------------------------------------------------------------------- 
/train/split_video.py: -------------------------------------------------------------------------------- 1 | """ 2 | Convert a video to a sequence of images 3 | """ 4 | from __future__ import print_function, division 5 | 6 | import argparse 7 | 8 | import cv2 9 | import numpy as np 10 | from tqdm import tqdm 11 | 12 | from constants import RIGHT_KEY, LEFT_KEY, ENTER_KEY, EXIT_KEYS 13 | 14 | parser = argparse.ArgumentParser(description='Split a video into a sequence of images') 15 | parser.add_argument('-i', '--input_video', help='Input Video', default="", type=str, required=True) 16 | parser.add_argument('-o', '--output_folder', help='Output folder', default="", type=str, required=True) 17 | parser.add_argument('--display', action='store_true', default=False, help='Display the images') 18 | 19 | args = parser.parse_args() 20 | 21 | output_folder = args.output_folder 22 | 23 | # OpenCV 3.x.x compatibility 24 | if not hasattr(cv2, 'cv'): 25 | # 0-based index of the frame to be decoded/captured next. 26 | image_zero_index = cv2.CAP_PROP_POS_FRAMES 27 | frame_count = cv2.CAP_PROP_FRAME_COUNT 28 | else: 29 | image_zero_index = cv2.cv.CV_CAP_PROP_POS_FRAMES 30 | frame_count = cv2.cv.CV_CAP_PROP_FRAME_COUNT 31 | 32 | video_file = args.input_video 33 | # Read the video 34 | cap = cv2.VideoCapture(video_file) 35 | 36 | current_idx = cap.get(image_zero_index) 37 | n_frames = int(cap.get(frame_count)) 38 | print("{} frames".format(n_frames)) 39 | 40 | # Progress Bar 41 | pbar = tqdm(total=n_frames) 42 | 43 | while True: 44 | # Read next frame 45 | while True: 46 | flag, img = cap.read() 47 | if flag: 48 | break 49 | else: 50 | # The next frame is not ready, so we try to read it again 51 | cap.set(image_zero_index, current_idx - 1) 52 | cv2.waitKey(1000) 53 | continue 54 | 55 | original_img = img.copy() 56 | if not args.display: 57 | key = RIGHT_KEY # Next frame 58 | if current_idx == n_frames - 1: 59 | break 60 | else: 61 | cv2.imshow("current", img) 62 | key = cv2.waitKey(0) & 0xff 63 | 64 | if key in EXIT_KEYS: 65 | cv2.destroyAllWindows() 66 | exit() 67 | elif key in [LEFT_KEY, RIGHT_KEY, ENTER_KEY]: 68 | current_idx += 1 if key in [RIGHT_KEY, ENTER_KEY] else -1 69 | current_idx = np.clip(current_idx, 0, n_frames - 1) 70 | # Save image 71 | path = '{}/{}.jpg'.format(output_folder, int(current_idx)) 72 | cv2.imwrite(path, original_img) 73 | # print("Saved {}".format(int(current_idx))) 74 | pbar.update(1) 75 | 76 | cap.set(image_zero_index, current_idx) 77 | 78 | pbar.close() 79 | -------------------------------------------------------------------------------- /train/test.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test the trained model on images 3 | """ 4 | from __future__ import print_function, division, absolute_import 5 | 6 | import argparse 7 | import os 8 | import sys 9 | 10 | import cv2 11 | import numpy as np 12 | 13 | from constants import RIGHT_KEY, LEFT_KEY, EXIT_KEYS, ROI, NUM_OUTPUT, TARGET_POINT, MODEL_TYPE, WEIGHTS_PTH 14 | from constants import MAX_WIDTH, MAX_HEIGHT 15 | from path_planning.bezier_curve import calcBezierPath, computeControlPoints, bezier, calcTrajectory 16 | from path_planning.stanley_controller import stanleyControl, State, calcTargetIndex 17 | from image_processing.warp_image import warpImage, transformPoints 18 | from .utils import loadLabels, loadNetwork, predict, computeMSE 19 | 20 | parser = argparse.ArgumentParser(description='Test a line detector') 21 | parser.add_argument('-i', '--input_video', help='Input 
Video', default="", type=str) 22 | parser.add_argument('-f', '--folders', help='Dataset folders', nargs='+', default=[""], type=str) 23 | parser.add_argument('-w', '--weights', help='Saved weights', default=WEIGHTS_PTH, type=str) 24 | parser.add_argument('--model_type', help='Model type: {cnn, custom}', default=MODEL_TYPE, type=str, 25 | choices=['cnn', 'custom']) 26 | parser.add_argument('--no-display', action='store_true', default=False, help='Compute only mse') 27 | parser.add_argument('--no-mse', action='store_true', default=False, help='Do not compute mse') 28 | 29 | args = parser.parse_args() 30 | 31 | assert args.folders[0] != "" or args.input_video != "", "You must specify a video or dataset for testing" 32 | 33 | video = None 34 | if args.input_video != "": # pragma: no cover 35 | assert os.path.isfile(args.input_video), "Invalid path to input video" 36 | image_zero_index = cv2.CAP_PROP_POS_FRAMES 37 | frame_count = cv2.CAP_PROP_FRAME_COUNT 38 | video = cv2.VideoCapture(args.input_video) 39 | 40 | model = loadNetwork(args.weights, NUM_OUTPUT, args.model_type) 41 | 42 | labels, train_labels, val_labels, test_labels = {}, {}, {}, {} 43 | images = None 44 | 45 | # Load images from a folder 46 | if video is None: 47 | if os.path.isfile("{}/labels.json".format(args.folders[0])): 48 | train_labels, val_labels, test_labels, labels = loadLabels(args.folders) 49 | 50 | if images is None: 51 | images = [] 52 | for folder in args.folders: 53 | tmp_images = ["{}/{}".format(folder, f) for f in os.listdir(folder) if f.endswith('.jpg')] 54 | tmp_images.sort(key=lambda name: int(name.split('.jpg')[0].split('/')[-1])) 55 | images += tmp_images 56 | 57 | idx_val = set(val_labels.keys()) 58 | idx_test = set(test_labels.keys()) 59 | n_frames = len(images) 60 | current_idx = 0 61 | else: # pragma: no cover 62 | # Load video 63 | if not video.isOpened(): # pragma: no cover 64 | print("Error opening video, check your OpenCV version (you may need to compile it from source)") 65 | sys.exit(1) 66 | current_idx = video.get(image_zero_index) 67 | n_frames = int(video.get(frame_count)) 68 | 69 | print("{} frames".format(n_frames)) 70 | 71 | if n_frames <= 0: # pragma: no cover 72 | print("Not enough frames, check your path") 73 | sys.exit(1) 74 | 75 | if len(train_labels) > 0 and not args.no_mse: 76 | computeMSE(model, train_labels, val_labels, test_labels, batchsize=16) 77 | if args.no_display: 78 | sys.exit(0) 79 | 80 | while True: # pragma: no cover 81 | if video is not None: 82 | while True: 83 | flag, image = video.read() 84 | if flag: 85 | break 86 | else: 87 | # The next frame is not ready, so we try to read it again 88 | video.set(image_zero_index, current_idx - 1) 89 | cv2.waitKey(1000) 90 | continue 91 | text = "" 92 | name = str(current_idx) 93 | else: 94 | path = images[current_idx] 95 | image = cv2.imread(path) 96 | # image = cv2.flip(image, 1) 97 | # Image from train/validation/test set ? 
98 | text = "train" 99 | if path in idx_val: 100 | text = "val" 101 | elif path in idx_test: 102 | text = "test" 103 | 104 | x, y = predict(model, image) 105 | points = transformPoints(x, y).astype(np.int32) 106 | 107 | # print(current_idx) 108 | # Compute bezier path 109 | control_points = computeControlPoints(x, y, add_current_pos=True) 110 | target = bezier(TARGET_POINT, control_points).astype(np.int32) 111 | path = calcBezierPath(control_points).astype(np.int32) 112 | 113 | original_image = image.copy() 114 | cv2.putText(image, text, (0, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255)) 115 | 116 | true_labels = None 117 | if video is None and labels.get(images[current_idx]) is not None: 118 | true_labels = np.array(labels[images[current_idx]]) 119 | 120 | for i in range(len(path) - 1): 121 | cv2.line(image, (path[i, 0], path[i, 1]), (path[i + 1, 0], path[i + 1, 1]), 122 | color=(0, 0, int(0.8 * 255)), thickness=3) 123 | # Show Target point 124 | cv2.circle(image, tuple(target), radius=10, color=(0, 0, int(0.9 * 255)), 125 | thickness=1, lineType=8, shift=0) 126 | 127 | # Draw prediction and label 128 | for i in range(len(x) - 1): 129 | cv2.line(image, (x[i], y[i]), (x[i + 1], y[i + 1]), color=(176, 114, 76), 130 | thickness=3) 131 | if true_labels is not None: 132 | cv2.line(image, (true_labels[i, 0], true_labels[i, 1]), (true_labels[i + 1, 0], true_labels[i + 1, 1]), 133 | color=(104, 168, 85), thickness=3) 134 | cv2.imshow('Prediction', image) 135 | 136 | # Draw prediction on warped image 137 | warped_image = warpImage(original_image) 138 | for i in range(len(points)): 139 | cv2.circle(warped_image, (points[i, 0], points[i, 1]), radius=50, color=(int(0.9 * 255), 0, 0), 140 | thickness=10, lineType=8, shift=0) 141 | for i in range(len(x) - 1): 142 | cv2.line(warped_image, (points[i, 0], points[i, 1]), (points[i + 1, 0], points[i + 1, 1]), color=(176, 114, 76), 143 | thickness=10) 144 | 145 | path = calcBezierPath(points, n_points=10).astype(np.int32) 146 | 147 | cp = transformPoints(x, y) 148 | cp[:, 1] *= -1 149 | current_pos = transformPoints([MAX_WIDTH / 2], [MAX_HEIGHT])[0] 150 | # TODO: compute v in the pixel space 151 | state = State(x=current_pos[0], y=-current_pos[1], yaw=np.radians(90.0), v=10) 152 | cx, cy, cyaw, ck = calcTrajectory(cp, n_points=10) 153 | target_idx, _ = calcTargetIndex(state, cx, cy) 154 | target = (int(cx[target_idx]), - int(cy[target_idx])) 155 | 156 | delta, _, cross_track_error = stanleyControl(state, cx, cy, cyaw, target_idx) 157 | # print(delta, cross_track_error) 158 | 159 | # Target 160 | cv2.circle(warped_image, target, radius=50, color=(0, 0, int(0.9 * 255)), 161 | thickness=10, lineType=8, shift=0) 162 | 163 | # Draw bezier curve 164 | for i in range(len(path) - 1): 165 | cv2.line(warped_image, (path[i, 0], path[i, 1]), (path[i + 1, 0], path[i + 1, 1]), 166 | color=(0, 0, int(0.8 * 255)), thickness=10) 167 | 168 | warped_image = cv2.resize(warped_image, (warped_image.shape[1] // 2, warped_image.shape[0] // 2), 169 | interpolation=cv2.INTER_LINEAR) 170 | cv2.imshow('Warped image', warped_image) 171 | # r = ROI 172 | # im_cropped = image[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])] 173 | # cv2.imshow('Crop', im_cropped) 174 | 175 | key = cv2.waitKey(0) & 0xff 176 | if key in EXIT_KEYS: 177 | cv2.destroyAllWindows() 178 | break 179 | elif key in [LEFT_KEY, RIGHT_KEY]: 180 | current_idx += 1 if key == RIGHT_KEY else -1 181 | current_idx = np.clip(current_idx, 0, n_frames - 1) 182 | if video is not None: 
video.set(image_zero_index, current_idx) 184 | 185 | if video is not None: # pragma: no cover 186 | video.release() 187 | -------------------------------------------------------------------------------- /train/train.py: -------------------------------------------------------------------------------- 1 | """ 2 | Train a neural network to detect a black&white line 3 | """ 4 | from __future__ import print_function, division, absolute_import 5 | 6 | import argparse 7 | import time 8 | 9 | import numpy as np 10 | import torch as th 11 | import torch.utils.data 12 | import torch.nn as nn 13 | 14 | from constants import NUM_OUTPUT, INPUT_WIDTH, INPUT_HEIGHT, N_CHANNELS 15 | from .utils import JsonDataset, loadLabels 16 | from .models import ConvolutionalNetwork, CustomNet, MlpNetwork 17 | 18 | evaluate_print = 1 # Print info every 1 epoch 19 | VAL_BATCH_SIZE = 64 # Batch size for validation and test data 20 | 21 | 22 | def main(folders, num_epochs=100, batchsize=32, 23 | learning_rate=0.0001, seed=42, device=th.device("cpu"), random_flip=0.5, 24 | model_type="custom", evaluate_print=1, saved_model_path=""): 25 | """ 26 | :param folders: ([str]) 27 | :param num_epochs: (int) 28 | :param batchsize: (int) 29 | :param learning_rate: (float) 30 | :param seed: (int) 31 | :param device: (th.device) 32 | :param random_flip: (float) 33 | :param model_type: (str) 34 | :param evaluate_print: (int) 35 | :param saved_model_path: (str) 36 | """ 37 | 38 | train_labels, val_labels, test_labels, _ = loadLabels(folders) 39 | 40 | # Seed the random generator 41 | np.random.seed(seed) 42 | th.manual_seed(seed) 43 | if device.type == "cuda": # device is a th.device, so compare its type, not the object 44 | th.cuda.manual_seed(seed) 45 | 46 | # Retrieve number of samples per set 47 | n_train, n_val, n_test = len(train_labels), len(val_labels), len(test_labels) 48 | 49 | # Keywords for pytorch dataloader 50 | kwargs = {'num_workers': 1, 'pin_memory': False} if device.type == "cuda" else {} 51 | # Create data loaders 52 | train_loader = th.utils.data.DataLoader( 53 | JsonDataset(train_labels, preprocess=True, random_flip=random_flip), 54 | batch_size=batchsize, shuffle=True, **kwargs) 55 | 56 | # Random flip also for val ? 
57 | val_loader = th.utils.data.DataLoader(JsonDataset(val_labels, preprocess=True), 58 | batch_size=VAL_BATCH_SIZE, shuffle=False, **kwargs) 59 | 60 | test_loader = th.utils.data.DataLoader(JsonDataset(test_labels, preprocess=True), 61 | batch_size=VAL_BATCH_SIZE, shuffle=False, **kwargs) 62 | 63 | model_name = "{}_model_tmp".format(model_type) 64 | print("Trained model will be saved as {}".format(model_name)) 65 | 66 | if model_type == "cnn": 67 | model = ConvolutionalNetwork(num_output=NUM_OUTPUT, drop_p=0.0) 68 | elif model_type == "custom": 69 | model = CustomNet(num_output=NUM_OUTPUT) 70 | elif model_type == "mlp": 71 | model = MlpNetwork(INPUT_WIDTH * INPUT_HEIGHT * N_CHANNELS, num_output=NUM_OUTPUT) 72 | else: 73 | raise ValueError("Model type not supported") 74 | 75 | if saved_model_path != "": 76 | print("Loading saved model {}".format(saved_model_path)) 77 | model.load_state_dict(th.load(saved_model_path)) 78 | 79 | model = model.to(device) 80 | 81 | # L2 penalty 82 | # weight_decay = 1e-4 83 | weight_decay = 0 84 | # Optimizers 85 | # optimizer = th.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay) 86 | optimizer = th.optim.SGD(model.parameters(), lr=learning_rate, 87 | momentum=0.9, weight_decay=weight_decay, nesterov=True) 88 | 89 | # Loss functions 90 | loss_fn = nn.MSELoss(size_average=False) 91 | # loss_fn = nn.SmoothL1Loss(size_average=False) 92 | best_error = np.inf 93 | best_model_path = "{}.pth".format(model_name) 94 | 95 | # Finally, launch the training loop. 96 | print("Starting training...") 97 | # We iterate over epochs: 98 | for epoch in range(num_epochs): 99 | # Switch to training mode 100 | model.train() 101 | train_loss, val_loss = 0, 0 102 | start_time = time.time() 103 | # Full pass on training data 104 | # Update the model after each minibatch 105 | for i, (inputs, targets) in enumerate(train_loader): 106 | # Adjust learning rate 107 | # adjustLearningRate(optimizer, epoch, num_epochs, lr_init=learning_rate, 108 | # batch=i, n_batch=len(train_loader), method='multistep') 109 | # Move tensors to gpu if necessary 110 | inputs, targets = inputs.to(device), targets.to(device) 111 | 112 | optimizer.zero_grad() 113 | predictions = model(inputs) 114 | loss = loss_fn(predictions, targets) 115 | loss.backward() 116 | train_loss += loss.item() 117 | optimizer.step() 118 | 119 | # Do a full pass on validation data 120 | model.eval() 121 | val_loss = 0 122 | with th.no_grad(): 123 | for inputs, targets in val_loader: 124 | inputs, targets = inputs.to(device), targets.to(device) 125 | # We don't need to compute gradient 126 | predictions = model(inputs) 127 | loss = loss_fn(predictions, targets) 128 | val_loss += loss.item() 129 | 130 | # Compute error per sample 131 | val_error = val_loss / n_val 132 | 133 | if val_error < best_error: 134 | best_error = val_error 135 | # Move back weights to cpu 136 | # Save Weights 137 | th.save(model.to("cpu").state_dict(), best_model_path) 138 | 139 | model.to(device) 140 | 141 | if (epoch + 1) % evaluate_print == 0: 142 | # Then we print the results for this epoch: 143 | # Losses are averaged over the samples 144 | print("Epoch {} of {} took {:.3f}s".format( 145 | epoch + 1, num_epochs, time.time() - start_time)) 146 | print(" training loss:\t\t{:.6f}".format(train_loss / n_train)) 147 | print(" validation loss:\t\t{:.6f}".format(val_error)) 148 | 149 | # After training, we compute and print the test error: 150 | model.load_state_dict(th.load(best_model_path)) 151 | test_loss = 0 152 | with th.no_grad(): 
153 | for inputs, targets in test_loader: 154 | inputs, targets = inputs.to(device), targets.to(device) 155 | # We don't need to compute gradient 156 | predictions = model(inputs) 157 | loss = loss_fn(predictions, targets) 158 | test_loss += loss.item() 159 | 160 | print("Final results:") 161 | print(" best validation loss:\t\t{:.6f}".format(best_error)) 162 | print(" test loss:\t\t\t{:.6f}".format(test_loss / n_test)) 163 | 164 | 165 | if __name__ == '__main__': 166 | parser = argparse.ArgumentParser(description='Train a line detector') 167 | parser.add_argument('-f', '--folders', help='Training folders', type=str, nargs='+', required=True) 168 | parser.add_argument('--num_epochs', help='Number of epochs', default=50, type=int) 169 | parser.add_argument('-bs', '--batchsize', help='Batch size', default=4, type=int) 170 | parser.add_argument('--seed', help='Random Seed', default=42, type=int) 171 | parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training') 172 | parser.add_argument('--load_model', help='Start from a saved model', default="", type=str) 173 | parser.add_argument('--model_type', help='Model type: {cnn, custom, mlp}', default="custom", 174 | type=str, choices=['cnn', 'custom', 'mlp']) 175 | parser.add_argument('-lr', '--learning_rate', help='Learning rate', default=1e-3, type=float) 176 | args = parser.parse_args() 177 | 178 | args.cuda = not args.no_cuda and th.cuda.is_available() 179 | device = th.device("cuda" if args.cuda else "cpu") 180 | main(folders=args.folders, num_epochs=args.num_epochs, batchsize=args.batchsize, 181 | learning_rate=args.learning_rate, device=device, 182 | seed=args.seed, saved_model_path=args.load_model, model_type=args.model_type) 183 | -------------------------------------------------------------------------------- /train/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division, absolute_import 2 | 3 | import json 4 | 5 | import cv2 6 | import numpy as np 7 | import torch as th 8 | from torch.utils.data import Dataset 9 | from sklearn.model_selection import train_test_split 10 | 11 | from constants import MAX_WIDTH, MAX_HEIGHT, ROI, INPUT_HEIGHT, INPUT_WIDTH, SPLIT_SEED, N_CHANNELS 12 | from .models import ConvolutionalNetwork, CustomNet, MlpNetwork 13 | 14 | 15 | def adjustLearningRate(optimizer, epoch, n_epochs, lr_init, batch, 16 | n_batch, method='cosine', decay_rate=0.7): 17 | """ 18 | :param optimizer: (PyTorch Optimizer object) 19 | :param epoch: (int) 20 | :param n_epochs: (int) 21 | :param lr_init: (float) 22 | :param batch: (int) 23 | :param n_batch: (int) 24 | :param method: (str) one of ("cosine", "multistep") 25 | :param decay_rate: (float) 26 | """ 27 | if method == 'cosine': 28 | T_total = n_epochs * n_batch 29 | T_cur = (epoch % n_epochs) * n_batch + batch 30 | lr = 0.5 * lr_init * (1 + np.cos(np.pi * T_cur / T_total)) 31 | elif method == 'multistep': 32 | lr = lr_init 33 | if epoch >= n_epochs * 0.75: 34 | lr *= decay_rate ** 2 35 | elif epoch >= n_epochs * 0.5: 36 | lr *= decay_rate 37 | # else: 38 | # # Sets the learning rate to the initial LR decayed by 10 every 30 epochs 39 | # lr = lr_init * (0.1 ** (epoch // 30)) 40 | 41 | for param_group in optimizer.param_groups: 42 | param_group['lr'] = lr 43 | 44 | 45 | def predict(model, image): 46 | """ 47 | :param model: (PyTorch Model) 48 | :param image: (numpy tensor) 49 | :return: (numpy array, numpy array) 50 | """ 51 | im = preprocessImage(image, 
INPUT_WIDTH, INPUT_HEIGHT) 52 | # Re-order channels for pytorch 53 | im = im.transpose((2, 0, 1)).astype(np.float32) 54 | with th.no_grad(): 55 | predictions = model(th.from_numpy(im[None]))[0].data.numpy() 56 | x, y = transformPrediction(predictions) 57 | return x, y 58 | 59 | 60 | def loadNetwork(weights, num_output=6, model_type="cnn"): 61 | """ 62 | :param weights: (str) 63 | :param num_output: (int) 64 | :param model_type: (str) 65 | :return: (PyTorch Model) 66 | """ 67 | if model_type == "cnn": 68 | model = ConvolutionalNetwork(num_output=num_output) 69 | elif model_type == "custom": 70 | model = CustomNet(num_output=num_output) 71 | elif model_type == "mlp": 72 | model = MlpNetwork(INPUT_WIDTH * INPUT_HEIGHT * N_CHANNELS, num_output=num_output) 73 | else: raise ValueError("Model type not supported") 74 | model.load_state_dict(th.load(weights)) 75 | model.eval() 76 | return model 77 | 78 | 79 | def preprocessImage(image, width, height): 80 | """ 81 | Preprocessing script to convert image into neural net input array 82 | :param image: (cv2 image) 83 | :param width: (int) 84 | :param height: (int) 85 | :return: (numpy tensor) 86 | """ 87 | # Crop the image 88 | r = ROI 89 | image = image[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])] 90 | # The resizing is a bottleneck in the computation 91 | x = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR) 92 | # Normalize 93 | x = x / 255. 94 | x -= 0.5 95 | x *= 2 96 | return x 97 | 98 | 99 | def transformPrediction(y): 100 | """ 101 | Transform the model output back 102 | to original image space (pixel position) 103 | :param y: (numpy array) 104 | :return: (numpy array, numpy array) 105 | """ 106 | margin_left, margin_top, _, _ = ROI 107 | points = y.flatten() 108 | x = points[::2] 109 | y = points[1::2] 110 | y = (y * MAX_HEIGHT) + margin_top 111 | x = (x * MAX_WIDTH) + margin_left 112 | return x, y 113 | 114 | 115 | def loadLabels(folders): 116 | if not isinstance(folders, list): 117 | folders = [folders] 118 | 119 | labels = {} 120 | images = [] 121 | for folder in folders: 122 | if not folder.endswith('/'): 123 | folder += '/' 124 | tmp_labels = json.load(open(folder + 'labels.json')) 125 | tmp_images = list(tmp_labels.keys()) 126 | tmp_images.sort(key=lambda name: int(name.split('.jpg')[0])) 127 | tmp_labels = {"{}/{}".format(folder, name): tmp_labels[name] for name in tmp_images} 128 | labels.update(tmp_labels) 129 | tmp_images = ["{}/{}".format(folder, name) for name in tmp_images] 130 | images += tmp_images 131 | 132 | # Split the data into three subsets 133 | train_keys, tmp_keys = train_test_split(images, test_size=0.4, random_state=SPLIT_SEED) 134 | val_keys, test_keys = train_test_split(tmp_keys, test_size=0.5, random_state=SPLIT_SEED) 135 | 136 | train_labels = {key: labels[key] for key in train_keys} 137 | val_labels = {key: labels[key] for key in val_keys} 138 | test_labels = {key: labels[key] for key in test_keys} 139 | 140 | print("{} labels".format(len(labels))) 141 | return train_labels, val_labels, test_labels, labels 142 | 143 | 144 | class JsonDataset(Dataset): 145 | """ 146 | :param labels: (dict) 147 | :param preprocess: (bool) 148 | :param random_flip: (float) probability of flipping the image 149 | """ 150 | 151 | def __init__(self, labels, preprocess=False, random_flip=0.0): 152 | self.keys = list(labels.keys()) 153 | self.labels = labels.copy() 154 | self.preprocess = preprocess 155 | self.random_flip = random_flip 156 | 157 | def __getitem__(self, index): 158 | """ 159 | :param index: (int) 160 | :return: (th.Tensor, th.Tensor) 161 | """ 
162 | image = self.keys[index] 163 | margin_left, margin_top = 0, 0 164 | im = cv2.imread(image) 165 | 166 | # Crop the image and normalize it 167 | if self.preprocess: 168 | margin_left, margin_top, _, _ = ROI 169 | im = preprocessImage(im, INPUT_WIDTH, INPUT_HEIGHT) 170 | 171 | # Normalize labels 172 | labels = np.array(self.labels[image]).astype(np.float32) 173 | assert len(labels) == 3, "Error with label of image {}".format(image) 174 | labels[:, 0] = (labels[:, 0] - margin_left) / MAX_WIDTH 175 | labels[:, 1] = (labels[:, 1] - margin_top) / MAX_HEIGHT 176 | 177 | # Randomly flip the image to augment the dataset 178 | if np.random.random() < self.random_flip: 179 | im = cv2.flip(im, 1) 180 | labels[:, 0] = 1 - labels[:, 0] 181 | 182 | # 3 points -> 6 values to predict 183 | y = labels.flatten().astype(np.float32) 184 | 185 | # swap color axis because 186 | # numpy image: H x W x C 187 | # torch image: C X H X W 188 | im = im.transpose((2, 0, 1)).astype(np.float32) 189 | return th.from_numpy(im), th.from_numpy(y) 190 | 191 | def __len__(self): 192 | return len(self.keys) 193 | 194 | 195 | def computeLossWithDataLoader(model, labels, batchsize, shuffle=False): 196 | """ 197 | :param model: (Pytorch Model) 198 | :param labels: (dict) 199 | :param batchsize: (int) 200 | :param shuffle: (bool) 201 | :return: (float) 202 | """ 203 | dataloader = th.utils.data.DataLoader(JsonDataset(labels, preprocess=True), 204 | batch_size=batchsize, shuffle=shuffle) 205 | 206 | loss_fn = th.nn.MSELoss(size_average=True) 207 | total_loss = 0 208 | with th.no_grad(): 209 | for inputs, targets in dataloader: 210 | predictions = model(inputs) 211 | loss = loss_fn(predictions, targets) 212 | total_loss += loss.item() 213 | return total_loss 214 | 215 | 216 | def computeMSE(model, train_labels, val_labels, test_labels, batchsize=32): 217 | """ 218 | Compute Mean Square Error 219 | and print its value for the different sets 220 | :param model: (Pytorch Model) 221 | :param train_labels: (dict) 222 | :param val_labels: (dict) 223 | :param test_labels: (dict) 224 | :param batchsize: (int) 225 | """ 226 | model.eval() 227 | error_train = computeLossWithDataLoader(model, train_labels, batchsize) 228 | error_val = computeLossWithDataLoader(model, val_labels, batchsize) 229 | error_test = computeLossWithDataLoader(model, test_labels, batchsize) 230 | 231 | print('Train error={:.6f}'.format(error_train)) 232 | print('Val error={:.6f}'.format(error_val)) 233 | print('Test error={:.6f}'.format(error_test)) 234 | print('Total error={:.6f}'.format((error_train + error_val + error_test) / 3)) 235 | --------------------------------------------------------------------------------
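To make the coordinate conventions in `JsonDataset.__getitem__` and `transformPrediction` concrete, here is a minimal round-trip sketch (the values for `ROI`, `MAX_WIDTH` and `MAX_HEIGHT` are assumed examples; the real ones live in `constants.py`): labels are shifted by the ROI margins and scaled to roughly [0, 1] for training, and predictions are mapped back to pixel coordinates with the inverse transform.

```python
# Hedged sketch of the label normalization round-trip (assumed constants).
import numpy as np

ROI = (0, 75, 320, 165)           # (margin_left, margin_top, width, height) -- assumed
MAX_WIDTH, MAX_HEIGHT = 320, 240  # assumed camera resolution

margin_left, margin_top = ROI[0], ROI[1]
label = np.array([[160.0, 180.0]])  # one (x, y) point in the original image

# Forward normalization, as in JsonDataset.__getitem__
normalized = np.empty_like(label)
normalized[:, 0] = (label[:, 0] - margin_left) / MAX_WIDTH
normalized[:, 1] = (label[:, 1] - margin_top) / MAX_HEIGHT

# Inverse mapping, as in transformPrediction
x = normalized[:, 0] * MAX_WIDTH + margin_left
y = normalized[:, 1] * MAX_HEIGHT + margin_top
assert np.allclose(np.stack([x, y], axis=1), label)  # round-trip recovers the label
```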