├── .binder
│   └── environment.yml
├── .github
│   └── workflows
│       └── book.yml
├── .gitignore
├── CONTRIBUTORS.md
├── LICENSE
├── README.md
├── book
│   ├── Appendix
│   │   ├── CarlaInstallation.md
│   │   ├── ExerciseSetup.md
│   │   ├── Feedback.md
│   │   └── NextChapters.md
│   ├── CameraCalibration
│   │   ├── Discussion.md
│   │   ├── VanishingPointCameraCalibration.ipynb
│   │   └── images
│   │       ├── Vanishing_point.svg
│   │       ├── car_rpy.svg
│   │       ├── images_raw
│   │       │   ├── p--5-y-0.0-r-0.0.png
│   │       │   ├── p-0-y--10.0-r-0.0.png
│   │       │   ├── p-0-y--5.0-r-0.0.png
│   │       │   ├── p-0-y-0.0-r--20.0.png
│   │       │   ├── p-0-y-0.0-r-0.0.png
│   │       │   ├── p-0-y-0.0-r-20.0.png
│   │       │   ├── p-0-y-10.0-r-0.0.png
│   │       │   ├── p-5-y-0.0-r-0.0.png
│   │       │   ├── trans--2.png
│   │       │   └── trans-2.png
│   │       ├── images_vp
│   │       │   ├── p--5-y-0.png
│   │       │   ├── p-0-y--10.png
│   │       │   ├── p-0-y-0-r--20.png
│   │       │   ├── p-0-y-0-r-20.png
│   │       │   ├── p-0-y-0.png
│   │       │   ├── p-0-y-10.png
│   │       │   ├── p-5-y-0.png
│   │       │   ├── trans--2.png
│   │       │   └── trans-2.png
│   │       ├── test.png
│   │       └── vanishing_point.png
│   ├── Control
│   │   ├── BicycleModel.md
│   │   ├── ControlOverview.md
│   │   ├── Discussion.md
│   │   ├── PID.ipynb
│   │   ├── PurePursuit.md
│   │   ├── images
│   │   │   ├── car_pixabay.svg
│   │   │   ├── control.gif
│   │   │   ├── control_carla.gif
│   │   │   └── pure_pursuit_simple_geometry.png
│   │   └── tikz
│   │       ├── Ackermann
│   │       │   ├── ICR.svg
│   │       │   ├── ICR.tex
│   │       │   ├── ICR_Slip.svg
│   │       │   ├── ICR_Slip.tex
│   │       │   ├── ICR_construction.svg
│   │       │   ├── ICR_construction.tex
│   │       │   ├── WheelAngle.svg
│   │       │   └── WheelAngle.tex
│   │       ├── BicycleModel
│   │       │   ├── BicycleModel.svg
│   │       │   ├── BicycleModel.tex
│   │       │   ├── BicycleModelGeometry.svg
│   │       │   ├── BicycleModelGeometry.tex
│   │       │   ├── BicycleModel_x_y_theta.svg
│   │       │   └── BicycleModel_x_y_theta.tex
│   │       └── PurePursuit
│   │           ├── PurePursuitLawOfSines.svg
│   │           ├── PurePursuitLawOfSines.tex
│   │           ├── PurePursuitWrongDelta.svg
│   │           ├── PurePursuitWrongDelta.tex
│   │           ├── PurePursuit_delta_11p3.svg
│   │           ├── PurePursuit_delta_11p3.tex
│   │           ├── PurePursuit_delta_15.svg
│   │           ├── PurePursuit_delta_15.tex
│   │           ├── PurePursuit_delta_20.svg
│   │           ├── PurePursuit_delta_20.tex
│   │           ├── PurePursuit_delta_25.svg
│   │           └── PurePursuit_delta_25.tex
│   ├── Introduction
│   │   ├── carla_vehicle_lanes.jpg
│   │   └── intro.md
│   ├── LaneDetection
│   │   ├── CameraBasics.ipynb
│   │   ├── Discussion.md
│   │   ├── InversePerspectiveMapping.ipynb
│   │   ├── LaneDetectionOverview.md
│   │   ├── Segmentation.ipynb
│   │   ├── images
│   │   │   ├── Bayer_pattern_on_sensor.svg
│   │   │   ├── Pixel-example.png
│   │   │   ├── carla_lane_ground_truth.svg
│   │   │   ├── carla_lane_image.svg
│   │   │   ├── carla_lane_label.svg
│   │   │   ├── carla_scene.png
│   │   │   ├── ipm.png
│   │   │   ├── photo_sensor.jpeg
│   │   │   ├── surface.png
│   │   │   └── ypr_aircraft.svg
│   │   └── tikz
│   │       ├── camera_projection
│   │       │   ├── CameraProjection.svg
│   │       │   ├── CameraProjection.synctex.gz
│   │       │   └── CameraProjection.tex
│   │       ├── camera_projection_side_view
│   │       │   ├── camera_projection_side_view.svg
│   │       │   └── camera_projection_side_view.tex
│   │       ├── coordinate_systems
│   │       │   ├── coordinate_systems.svg
│   │       │   └── coordinate_systems.tex
│   │       ├── inverse_perspective
│   │       │   ├── inverse_perspective.synctex.gz
│   │       │   └── inverse_perspective.tex
│   │       ├── iso8850
│   │       │   ├── bev_2.jpg
│   │       │   ├── iso8850.png
│   │       │   ├── iso8850.tex
│   │       │   └── iso8850_crop.png
│   │       ├── no_pinhole
│   │       │   ├── no_pinhole.svg
│   │       │   └── no_pinhole.tex
│   │       ├── pinhole_box
│   │       │   ├── pinhole_box.svg
│   │       │   └── pinhole_box.tex
│   │       ├── uv
│   │       │   ├── uv_grid.svg
│   │       │   └── uv_grid.tex
│   │       └── virtual_pinhole_box
│   │           ├── virtual_pinhole_box.svg
│   │           └── virtual_pinhole_box.tex
│   ├── _config.yml
│   ├── _toc.yml
│   ├── car_sketch.png
│   ├── car_sketch_wide.png
│   ├── environment.yml
│   ├── references.bib
│   └── requirements.txt
├── code
│   ├── __init__.py
│   ├── environment.yml
│   ├── exercises
│   │   ├── __init__.py
│   │   ├── camera_calibration
│   │   │   └── calibrated_lane_detector.py
│   │   ├── control
│   │   │   ├── get_target_point.py
│   │   │   └── pure_pursuit.py
│   │   └── lane_detection
│   │       ├── __init__.py
│   │       ├── camera_geometry.py
│   │       ├── lane_detector.py
│   │       └── lane_segmentation.ipynb
│   ├── solutions
│   │   ├── camera_calibration
│   │   │   └── calibrated_lane_detector.py
│   │   ├── control
│   │   │   ├── get_target_point.py
│   │   │   └── pure_pursuit.py
│   │   └── lane_detection
│   │       ├── README.md
│   │       ├── camera_geometry.py
│   │       ├── camera_geometry_numba.py
│   │       ├── collect_data.py
│   │       ├── fastai_model.pth
│   │       ├── lane_detector.py
│   │       └── lane_segmentation.ipynb
│   ├── tests
│   │   ├── __init__.py
│   │   ├── camera_calibration
│   │   │   ├── calibrated_lane_detector.ipynb
│   │   │   └── carla_sim.py
│   │   ├── control
│   │   │   ├── __init__.py
│   │   │   ├── carla_sim.py
│   │   │   ├── clothoid_generator.py
│   │   │   ├── control.gif
│   │   │   ├── control.ipynb
│   │   │   ├── simulation.py
│   │   │   ├── target_point.ipynb
│   │   │   ├── track.py
│   │   │   └── vehicle.py
│   │   └── lane_detection
│   │       ├── __init__.py
│   │       ├── camera_geometry_unit_test.py
│   │       ├── inverse_perspective_mapping.ipynb
│   │       ├── lane_boundary_projection.ipynb
│   │       └── lane_detector.ipynb
│   └── util
│       ├── __init__.py
│       ├── carla_util.py
│       ├── geometry_util.py
│       └── seg_data_util.py
└── data
    ├── Image_yaw_2_pitch_-3.png
    ├── Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set.png
    ├── Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_boundary.txt
    ├── Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_label.png
    ├── Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_trafo.txt
    ├── calibration_video.mp4
    ├── carla_vehicle.png
    ├── carla_vehicle_bg.png
    ├── carla_vehicle_bg_2.png
    ├── cte_yaw_4_no_calib.npy
    ├── cte_yaw_4_perfect_calib.npy
    ├── cte_yaw_4_with_calib.npy
    └── prob_left.npy
/.binder/environment.yml:
--------------------------------------------------------------------------------
1 | name: aad_binder
2 | channels:
3 | - defaults
4 | dependencies:
5 | - matplotlib
6 | - numpy
7 | - ipywidgets
8 |
--------------------------------------------------------------------------------
/.github/workflows/book.yml:
--------------------------------------------------------------------------------
1 | name: deploy-book
2 |
3 | # Only run this when the master branch changes
4 | on:
5 | push:
6 | branches:
7 | - master
8 |
9 |
10 | # This job installs dependencies, builds the book, and pushes it to `gh-pages`
11 | jobs:
12 | deploy-book:
13 | runs-on: "ubuntu-latest"
14 | defaults:
15 | run:
16 | shell: bash -l {0}
17 | steps:
18 | # download aad repository
19 | - uses: actions/checkout@v3
20 |
21 | # Install dependencies
22 | - name: Set up Python 3.8
23 | uses: actions/setup-python@v4
24 | with:
25 | python-version: 3.8
26 |
27 | - name: Install dependencies
28 | run: |
29 | pip install -r book/requirements.txt
30 |
31 | # Build the book
32 | - name: Build the book
33 | run: |
34 | jupyter-book build book/
35 |
36 | # Push the book's HTML to github-pages
37 | - name: GitHub Pages action
38 | uses: peaceiris/actions-gh-pages@v3
39 | with:
40 | github_token: ${{ secrets.GITHUB_TOKEN }}
41 | publish_dir: book/_build/html
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | untracked
2 | *.rar
3 | *.pdf
4 | __pycache__
5 | .ipynb_checkpoints
6 | debug
7 | .vscode
8 | literature
9 | mpc-tools-casadi
10 | _build
11 | MPC.ipynb
--------------------------------------------------------------------------------
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 | The initial version of this book, which contained the chapters on lane detection and control, was written by [Mario Theers](https://github.com/thomasfermi).
3 | The chapter on camera calibration was written by [Mankaran Singh](https://github.com/MankaranSingh), with minor contributions from [Mario Theers](https://github.com/thomasfermi).
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This work is licensed under a Creative Commons Attribution 4.0 International License.
2 | You can find the license here:
3 | https://creativecommons.org/licenses/by/4.0/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![DOI](https://zenodo.org/badge/314818745.svg)](https://zenodo.org/badge/latestdoi/314818745)
2 |
3 | Algorithms for Automated Driving
4 | ============================
5 |
6 | 
7 |
8 | Each chapter of this (mini-)book guides you in programming one important software component for automated driving.
9 | Currently, this book contains three chapters: **Lane Detection**, **Control** and **Camera Calibration**. You will implement software that
10 | * detects lane boundaries from a camera image using deep learning
11 | * controls the steering wheel and throttle to keep the vehicle within the detected lane at the desired speed
12 | * determines how the camera is positioned and oriented with respect to the vehicle (a prerequisite to properly join the lane detection and the control module)
13 |
14 | The software you will write is in python, and you will apply it in the [open-source driving simulator CARLA](https://carla.org/). Ideally, your computer is powerful enough to run CARLA, but if it is not, you can still work through the exercises: the exercise on control comes with a simplistic simulator of its own. We recommend working through the chapters in order, but if you want to, you can read the **Control** chapter before the **Lane Detection** chapter.
15 |
16 | To work through this book, you
17 | * should understand the following math and physics concepts: derivative, integral, trigonometry, sine/cosine of an angle, matrix, vector, coordinate system, velocity, acceleration, angular velocity, cross product, rotation matrix
18 | * should be familiar with programming in python. In particular, you should be comfortable with multidimensional arrays in numpy. You do **not** need a powerful computer (see [Exercise Setup](https://thomasfermi.github.io/Algorithms-for-Automated-Driving/Appendix/ExerciseSetup.html))
19 | * need to know what supervised learning is, and how to train a neural network with a deep learning framework like pytorch, fastai, tensorflow, keras, or something similar. This prerequisite is only necessary for the chapter on lane detection. If you do not fulfill it, you can skip this chapter, or study one of the [courses I recommend](https://thomasfermi.github.io/Algorithms-for-Automated-Driving/LaneDetection/Segmentation.html) and then come back here.
20 |
21 | If you find a bug in the exercise code or some confusing explanations in the book, please [raise an issue on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving). If you have questions about the material or want to connect with me or other students, please use [github discussions](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions). Once you finish the book or decide to stop working through it, please consider giving me some feedback by filling out [this questionnaire](https://forms.gle/TioqZiUsB5e5wSVG7) (If you open the link in your browser's incognito mode, the questionnaire should be anonymous).
22 |
23 | ## Read the book!
24 | Please follow this [link](https://thomasfermi.github.io/Algorithms-for-Automated-Driving/Introduction/intro.html)!
25 |
26 | ## Discord
27 | As of 2021, we have a discord server 🥳. Please follow this [link](https://discord.gg/57YEzkCFHN) to join the community!
28 |
29 | ## Help wanted!
30 | Are you interested in contributing to the book by adding a new chapter? Or do you have other ideas for improvements? Please let us know by joining the discussion [on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions/4)!
31 |
--------------------------------------------------------------------------------
/book/Appendix/CarlaInstallation.md:
--------------------------------------------------------------------------------
1 | # Carla Installation
2 | For some parts of this course you **can** use the Carla simulator.
3 | It is most convenient to install Carla on your local machine. However, your machine might not be powerful enough, since Carla is quite resource-hungry. If you find that Carla is not running well on your machine, you can try running it on Colab. Personally, I found running Carla through Colab to be an unpleasant experience.
4 |
5 | ````{tab} Local installation
6 | You can get Carla at [the Carla github repo](https://github.com/carla-simulator/carla/blob/master/Docs/download.md). Download version 0.9.10 (or a newer version, provided it introduces no breaking API changes) and move it to a location where you want to keep it.
7 | The Carla simulation can be controlled via a python API. In your Carla folder you will find a subfolder `PythonAPI` which contains the python package as well as some examples.
8 | I recommend that you use an anaconda environment called `aad` for this course (see [Exercise Setup](ExerciseSetup.md)). An easy way to *install* the carla python package into your anaconda environment is the following:
9 | * Go to your anaconda installation folder and then into the `site-packages` subfolder of your environment. The path may be something like `~/anaconda3/envs/aad/lib/pythonX.X/site-packages/` or `C:\Users\{YOUR_USERNAME}\anaconda3\envs\aad\Lib\site-packages`
10 | * Create a file `carla.pth` and open it with a text editor
11 | * Paste in the path to the carla egg file, then save. The carla egg file is located in `{PATH_TO_YOUR_CARLA_FOLDER}/PythonAPI/carla/dist/`. Hence, I pasted `C:\Users\mario\Documents\Carla_0910\PythonAPI\carla\dist\carla-0.9.10-py3.7-win-amd64.egg` into the `carla.pth` file.
12 | Do not move the Carla folder afterwards, since that would break this link and hence your *installation* of the carla package.
13 | ````
14 |
15 | ````{tab} Running on Colab
16 | If you want to run Carla on [Google Colab](https://colab.research.google.com/), check out [Michael Bossello's carla-colab repository](https://github.com/MichaelBosello/carla-colab). When you follow this link, you will see a nice image of the Carla simulator with a button "Open in Colab" above it. Click that button, then go through the notebook step by step and follow the instructions. Note that if you move your python code to the remote machine and execute it, the `import carla` statements will not work. Add the following lines before the `import carla` statement:
17 | ```python
18 | import sys
19 | sys.path.append("/home/colab/carla/PythonAPI/carla/dist/")
20 | ```
21 | This will let python know where to look for the carla python package.
22 | ````
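Either way, you can check that the carla package is found with a quick sanity check (the printed path depends on your installation):

```python
import carla
print(carla.__file__)  # should point into your Carla folder (local) or the appended path (Colab)
```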
--------------------------------------------------------------------------------
/book/Appendix/ExerciseSetup.md:
--------------------------------------------------------------------------------
1 | # Exercise Setup
2 |
3 | You can work on the exercises on your local machine, or in the cloud using Google Colab. Depending on your choice, please select the corresponding tab in the following tutorial. If you know how to work with [anaconda](https://www.anaconda.com/products/individual) and are ok with an anaconda environment taking more than 1GB of disk space on your machine, I recommend using your local machine. However, there is one deep learning exercise where you might temporarily want to switch to Colab if you do not own a GPU.
4 |
5 | ## Downloading the exercises
6 |
7 | If you know how [git](https://git-scm.com/) works, please clone this [book's github repo](https://github.com/thomasfermi/Algorithms-for-Automated-Driving).
8 | ```bash
9 | git clone https://github.com/thomasfermi/Algorithms-for-Automated-Driving.git
10 | ```
11 | Otherwise visit this [book's github repo](https://github.com/thomasfermi/Algorithms-for-Automated-Driving) and click on the green button that says "Code". In the pop-up menu, please select "Download zip". Extract the zip to a directory of your choice.
12 |
13 | ````{tab} Local installation
14 | Nothing more to do.
15 | ````
16 |
17 | ````{tab} Google Colab
18 | Open [Google Drive](https://drive.google.com/drive/my-drive). In the top left navigation you can see "My Drive". Right click "My Drive" and select "New folder". Name this folder "aad". You will see the folder appear. Double-click it. Now open a file explorer on your computer and navigate to the folder "Algorithms-for-Automated-Driving" that you have downloaded from github. Select all folders except the "book" folder and drag and drop them into the empty "aad" folder in your Google Drive.
19 | ````
20 |
21 |
22 |
23 | ## Python environment
24 |
25 |
26 | `````{tab} Local installation
27 | If you do not have anaconda, please [download and install it](https://www.anaconda.com/products/individual).
28 | Please create a conda environment called `aad` (Algorithms for Automated Driving) for this course using the environment.yml file within "Algorithms-for-Automated-Driving/code"
29 | ````bash
30 | cd Algorithms-for-Automated-Driving/code
31 | conda env create -f environment.yml
32 | ````
33 |
34 | ````{admonition} Tip: Use mamba!
35 | :class: tip, dropdown
36 | You may find that creating a conda environment takes a lot of time. I recommend to install mamba:
37 | ```bash
38 | conda install mamba -n base -c conda-forge
39 | ```
40 | Installing mamba takes some time, but afterwards setting up environments like the one for this book is way faster. Just write `mamba` instead of `conda`:
41 | ```bash
42 | mamba env create -f environment.yml
43 | ```
44 | ````
45 |
46 | Be sure to activate that environment to work with it
47 | ```bash
48 | conda activate aad
49 | ```
50 | If you are working on Windows, consider [adding anaconda to your PowerShell](https://www.scivision.dev/conda-powershell-python/).
51 | `````
52 |
53 |
54 | `````{tab} Google Colab
55 | When you run code in Google Colab, you will have most of the libraries you need already installed. Just import whatever you need. If it is missing, you will get an error message that explains how to install it.
56 | `````
57 |
58 |
59 | ## Navigating the exercises
60 |
61 | Within the `Algorithms-for-Automated-Driving` folder you will find a subfolder `book` containing the source files from which this book is built (not too interesting for you right now; you can even delete it if you want), a folder `data`, and a folder `code`. Within the `code` folder you have subfolders `exercises`, `solutions`, `tests`, and `util`. You will complete exercises by writing code in the `exercises` folder and testing it with code from the `tests` folder. You should *not* look into the `solutions` directory unless you are desperate and really can't solve an exercise on your own.
62 |
63 |
64 | ````{tab} Local installation
65 | To edit the source code, I recommend using [Visual Studio Code](https://code.visualstudio.com/), since it has nice integration for jupyter notebooks. You can open the `code` folder with Visual Studio Code and then easily navigate between the `tests` and the `exercises`. An alternative to Visual Studio Code is JupyterLab, which you can start from a terminal:
66 | ```bash
67 | conda activate aad
68 | cd Algorithms-for-Automated-Driving
69 | jupyter lab
70 | ```
71 | In the book's exercise sections, I typically tell you to start working on the exercise by opening some jupyter notebook (.ipynb file).
72 | When you open the .ipynb file with VS Code, be sure to select the "aad" conda environment as your python kernel.
73 | Once you have opened the notebook, read through it cell by cell. Execute each cell by pressing ctrl+enter. Typically the first section of the notebook is for setting up Google Colab; it won't do anything on your machine. You can also delete these Colab-specific cells if you want.
74 | ````
75 |
76 | ````{tab} Google Colab
77 | In the book's exercise sections, I typically tell you to start working on the exercise by opening some jupyter notebook (.ipynb file).
78 | Open [Google Drive](https://drive.google.com/drive/my-drive) and navigate to the .ipynb file specified in the book. Double-click the .ipynb file and then at the very top select "Open with Google Colaboratory". If you do not see this option, click "Connect more apps" and search for "colab". Once you have opened the notebook, read through it cell by cell. Execute each cell, either by pressing ctrl+enter or by clicking the run button on the cell. The first few cells will mount your Google Drive in Colab. Once you have completed this, you can click on the folder icon in the left navigation, and then for example on "drive", "My Drive", "aad", "code", "exercises", "lane_detection", "camera_geometry.py". This way you can work on python scripts. Be sure to press ctrl+s to save your work; it will be synchronized with your Google Drive.
79 | ````
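For reference, the Colab-setup cells at the top of the exercise notebooks do something along the following lines (a sketch, not the exact notebook code; the `aad` path assumes the Drive layout described above):

```python
from google.colab import drive
drive.mount('/content/drive')  # authorize Colab to access your Google Drive

import sys
sys.path.append('/content/drive/My Drive/aad')  # make the exercise code importable
```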
80 |
81 | ## Getting help
82 | If you have a question about the exercises, feel free to ask it on [github discussions](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions) or on the [discord server](https://discord.gg/57YEzkCFHN).
--------------------------------------------------------------------------------
/book/Appendix/Feedback.md:
--------------------------------------------------------------------------------
1 | # Feedback
2 |
3 | If you find bugs or errors you can [raise an issue on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving). For questions, please use [github discussions](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions) or the [discord server](https://discord.gg/57YEzkCFHN).
4 |
5 | Also: I would be grateful if you would fill out this [feedback form](https://forms.gle/TioqZiUsB5e5wSVG7).
6 |
7 |
--------------------------------------------------------------------------------
/book/Appendix/NextChapters.md:
--------------------------------------------------------------------------------
1 | # Future Chapters
2 |
3 | I would like to add the following chapters to this book in the future:
4 |
5 | * **Model Predictive Control** The pure pursuit controller does not work well when driving curves at high speeds. In this case the assumption of zero slip for the kinematic bicycle model does not apply. In this chapter we will design a model predictive controller based on the dynamic bicycle model, which accounts for nonzero side slip angles.
6 | * **HD Map Localization** Carla has a very nice API to access a high definition (HD) map of the road. How can we use our detected lane boundaries, a GPS sensor, a yaw rate sensor, and a speedometer to estimate our position on the HD map? This is relevant for navigation, and can also be used for improved vehicle control.
7 |
8 | If you have some additional wishes for future chapters, please raise an issue on the [book's github repo](https://github.com/thomasfermi/Algorithms-for-Automated-Driving). If you want to motivate me to continue working on this book, please star the [book's github repo](https://github.com/thomasfermi/Algorithms-for-Automated-Driving) 😉.
9 |
10 | ```{admonition} Help wanted!
11 | :class: dropdown
12 | Are you interested in contributing to the book by adding a new chapter? Or do you have other ideas for improvements? Please let me know by joining the discussion [on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions/4)!
13 | ```
14 |
--------------------------------------------------------------------------------
/book/CameraCalibration/Discussion.md:
--------------------------------------------------------------------------------
1 |
2 | # Discussion
3 |
4 | ## Limitations
5 |
6 | The method we presented simply assumed that the roll is zero. Also, we did not estimate the height $h$ of the camera. In the real world you could measure the height with a tape measure and would probably only make an error of around 5 percent. Assuming zero roll does not seem to cause practical problems: the same assumption is made in the [source code](https://github.com/commaai/openpilot/blob/d74def61f88937302f7423eea67895d5f4c596b5/selfdrive/locationd/calibrationd.py#L5) of openpilot, which is known to perform really well. As a bonus exercise you can run experiments with `code/tests/camera_calibration/carla_sim.py` in which you change the roll of the camera or slightly modify the height, and investigate how this affects the control of the vehicle. Regarding estimation of height and roll, we also recommend having a look at [this paper](https://arxiv.org/abs/2008.03722).
7 |
8 | Another limitation: the method we discussed in this chapter only works if your autonomous-vehicle software stack detects lanes in image space and if it is used in areas with good lane markings. But what if your software doesn't predict lanes in image space? Maybe it predicts lanes in world space like [openpilot](https://github.com/commaai/openpilot), or maybe it doesn't predict lanes at all and makes predictions [end-to-end](https://developer.nvidia.com/blog/deep-learning-self-driving-cars/). In either case, this method of camera calibration isn't going to work. As an alternative, we can use visual odometry (VO) based camera calibration.
9 |
10 |
11 |
12 | ## Alternative: VO-based camera calibration
13 |
14 | In this approach, visual odometry is performed to find the motion of the camera. As before, the calibration needs to be performed while the car is aligned with the lane, i.e., while it is going straight and fast. Since the output of VO is the motion of the camera, this information can be used to find the orientation of the camera with respect to the car. See how openpilot does this in its [calibrationd](https://github.com/commaai/openpilot/blob/master/selfdrive/locationd/calibrationd.py#L148) module. The vehicle forward axis is more or less identical to the direction of the translation vector, since the vehicle is driving straight. And knowing the vehicle forward axis in the camera reference frame means that you can estimate how the optical axis (the z-axis) of the camera is tilted with respect to the vehicle forward direction. Hence you get the extrinsic rotation matrix! However, you again need to assume that the roll is zero. To get started with visual odometry, the fastest route is [PySlam](https://github.com/luigifreda/pyslam). It offers many methods for visual odometry, including novel approaches based on deep learning.
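To make that last step concrete, here is a minimal sketch, assuming VO has already produced a translation direction `t` expressed in the camera frame (x right, y down, z along the optical axis); the function name and sign conventions are mine, not openpilot's:

```python
import numpy as np

def pitch_yaw_from_translation(t):
    """Estimate camera pitch and yaw w.r.t. the vehicle forward axis.

    t: translation direction from visual odometry, in the camera frame
       (x right, y down, z = optical axis). Assumes the car is driving
       straight, so t equals the vehicle forward axis, and that roll is zero.
    """
    t = np.asarray(t, dtype=float)
    t = t / np.linalg.norm(t)      # work with a unit vector
    yaw = np.arctan2(t[0], t[2])   # sideways tilt of the optical axis
    pitch = np.arcsin(t[1])        # downward tilt (y points down)
    return pitch, yaw
```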
15 |
16 | ## Further reading
17 |
18 | - A great paper to get started with more advanced methods is Ref. {cite}`lee2020online`: [Online Extrinsic Camera Calibration for Temporally Consistent IPM Using Lane Boundary Observations with a Lane Width Prior](https://arxiv.org/abs/2008.03722). This paper also discusses estimation of roll and height.
19 | - Visual Odometry: [PySlam](https://github.com/luigifreda/pyslam) and the resources mentioned in the repo.
20 | - VO [Blog post](http://avisingh599.github.io/vision/monocular-vo/) by Avi Singh.
21 | - [Minimal python](https://github.com/yoshimasa1700/mono_vo_python/) implementation of VO.
22 | - [VO Lectures with exercises](https://web.archive.org/web/20200709104300/http://rpg.ifi.uzh.ch/teaching.html) by David Scaramuzza
23 |
24 |
25 | ## References
26 | The formalism of how to compute the camera orientation from the vanishing point was adapted from Ref. {cite}`ShiCourseraCalibration`. The idea to use lane boundaries to determine the vanishing point can be found in the paper by Lee et al. {cite}`lee2020online` and within the references of that paper.
27 |
28 | ```{bibliography}
29 | :filter: docname in docnames
30 | ```
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p--5-y-0.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p--5-y-0.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y--10.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y--10.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y--5.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y--5.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y-0.0-r--20.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y-0.0-r--20.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y-0.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y-0.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y-0.0-r-20.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y-0.0-r-20.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-0-y-10.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-0-y-10.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/p-5-y-0.0-r-0.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/p-5-y-0.0-r-0.0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/trans--2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/trans--2.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_raw/trans-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_raw/trans-2.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p--5-y-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p--5-y-0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-0-y--10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-0-y--10.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-0-y-0-r--20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-0-y-0-r--20.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-0-y-0-r-20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-0-y-0-r-20.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-0-y-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-0-y-0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-0-y-10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-0-y-10.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/p-5-y-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/p-5-y-0.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/trans--2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/trans--2.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/images_vp/trans-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/images_vp/trans-2.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/test.png
--------------------------------------------------------------------------------
/book/CameraCalibration/images/vanishing_point.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/CameraCalibration/images/vanishing_point.png
--------------------------------------------------------------------------------
/book/Control/ControlOverview.md:
--------------------------------------------------------------------------------
1 | Overview
2 | ============================
3 |
4 | ```{figure} images/control_carla.gif
5 | ---
6 | scale: 100%
7 | name: ControlCarla
8 | ---
9 | Vehicle being controlled by pure pursuit and PID.
10 | ```
11 |
12 | In this module, we are going to control a vehicle in the Carla simulator.
13 | Our algorithm's input will be the current vehicle speed, as well as the desired speed and desired trajectory.
14 | The algorithm's output will be the actuator signals: gas pedal and steering wheel.
15 |
16 | Our approach will be a PID controller for the gas pedal (longitudinal control) and a method called pure pursuit for steering (lateral control).
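To give a first taste of the longitudinal part, here is a minimal discrete PID update of the kind we will develop (an illustrative sketch, not the exercise code; the gains are placeholders):

```python
class PID:
    """Minimal discrete PID controller (illustrative; gains are placeholders)."""

    def __init__(self, Kp, Ki, Kd, set_point):
        self.Kp, self.Ki, self.Kd = Kp, Ki, Kd
        self.set_point = set_point   # e.g. the desired speed
        self.int_term = 0.0          # accumulated (integrated) error
        self.last_error = None

    def step(self, measurement, dt):
        """Return the actuator command (e.g. throttle) for one time step."""
        error = self.set_point - measurement
        self.int_term += error * dt
        deriv = 0.0 if self.last_error is None else (error - self.last_error) / dt
        self.last_error = error
        return self.Kp * error + self.Ki * self.int_term + self.Kd * deriv
```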
17 |
18 | We will begin by learning about [PID control](./PID.ipynb). Subsequently, we introduce a mathematical model that describes how the vehicle will move as a function of the steering wheel angle, the so-called [Kinematic Bicycle Model](./BicycleModel.md). Using that model we introduce the [Pure Pursuit](./PurePursuit.md) method for lateral control. In the final exercise, you will implement what you learned to control a vehicle in Carla. If your computer cannot run Carla, don't worry: you can still use the simplistic simulator I created for this course.
19 |
20 | ```{note}
21 | You can work through this chapter even if you did not work through the chapter on Lane Detection at all. However, you will miss out on the fun of piping together your lane-detection module with your control module.
22 | ```
23 |
24 | ```{tip}
25 | What is covered in this chapter is somewhat close to the content of weeks 4-7 in the [Coursera course "Introduction to Self-Driving Cars"](https://www.coursera.org/learn/intro-self-driving-cars). If you like, you can audit that course for free on Coursera and watch the videos.
26 | ```
--------------------------------------------------------------------------------
/book/Control/Discussion.md:
--------------------------------------------------------------------------------
1 | # Discussion
2 |
3 | ## Limitations and outlook
4 | The lateral control we implemented in this chapter is a good starting point, but real highway lane-keeping systems are more sophisticated.
5 | In the Carla simulations you might have observed that the vehicle is sliding/drifting when driving curves at high speed.
6 | In this case, we are outside the validity regime of the kinematic bicycle model, which is the basis of our pure pursuit controller. Hence, it is no wonder that our lateral control performs suboptimally here.
7 |
8 | Control algorithms based on the **dynamic** bicycle model are more promising in this situation. You can learn about the dynamic bicycle model in the [lectures on Vehicle Dynamics and Control by Prof. Georg Schildbach on youtube](https://www.youtube.com/playlist?list=PLW3FM5Kyc2_4PGkumkAHNXzWtgHhaYe1d). A very sophisticated control method that can work with the dynamic bicycle model is Model Predictive Control (MPC), for which I recommend [this youtube playlist by MathWorks](https://www.youtube.com/playlist?list=PLn8PRpmsu08ozoeoXgxPSBKLyd4YEHww8) as an introduction. I plan to write a [chapter on MPC in the future](../Appendix/NextChapters.md).
9 |
10 | Finally, I recommend Ref. {cite}`snider2009automatic`. It discusses several control methods for automated driving and has a nice empirical performance comparison table at the end of section 5.
11 |
12 |
13 | ## Control in a real ADAS system: openpilot
14 | ```{margin}
15 | Take my discussion of openpilot with a grain of salt here. Their documentation is very limited, so my discussion is based on what I could piece together by reading their code.
16 | ```
17 | It is interesting to look at a real-world control system for automated driving: [openpilot](https://github.com/commaai/openpilot/).
18 | The relevant code is available on [github](https://github.com/commaai/openpilot/) in `openpilot/selfdrive/controls/`.
19 | As you can see [here](https://github.com/commaai/openpilot/blob/master/selfdrive/controls/lib/vehicle_model.py), the software is using the dynamic bicycle model. Model Predictive Control is used to [plan](https://github.com/commaai/openpilot/blob/059cf6b43e579b8634090a0ecac4fb1c6c7a205e/selfdrive/controls/plannerd.py) a trajectory.
20 | Lateral control is based on this plan, and depending on the vehicle, a different algorithm is used.
21 | You will find lateral control with PID, [LQR](https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator) or [INDI](http://www.aerostudents.com/courses/advanced-flight-control/nonlinearDynamicInversion.pdf) in the [source code](https://github.com/commaai/openpilot/blob/ee99b59bade8d3b5057a5b3f22ad8b3edd102c78/selfdrive/controls/controlsd.py). [Longitudinal control](https://github.com/commaai/openpilot/blob/254814cc793dc4668ea9fd25f092b0712fb5b8a0/selfdrive/controls/lib/longcontrol.py) is done using a [PIController](https://github.com/commaai/openpilot/blob/f575a9ec12990ac2a764a5f416795d1c618f4609/selfdrive/controls/lib/pid.py).
22 |
23 | ## References
24 |
25 | ```{bibliography}
26 | :filter: docname in docnames
27 | ```
--------------------------------------------------------------------------------
/book/Control/PurePursuit.md:
--------------------------------------------------------------------------------
1 | Pure Pursuit
2 | ============================
3 |
4 | ## Algorithm
5 |
6 | In this section we want to control the front wheel angle $\delta$, such that the vehicle follows a given path. This is known as **lateral vehicle control**.
7 | In the pure pursuit method a target point (TP) on the desired path is identified, which is a **look-ahead distance** $l_d$ away from the vehicle. The angle $\delta$ is chosen such that the vehicle will reach the target point according to the kinematic bicycle model.
8 | The look-ahead distance is a parameter, and is typically chosen to depend on the speed $v$ via $l_d = K_{dd} v$, where the constant $K_{dd}$ needs to be tuned. We can also enforce a minimal and maximal look-ahead distance, so as to avoid undesirable behavior at very high and very low speeds.
9 |
10 | Let us draw the bicycle model and a given path we should follow. We also draw a circle of radius $l_d$ around the center of the rear wheel. The intersection of this circle with the path is our target point TP. According to the kinematic bicycle model, the vehicle will move along the orange arc, which is determined by the front wheel angle $\delta$. We want to choose $\delta$, such that the orange vehicle trajectory will move to the target point.
11 |
12 | ```{figure} tikz/PurePursuit/PurePursuitWrongDelta.svg
13 | ---
14 | width: 70%
15 | name: PurePursuitWrongDelta
16 | ---
17 | Bicycle model should follow a path. With the current front wheel angle $\delta$, it will not reach the target point TP.
18 | ```
19 |
20 | Since the above drawing is generated programmatically (using [tikz](https://en.wikipedia.org/wiki/PGF/TikZ)), we can change the value of $\delta$ until the vehicle trajectory goes through the target point:
21 |
22 | ````{tab} δ = 25°
23 | ```{figure} tikz/PurePursuit/PurePursuit_delta_25.svg
24 | ---
25 | width: 90%
26 | name: PurePursuit_delta_25
27 | ---
28 | ```
29 | ````
30 |
31 | ````{tab} δ = 20°
32 | ```{figure} tikz/PurePursuit/PurePursuit_delta_20.svg
33 | ---
34 | width: 90%
35 | name: PurePursuit_delta_20
36 | ---
37 | ```
38 | ````
39 |
40 | ````{tab} δ = 15°
41 | ```{figure} tikz/PurePursuit/PurePursuit_delta_15.svg
42 | ---
43 | width: 90%
44 | name: PurePursuit_delta_15
45 | ---
46 | ```
47 | ````
48 |
49 | ````{tab} δ = 11.3°
50 | ```{figure} tikz/PurePursuit/PurePursuit_delta_11p3.svg
51 | ---
52 | width: 90%
53 | name: PurePursuit_delta_11p3
54 | ---
55 | ```
56 | ````
57 |
58 | But there is a more elegant solution than just trying out a bunch of different $\delta$ values. We can actually compute the optimal $\delta$ based on the magenta triangle in the sketch below:
59 |
60 | ```{figure} tikz/PurePursuit/PurePursuitLawOfSines.svg
61 | ---
62 | width: 90%
63 | name: PurePursuitLawOfSines
64 | ---
65 | The magenta triangle helps us to establish a formula for $\delta$.
66 | ```
67 |
68 | First, we note that the distance from the instantaneous center of rotation (ICR) to the target point (TP) is equal to $R$, since TP lies on the orange circle of radius $R$ around ICR. Hence, the magenta triangle is [isosceles](https://en.wikipedia.org/wiki/Isosceles_triangle) and $\gamma_2=\gamma_3$. From the figure we can see that $\gamma_3+\alpha=90°$. Hence $\gamma_2=\gamma_3=90°-\alpha$. Since the sum of all angles in a triangle equals $180°$, we have
69 |
70 | $$180°=\gamma_1+\gamma_2+\gamma_3 = \gamma_1 + (90°-\alpha) + (90°-\alpha)$$
71 |
72 | which yields $\gamma_1=2\alpha$. According to the [law of sines](https://en.wikipedia.org/wiki/Law_of_sines)
73 |
74 | $$ \frac{l_d}{\sin(\gamma_1)} = \frac{R}{\sin(\gamma_2)} $$
75 |
76 | Here, we used that the distance between the rear wheel and the target point TP is $l_d$. If we substitute $\gamma_1=2\alpha$ and $\gamma_2=90°-\alpha$ into the above formula, we obtain
77 |
78 | $$ \frac{l_d}{\sin(2 \alpha)} = \frac{R}{\sin(90° - \alpha)} $$
79 |
80 | Due to the [trigonometric addition formulas](https://mathworld.wolfram.com/TrigonometricAdditionFormulas.html), we have $\sin(90° - \alpha) = \cos(\alpha)$ and $\sin(2\alpha)=\sin(\alpha+\alpha)=2\sin(\alpha) \cos(\alpha)$. Hence, we can further simplify the formula above to find
81 |
82 | $$ \frac{l_d}{2\sin(\alpha) \cos(\alpha)} = \frac{R}{\cos(\alpha)} $$
83 |
84 | which yields $R=l_d/(2 \sin(\alpha))$. For the kinematic bicycle model we have previously derived a formula for the wheel angle $\delta$ as a function of $R$. It was $\delta = \arctan(L/R)$, where $L$ is the wheel base, i.e., the distance between the front and the rear axle. Combining this with the newly found formula for $R$ we finally obtain
85 |
86 | $$ \delta = \arctan \left(\frac{2 L \sin(\alpha)}{l_d}\right) $$ (eq-pp)
87 |
88 | This is the angle $\delta$ we need to pick to reach the target point! We can write down the pure pursuit algorithm now:
89 |
90 | ```{admonition} Pure pursuit algorithm
91 | For each instant in time:
92 | * Compute the look ahead distance $l_d$ as `l_d = np.clip(K_dd * speed, min_ld, max_ld)`. The function `np.clip` is documented [here](https://numpy.org/doc/stable/reference/generated/numpy.clip.html). `K_dd`, `min_ld`, and `max_ld` are parameters that you can tune.
93 | * Find the target point TP as the intersection of the desired path with a circle of radius $l_d$ around the rear wheel.
94 | * Using the target point coordinates `(x_tp,y_tp)`, determine $\alpha$ as `alpha=arctan2(y_tp,x_tp)`
95 | * Use equation {eq}`eq-pp` to compute the pure pursuit front wheel angle $\delta$
96 | * **Act**: Turn your steering wheel to set the front wheel angle to $\delta$
97 | ```
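Putting these steps into code, a minimal sketch of the steering computation could look as follows (the parameter values are made up, and the exercise's `pure_pursuit.py` may structure this differently):

```python
import numpy as np

# Placeholder parameters; K_dd, min_ld, max_ld must be tuned for your vehicle.
K_DD, MIN_LD, MAX_LD = 0.4, 3.0, 20.0
WHEEL_BASE = 2.9  # L in meters (made-up value)

def pure_pursuit_steering(speed, x_tp, y_tp):
    """Front wheel angle delta for a target point (x_tp, y_tp) given in the
    rear-axle frame (x forward, y left), following the algorithm above."""
    l_d = np.clip(K_DD * speed, MIN_LD, MAX_LD)
    alpha = np.arctan2(y_tp, x_tp)
    return np.arctan(2 * WHEEL_BASE * np.sin(alpha) / l_d)
```

A usage example would be `delta = pure_pursuit_steering(speed, *target_point)`, whose result is then sent to the steering actuator.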
98 |
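Finding the target point itself amounts to intersecting the circle of radius $l_d$ around the rear axle with the path. Here is a sketch for a single path segment, assuming coordinates in the rear-axle frame (an illustration, not the exercise's `get_target_point`):

```python
import numpy as np

def get_target_point_candidate(l_d, p1, p2):
    """Intersect the circle of radius l_d around the origin (rear axle)
    with the path segment p1 -> p2 (numpy arrays, rear-axle frame).
    Returns an intersection point ahead of the vehicle, or None."""
    d = p2 - p1                      # segment direction
    a = d @ d
    b = 2.0 * (p1 @ d)
    c = p1 @ p1 - l_d ** 2
    disc = b ** 2 - 4.0 * a * c      # discriminant of |p1 + t*d| = l_d
    if disc < 0:
        return None                  # the segment's line misses the circle
    for t in sorted(((-b - np.sqrt(disc)) / (2 * a), (-b + np.sqrt(disc)) / (2 * a))):
        if 0.0 <= t <= 1.0:          # intersection lies on the segment
            point = p1 + t * d
            if point[0] > 0:         # keep only points in front of the rear axle
                return point
    return None
```

In practice you would loop over consecutive waypoints of the path and take the first hit.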
99 |
100 | ## Exercise
101 | In this exercise you will implement both pure pursuit and PID.
102 |
103 | If you did not do the chapter on lane detection, you probably did not set up your python environment, and you did not download the exercise code. In this case, please visit [the appendix](../Appendix/ExerciseSetup.md) to do this now.
104 |
105 | To start working, open `code/tests/control/target_point.ipynb` and follow the instructions. Next, open `code/tests/control/control.ipynb` and follow the instructions. This exercise uses a simplistic vehicle simulator within the Jupyter Notebook to test your code. If you completed these exercises successfully, you **can** also run your controller in a Carla simulation:
106 |
107 | * Start Carla by executing the file `CarlaUE4.exe` (Windows) or `CarlaUE4.sh` (Linux) in your Carla folder (If you did not download Carla yet, see [the appendix](../Appendix/CarlaInstallation.md)).
108 | * Execute `python -m code.tests.control.carla_sim --ex` from the parent directory of `code` and witness your control algorithm in action! If you omit the `--ex` flag, you will see the sample solution.
109 | * By default, the center of the lane is queried from Carla's HD map and given as the reference path to your controller. But if you run `python -m code.tests.control.carla_sim --ex --ld`, your `LaneDetector` will be used: the average of the left and right lane boundary, i.e., $(y_l(x)+y_r(x))/2$, will be given to your controller as the reference path (see the sketch below). Note that there is a "TODO" item in `carla_sim.py` regarding the correct call to your `LaneDetector` constructor. You should work on this to make sure the simulation runs without error. Running the Carla simulation and your `LaneDetector` at the same time will eat up a lot of hardware resources, so the simulation will probably run at only a few frames per second unless your machine is very powerful.
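For illustration, forming such a reference path from two boundary polynomials could look like this (the coefficients are made up, and this is not the actual `carla_sim.py` code):

```python
import numpy as np

# Hypothetical boundary polynomials y_l(x), y_r(x), as a LaneDetector might return them
poly_left = np.poly1d([0.001, -0.02, 1.75])    # left boundary
poly_right = np.poly1d([0.001, -0.02, -1.75])  # right boundary

x = np.linspace(0.0, 60.0, 200)                # forward distance in meters
y_center = 0.5 * (poly_left(x) + poly_right(x))
reference_path = np.stack([x, y_center], axis=1)  # (x, y) waypoints for the controller
```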
110 |
--------------------------------------------------------------------------------
/book/Control/images/control.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/Control/images/control.gif
--------------------------------------------------------------------------------
/book/Control/images/control_carla.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/Control/images/control_carla.gif
--------------------------------------------------------------------------------
/book/Control/images/pure_pursuit_simple_geometry.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/Control/images/pure_pursuit_simple_geometry.png
--------------------------------------------------------------------------------
/book/Control/tikz/Ackermann/ICR.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.0]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\ArrowLength}{1}
7 | \pgfmathsetmacro{\vheight}{4.5}
8 | \pgfmathsetmacro{\vwidth}{2}
9 | \pgfmathsetmacro{\deltavar}{15}
10 | \pgfmathsetmacro{\xdist}{6}
11 |
12 | \pgfmathsetmacro{\myrot}{14}
13 | \pgfmathsetmacro{\myshift}{1}
14 | %\pgfmathsetmacro{\myrot}{0}
15 | %\pgfmathsetmacro{\myshift}{0}
16 | \begin{scope}[shift={(0,\myshift)},rotate=\myrot]
17 |
18 | % RECTANGLE
19 | \draw [line width = 5, gray, fill=gray!30!white] (-1+\xdist,-1) rectangle (\xdist+\vwidth+1,\vheight+1);
20 |
21 | \end{scope}
22 |
23 | %\draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
24 |
25 |
26 |
27 |
28 | \foreach \x/\y in {7.1/3.2, {\xdist+\vwidth}/0} {
29 | %\draw[] (0,0) -- (\x,\y);
30 | %/draw[->] (\x,\y) -- ({\atan({\y/\x})}, \sin(\x));
31 | }
32 |
33 |
34 | % radius R and angle P for right bottom (rb)
35 | \pgfmathsetmacro{\Rrb}{ sqrt( (\xdist+\vwidth)^2 ) }
36 | \pgfmathsetmacro{\Prb}{ 0 }
37 |
38 | % radius R and angle P for right top(rt)
39 | \pgfmathsetmacro{\Rrt}{ sqrt( (\xdist+\vwidth)^2 +(\vheight)^2 ) }
40 | \pgfmathsetmacro{\Prt}{ atan( \vheight / (\xdist+\vwidth) ) }
41 |
42 |
43 | \foreach \r/\phi in {10/40, 8/20, 8/55} {
44 |
45 | \pgfmathsetmacro{\x}{ \r*cos(\phi) }
46 | \pgfmathsetmacro{\y}{ \r*sin(\phi) }
47 |
48 | \draw[] (0,0) -- (\x,\y);
49 | \draw[->, red, line width = 1.6] (\x,\y) -- ( {\x -sin(\phi)*\ArrowLength}, {\y + cos(\phi)*\ArrowLength});
50 |
51 | \draw[color=black] ({\x - sin(\phi)*0.4*\ArrowLength}, {\y + cos(\phi)*0.4*\ArrowLength}) arc ({90+\phi}:{180+\phi}:0.4*\ArrowLength);
52 |
53 | \draw[fill] ({\x - 0.2*\ArrowLength* sin(\phi+45)},{\y+0.2*\ArrowLength*cos(\phi+45)}) circle (0.03);
54 | \draw[fill, blue] (\x,\y) circle (0.07);
55 | }
56 |
57 |
58 | % top left
59 | %\draw[ultra thick, green,->] (0, \vheight) -- ({0-sin(\deltavar)*\ArrowLength},{\vheight+\ArrowLength)});
60 |
61 |
62 | %angle delta left
63 | %\draw[color=black] ({\ax+2},0) arc (0:\deltavar:2);
64 | %\draw (\ax+1.3, 0.3) node[black] {$\delta$};
65 |
66 | % circular arc
67 | %\draw[thick, ->] (0,0) arc (0:40:-\ax);
68 |
69 | % ICR
70 | \node[red] at (-0.3,0.7){\Large ICR};
71 | \draw[red, fill] (0,0) circle(0.1);
72 |
73 |
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/Ackermann/ICR_Slip.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[thick]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\ArrowLength}{1}
7 | \pgfmathsetmacro{\vheight}{4}
8 | \pgfmathsetmacro{\vwidth}{2.6}
9 | \pgfmathsetmacro{\deltavar}{45}
10 | \pgfmathsetmacro{\deltavarB}{35}
11 | \pgfmathsetmacro{\xdist}{8}
12 |
13 | \definecolor{blue}{RGB}{100,100,100}
14 |
15 | \pgfmathsetmacro{\voff}{1.5}
16 |
17 | \pgfmathsetmacro{\myrot}{14}
18 | \pgfmathsetmacro{\myshift}{1}
19 | %\pgfmathsetmacro{\myrot}{0}
20 | %\pgfmathsetmacro{\myshift}{0}
21 | \begin{scope}[shift={(0,\myshift)},rotate=\myrot]
22 |
23 | \draw [line width = 5, gray, fill=gray!30!white] (-0.4+\xdist,-1.3+\voff) rectangle (\xdist+\vwidth+0.4,\vheight+1.4+\voff);
24 |
25 | % TIRES
26 | %bottom left
27 | \node(tire1)[draw=blue, thick, fill=white,
28 | shape=rounded rectangle,
29 | drop shadow={opacity=.5,shadow xshift=0pt},
30 | minimum width=1.5cm,
31 | minimum height=0.5cm,
32 | rotate=90+\myrot] at (\xdist,\voff) {};
33 | \draw[dashed, ->, blue] (\xdist,\voff) -- (\xdist,\voff+1.5) ;
34 |
35 | %top left
36 | \node(tire2)[draw=blue, thick, fill=white,
37 | shape=rounded rectangle,
38 | drop shadow={opacity=.5,shadow xshift=0pt},
39 | minimum width=1.5cm,
40 | minimum height=0.5cm,
41 | rotate=\deltavar-90+\myrot] at (\xdist,\vheight+\voff) {};
42 | \draw[dashed, ->, blue] (\xdist,\vheight+\voff) -- ({\xdist-1.5*sin(\deltavar)},{\vheight+\voff+1.5*cos(\deltavar)}) ;
43 |
44 | %top right
45 | \node(tire3)[draw=blue, thick, fill=white,
46 | shape=rounded rectangle,
47 | drop shadow={opacity=.5,shadow xshift=0pt},
48 | minimum width=1.5cm,
49 | minimum height=0.5cm,
50 | rotate=\deltavarB-90+\myrot] at (\xdist+\vwidth,\vheight+\voff) {};
51 |
52 | \draw[dashed, ->, blue] (\xdist+\vwidth,\vheight+\voff) -- ({\xdist+\vwidth-1.5*sin(\deltavarB)},{\vheight+\voff+1.5*cos(\deltavarB)}) ;
53 |
54 | %bottom right
55 | \node(tire4)[draw=blue, thick, fill=white,
56 | shape=rounded rectangle,
57 | drop shadow={opacity=.5,shadow xshift=0pt},
58 | minimum width=1.5cm,
59 | minimum height=0.5cm,
60 | rotate=90+\myrot] at (\xdist+\vwidth,\voff) {};
61 |
62 | \draw[dashed, ->, blue] (\xdist+\vwidth,\voff) -- (\xdist+\vwidth,\voff+1.5) ;
63 |
64 |
65 | %\draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
66 |
67 |
68 |
69 |
70 | \foreach \x/\y in {7.1/3.2, {\xdist+\vwidth}/0} {
71 | %\draw[] (0,0) -- (\x,\y);
72 | %/draw[->] (\x,\y) -- ({\atan({\y/\x})}, \sin(\x));
73 | }
74 |
75 |
76 | % radius R and angle P for right bottom (rb)
77 | \pgfmathsetmacro{\Rrb}{ sqrt( (\xdist+\vwidth)^2 + \voff^2 )}
78 | \pgfmathsetmacro{\Prb}{ atan( \voff/(\xdist+\vwidth) )}
79 |
80 | % radius R and angle P for right top(rt)
81 | \pgfmathsetmacro{\Rrt}{ sqrt( (\xdist+\vwidth)^2 +(\vheight+\voff)^2 )}
82 | \pgfmathsetmacro{\Prt}{ atan( (\vheight+\voff) / (\xdist+\vwidth) )}
83 |
84 | % radius R and angle P for left top(lt)
85 | \pgfmathsetmacro{\Rlt}{ sqrt( (\xdist)^2 +(\vheight+\voff)^2 )}
86 | \pgfmathsetmacro{\Plt}{ atan( (\vheight+\voff) / (\xdist) )}
87 |
88 | % radius R and angle P for left bottom(lb)
89 | \pgfmathsetmacro{\Rlb}{ sqrt( (\xdist)^2 + \voff^2 )}
90 | \pgfmathsetmacro{\Plb}{ atan( \voff/(\xdist) )}
91 |
92 |
93 | \foreach \r/\phi in {\Rrb / \Prb, \Rrt / \Prt, \Rlt / \Plt , \Rlb / \Plb } {
94 |
95 | \pgfmathsetmacro{\x}{ \r*cos(\phi) }
96 | \pgfmathsetmacro{\y}{ \r*sin(\phi) }
97 |
98 | \draw[] (0,0) -- (\x,\y);
99 | \draw[->,red] (\x,\y) -- ( {\x -sin(\phi)*\ArrowLength}, {\y + cos(\phi)*\ArrowLength});
100 | \draw[color=black] ({\x - sin(\phi)*0.4*\ArrowLength}, {\y + cos(\phi)*0.4*\ArrowLength}) arc ({90+\phi}:{180+\phi}:0.4*\ArrowLength);
101 |
102 | \draw[fill] ({\x - 0.2*\ArrowLength* sin(\phi+45)},{\y+0.2*\ArrowLength*cos(\phi+45)}) circle (0.03);
103 | }
104 |
105 |
106 | % top left
107 | %\draw[ultra thick, green,->] (0, \vheight) -- ({0-sin(\deltavar)*\ArrowLength},{\vheight+\ArrowLength)});
108 |
109 |
110 | % ICR
111 | \node[red] at (-0.2,0.7){\Large ICR};
112 | \draw[red, fill] (0,0) circle(0.1);
113 |
114 | \end{scope}
115 |
116 |
117 |
118 |
119 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/Ackermann/ICR_construction.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.0]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\ArrowLength}{1}
7 | \pgfmathsetmacro{\vheight}{4.5}
8 | \pgfmathsetmacro{\vwidth}{2}
9 | \pgfmathsetmacro{\deltavar}{15}
10 | \pgfmathsetmacro{\xdist}{6}
11 |
12 | \pgfmathsetmacro{\cs}{10}
13 | \pgfmathsetmacro{\csb}{1}
14 | \path[clip] (-3,-\csb)--(-3,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 |
17 | \pgfmathsetmacro{\myrot}{14}
18 | \pgfmathsetmacro{\myshift}{1}
19 | %\pgfmathsetmacro{\myrot}{0}
20 | %\pgfmathsetmacro{\myshift}{0}
21 | \begin{scope}[shift={(0,\myshift)},rotate=\myrot]
22 |
23 | % RECTANGLE
24 | \draw [line width = 5, gray, fill=gray!30!white] (-1+\xdist,-1) rectangle (\xdist+\vwidth+1,\vheight+1);
25 |
26 | \end{scope}
27 |
28 | %\draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
29 |
30 |
31 |
32 |
33 | \foreach \x/\y in {7.1/3.2, {\xdist+\vwidth}/0} {
34 | %\draw[] (0,0) -- (\x,\y);
35 | %/draw[->] (\x,\y) -- ({\atan({\y/\x})}, \sin(\x));
36 | }
37 |
38 |
39 | % radius R and angle P for right bottom (rb)
40 | \pgfmathsetmacro{\Rrb}{ sqrt( (\xdist+\vwidth)^2 ) }
41 | \pgfmathsetmacro{\Prb}{ 0 }
42 |
43 | % radius R and angle P for right top(rt)
44 | \pgfmathsetmacro{\Rrt}{ sqrt( (\xdist+\vwidth)^2 + (\vheight)^2 ) }
45 | \pgfmathsetmacro{\Prt}{ atan( \vheight / (\xdist+\vwidth) ) }
46 |
47 |
48 | \foreach \r/\phi in {8/20, 8/55} {
49 |
50 | \pgfmathsetmacro{\x}{ \r*cos(\phi) }
51 | \pgfmathsetmacro{\y}{ \r*sin(\phi) }
52 | \pgfmathsetmacro{\R}{ 15 }
53 |
54 | \draw[dashed] ({-1*\R*cos(\phi)}, {-1*\R*sin(\phi)}) -- ({\R*cos(\phi)}, {\R*sin(\phi)});
55 | %\draw[] (0,0) -- (\x,\y);
56 | \draw[->, red, line width = 1.2] (\x,\y) -- ( {\x -sin(\phi)*\ArrowLength}, {\y + cos(\phi)*\ArrowLength});
57 |
58 | \draw[color=black] ({\x - sin(\phi)*0.4*\ArrowLength}, {\y + cos(\phi)*0.4*\ArrowLength}) arc ({90+\phi}:{180+\phi}:0.4*\ArrowLength);
59 |
60 | \draw[fill] ({\x - 0.2*\ArrowLength* sin(\phi+45)},{\y+0.2*\ArrowLength*cos(\phi+45)}) circle (0.03);
61 | \draw[fill, blue] (\x,\y) circle (0.07);
62 | }
63 |
64 |
65 | % ICR
66 | \node[red] at (-0.2,0.7){\Large ICR};
67 | \draw[red, fill] (0,0) circle(0.1);
68 |
69 | % top left
70 | %\draw[ultra thick, green,->] (0, \vheight) -- ({0-sin(\deltavar)*\ArrowLength},{\vheight+\ArrowLength)});
71 |
72 |
73 | %angle delta left
74 | %\draw[color=black] ({\ax+2},0) arc (0:\deltavar:2);
75 | %\draw (\ax+1.3, 0.3) node[black] {$\delta$};
76 |
77 | % circular arc
78 | %\draw[thick, ->] (0,0) arc (0:40:-\ax);
79 |
80 |
81 |
82 |
83 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/Ackermann/WheelAngle.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[thick]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\ArrowLength}{1}
7 | \pgfmathsetmacro{\vheight}{4}
8 | \pgfmathsetmacro{\vwidth}{2.3}
9 | \pgfmathsetmacro{\deltavar}{35}
10 | \pgfmathsetmacro{\deltavarB}{25}
11 | \pgfmathsetmacro{\xdist}{8}
12 |
13 | \pgfmathsetmacro{\voff}{1.5}
14 | \definecolor{blue}{RGB}{100,100,100} % note: redefines the name "blue" to a gray tone, so all "blue" elements below render gray
15 |
16 | \pgfmathsetmacro{\myrot}{70}
17 | \pgfmathsetmacro{\myshift}{1}
18 | %\pgfmathsetmacro{\myrot}{0}
19 | %\pgfmathsetmacro{\myshift}{0}
20 | \begin{scope}[shift={(0,\myshift)},rotate=\myrot]
21 |
22 |
23 | % RECTANGLE
24 | \draw [line width = 5, gray, fill=gray!30!white] (-0.4+\xdist,-1.3+\voff) rectangle (\xdist+\vwidth+0.4,\vheight+1.4+\voff);
25 |
26 | % TIRES
27 | %bottom left
28 | \node(tire1)[draw=blue, thick, fill=white,
29 | shape=rounded rectangle,
30 | drop shadow={opacity=.5,shadow xshift=0pt},
31 | minimum width=1.5cm,
32 | minimum height=0.5cm,
33 | rotate=90+\myrot] at (\xdist,\voff) {};
34 |
35 |
36 | %top left
37 | \node(tire2)[draw=blue, thick, fill=white,
38 | shape=rounded rectangle,
39 | drop shadow={opacity=.5,shadow xshift=0pt},
40 | minimum width=1.5cm,
41 | minimum height=0.5cm,
42 | rotate=\deltavar-90+\myrot] at (\xdist,\vheight+\voff) {};
43 | \draw[dashed, ->, blue] (\xdist,\vheight+\voff) -- ({\xdist-1.5*sin(\deltavar)},{\vheight+\voff+1.5*cos(\deltavar)}) ;
44 |
45 | \draw[ ->] (\xdist,\vheight+\voff) --(\xdist,\vheight+\voff+1.5) ;
46 | \draw[magenta,->] (\xdist,\vheight+\voff+0.8) arc (90:90+\deltavar:0.8);
47 | \node[magenta] at (7.8,6.5) {$\delta_l$};
48 |
49 | %top right
50 | \node(tire3)[draw=blue, thick, fill=white,
51 | shape=rounded rectangle,
52 | drop shadow={opacity=.5,shadow xshift=0pt},
53 | minimum width=1.5cm,
54 | minimum height=0.5cm,
55 | rotate=\deltavarB-90+\myrot] at (\xdist+\vwidth,\vheight+\voff) {};
56 |
57 | \draw[dashed, ->, blue] (\xdist+\vwidth,\vheight+\voff) -- ({\xdist+\vwidth-1.5*sin(\deltavarB)},{\vheight+\voff+1.5*cos(\deltavarB)}) ;
58 |
59 |
60 | \draw[ ->] (\xdist+\vwidth,\vheight+\voff) --(\xdist+\vwidth,\vheight+\voff+1.5) ;
61 | \draw[magenta,->] (\xdist+\vwidth,\vheight+\voff+0.8) arc (90:90+\deltavarB:0.8);
62 | \node[magenta] at (10.1,6.5) {$\delta_r$};
63 |
64 | %bottom right
65 | \node(tire4)[draw=blue, thick, fill=white,
66 | shape=rounded rectangle,
67 | drop shadow={opacity=.5,shadow xshift=0pt},
68 | minimum width=1.5cm,
69 | minimum height=0.5cm,
70 | rotate=90+\myrot] at (\xdist+\vwidth,\voff) {};
71 |
72 |
73 |
74 |
75 | %\draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
76 |
77 |
78 |
79 | \end{scope}
80 |
81 |
82 |
83 |
84 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/BicycleModel/BicycleModel.svg:
--------------------------------------------------------------------------------
1 |
2 |
28 |
--------------------------------------------------------------------------------
/book/Control/tikz/BicycleModel/BicycleModel.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{4}
7 | \pgfmathsetmacro{\deltavar}{25}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\vwh}{1.2}
10 |
11 |
12 |
13 | \pgfmathsetmacro{\myrot}{70}
14 | \pgfmathsetmacro{\myshift}{1}
15 | %\pgfmathsetmacro{\myrot}{0}
16 | %\pgfmathsetmacro{\myshift}{0}
17 | \begin{scope}[shift={(0,\myshift)},rotate=\myrot]
18 |
19 | % NODES
20 |
21 | \draw [line width = 5, gray, fill=gray!30!white] (-1.7,-1.5) rectangle (1.7,5.5);
22 |
23 |
24 | \node(tire1)[draw=blue, thick, fill=white,
25 | shape=rounded rectangle,
26 | drop shadow={opacity=.5,shadow xshift=0pt},
27 | minimum width=1.5cm,
28 | minimum height=0.5cm,
29 | rotate=90+\myrot] at (0,0) {};
30 |
31 | \node(tire2)[draw=blue, thick, fill=white,
32 | shape=rounded rectangle,
33 | drop shadow={opacity=.5,shadow xshift=0pt},
34 | minimum width=1.5cm,
35 | minimum height=0.5cm,
36 | rotate=\deltavar-90+\myrot] at (0,\dist) {};
37 |
38 | \node(tireFL)[draw=gray, thick, fill=white,
39 | shape=rounded rectangle,
40 | drop shadow={opacity=.5,shadow xshift=0pt},
41 | minimum width=1.5cm,
42 | minimum height=0.5cm,
43 | rotate=90+\myrot] at (-\vwh,0) {};
44 |
45 | \node(tireBL)[draw=gray, thick, fill=white,
46 | shape=rounded rectangle,
47 | drop shadow={opacity=.5,shadow xshift=0pt},
48 | minimum width=1.5cm,
49 | minimum height=0.5cm,
50 | rotate=\deltavar-90+\myrot] at (-\vwh,\dist) {};
51 |
52 | \node(tireFR)[draw=gray, thick, fill=white,
53 | shape=rounded rectangle,
54 | drop shadow={opacity=.5,shadow xshift=0pt},
55 | minimum width=1.5cm,
56 | minimum height=0.5cm,
57 | rotate=90+\myrot] at (\vwh,0) {};
58 |
59 | \node(tireBR)[draw=gray, thick, fill=white,
60 | shape=rounded rectangle,
61 | drop shadow={opacity=.5,shadow xshift=0pt},
62 | minimum width=1.5cm,
63 | minimum height=0.5cm,
64 | rotate=\deltavar-90+\myrot] at (\vwh,\dist) {};
65 |
66 |
67 |
68 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
69 |
70 | % angle delta top
71 | %\draw[dashed] (0, \dist) -- (0,\dist+2);
72 | %\draw[dashed] (0, \dist) -- ({-sin(\deltavar)*2},{\dist+2)});
73 | %\draw[color=black] (0,\dist+1.5) arc (90:90+\deltavar:1.5);
74 | %\draw (-0.2, \dist+1.1) node[black] {$\delta$};
75 |
76 |
77 | \end{scope}
78 |
79 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/BicycleModel/BicycleModelGeometry.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{4}
7 | \pgfmathsetmacro{\deltavar}{25}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 |
10 |
11 | % NODES
12 |
13 |
14 | \node(tire1)[draw=blue, thick, fill=white,
15 | shape=rounded rectangle,
16 | drop shadow={opacity=.5,shadow xshift=0pt},
17 | minimum width=1.8cm,
18 | minimum height=0.5cm,
19 | rotate=90] at (0,0) {};
20 |
21 | \node(tire2)[draw=blue, thick, fill=white,
22 | shape=rounded rectangle,
23 | drop shadow={opacity=.5,shadow xshift=0pt},
24 | minimum width=1.8cm,
25 | minimum height=0.5cm,
26 | rotate=\deltavar-90] at (0,\dist) {};
27 |
28 | \node(triangle_left) at (\ax,0){};
29 |
30 |
31 |
32 | % TRIANGLE
33 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
34 | \draw[dashed] (triangle_left.center) -- (tire2.center);
35 | \draw[dashed] (triangle_left.center) -- (tire1.center);
36 |
37 | % length R and L
38 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {$R$};
39 | \draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {$L$};
40 |
41 | % angle delta top
42 | \draw[dashed] (0, \dist) -- (0,\dist+2);
43 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*2},{\dist+2});
44 | \draw[color=black] (0,\dist+1.5) arc (90:90+\deltavar:1.5);
45 | \draw (-0.2, \dist+1.1) node[black] {$\delta$};
46 |
47 |
48 | %angle delta left
49 | \draw[color=black] ({\ax+2},0) arc (0:\deltavar:2);
50 | \draw (\ax+1.3, 0.3) node[black] {$\delta$};
51 |
52 | % circular arc
53 | \draw[thick, ->] (0,0) arc (0:40:-\ax);
54 |
55 | % ICR
56 | \node[red] at (\ax,0.5){ICR};
57 | \draw[red, fill] (triangle_left) circle(0.05);
58 |
59 |
60 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/BicycleModel/BicycleModel_x_y_theta.tex:
--------------------------------------------------------------------------------
1 | \documentclass[crop,tikz]{standalone}
2 | \usepackage{tikz}
3 | \usepackage{verbatim}
4 | \usepackage{amsmath}
5 |
6 | \usetikzlibrary{shapes.misc,shadows}
7 | \usetikzlibrary{calc}
8 | \usetikzlibrary{positioning,backgrounds}
9 |
10 | \begin{document}
11 |
12 | \begin{tikzpicture}
13 |
14 | \pgfmathsetmacro{\dist}{4}
15 | \pgfmathsetmacro{\deltavar}{25}
16 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
17 |
18 | \pgfmathsetmacro{\imgrot}{-40}
19 |
20 | \draw [<->,thick] (-6,1) node (yaxis) [above] {$Y$}
21 | |- (2,-3) node (xaxis) [right] {$X$};
22 |
23 | \draw[color=black] (-1.5,-3) arc (0:40:1.3);
24 | \draw (-1.8,-2.7) node[black] {$\theta$};
25 |
26 |
27 | \draw (5,4) node[magenta] {$\begin{pmatrix} \cos(\theta) \\ \sin(\theta) \end{pmatrix}$};
28 |
29 | \begin{scope} [rotate=\imgrot]
30 |
31 |
32 | % NODES
33 |
34 |
35 | \node(tire1)[draw=blue, thick, fill=white,
36 | shape=rounded rectangle,
37 | drop shadow={opacity=.5,shadow xshift=0pt},
38 | minimum width=1.8cm,
39 | minimum height=0.5cm,
40 | rotate=90+\imgrot] at (0,0) {};
41 |
42 | \node(tire2)[draw=blue, thick, fill=white,
43 | shape=rounded rectangle,
44 | drop shadow={opacity=.5,shadow xshift=0pt},
45 | minimum width=1.8cm,
46 | minimum height=0.5cm,
47 | rotate=\deltavar-90+\imgrot] at (0,\dist) {};
48 |
49 | \node(triangle_left) at (\ax,0){};
50 |
51 |
52 |
53 | % TRIANGLE
54 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
55 | \draw[dashed] (triangle_left.center) -- (tire2.center);
56 | \draw[dashed] (triangle_left.center) -- (tire1.center);
57 |
58 | % length R and L
59 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {$R$};
60 | \draw[<->] (1.7, 0) -- (1.7,\dist) node[midway, fill=white] {$L$};
61 |
62 | % angle delta top
63 | \draw[dashed, blue] (0, \dist-7.9) -- (0,\dist);
64 | \draw[dashed,->,magenta, line width=1] (0, \dist) -- (0,\dist+2.3);
65 | \draw[dashed, blue] (0, \dist) -- ({-sin(\deltavar)*2},{\dist+2});
66 | \draw[color=black] (0,\dist+1.5) arc (90:90+\deltavar:1.5);
67 | \draw (-0.2, \dist+1.1) node[black] {$\delta$};
68 |
69 |
70 | %angle delta left
71 | \draw[color=black] ({\ax+2},0) arc (0:\deltavar:2);
72 | \draw (\ax+1.3, 0.3) node[black] {$\delta$};
73 |
74 | % circular arc
75 | \draw[thick, ->] (0,0) arc (0:40:-\ax);
76 |
77 | % ICR
78 | \node[red] at (\ax,0.5){ICR};
79 | \draw[red,fill] (triangle_left) circle(0.05);
80 |
81 | %car
82 | \node [transform shape] () at (0,2) {\includegraphics[width=0.22\linewidth]{../../images/car_pixabay.pdf}};
83 |
84 | \end{scope}
85 |
86 | \draw[dashed, black] (-6,0) -- (tire1.center);
87 | \node[black] at (-6.3,0){$y$};
88 | \draw[dashed, black] (tire1.center) -- (0,-3);
89 | \node[black] at (0,-3.3){$x$};
90 |
91 | \end{tikzpicture}
92 | \end{document}
93 |
94 |
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuitLawOfSines.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{11.3}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{17}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+6);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*6},{\dist+6});
50 | \draw[color=black] (0,\dist+3.3) arc (90:90+\deltavar:3.3);
51 | \draw (-0.27, \dist+2.9) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+1.9, 0.85) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:70:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red](ICR) at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 |
76 | % magenta triangle
77 | \draw[magenta, line width=3] (0,0) -- (triangle_left) -- (-4.8,11) -- (0,0);
78 |
79 | %angle \gamma_1
80 | \draw[color=magenta, line width=3] ({\ax+5},0) arc (0:47:5);
81 | \draw (-11.2,1.5) node[magenta] {\Huge $\gamma_1$};
82 |
83 | % gamma_2
84 | \draw[color=magenta, line width=3] (-3.6, 8.2) arc (290:226:3.05);
85 | \draw (-5, 9.2) node[magenta] {\Huge $\gamma_2$};
86 |
87 | % gamma bottom
88 | \draw[color=magenta, line width=3] (-4.5,0) arc (180:111:4.3);
89 | \draw (-2.6,1.7) node[magenta] {\Huge $\gamma_3$};
90 |
91 | % \alpha
92 | \draw[color=magenta, line width=3] (0,8) arc (90:113.2:8);
93 | \draw (-1.9, 8.6) node[magenta] {\Huge $\alpha$};
94 |
95 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuitWrongDelta.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{20}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{13}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+2.7);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*2.7},{\dist+2.7});
50 | \draw[color=black] (0,\dist+2.3) arc (90:90+0.9*\deltavar:2.3);
51 | \draw (-0.27, \dist+1.7) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+2.2, 0.4) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:90:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red] at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuit_delta_11p3.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{11.3}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{17}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+4);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*4},{\dist+4});
50 | \draw[color=black] (0,\dist+3.3) arc (90:90+\deltavar:3.3);
51 | \draw (-0.27, \dist+2.9) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+1.9, 0.85) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:70:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red] at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuit_delta_15.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{15}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{17}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+4);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*4},{\dist+4});
50 | \draw[color=black] (0,\dist+3.3) arc (90:90+\deltavar:3.3);
51 | \draw (-0.3, \dist+2.9) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+1.9, 0.93) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:70:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red] at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuit_delta_20.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{20}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{17}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+2.7);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*2.7},{\dist+2.7});
50 | \draw[color=black] (0,\dist+2.3) arc (90:90+0.9*\deltavar:2.3);
51 | \draw (-0.27, \dist+1.7) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+2.2, 0.4) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:70:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red] at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Control/tikz/PurePursuit/PurePursuit_delta_25.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture}[line width=1.7pt]
2 | \usetikzlibrary{shapes.misc,shadows}
3 | \usetikzlibrary{calc}
4 | \usetikzlibrary{positioning,backgrounds}
5 |
6 | \pgfmathsetmacro{\dist}{3}
7 | \pgfmathsetmacro{\deltavar}{25}
8 | \pgfmathsetmacro{\ax}{tan(90-\deltavar)*\dist*(-1)}
9 | \pgfmathsetmacro{\lookahead}{12}
10 | \pgfmathsetmacro{\cs}{17}
11 | \pgfmathsetmacro{\csb}{4}
12 |
13 |
14 | \path[clip] (-\cs,-\csb)--(-\cs,\cs)--(\cs,\cs)--(\cs, -\csb) --cycle;
15 |
16 | % NODES
17 | \node(tire1)[draw=blue, thick, fill=white,
18 | shape=rounded rectangle,
19 | drop shadow={opacity=.5,shadow xshift=0pt},
20 | minimum width=1.8cm,
21 | minimum height=0.5cm,
22 | line width =2,
23 | rotate=90] at (0,0) {};
24 |
25 | \node(tire2)[draw=blue, thick, fill=white,
26 | shape=rounded rectangle,
27 | drop shadow={opacity=.5,shadow xshift=0pt},
28 | minimum width=1.8cm,
29 | minimum height=0.5cm,
30 | line width =2,
31 | rotate=\deltavar-90] at (0,\dist) {};
32 |
33 | \node(triangle_left) at (\ax,0){};
34 |
35 |
36 |
37 | % TRIANGLE
38 | \draw[line width=2, color=blue] (tire1.center) -- (tire2.center);
39 | \draw[dashed] (triangle_left.center) -- (tire2.center);
40 | \draw[dashed] (triangle_left.center) -- (tire1.center);
41 |
42 | % length R and L
43 | \draw[<->] (\ax, -1) -- (0,-1) node[midway, fill=white] {\Huge $R$};
44 | %\draw[<->] (1, 0) -- (1,\dist) node[midway, fill=white] {\Huge $L$};
45 | \draw[<->] (\lookahead, 0) -- (0,0) node[midway, fill=white] {\Huge $l_d$};
46 |
47 | % angle delta top
48 | \draw[dashed] (0, \dist) -- (0,\dist+2.7);
49 | \draw[dashed] (0, \dist) -- ({-sin(\deltavar)*2.7},{\dist+2.7});
50 | \draw[color=black] (0,\dist+2.3) arc (90:90+0.9*\deltavar:2.3);
51 | \draw (-0.27, \dist+1.7) node[black] {\Huge $\delta$};
52 |
53 | %angle delta left
54 | \draw[color=black] ({\ax+2.7},0) arc (0:1.3*\deltavar:2);
55 | \draw (\ax+2.2, 0.4) node[black] {\Huge $\delta$};
56 |
57 | % circle
58 | \draw[ ->, orange, line width=3] (0,0) arc (0:70:-\ax);
59 |
60 | % circle LookAhead
61 | \draw[ ->, dotted] (\lookahead,0) arc (0:360:\lookahead);
62 |
63 | % ICR
64 | \node[red] at (\ax,0.5)[above]{\Huge ICR};
65 | \draw[red, fill] (triangle_left) circle(0.13);
66 |
67 | % PATH
68 | \draw[green!80!gray, line width =3](1, -6) --(0,-3) -- (-0.5, -1) -- (-1,0) -- (-1.5,1) -- (-2,2) -- (-3, 8) -- (-6,13) ;
69 | \node[green!80!gray] at (0.6,-2.6)[right=-0.4]{\Huge path};
70 |
71 | % target
72 | \node[red] at (-4.8,11)[above right=0.5 and -0.1]{\Huge TP};
73 | \draw[red, fill] (-4.8,11) circle(0.13);
74 |
75 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/Introduction/carla_vehicle_lanes.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/Introduction/carla_vehicle_lanes.jpg
--------------------------------------------------------------------------------
/book/Introduction/intro.md:
--------------------------------------------------------------------------------
1 | Algorithms for Automated Driving
2 | ============================
3 |
4 |
5 | ```{figure} carla_vehicle_lanes.jpg
6 | ---
7 | width: 66%
8 | name: directive-fig
9 | ---
10 | ```
11 |
12 | Each chapter of this (mini-)book guides you in programming one important software component for automated driving.
13 | Currently, this book contains three chapters: **Lane Detection**, **Control** and **Camera Calibration**. You will implement software that
14 | * detects lane boundaries from a camera image using deep learning
15 | * controls steering wheel and throttle to keep the vehicle within the detected lane at the desired speed
16 | * determines how the camera is positioned and oriented with respect to the vehicle (a prerequisite to properly join the lane detection and the control module)
17 |
18 | The software you will write is in python, and you will apply it in the [open-source driving simulator CARLA](https://carla.org/). Ideally, your computer is powerful enough to run CARLA, but if it is not, you can still work through the exercises: for the exercise on control, a simple simulator comes with this course. We recommend working through the chapters in order, but if you want to, you can read the **Control** chapter before the **Lane Detection** chapter.
19 |
20 | To work through this book, you
21 | * should understand the following math and physics concepts: derivative, integral, trigonometry, sine/cosine of an angle, matrix, vector, coordinate system, velocity, acceleration, angular velocity, cross product, rotation matrix
22 | * should be familiar with programming in python. In particular, you should be comfortable with multidimensional arrays in numpy. You do **not** need a powerful computer (see [Exercise Setup](../Appendix/ExerciseSetup.md))
23 | * need to know what supervised learning is, and how to train a neural network with a deep learning framework like pytorch, fastai, tensorflow, keras, or something similar. This prerequisite is only necessary for the chapter on lane detection. If you do not fulfill it, you can skip that chapter, or study one of the [courses I recommend](../LaneDetection/Segmentation.ipynb) and then come back here.
24 |
25 | If you find a bug in the exercise code or some confusing explanations in the book, please [raise an issue on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving). If you have questions about the material or want to connect with me or other students, please use [github discussions](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions). Once you finish the book or decide to stop working through it, please consider giving some feedback by filling out [this questionnaire](https://forms.gle/TioqZiUsB5e5wSVG7) (If you open the link in your browser's incognito mode, the questionnaire should be anonymous).
26 |
27 | ```{admonition} Join us on discord!
28 | :class: seealso
29 | To join the discord server for this book, please follow this [link](https://discord.gg/57YEzkCFHN)!
30 | ```
31 |
32 | ```{admonition} Help wanted!
33 | Are you interested in contributing to the book by adding a new chapter? Or do you have other ideas for improvements? Please let us know by joining the discussion [on github](https://github.com/thomasfermi/Algorithms-for-Automated-Driving/discussions/4)!
34 | ```
35 |
--------------------------------------------------------------------------------
/book/LaneDetection/Discussion.md:
--------------------------------------------------------------------------------
1 | # Discussion
2 |
3 | ## Limitations of the presented approach
4 | * The second stage of the pipeline is implemented on the CPU. For better performance, everything should run on the GPU or even on dedicated hardware.
5 | * We only detect the ego lane boundaries, but the approach extends naturally to the neighboring lanes on the left and right.
6 | * The semantic segmentation approach will have problems when there are lane changes. A cleaner solution would be *instance segmentation*.
7 | * We created the lane boundary labels automatically using Carla's high definition map. Creating labels for a camera installed in a real car is way more challenging. The options I know of are:
8 | 1. You can have humans manually label each image separately. This approach is used, for example, for NVIDIA's PilotNet (Ref. {cite}`bojarski2020nvidia`).
9 | 2. Similar to our virtual approach, you can create labels using a high definition map of a real highway (Ref. {cite}`llamas2019`). The challenge is to perfectly localize the vehicle within the map in order to get good labels. Furthermore, to train a lane detection system that works on different highways across the world, you will need high definition maps of lots of different highways.
10 | 3. If you have some system that detects lane markings reliably at short distance, you can combine that with visual-inertial odometry to create good labels. Examples of such short-distance lane-detection systems would be a [lidar](https://en.wikipedia.org/wiki/Lidar), or a camera-based lane-detection system that works well for, say, the first 5 meters, but is not so good further away. Once you have logged a few seconds or minutes of driving, you can use [visual-inertial odometry](https://en.wikipedia.org/wiki/Visual_odometry#Visual_inertial_odometry) to obtain the vehicle trajectory and then stitch together a local map of lane boundaries. Subsequently, you can project those mapped lane boundaries into each image you have logged.
11 | * The inverse perspective mapping step relies on very good calibration parameters, i.e., on knowing the position and orientation of the camera with respect to the road. Since we are running simulations here, we know those parameters exactly. In the real world you need to calibrate your camera. Getting the camera intrinsics is typically no problem [if you have a chessboard](https://docs.opencv.org/master/dc/dbb/tutorial_py_calibration.html) (a minimal sketch follows below this list). Obtaining the camera extrinsics (orientation and height) is more challenging and might become [another chapter of this book](../Appendix/NextChapters.md) at some point.
12 | * We are assuming that the road is flat, which is obviously not true everywhere. We are also neglecting that the car dips or "nose-dives" a bit when braking. In this case, the vehicle's forward axis is not parallel to the road, which is something we assumed in our derivations.
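
For the intrinsic calibration mentioned above, a minimal sketch with OpenCV's standard chessboard workflow could look as follows. The pattern size and the image folder are made-up examples, not part of the exercise code:

```python
# Hypothetical example: estimate camera intrinsics from chessboard images.
import glob

import cv2
import numpy as np

pattern = (9, 6)  # inner chessboard corners (assumed pattern)

# 3D corner positions in the chessboard frame; the square size sets the unit
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
for fname in glob.glob("calibration_images/*.png"):  # hypothetical folder
    gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
    found, corners = cv2.findChessboardCorners(gray, pattern)
    if found:
        obj_points.append(objp)
        img_points.append(corners)

# K is the 3x3 intrinsic matrix, dist the lens distortion coefficients
ret, K, dist, rvecs, tvecs = cv2.calibrateCamera(
    obj_points, img_points, gray.shape[::-1], None, None
)
```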
13 |
14 |
15 | ## Comparison to literature
16 |
17 | As our approach to lane detection in this chapter is heavily inspired by the baseline described in Ref. {cite}`gansbeke2019endtoend`, we want to list the differences:
18 | * We are using an approach known as *inverse perspective mapping* to transform from pixel to road coordinates, since this allows us to fit a lane boundary model in meters. Describing the lane boundary in meters rather than pixels is necessary if we want to use the lane boundary model for control algorithms (see next chapter). Ref. {cite}`gansbeke2019endtoend` also transforms to a bird's eye view, but they use a homography for that. The resulting coordinates are not in meters. Note that this is not the aim of the paper, and hence should not be seen as a criticism.
19 | * For the image segmentation we are using an off-the-shelf neural network from the great pytorch library [segmentation models pytorch](https://github.com/qubvel/segmentation_models.pytorch).
20 | * Our pipeline is similar to the **baseline model** in Ref. {cite}`gansbeke2019endtoend`, not their actual model. Their actual model is an end-to-end neural network which fuses the two-step pipeline of the baseline model into one single neural network. This is advantageous, since it increases both the accuracy and the speed of execution. Of course, creating an end-to-end network is also possible for our slightly modified approach, but we keep this as an exercise for the reader 😉.
21 |
22 | ## Comparison to a real ADAS system: openpilot
23 |
24 | ```{margin}
25 | Take my discussion of openpilot with a grain of salt here. Their documentation is very limited, so my discussion is based on what I could piece together by reading their code.
26 | ```
27 | It is interesting to see how a real-world lane-detection system works. Luckily, there is one ADAS company that open-sources its software: comma.ai. As you can read in the [source code of their product openpilot](https://github.com/commaai/openpilot), their lane-detection system is designed roughly as follows:
28 | * Perform online [calibration](https://github.com/commaai/openpilot/blob/0b849d5a4e417d73e4b821b909839f379d70e75d/selfdrive/locationd/calibrationd.py) to estimate camera extrinsics
29 | * Apply a homography (warpPerspective) to the camera image in order to compute the image that you would get from a camera with *default* extrinsics. In the openpilot documentation this is referred to as the [calibrated frame](https://github.com/commaai/openpilot/tree/master/common/transformations). A minimal sketch of this warp follows below this list.
30 | * Train a neural net with the default-perspective images. The output of the neural network is the path the vehicle should take (somewhat close to the center between the lane boundaries). I am not totally sure, but based on their [medium article](https://medium.com/@comma_ai/towards-a-superhuman-driving-agent-1f7391e2e8ec) I think they create labels like this: Take recorded videos and estimate vehicle trajectory using visual odometry. Then for each image frame, transform this trajectory into the vehicle reference frame at that point in time and use this as a label.
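
To make the warping step above concrete, here is a minimal sketch of such a perspective warp with OpenCV. The homography `H` is a made-up placeholder; openpilot derives it from the estimated and the default camera poses:

```python
# Hypothetical example: warp a frame to the view of a camera with default extrinsics.
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a camera frame

H = np.array([[1.0, 0.02, -15.0],
              [0.0, 1.00,   8.0],
              [0.0, 0.00,   1.0]])  # placeholder; would be computed from the calibration

calibrated_frame = cv2.warpPerspective(frame, H, (640, 480))
```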
31 |
32 | ## Further Reading
33 | If you want to read some more about lane detection, I recommend the following resources:
34 |
35 | ```{glossary}
36 | [Papers with code](https://paperswithcode.com/task/lane-detection/)
37 | Here you can see papers grouped by the datasets they tackled, and also ranked by stars on github.
38 | [awesome-lane-detection](https://github.com/amusi/awesome-lane-detection)
39 | This github repo lists papers, code, blogs/tutorials and datasets connected to lane detection.
40 | ```
41 |
42 |
43 | ## References
44 |
45 | ```{bibliography}
46 | :filter: docname in docnames
47 | ```
--------------------------------------------------------------------------------
/book/LaneDetection/LaneDetectionOverview.md:
--------------------------------------------------------------------------------
1 |
2 | Overview
3 | ============================
4 |
5 | ## Lane Detection System
6 | In this chapter, I will guide you to build a simple but effective lane detection pipeline and apply it to images captured in the [Carla Simulator](https://carla.org/).
7 | The pipeline takes an image as *input* and yields a mathematical model of the lane boundaries as an *output*.
8 | The image is captured by a dashcam, i.e., a camera fixed behind the windshield of a vehicle. The lane boundary model is a polynomial
9 |
10 | $$
11 | y(x)=c_0+c_1 x+c_2 x^2 +c_3 x^3
12 | $$
13 |
14 | Here, both $x$ and $y$ are measured in meters. They define a coordinate system on the road as shown in {numref}`model_iso8850`.
15 |
16 |
17 | ```{figure} tikz/iso8850/iso8850_crop.png
18 | ---
19 | align: center
20 | width: 80%
21 | name: model_iso8850
22 | ---
23 | Road coordinate system. The perspective is known as *bird's eye view*.
24 | ```
25 |
26 | The pipeline consists of two steps:
27 | * Using a neural network, detect those pixels in an image that are lane boundaries
28 | * Associate the lane boundary pixels to points on the road, $(x_i,y_i), i=0,1,2\dots$. Then fit a polynomial.
29 |
30 | The approach is inspired by the "baseline" method described in Ref. {cite}`gansbeke2019endtoend`, which performs close to state-of-the-art lane-detection methods.
31 |
32 | ## Lane Boundary Segmentation - Deep Learning
33 | In the chapter [Lane Boundary Segmentation](./Segmentation.ipynb), we will train a neural network that takes an image and estimates, for each pixel, the probability that it belongs to the left lane boundary, to the right lane boundary, or to neither. As you might know, a neural network needs data to learn. Luckily, it is easy to gather this data using the Carla simulator:
34 | We are going to create a *vehicle* on the Carla map and attach an rgb camera *sensor* to it.
35 | Then we will move the vehicle to different positions on the map and capture images with our camera.
36 | The 3D world coordinates of the lane boundaries are obtained from the Carla simulator's high definition map and can be projected into the image using the *pinhole camera model*.
37 |
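To give a first taste of this projection, here is a minimal numpy sketch of the pinhole model for a single point that is already given in camera coordinates. The intrinsic matrix `K` is a made-up example; the details follow in the chapter on image formation:

```python
# Minimal pinhole-projection sketch with a made-up intrinsic matrix K.
import numpy as np

K = np.array([[1000.0,    0.0, 512.0],
              [   0.0, 1000.0, 256.0],
              [   0.0,    0.0,   1.0]])

P_cam = np.array([2.0, 1.5, 20.0])  # 3D point in camera coordinates (meters)

uvw = K @ P_cam
u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]  # divide by depth to get pixel coordinates
```
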
38 | ```{figure} images/carla_lane_ground_truth.svg
39 | ---
40 | align: center
41 | width: 67%
42 | name: carla_lane_ground_truth
43 | ---
44 | The lane boundaries from Carla's HD map are projected into the dashcam image (blue and orange).
45 | ```
46 |
47 | For each simulation step, we save two separate images:
48 | * The image captured by the dashcam
49 | * A *label image* that only consists of the projected lane boundaries
50 |
51 | You will learn how to create the label images in the chapter [Basics of image formation](./CameraBasics.ipynb).
52 |
53 |
54 | ## From pixels to meters - Inverse Perspective Mapping
55 | A camera maps the three-dimensional world into a two-dimensional image plane. In general, it is not possible to take a single image and reconstruct the three-dimensional coordinates of the objects depicted in that image. Using the *pinhole camera model*, we can reconstruct the direction from which a light ray scattered off a depicted object arrived at the sensor, but not how many meters it traveled.
56 | This is different for the light that was scattered from the road into our camera sensor. Using the assumption that the road is flat, and our knowledge of the camera height and orientation with respect to the road, it is a basic geometry problem to compute the $x,y,z$ position of each "road pixel" ($x,y,z$ in meters!). This computation is known as *inverse perspective mapping* and you will learn about it in the chapter [From Pixels to Meters](./InversePerspectiveMapping.ipynb).
57 |
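As a rough preview of the geometry (the careful derivation follows in that chapter), here is a minimal sketch that assumes a camera mounted at height `h`, with its optical axis parallel to a flat road and its y-axis pointing down towards it. All numbers are made up:

```python
# Flat-road sketch: intersect a pixel's viewing ray with the ground plane Y_c = h.
import numpy as np

K = np.array([[1000.0,    0.0, 512.0],
              [   0.0, 1000.0, 256.0],
              [   0.0,    0.0,   1.0]])  # made-up intrinsics
h = 1.3  # camera height above the road in meters (assumed)

ray = np.linalg.inv(K) @ np.array([600.0, 400.0, 1.0])  # ray of pixel (u, v) = (600, 400)
scale = h / ray[1]           # stretch the ray until it hits the ground plane
X_c, Y_c, Z_c = scale * ray  # road point in meters, with Y_c == h
```
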
58 | From our deep learning model we have a list of probabilities $p_i(\textrm{left boundary}), i=0,1,2, \dots$ for all the pixels. Using *inverse perspective mapping* we can even write down a list of tuples $(x_i,y_i,p_i(\textrm{left boundary})), i=0,1,2, \dots$, since we know the road coordinates $(x_i,y_i)$ of each pixel.
59 |
60 | We can now filter this list and throw away all tuples where $p_i(\textrm{left boundary})$ is small. The filtered list of $(x_i,y_i)$ can be fed into a method for polynomial fitting, which will result in a polynomial $y(x)=c_0+c_1 x+c_2 x^2 +c_3 x^3$ describing the left lane boundary. The same procedure is repeated for the right boundary.
61 |
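A minimal sketch of this filtering and fitting step with numpy, using synthetic stand-ins for the pipeline's real output:

```python
# Keep confident boundary points, then fit y(x) = c0 + c1*x + c2*x**2 + c3*x**3.
import numpy as np

x = np.linspace(5.0, 50.0, 100)           # road x-coordinates in meters (synthetic)
y = 1.8 + 0.01 * x + 1e-3 * x**2          # synthetic left-boundary y-values
p = np.random.uniform(0.0, 1.0, x.shape)  # p_i(left boundary) for each point

mask = p > 0.5                                        # throw away low-probability tuples
c3, c2, c1, c0 = np.polyfit(x[mask], y[mask], deg=3)  # highest power first
```
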
62 | ## Outlook
63 | Once you have finished all exercises of this chapter, you will have implemented a python class `LaneDetector` that can yield lane boundary polynomials $y_l(x), y_r(x)$ for the left and right lane boundary, given an image from a dashcam in the Carla simulator.
64 | In the next chapter, we will write a lane-keeping system for a vehicle in the Carla simulator. This lane-keeping system needs the desired speed and a reference path as inputs. You can take the centerline between the lane boundaries that your `LaneDetector` class computes, and feed that into your lane-keeping system.
65 |
66 | ## References
67 | ```{bibliography}
68 | :filter: docname in docnames
69 | ```
--------------------------------------------------------------------------------
/book/LaneDetection/images/Bayer_pattern_on_sensor.svg:
--------------------------------------------------------------------------------
1 |
2 |
60 |
--------------------------------------------------------------------------------
/book/LaneDetection/images/Pixel-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/images/Pixel-example.png
--------------------------------------------------------------------------------
/book/LaneDetection/images/carla_scene.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/images/carla_scene.png
--------------------------------------------------------------------------------
/book/LaneDetection/images/ipm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/images/ipm.png
--------------------------------------------------------------------------------
/book/LaneDetection/images/photo_sensor.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/images/photo_sensor.jpeg
--------------------------------------------------------------------------------
/book/LaneDetection/images/surface.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/images/surface.png
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/camera_projection/CameraProjection.synctex.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/tikz/camera_projection/CameraProjection.synctex.gz
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/camera_projection/CameraProjection.tex:
--------------------------------------------------------------------------------
1 | \documentclass[border=10pt,multi,tikz,convert={outfile=\jobname.svg}]{standalone}
2 | \usepackage{tikz-3dplot}
3 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
4 | \begin{document}
5 | \tdplotsetmaincoords{-60}{-35}
6 | \begin{tikzpicture}
7 | [
8 | tdplot_main_coords,
9 | >=Stealth,
10 | my dashed/.style={dashed, thick, ->, shorten >=-15pt, shorten <=-15pt, every node/.append style={font=\footnotesize}},
11 | my box/.style={thin, gray!70},
12 | my blue/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}},
13 | my label/.append style={midway, font=\scriptsize},
14 | my vectors/.style={green!50!black, {Stealth[scale=.75]}-{Stealth[scale=.75]}},
15 | my red/.style={thick, red, line cap=round},
16 | my grey/.style={gray!70},
17 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
18 | ]
19 | % \draw [help lines] (-2,0,0) -- (2,0,0) node[anchor=north west]{$x$} (0,0,0) -- (0,7,0) node[anchor=north east]{$y$} (0,0,0) -- (0,0,2) node[anchor=north]{$z$} (-2,7,0) -- (2,7,0);
20 | \draw [my grey] (0,4,0) -- (0,7,0) (-2,7,0) -- (2,7,0);
21 | \coordinate (o) at (0,0,0);
22 | \path [draw=gray!70, text=gray, fill=gray!20, opacity=0.8, text opacity=1] (-1.5,4,1.75) coordinate (a) -- ++(0,0,-3.5) coordinate (b) -- ++(3,0,0) coordinate (c) -- ++(0,0,3.5) coordinate (d) -- cycle node [pos=.95, above, sloped, anchor=south west] {$Z_c=f$} ;
23 | % \foreach \i in {a,b,c,d} \node [red, font=\scriptsize] at (\i) {\i};
24 | \draw [my grey] (-2,0,0) -- (2,0,0) (0,0,0) -- (0,4,0) (0,0,0) -- (0,0,2);
25 | \draw [thick, ->, every node/.style={font=\footnotesize, inner sep=0pt}, color=blue] (o) node [anchor=north west] {} (o) edge node [pos=1, anchor=north east] {$Z_c$} ++(0,1,0) edge node [pos=1, anchor=north] {$Y_c$} ++(0,0,1) -- ++(1,0,0) node [anchor=north west] {$X_c$};
26 | \draw [my box] (o) ++(0,4,-.5) coordinate (p1) -- ++(1,0,0) coordinate (p2) -- ++(0,0,-1.25) coordinate (p3);
27 | \foreach \i in {0,1,...,4} \draw [my box] (p1) ++(\i*.25,0,0) -- ++(0,0,-.25);
28 | \foreach \i in {0,1,...,5} \draw [my box] (p2) ++(0,0,-\i*.25) -- ++(-.25,0,0);
29 | \draw [my box] (p1) ++(0,0,-.25) -- ++(.75,0,0) -- ++(0,0,-1);
30 | \draw [my dashed, cyan] ($(b)!1/2!(c)$) -- ($(d)!1/2!(a)$) node [below=15pt, anchor=north] {$y$};
31 | \draw [my dashed, cyan] ($(b)!1/2!(a)$) -- ($(d)!1/2!(c)$) node [above right=17pt, anchor=north west] {$x$};
32 | \draw [my dashed, green!50!black, <->] (a) node [below=15pt, anchor=north] {$v$} -- (b) -- (c) node [above right=17pt, anchor=north west] {$u$};
33 | %\path [green!50!black, every node/.style={font=\scriptsize, inner sep=0pt}] (p2) node [above right, anchor=south west] {$(u,v)$};
34 | \path [red, every node/.style={font=\scriptsize, inner sep=0pt}] (p2) node [above right, anchor=south west] {~$p$};
35 | \path (p2) ++(-.125,0,0) coordinate (q2) ++(0,0,-.125) coordinate (r2);
36 | \draw [my blue=1] ($(0,4,0)+($(q2)-(p1)$)$) coordinate (s2) -- (r2) node (d1) {};
37 | \scoped[on background layer]{\draw [my blue=1.75] ($($1.75*($(s2)-(0,4,0)$)$)+(0,7,0)$) -- ++($1.75*($(r2)-(s2)$)$) node (d2) [label={[label distance=-28pt]above:{$P=(X_c,Y_c,Z_c)$}}] {};}
38 | \draw [my red] (o) -- (d1.center);
39 | \scoped[on background layer]{\draw [my red] (d1.center) -- (d2.center);}
40 | %\path [description] (0,6.5,0) [out=-95, in=95] to (-.75,6.5,.25) node {optical \\ axis};
41 | \end{tikzpicture}
42 | \end{document}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/camera_projection_side_view/camera_projection_side_view.tex:
--------------------------------------------------------------------------------
1 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
2 | \begin{tikzpicture} [
3 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
4 | imagearrow/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*0*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}}
5 | ]
6 |
7 | \tikzstyle{every node}=[font=\footnotesize]
8 |
9 | \draw[thick] (-1,0) -- (5,0);
10 |
11 | \draw [->,thick] (2,0) -- (2,0.8) ;
12 | \draw [thick](2,0.4) node[right]{$x$};
13 | \draw [thick](2,1.2) node{$p$};
14 |
15 | %\draw [thick, red] (5,0) -- (5,2);
16 | \draw [imagearrow = 2] (5,0) -- (5,2);
17 |
18 | \draw [thick](5,1) node[right]{$X_c$};
19 | \draw [thick](5.2,2) node[above]{$P=(X_c,Y_c,Z_c)$};
20 |
21 | \draw[->,red, thick] (5,2) -- (0,0);
22 |
23 | \draw[gray, <->, thick] (0,-0.8) -- (2,-0.8);
24 | \draw[gray] (1,-1.1) node{$f$};
25 |
26 | % coordinate systems
27 | \draw [<->,thick, blue, dashed] (0,1) node (yaxis) [right] {$X_c$}
28 | |- (1,0) node (xaxis) [below] {$Z_c$};
29 |
30 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/coordinate_systems/coordinate_systems.tex:
--------------------------------------------------------------------------------
1 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
2 | \begin{tikzpicture} [
3 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
4 | imagearrow/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*0*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}}
5 | ]
6 |
7 | \definecolor{bluewindow}{RGB}{215,238,244}
8 | \definecolor{bluecar}{RGB}{64,81,181}
9 |
10 |
11 | %\node[inner sep=0pt] (russell) at (0,0)
12 | % {\includegraphics{Capture.png}};
13 |
14 |
15 | \draw[color=bluecar, fill] (-11.3,0.2) -- (-7,3.5) -- (1.6,3.5) -- (5.8,0.74) -- (12,-0.26) -- (11.1, -3.2) -- (-11.3, -3.2) -- cycle;
16 | \draw[color=bluewindow, fill] (-2.2,3.1) -- (1.3,3.1) -- (3.7,0.75) -- (-2.2,0.75) -- cycle;
17 | \draw[color=bluewindow, fill](1.83,3.31) -- (1.7,3.2) -- (4.7,0.76) -- (5.7,0.76) -- cycle;
18 | \draw[color=bluewindow,fill] (-5.7,3.1) -- (-2.7,3.1) -- (-2.7,0.75) -- (-7.8,0.75) -- cycle;
19 |
20 | \draw[fill] (-7.6,-3.3) circle(1.8);
21 | \draw[fill, gray] (-7.6,-3.3) circle(1);
22 |
23 | \draw[fill] (8.2,-3.3) circle(1.8);
24 | \draw[fill, gray] (8.2,-3.3) circle(1);
25 |
26 |
27 | % coordinate systems
28 | %\draw [<->,ultra thick, dashed] (3,4.3) node (yaxis) [right] {\LARGE $X_c$}
29 | % |- (5,2.7) node (xaxis) [below] {\LARGE $Z_c$};
30 |
31 | % define some math for coordinate axes
32 | \pgfmathsetmacro{\len}{2.5}
33 | \pgfmathsetmacro{\cosp}{0.94}
34 | \pgfmathsetmacro{\sinp}{0.34}
35 |
36 | \pgfmathsetmacro{\ocx}{2.58}
37 | \pgfmathsetmacro{\ocy}{2.75}
38 |
39 | \pgfmathsetmacro{\orx}{2.58}
40 | \pgfmathsetmacro{\ory}{-5.1}
41 |
42 | \pgfmathsetmacro{\owx}{-11.5}
43 | \pgfmathsetmacro{\owy}{4.4}
44 | \pgfmathsetmacro{\eps}{0.8}
45 |
46 | % cam system
47 | \draw[->, ultra thick, green] (\ocx,\ocy) -- (\ocx-\len*\sinp, \ocy-\len*\cosp) node[below left=-0.1 and -0.1]{\Huge $Y_c$};
48 | \draw[->, ultra thick, green] (\ocx,\ocy) -- (\ocx+\len*\cosp, \ocy-\len*\sinp) node[right]{\Huge $Z_c$};
49 |
50 | %default system
51 | \draw[->, ultra thick, dashed, gray] (\ocx,\ocy) -- (\ocx, \ocy-\len) node[below=-0.05]{\Huge $Y_d$};
52 | \draw[->, ultra thick, dashed, gray] (\ocx,\ocy) -- (\ocx+\len, \ocy) node[above right= -0.1 and 0]{\Huge $Z_d$};
53 |
54 | %road system
55 | \draw[->, ultra thick] (\orx,\ory) -- (\orx, \ory-\len*0.5) node[below=-0.05]{\Huge $Y_r$};
56 | \draw[->, ultra thick] (\orx,\ory) -- (\orx+\len, \ory) node[below right= 0.01 and 0]{\Huge $Z_r$};
57 |
58 | % road system ISO 8855
59 | \draw[->, ultra thick, dashed, orange] (\orx,\ory) -- (\orx, \ory+0.5*\len) node[right=0.15]{\Huge $Z_i$};
60 | \draw[->, ultra thick, dashed, orange] (\orx,\ory) -- (\orx+\len, \ory) node[above right= 0.01 and 0]{\Huge $X_i$};
61 |
62 | % world
63 | \draw[->, ultra thick, purple] (\owx, \owy) -- (\owx+\eps, \owy+2) node[right=0.15]{\Huge $Z_w$};
64 | \draw[->, ultra thick, purple] (\owx, \owy) -- (\owx+2, \owy-\eps) node[right=0.1]{\Huge $X_w$};
65 | \draw[->, ultra thick, purple] (\owx, \owy) -- (\owx-1.5, \owy - 1.5+\eps) node[below=0.15]{\Huge $Y_w$};
66 |
67 |
68 | \draw[fill] (\ocx,\ocy) circle(0.11);
69 | \draw[fill] (\orx,\ory) circle(0.11);
70 |
71 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/inverse_perspective/inverse_perspective.synctex.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/tikz/inverse_perspective/inverse_perspective.synctex.gz
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/inverse_perspective/inverse_perspective.tex:
--------------------------------------------------------------------------------
1 | \documentclass[border=10pt,multi,tikz,convert={outfile=\jobname.svg}]{standalone}
2 | \usepackage{tikz-3dplot}
3 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
4 | \begin{document}
5 | \tdplotsetmaincoords{-60}{-35}
6 | \begin{tikzpicture}
7 | [
8 | tdplot_main_coords,
9 | >=Stealth,
10 | my dashed/.style={dashed, thick, ->, shorten >=-15pt, shorten <=-15pt, every node/.append style={font=\footnotesize}},
11 | my box/.style={thin, gray!70},
12 | my blue/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}},
13 | my label/.append style={midway, font=\scriptsize},
14 | my vectors/.style={green!50!black, {Stealth[scale=.75]}-{Stealth[scale=.75]}},
15 | my red/.style={thick, red, line cap=round},
16 | my grey/.style={gray!70},
17 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
18 | ]
19 | % \draw [help lines] (-2,0,0) -- (2,0,0) node[anchor=north west]{$x$} (0,0,0) -- (0,7,0) node[anchor=north east]{$y$} (0,0,0) -- (0,0,2) node[anchor=north]{$z$} (-2,7,0) -- (2,7,0);
20 | \draw [my grey] (0,4,0) -- (0,7,0) (-2,7,0) -- (2,7,0);
21 | \coordinate (o) at (0,0,0);
22 | \path [draw=gray!70, text=gray, fill=gray!20, opacity=0.8, text opacity=1] (-1.5,4,1.75) coordinate (a) -- ++(0,0,-3.5) coordinate (b) -- ++(3,0,0) coordinate (c) -- ++(0,0,3.5) coordinate (d) -- cycle node [pos=.95, above, sloped, anchor=south west] {$Z_c=f$} ;
23 | % \foreach \i in {a,b,c,d} \node [red, font=\scriptsize] at (\i) {\i};
24 | \draw [my grey] (-2,0,0) -- (2,0,0) (0,0,0) -- (0,4,0) (0,0,0) -- (0,0,2);
25 | \draw [thick, ->, every node/.style={font=\footnotesize, inner sep=0pt}, color=blue] (o) node [anchor=north west] {} (o) edge node [pos=1, anchor=north east] {$Z_c$} ++(0,1,0) edge node [pos=1, anchor=north] {$Y_c$} ++(0,0,1) -- ++(1,0,0) node [anchor=north west] {$X_c$};
26 | \draw [my box] (o) ++(0,4,-.5) coordinate (p1) -- ++(1,0,0) coordinate (p2) -- ++(0,0,-1.25) coordinate (p3);
27 | \foreach \i in {0,1,...,4} \draw [my box] (p1) ++(\i*.25,0,0) -- ++(0,0,-.25);
28 | \foreach \i in {0,1,...,5} \draw [my box] (p2) ++(0,0,-\i*.25) -- ++(-.25,0,0);
29 | \draw [my box] (p1) ++(0,0,-.25) -- ++(.75,0,0) -- ++(0,0,-1);
30 | \draw [my dashed, cyan] ($(b)!1/2!(c)$) -- ($(d)!1/2!(a)$) node [below=15pt, anchor=north] {$y$};
31 | \draw [my dashed, cyan] ($(b)!1/2!(a)$) -- ($(d)!1/2!(c)$) node [above right=17pt, anchor=north west] {$x$};
32 | \draw [my dashed, green!50!black, <->] (a) node [below=15pt, anchor=north] {$v$} -- (b) -- (c) node [above right=17pt, anchor=north west] {$u$};
33 | %\path [green!50!black, every node/.style={font=\scriptsize, inner sep=0pt}] (p2) node [above right, anchor=south west] {$(u,v)$};
34 | \path [red, every node/.style={font=\scriptsize, inner sep=0pt}] (p2) node [above right, anchor=south west] {~$p$};
35 | \path (p2) ++(-.125,0,0) coordinate (q2) ++(0,0,-.125) coordinate (r2);
36 | \draw [my blue=1] ($(0,4,0)+($(q2)-(p1)$)$) coordinate (s2) -- (r2) node (d1) {};
37 | \scoped[on background layer]{\draw [my blue=1.75] ($($1.75*($(s2)-(0,4,0)$)$)+(0,7,0)$) -- ++($1.75*($(r2)-(s2)$)$) node (d2) [label={[label distance=-28pt]above:{$P=(X_c,Y_c,Z_c)$}}] {};}
38 | \draw [my red] (o) -- (d1.center);
39 | \scoped[on background layer]{\draw [my red] (d1.center) -- (d2.center);}
40 | %\path [description] (0,6.5,0) [out=-95, in=95] to (-.75,6.5,.25) node {optical \\ axis};
41 | \end{tikzpicture}
42 | \end{document}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/iso8850/bev_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/tikz/iso8850/bev_2.jpg
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/iso8850/iso8850.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/tikz/iso8850/iso8850.png
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/iso8850/iso8850.tex:
--------------------------------------------------------------------------------
1 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
2 | \begin{tikzpicture} [
3 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
4 | imagearrow/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*0*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}}
5 | ]
6 |
7 | \definecolor{bluewindow}{RGB}{215,238,244}
8 | \definecolor{bluecar}{RGB}{64,81,181}
9 |
10 |
11 | \node[inner sep=0pt] (russell) at (0,0)
12 | {\includegraphics{bev_2.jpg}};
13 |
14 |
15 |
16 | % define some math for coordinate axes
17 | \pgfmathsetmacro{\len}{2.5}
18 | \pgfmathsetmacro{\cosp}{0.94}
19 | \pgfmathsetmacro{\sinp}{0.34}
20 |
21 | \pgfmathsetmacro{\ocx}{2.58}
22 | \pgfmathsetmacro{\ocy}{2.75}
23 |
24 | \pgfmathsetmacro{\orx}{-0.06}
25 | \pgfmathsetmacro{\ory}{-1.72}
26 |
27 | \pgfmathsetmacro{\owx}{-11.5}
28 | \pgfmathsetmacro{\owy}{4.4}
29 | \pgfmathsetmacro{\eps}{0.8}
30 |
31 |
32 | %road system
33 | \draw[->, ultra thick] (\orx,\ory) -- (\orx, \ory+\len) node[above right]{\Huge $x$};
34 | \draw[->, ultra thick] (\orx,\ory) -- (\orx-\len, \ory) node[below left]{\Huge $y$};
35 |
36 | %\draw[blue] (-1.8,4) node {\Huge $y_l(x)$};
37 | %\draw[red] (3.2,5) node {\Huge $y_r(x)$};
38 |
39 | \draw[blue] (-3.5,4.7) node {\Huge $y_l(x)$};
40 | \draw[red] (1.3,4.7) node {\Huge $y_r(x)$};
41 |
42 |
43 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/iso8850/iso8850_crop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/LaneDetection/tikz/iso8850/iso8850_crop.png
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/no_pinhole/no_pinhole.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture} [
2 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
3 | ]
4 |
5 | \definecolor{mygreen}{RGB}{28, 150, 28}
6 | %tree
7 | %\draw[fill, color=olive] (0,0) rectangle (1,5);
8 | %\draw[fill, color=mygreen] (0.5,5) circle (1.8);
9 | \draw[fill=mygreen, color = mygreen] (-1.3,2) -- (2.3,2) -- (0.5,7) -- cycle;
10 | \draw[fill=olive, color = olive] (0,0) -- (1,0) -- (0.5,4) -- cycle;
11 | \draw[fill=olive, color = olive] (0.5,3) -- (0.5,3.3) -- (1,3.5) -- cycle;
12 |
13 | \draw[fill=olive, color = olive] (0.5,2.5) -- (0.5,2.7) -- (0,3.0) -- cycle;
14 |
15 | %image sensor
16 | \draw[fill, color=black] (8,2) rectangle (8.1,3.4);
17 | \path[description] (8.05,3.6) [out=90, in=270] to (9,6) node[above] {\Large image sensor};
18 |
19 | %rays from tree
20 | \foreach \y in {2.2, 2.7, 3.2}
21 | {
22 | %\draw[line width=2, ->, color=green] (2.2,4.5) -- (8.0,\y);
23 | %\draw[line width=2, ->, color=olive] (1,1) -- (8.0,\y);
24 | \draw[line width=2, ->, color=mygreen] (1.2,4.9) -- (8.0,\y);
25 | \draw[line width=2, ->, color=olive] (0.8,0.937) -- (8.0,\y);
26 | }
27 |
28 | \path[description] (4.4,4.2) [out=90, in=270] to (5.2,4.8) node[above] {\Large light rays};
29 |
30 |
31 |
32 | \draw[fill, color=yellow] (4,8) circle (0.7);
33 |
34 | \draw[line width=1.5,color=yellow] (4,8) -- (4-1.5,8-1.5);
35 | \draw[line width=1.5,color=yellow] (4,8) -- (4-1.5/0.74,8);
36 | \draw[line width=1.5,color=yellow] (4,8) -- (4,8-1.5/0.74);
37 |
38 | % ..
39 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/pinhole_box/pinhole_box.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture} [
2 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
3 | ]
4 |
5 | \definecolor{mygreen}{RGB}{28, 150, 28}
6 | %tree
7 | \draw[fill=mygreen, color = mygreen] (-1.3,2) -- (2.3,2) -- (0.5,7) -- cycle;
8 | \draw[fill=olive, color = olive] (0,0) -- (1,0) -- (0.5,4) -- cycle;
9 | \draw[fill=olive, color = olive] (0.5,3) -- (0.5,3.3) -- (1,3.5) -- cycle;
10 |
11 | \draw[fill=olive, color = olive] (0.5,2.5) -- (0.5,2.7) -- (0,3.0) -- cycle;
12 |
13 |
14 | %image sensor
15 | \draw[fill, color=black] (8,2) rectangle (8.1,3.4);
16 | \path[description] (8.05,3.6) [out=90, in=270] to (9,6) node[above] {\Large image sensor};
17 |
18 | %box
19 | \draw[line width=1.2] (6.6,1) rectangle (8.1,4.4);
20 | \draw[fill, color=white] (6.6,2.75) circle (0.07); %pinhole
21 | \path[description] (6.5,2.95) [out=90+45, in=270] to (6,5) node[above] {\Large pinhole};
22 |
23 |
24 | %rays from tree
25 | \draw[line width=2, ->, color=mygreen] (1.2,4.9) -- (8.0,2.2);
26 | \draw[line width=2, ->, color=olive] (0.8,0.937) -- (8.0,3.2);
27 |
28 | \draw[fill, color=yellow] (4,8) circle (0.7);
29 |
30 | \draw[line width=1.5,color=yellow] (4,8) -- (4-1.5,8-1.5);
31 | \draw[line width=1.5,color=yellow] (4,8) -- (4-1.5/0.74,8);
32 | \draw[line width=1.5,color=yellow] (4,8) -- (4,8-1.5/0.74);
33 |
34 | % ..
35 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/LaneDetection/tikz/uv/uv_grid.tex:
--------------------------------------------------------------------------------
1 | \begin{tikzpicture} [
2 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
3 | ]
4 |
5 | \definecolor{blue}{RGB}{110,120,250}
6 |
7 | \foreach \x in {0,...,4}
8 | \foreach \y in {0,...,4}
9 | %\draw [fill=blue!30!white, line width=1.5] (\x,\y) rectangle (\x+1.0,\y+1.0);
10 | \draw [fill=blue, line width=1.5] (\x,\y) rectangle (\x+1.0,\y+1.0);
11 | \draw [fill=green, line width=1.5] (2,4) rectangle (3,5);
12 | \draw [fill=green, line width=1.5] (2,3) rectangle (3,4);
13 | \draw [fill=green, line width=1.5] (3,3) rectangle (4,4);
14 | \draw [fill=green, line width=1.5] (1,3) rectangle (2,4);
15 |
16 | \draw [fill=brown, line width=1.5] (2,2) rectangle (3,3);
17 | \draw [fill=brown, line width=1.5] (2,1) rectangle (3,2);
18 | \draw [fill=brown, line width=1.5] (2,0) rectangle (3,1);
19 |
20 | \draw[dashed, line width=2, ->, color=green!50!black] (-0.1,5) -- (-0.1,3) node[below, left] {$v$};
21 | \draw[dashed, line width=2, ->, color=green!50!black] (0,5.1) -- (2,5.1) node[right, above] {$u$};
22 | \draw[fill, color=green!50!black] (-0.1,5.1) circle (0.07);
23 | \path[description] (3.5,3.5) [out=-35, in=120] to (6.3,3) node {img[1,3]=[0,255,0]};
24 |
25 | \path[description] (0.5,4.5) [out=180, in=90] to (-2,4) node {img[0,0]=[110,120,250]};
26 | % ..
27 | \end{tikzpicture}
--------------------------------------------------------------------------------
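The `uv_grid` figure above encodes the book's pixel-indexing convention: row index `v` comes first, column index `u` second. A minimal numpy sketch of the same idea (illustrative values only, not a file from the repository):

import numpy as np

# 5x5 RGB image filled with the figure's background blue (RGB 110,120,250)
img = np.full((5, 5, 3), (110, 120, 250), dtype=np.uint8)
img[1, 3] = (0, 255, 0)  # green pixel at v=1, u=3, as labeled in the figure

print(img[1, 3])  # [  0 255   0]
print(img[0, 0])  # [110 120 250]
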
/book/LaneDetection/tikz/virtual_pinhole_box/virtual_pinhole_box.tex:
--------------------------------------------------------------------------------
1 | \usetikzlibrary{calc,arrows.meta,positioning,backgrounds}
2 | \begin{tikzpicture} [
3 | description/.style={draw=gray!70, thick, line cap=round, every node/.style={align=center, font=\scriptsize\sffamily, anchor=north}},
4 | imagearrow/.style={red, line cap=round, -{Triangle[width=3*#1]}, line width=#1, shorten >=#1*0*1.75pt, every node/.append style={fill, circle, inner sep=0pt, minimum size=#1*3.5pt, anchor=center, outer sep=0pt}}
5 | ]
6 |
7 | \definecolor{mygreen}{RGB}{28, 150, 28}
8 |
9 | %boxes
10 | \draw[gray,line width=1, dashed] (6,-1) rectangle (7,1);
11 | \draw[line width=1] (7,-1) rectangle (8,1);
12 |
13 | % object
14 | \draw[imagearrow=2] (1,0) -- (1,2.5);
15 |
16 |
17 | %light ray from bottom
18 | \draw[red, ->, thick] (1,0) -- (8,0);
19 |
20 | % coordinate systems
21 | \draw [<->,thick, blue, dashed] (7,-2) node (yaxis) [below] {$Y_c$}
22 | |- (5,0) node (xaxis) [below] {$Z_c$};
23 |
24 |
25 | % pinhole
26 | \draw[color=white, fill] (7,0) circle (0.1);
27 | % redraw part of red light ray that was deleted (hacky!)
28 | \draw[red, ->, thick] (6.88,0) -- (8,0);
29 |
30 | % light ray from top
31 | \draw[red, ->, thick] (0.98,2.49) -- (8,-0.44);
32 |
33 |
34 | % descriptions
35 | %\path[description](1,2.6) [out=90, in=270] to (2,3) node[above] {Point $P$ \\ $(X_c=0,Y_c=-h,Z_c=d)$};
36 | %\path[description](8.1,-0.44) [out=0, in=90] to (10,-2) node[below] {image of $P$ \\$(X_c=0,Y_c=h',Z_c=-f)$};
37 | %\path[description](5.94,0.48) [out=90+45, in=270] to (7,3) node[above] {virtual image of $P$ \\$(X_c=0,Y_c=h',Z_c=f)$};
38 | \path[description](6,1.1) [out=90, in=-90] to (5.5,2) node[above] {image plane $Z_c=f$};
39 |
40 |
41 | %length
42 |
43 | \draw[gray, <->, thick] (7,1.4) -- (8,1.4);
44 | \draw[gray] (7.5,1.6) node{\scriptsize $f$};
45 |
46 | \draw[gray, <->, thick] (6,1.4) -- (7,1.4);
47 | \draw[gray] (6.5,1.6) node{\scriptsize $f$};
48 |
49 | \draw[gray, <->, thick] (8.2,-0.44) -- (8.2,0);
50 | \draw[gray] (8.55,-0.22) node{\scriptsize $h'$};
51 |
52 | \draw[gray, <->, thick] (5.8,0.44) -- (5.8,0);
53 | \draw[gray] (5.5,0.26) node{\scriptsize $h'$};
54 |
55 | \draw[gray, <->, thick] (0.5,0) -- (0.5,2.5);
56 | \draw[gray] (0.2,1.25) node{\scriptsize $h$};
57 |
58 | \draw[gray, <->, thick] (1,-1.5) -- (7,-1.5);
59 | \draw[gray] (4,-1.8) node{\scriptsize $d$};
60 |
61 | \end{tikzpicture}
--------------------------------------------------------------------------------
/book/_config.yml:
--------------------------------------------------------------------------------
1 | # Book settings
2 | title: Algorithms for Automated Driving
3 | author: Mario Theers and Mankaran Singh
4 |
5 |
6 | logo: car_sketch_wide.png
7 |
8 | html:
9 | favicon: car_sketch.png
10 | home_page_in_navbar : false
11 | use_edit_page_button: true
12 | use_repository_button: true
13 | use_issues_button: true
14 | extra_footer: This work is licensed under a Creative Commons Attribution 4.0 International License.
15 | google_analytics_id: UA-183782120-1
16 |
17 | repository:
18 | url: "https://github.com/thomasfermi/Algorithms-for-Automated-Driving"
19 | branch: master
20 | path_to_book: book
21 |
22 | launch_buttons:
23 | #notebook_interface: "classic"
24 | binderhub_url: "https://mybinder.org"
25 | colab_url: "https://colab.research.google.com"
26 | thebe: true
27 |
28 | sphinx:
29 | extra_extensions:
30 | - sphinx_inline_tabs
31 | config:
32 | html_show_copyright: false
33 | html_js_files:
34 | - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js
35 |
36 | bibtex_bibfiles:
37 | - references.bib
38 |
39 | latex:
40 | latex_documents:
41 | targetname: book.tex
42 |
43 | exclude_patterns: [Control/MPC.ipynb]
44 |
--------------------------------------------------------------------------------
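Side note (an assumption, not documented in the repository): the `_config.yml` above is a standard Jupyter Book configuration, so a local build along the lines of `jupyter-book build book` from the repository root, inside the `aad-book` environment defined in `book/environment.yml`, should reproduce the HTML site.
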
/book/_toc.yml:
--------------------------------------------------------------------------------
1 | format: jb-book
2 | root: Introduction/intro
3 | parts:
4 | - caption: Lane Detection
5 | chapters:
6 | - file: LaneDetection/LaneDetectionOverview.md
7 | - file: LaneDetection/CameraBasics.ipynb
8 | - file: LaneDetection/Segmentation.ipynb
9 | - file: LaneDetection/InversePerspectiveMapping.ipynb
10 | - file: LaneDetection/Discussion.md
11 | - caption: Control
12 | chapters:
13 | - file: Control/ControlOverview.md
14 | - file: Control/PID.ipynb
15 | - file: Control/BicycleModel.md
16 | - file: Control/PurePursuit.md
17 | - file: Control/Discussion.md
18 | - caption: Camera Calibration
19 | chapters:
20 | - file: CameraCalibration/VanishingPointCameraCalibration.ipynb
21 | - file: CameraCalibration/Discussion.md
22 | - caption: Appendix
23 | chapters:
24 | - file: Appendix/ExerciseSetup.md
25 | - file: Appendix/CarlaInstallation.md
26 | - file: Appendix/NextChapters.md
27 | - file: Appendix/Feedback.md
28 |
--------------------------------------------------------------------------------
/book/car_sketch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/car_sketch.png
--------------------------------------------------------------------------------
/book/car_sketch_wide.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/book/car_sketch_wide.png
--------------------------------------------------------------------------------
/book/environment.yml:
--------------------------------------------------------------------------------
1 | name: aad-book
2 | channels:
3 | - conda-forge
4 | - pytorch
5 | - nvidia
6 | - fastai
7 | dependencies:
8 | - python=3.7
9 | - matplotlib
10 | - numpy
11 | - numba
12 | - opencv
13 | - jupyterlab
14 | - ipywidgets
15 | - pytorch=1.9.0
16 | - torchvision=0.10.0
17 | - fastai=2.5.0
18 | - albumentations
19 | - tqdm
20 | - pip
21 | - pip:
22 | - fastseg==0.1.2
23 | - pyclothoids
24 | - jupyter-book==0.13.2
25 | - pygame
26 | - imageio
27 | - imageio-ffmpeg
--------------------------------------------------------------------------------
/book/references.bib:
--------------------------------------------------------------------------------
1 | @misc{gansbeke2019endtoend,
2 | title={End-to-end Lane Detection through Differentiable Least-Squares Fitting},
3 | author={Wouter Van Gansbeke and Bert De Brabandere and Davy Neven and Marc Proesmans and Luc Van Gool},
4 | year={2019},
5 | eprint={1902.00293},
6 | archivePrefix={arXiv},
7 | primaryClass={cs.CV}
8 | }
9 |
10 | @misc{bojarski2020nvidia,
11 | title={The NVIDIA PilotNet Experiments},
12 | author={Mariusz Bojarski and Chenyi Chen and Joyjit Daw and Alperen Değirmenci and Joya Deri and Bernhard Firner and Beat Flepp and Sachin Gogri and Jesse Hong and Lawrence Jackel and Zhenhua Jia and BJ Lee and Bo Liu and Fei Liu and Urs Muller and Samuel Payne and Nischal Kota Nagendra Prasad and Artem Provodin and John Roach and Timur Rvachov and Neha Tadimeti and Jesper van Engelen and Haiguang Wen and Eric Yang and Zongyi Yang},
13 | year={2020},
14 | eprint={2010.08776},
15 | archivePrefix={arXiv},
16 | primaryClass={cs.CV}
17 | }
18 |
19 | @misc{Yakubovskiy:2019,
20 | Author = {Pavel Yakubovskiy},
21 | Title = {Segmentation Models Pytorch},
22 | Year = {2020},
23 | Publisher = {GitHub},
24 | Journal = {GitHub repository},
25 | Howpublished = {\url{https://github.com/qubvel/segmentation_models.pytorch}}
26 | }
27 |
28 | @book{hartley2003multiple,
29 | title={Multiple view geometry in computer vision},
30 | author={Hartley, Richard and Zisserman, Andrew},
31 | year={2003},
32 | publisher={Cambridge university press}
33 | }
34 |
35 | @inproceedings{llamas2019,
36 | title={Unsupervised Labeled Lane Marker Dataset Generation Using Maps},
37 | author={Behrendt, Karsten and Soussan, Ryan},
38 | booktitle={Proceedings of the IEEE International Conference on Computer Vision},
39 | year={2019}
40 | }
41 |
42 | @article{snider2009automatic,
43 | title={Automatic steering methods for autonomous automobile path tracking},
44 | author={Snider, Jarrod M and others},
45 |   journal={Robotics Institute, Pittsburgh, PA, Tech. Rep. CMU-RI-TR-09-08},
46 | year={2009}
47 | }
48 |
49 |
50 | @misc{ShiCourseraCalibration,
51 | author = {Jianbo Shi and Kostas Daniilidis},
52 | title = {Vanishing Points; How to Compute Camera Orientation},
53 | year = {Accessed on June 2021},
54 | url = {https://www.coursera.org/lecture/robotics-perception/vanishing-points-how-to-compute-camera-orientation-flqF4}
55 | }
56 |
57 | @misc{lee2020online,
58 | title={Online Extrinsic Camera Calibration for Temporally Consistent IPM Using Lane Boundary Observations with a Lane Width Prior},
59 | author={Jeong-Kyun Lee and Young-Ki Baik and Hankyu Cho and Seungwoo Yoo},
60 | year={2020},
61 | eprint={2008.03722},
62 | archivePrefix={arXiv},
63 | primaryClass={cs.CV}
64 | }
--------------------------------------------------------------------------------
/book/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib==3.7.1
2 | numba==0.56.4
3 | numpy==1.23.5
4 | opencv-python==4.7.0.72
5 | ipywidgets==8.0.4
6 | --extra-index-url https://download.pytorch.org/whl/cpu
7 | torch==1.13.1+cpu
8 | torchvision==0.14.1+cpu
9 | fastai==2.7.11
10 | fastseg==0.1.2
11 | pyclothoids==0.1.4
12 | jupyter-book==0.14.0
13 | sphinx_inline_tabs==2022.1.2b11
14 | sphinx==4.3.2
--------------------------------------------------------------------------------
/code/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/__init__.py
--------------------------------------------------------------------------------
/code/environment.yml:
--------------------------------------------------------------------------------
1 | name: aad
2 | channels:
3 | - conda-forge
4 | - pytorch
5 | - nvidia
6 | - fastai
7 | dependencies:
8 | - python=3.7
9 | - matplotlib
10 | - numpy
11 | - numba
12 | - opencv
13 | - jupyterlab
14 | - ipywidgets
15 | - pytorch=1.9.0
16 | - torchvision=0.10.0
17 | - fastai=2.5.0
18 | - albumentations
19 | - tqdm
20 | - pip
21 | - pip:
22 | - fastseg==0.1.2
23 | - pyclothoids
24 | - pygame
25 | - imageio
26 | - imageio-ffmpeg
27 |
--------------------------------------------------------------------------------
/code/exercises/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/exercises/__init__.py
--------------------------------------------------------------------------------
/code/exercises/camera_calibration/calibrated_lane_detector.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from ..lane_detection.lane_detector import LaneDetector
3 | from ..lane_detection.camera_geometry import CameraGeometry
4 |
5 |
6 | def get_intersection(line1, line2):
7 | m1, c1 = line1
8 | m2, c2 = line2
9 |     #TODO: find the intersection of the two lines (each given as a (slope, intercept) pair).
10 | raise NotImplementedError
11 |
12 | def get_py_from_vp(u_i, v_i, K):
13 |     #TODO: compute pitch and yaw given the camera intrinsic matrix and the vanishing point.
14 | raise NotImplementedError
15 | return pitch, yaw
16 |
17 | class CalibratedLaneDetector(LaneDetector):
18 | def __init__(self, calib_cut_v = 200, cam_geom=CameraGeometry(), model_path='./fastai_model.pth'):
19 | # call parent class constructor
20 | super().__init__(cam_geom, model_path)
21 |
22 | self.calib_cut_v = calib_cut_v
23 | self.estimated_pitch_deg = 0
24 | self.estimated_yaw_deg = 0
25 | self.update_cam_geometry()
26 |         self.mean_residuals_thresh = 1e6 #TODO: adjust this threshold so that calibration is skipped in curves.
27 | self.pitch_yaw_history = []
28 | self.calibration_success = False
29 |
30 | def get_fit_and_probs(self, image):
31 | _, left_probs, right_probs = self.detect(image)
32 | line_left = self._fit_line_v_of_u(left_probs)
33 | line_right = self._fit_line_v_of_u(right_probs)
34 | if (line_left is not None) and (line_right is not None):
35 | # TODO: If both `line_left` and `line_right` are not None,
36 | # try to compute the vanishing point using your `get_intersection` function.
37 | # Then compute pitch and yaw from the vanishing point
38 | # Finally store the pitch and yaw values in `self.pitch_yaw_history`.
39 | # This `get_fit_and_probs` function will be called again and again over time.
40 | # Once enough data is gathered in `self.pitch_yaw_history`,
41 |             # compute mean values for pitch and yaw and store them in `self.estimated_pitch_deg` and `self.estimated_yaw_deg`.
42 | # Finally call `update_cam_geometry()` so that the new estimated values are being used.
43 | raise NotImplementedError
44 |
45 | left_poly = self.fit_poly(left_probs)
46 | right_poly = self.fit_poly(right_probs)
47 | return left_poly, right_poly, left_probs, right_probs
48 |
49 | def _fit_line_v_of_u(self, probs):
50 | v_list, u_list = np.nonzero(probs > 0.3)
51 | if v_list.size == 0:
52 | return None
53 | coeffs, residuals, _, _, _ = np.polyfit(
54 | u_list, v_list, deg=1, full=True)
55 |
56 | mean_residuals = residuals/len(u_list)
57 | #print(mean_residuals)
58 | if mean_residuals > self.mean_residuals_thresh:
59 | return None
60 | else:
61 | return np.poly1d(coeffs)
62 |
63 | def update_cam_geometry(self):
64 | self.cg = CameraGeometry(
65 | height = self.cg.height,
66 | roll_deg = self.cg.roll_deg,
67 | image_width = self.cg.image_width,
68 | image_height = self.cg.image_height,
69 | field_of_view_deg = self.cg.field_of_view_deg,
70 | pitch_deg = self.estimated_pitch_deg,
71 | yaw_deg = self.estimated_yaw_deg )
72 | self.cut_v, self.grid = self.cg.precompute_grid()
73 |
74 |
--------------------------------------------------------------------------------
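To make the two TODOs above concrete, here is one possible sketch; it is an illustration under the book's conventions, not necessarily the course's reference solution. Lines are (m, c) pairs with v = m*u + c, and pitch/yaw follow from the direction K^{-1} (u_i, v_i, 1)^T of the vanishing point:

import numpy as np

def get_intersection(line1, line2):
    m1, c1 = line1
    m2, c2 = line2
    if m1 == m2:  # parallel lines: no unique intersection
        return None
    u = (c2 - c1) / (m1 - m2)
    v = m1 * u + c1
    return u, v

def get_py_from_vp(u_i, v_i, K):
    # direction of the vanishing point in the camera frame
    r = np.linalg.inv(K) @ np.array([u_i, v_i, 1.0])
    r /= np.linalg.norm(r)
    yaw = -np.arctan2(r[0], r[2])
    pitch = np.arcsin(r[1])  # sign conventions assumed; verify against the book
    return pitch, yaw
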
/code/exercises/control/get_target_point.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 |
4 | def get_target_point(lookahead, polyline):
5 | """ Determines the target point for the pure pursuit controller
6 |
7 | Parameters
8 | ----------
9 | lookahead : float
10 | The target point is on a circle of radius `lookahead`
11 | The circle's center is (0,0)
12 |     polyline: array_like, shape (M,2)
13 |         A list of 2d points that defines a polyline.
14 |
15 |     Returns:
16 |     --------
17 |     target_point: numpy array, shape (2,)
18 | Point with positive x-coordinate where the circle of radius `lookahead`
19 | and the polyline intersect.
20 | Return None if there is no such point.
21 | If there are multiple such points, return the one that the polyline
22 | visits first.
23 | """
24 | # Hint: A polyline is a list of line segments.
25 | # The formulas for the intersection of a line segment and a circle are given
26 | # here https://mathworld.wolfram.com/Circle-LineIntersection.html
27 | raise NotImplementedError
--------------------------------------------------------------------------------
/code/exercises/control/pure_pursuit.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .get_target_point import get_target_point
3 |
4 | # TODO: Tune parameters of PID with these global variables
5 | param_Kp = 0
6 | param_Ki = 0
7 | param_Kd = 0
8 | # TODO: Tune parameters of Pure Pursuit with these global variables
9 | param_K_dd = 0
10 | # The above parameters will be used in the Carla simulation
11 | # The simple simulation in tests/control/control.ipynb does not use these parameters
12 |
13 |
14 | class PurePursuit:
15 | def __init__(self, K_dd=param_K_dd, wheel_base=2.65, waypoint_shift=1.4):
16 | self.K_dd = K_dd
17 | self.wheel_base = wheel_base # called L in the book
18 | self.waypoint_shift = waypoint_shift
19 |
20 | def get_control(self, waypoints, speed):
21 | # transform x coordinates of waypoints such that coordinate origin is in rear wheel
22 | waypoints[:,0] += self.waypoint_shift
23 |
24 | # TODO: implement pure pursuit algorithm to get the steering angle
25 | steer = 0 # called delta in the book
26 |
27 | # undo transform to waypoints
28 | waypoints[:,0] -= self.waypoint_shift
29 | return steer
30 |
31 |
32 | class PIDController:
33 | """ PID Controller copied from book """
34 | def __init__(self, Kp, Ki, Kd, set_point):
35 | self.Kp = Kp
36 | self.Ki = Ki
37 | self.Kd = Kd
38 | self.set_point = set_point
39 | self.int_term = 0
40 | self.derivative_term = 0
41 | self.last_error = None
42 |
43 | def get_control(self, measurement, dt):
44 | error = self.set_point - measurement
45 | self.int_term += error*self.Ki*dt
46 | if self.last_error is not None:
47 | self.derivative_term = (error-self.last_error)/dt*self.Kd
48 | self.last_error = error
49 | return self.Kp * error + self.int_term + self.derivative_term
50 |
51 |
52 | class PurePursuitPlusPID:
53 | def __init__(self, pure_pursuit=PurePursuit(), pid=PIDController(param_Kp, param_Ki, param_Kd, 0)):
54 | self.pure_pursuit = pure_pursuit
55 | self.pid = pid
56 |
57 | def get_control(self,waypoints, speed, desired_speed, dt):
58 | self.pid.set_point = desired_speed
59 | a = self.pid.get_control(speed,dt)
60 | steer = self.pure_pursuit.get_control(waypoints, speed)
61 | return a, steer
62 |
63 |
--------------------------------------------------------------------------------
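The PIDController above is self-contained, so it can be exercised outside of Carla. A quick demo with a toy point-mass plant (illustrative gains; assumes the repository layout and, like the test notebooks, that code/ is on sys.path when run from code/tests/control):

import sys
sys.path.append("../../")  # run from code/tests/control, as the notebooks do
from exercises.control.pure_pursuit import PIDController

pid = PIDController(Kp=2.0, Ki=0.1, Kd=0.0, set_point=10.0)
speed, dt = 0.0, 0.05
for _ in range(100):
    a = pid.get_control(speed, dt)  # acceleration command
    speed += a * dt                 # toy point-mass plant
print(round(speed, 2))  # approaches the 10.0 m/s set point (slight overshoot from the integral term)
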
/code/exercises/lane_detection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/exercises/lane_detection/__init__.py
--------------------------------------------------------------------------------
/code/exercises/lane_detection/camera_geometry.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def get_intrinsic_matrix(field_of_view_deg, image_width, image_height):
4 | """
5 | Returns intrinsic matrix K.
6 | """
7 | # For our Carla camera alpha_u = alpha_v = alpha
8 |     # alpha can be computed given the camera's field of view via
9 | field_of_view_rad = field_of_view_deg * np.pi/180
10 | alpha = (image_width / 2.0) / np.tan(field_of_view_rad / 2.)
11 | # TODO step 1: Complete this function
12 | raise NotImplementedError
13 |
14 | def project_polyline(polyline_world, trafo_world_to_cam, K):
15 | """
16 | Returns array uv which contains the pixel coordinates of the polyline.
17 |
18 | Parameters
19 | ----------
20 | polyline_world : array_like, shape (M,3)
21 | Each row of this array is a vertex (x,y,z) of the polyline.
22 | trafo_world_to_cam : array_like, shape (4,4)
23 | Transformation matrix, that maps vectors (x_world, y_world, z_world, 1)
24 | to vectors (x_cam, y_cam, z_cam, 1).
25 | K: array_like, shape (3,3)
26 | Intrinsic matrix of the camera.
27 |
28 | Returns:
29 | --------
30 | uv : ndarray, shape (M,2)
31 | Pixel coordinates of the projected polyline
32 | First column is u, second column is v
33 | """
34 | # TODO step 1: Write this function
35 | raise NotImplementedError
36 |
37 |
38 | class CameraGeometry(object):
39 | def __init__(self, height=1.3, yaw_deg=0, pitch_deg=-5, roll_deg=0, image_width=1024, image_height=512, field_of_view_deg=45):
40 | # scalar constants
41 | self.height = height
42 | self.pitch_deg = pitch_deg
43 | self.roll_deg = roll_deg
44 | self.yaw_deg = yaw_deg
45 | self.image_width = image_width
46 | self.image_height = image_height
47 | self.field_of_view_deg = field_of_view_deg
48 |         # camera intrinsics and extrinsics
49 | self.intrinsic_matrix = get_intrinsic_matrix(field_of_view_deg, image_width, image_height)
50 | self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)
51 | ## Note that "rotation_cam_to_road" has the math symbol R_{rc} in the book
52 | yaw = np.deg2rad(yaw_deg)
53 | pitch = np.deg2rad(pitch_deg)
54 | roll = np.deg2rad(roll_deg)
55 | cy, sy = np.cos(yaw), np.sin(yaw)
56 | cp, sp = np.cos(pitch), np.sin(pitch)
57 | cr, sr = np.cos(roll), np.sin(roll)
58 |         rotation_road_to_cam = np.array([[cr*cy+sp*sr*sy, cr*sp*sy-cy*sr, -cp*sy],
59 | [cp*sr, cp*cr, sp],
60 | [cr*sy-cy*sp*sr, -cr*cy*sp -sr*sy, cp*cy]])
61 | self.rotation_cam_to_road = rotation_road_to_cam.T # for rotation matrices, taking the transpose is the same as inversion
62 |
63 | # TODO step 2: replace the 'None' values in the following code with correct expressions
64 |
65 | self.translation_cam_to_road = None
66 | self.trafo_cam_to_road = None
67 | # compute vector nc. Note that R_{rc}^T = R_{cr}
68 | self.road_normal_camframe = None
69 |
70 |
71 | def camframe_to_roadframe(self,vec_in_cam_frame):
72 | return self.rotation_cam_to_road @ vec_in_cam_frame + self.translation_cam_to_road
73 |
74 | def uv_to_roadXYZ_camframe(self,u,v):
75 | """
76 | Inverse perspective mapping from pixel coordinates to 3d coordinates.
77 |
78 | Parameters
79 | ----------
80 | u,v: Both float
81 | Pixel coordinates of some part of the road.
82 |
83 | Returns:
84 | --------
85 | XYZ: array_like, shape(3,)
86 | Three dimensional point in the camera reference frame that lies on the road
87 | and was mapped by the camera to pixel coordinates u,v
88 | """
89 | # TODO step 2: Write this function
90 | raise NotImplementedError
91 |
92 | def uv_to_roadXYZ_roadframe(self,u,v):
93 | r_camframe = self.uv_to_roadXYZ_camframe(u,v)
94 | return self.camframe_to_roadframe(r_camframe)
95 |
96 | def uv_to_roadXYZ_roadframe_iso8855(self,u,v):
97 | X,Y,Z = self.uv_to_roadXYZ_roadframe(u,v)
98 |         return np.array([Z,-X,-Y]) # see the book's section on coordinate systems to understand this
99 |
100 | def precompute_grid(self,dist=60):
101 | """
102 | Precomputes a grid that will be used for polynomial fitting at a later stage.
103 |
104 | Parameters
105 | ----------
106 | dist : float
107 |             Distance threshold in meters. For the grid, only pixel coordinates [u,v]
108 | are considered that depict parts of the road plane that are no more than
109 | a distance `dist` away along the road.
110 |
111 | Returns:
112 | --------
113 | cut_v: float
114 |             Threshold for the pixel coordinate v that corresponds to the `dist` input.
115 |
116 | grid: array_like, shape (M,2)
117 | A list of x,y coordinates. Each element corresponds to the x-y coordinates
118 | of one pixel [u,v] (v>cut_v).
119 | """
120 | cut_v = int(self.compute_minimum_v(dist=dist)+1)
121 | # TODO step 3: compute `grid`
122 | grid = None
123 | return cut_v, grid
124 |
125 | def compute_minimum_v(self, dist):
126 | """
127 |         Find cut_v such that pixels with v < cut_v ...
--------------------------------------------------------------------------------
/code/solutions/camera_calibration/calibrated_lane_detector.py:
--------------------------------------------------------------------------------
55 |         v_list, u_list = np.nonzero(probs > 0.3)
56 | if v_list.size == 0:
57 | return None
58 | coeffs, residuals, _, _, _ = np.polyfit(
59 | u_list, v_list, deg=1, full=True)
60 |
61 | mean_residuals = residuals/len(u_list)
62 | #print(mean_residuals)
63 | if mean_residuals > self.mean_residuals_thresh:
64 | return None
65 | else:
66 | return np.poly1d(coeffs)
67 |
68 | def add_to_pitch_yaw_history(self, pitch, yaw):
69 | self.pitch_yaw_history.append([pitch, yaw])
70 | if len(self.pitch_yaw_history) > 50:
71 | py = np.array(self.pitch_yaw_history)
72 | mean_pitch = np.mean(py[:,0])
73 | mean_yaw = np.mean(py[:,1])
74 | self.estimated_pitch_deg = np.rad2deg(mean_pitch)
75 | self.estimated_yaw_deg = np.rad2deg(mean_yaw)
76 | self.update_cam_geometry()
77 | self.calibration_success = True
78 | self.pitch_yaw_history = []
79 | print("yaw, pitch = ", self.estimated_yaw_deg, self.estimated_pitch_deg)
80 |
81 | def update_cam_geometry(self):
82 | self.cg = CameraGeometry(
83 | height = self.cg.height,
84 | roll_deg = self.cg.roll_deg,
85 | image_width = self.cg.image_width,
86 | image_height = self.cg.image_height,
87 | field_of_view_deg = self.cg.field_of_view_deg,
88 | pitch_deg = self.estimated_pitch_deg,
89 | yaw_deg = self.estimated_yaw_deg )
90 | self.cut_v, self.grid = self.cg.precompute_grid()
91 |
92 |
--------------------------------------------------------------------------------
/code/solutions/control/get_target_point.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 |
4 | # Function from https://stackoverflow.com/a/59582674/2609987
5 | def circle_line_segment_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):
6 | """ Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.
7 |
8 | :param circle_center: The (x, y) location of the circle center
9 | :param circle_radius: The radius of the circle
10 | :param pt1: The (x, y) location of the first point of the segment
11 | :param pt2: The (x, y) location of the second point of the segment
12 | :param full_line: True to find intersections along full line - not just in the segment. False will just return intersections within the segment.
13 | :param tangent_tol: Numerical tolerance at which we decide the intersections are close enough to consider it a tangent
14 | :return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.
15 |
16 | Note: We follow: http://mathworld.wolfram.com/Circle-LineIntersection.html
17 | """
18 |
19 | (p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center
20 | (x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)
21 | dx, dy = (x2 - x1), (y2 - y1)
22 | dr = (dx ** 2 + dy ** 2)**.5
23 | big_d = x1 * y2 - x2 * y1
24 | discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2
25 |
26 | if discriminant < 0: # No intersection between circle and line
27 | return []
28 | else: # There may be 0, 1, or 2 intersections with the segment
29 | intersections = [
30 | (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,
31 | cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)
32 | for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct
33 | if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment
34 | fraction_along_segment = [(xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]
35 | intersections = [pt for pt, frac in zip(intersections, fraction_along_segment) if 0 <= frac <= 1]
36 | if len(intersections) == 2 and abs(discriminant) <= tangent_tol: # If line is tangent to circle, return just one point (as both intersections have same location)
37 | return [intersections[0]]
38 | else:
39 | return intersections
40 |
41 | def get_target_point(lookahead, polyline):
42 | """ Determines the target point for the pure pursuit controller
43 |
44 | Parameters
45 | ----------
46 | lookahead : float
47 | The target point is on a circle of radius `lookahead`
48 | The circle's center is (0,0)
49 |     polyline: array_like, shape (M,2)
50 |         A list of 2d points that defines a polyline.
51 |
52 |     Returns:
53 |     --------
54 |     target_point: numpy array, shape (2,)
55 | Point with positive x-coordinate where the circle of radius `lookahead`
56 | and the polyline intersect.
57 | Return None if there is no such point.
58 | If there are multiple such points, return the one that the polyline
59 | visits first.
60 | """
61 | intersections = []
62 | for j in range(len(polyline)-1):
63 | pt1 = polyline[j]
64 | pt2 = polyline[j+1]
65 | intersections += circle_line_segment_intersection((0,0), lookahead, pt1, pt2, full_line=False)
66 | filtered = [p for p in intersections if p[0]>0]
67 | if len(filtered)==0:
68 | return None
69 | return filtered[0]
--------------------------------------------------------------------------------
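A quick sanity check for the solution above (hypothetical snippet, run from code/solutions/control/): with the sample polyline also used in tests/control/target_point.ipynb and a lookahead of 5, only the segment from (2, 3) to (3, 6) crosses the circle, so the expected target point is roughly (2.45, 4.36).

import numpy as np
from get_target_point import get_target_point  # assumes this directory is on the path

polyline = np.array([[1, 1], [2, 3], [3, 6], [4, 7]], dtype=float)
print(get_target_point(5.0, polyline))  # approximately (2.452, 4.357)
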
/code/solutions/control/pure_pursuit.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .get_target_point import get_target_point
3 |
4 | # TODO: Tune parameters of PID with these global variables
5 | param_Kp = 2
6 | param_Ki = 0
7 | param_Kd = 0
8 | # TODO: Tune parameters of Pure Pursuit with these global variables
9 | param_K_dd = 0.4
10 | # The above parameters will be used in the Carla simulation
11 | # The simple simulation in tests/control/control.ipynb does not use these parameters
12 |
13 |
14 | class PurePursuit:
15 | def __init__(self, K_dd=param_K_dd, wheel_base=2.65, waypoint_shift=1.4):
16 | self.K_dd = K_dd
17 | self.wheel_base = wheel_base
18 | self.waypoint_shift = waypoint_shift
19 |
20 | def get_control(self, waypoints, speed):
21 | # transform x coordinates of waypoints such that coordinate origin is in rear wheel
22 | waypoints[:,0] += self.waypoint_shift
23 | look_ahead_distance = np.clip(self.K_dd * speed, 3,20)
24 |
25 | track_point = get_target_point(look_ahead_distance, waypoints)
26 | if track_point is None:
27 | return 0
28 |
29 | alpha = np.arctan2(track_point[1], track_point[0])
30 |
31 | # Change the steer output with the lateral controller.
32 | steer = np.arctan((2 * self.wheel_base * np.sin(alpha)) / look_ahead_distance)
33 |
34 | # undo transform to waypoints
35 | waypoints[:,0] -= self.waypoint_shift
36 | return steer
37 |
38 |
39 | class PIDController:
40 | def __init__(self, Kp, Ki, Kd, set_point):
41 | self.Kp = Kp
42 | self.Ki = Ki
43 | self.Kd = Kd
44 | self.set_point = set_point
45 | self.int_term = 0
46 | self.derivative_term = 0
47 | self.last_error = None
48 |
49 | def get_control(self, measurement, dt):
50 | error = self.set_point - measurement
51 | self.int_term += error*self.Ki*dt
52 | if self.last_error is not None:
53 | self.derivative_term = (error-self.last_error)/dt*self.Kd
54 | self.last_error = error
55 | return self.Kp * error + self.int_term + self.derivative_term
56 |
57 |
58 | class PurePursuitPlusPID:
59 | def __init__(self, pure_pursuit=PurePursuit(), pid=PIDController(param_Kp, param_Ki, param_Kd, 0)):
60 | self.pure_pursuit = pure_pursuit
61 | self.pid = pid
62 |
63 | def get_control(self,waypoints, speed, desired_speed, dt):
64 | self.pid.set_point = desired_speed
65 | a = self.pid.get_control(speed,dt)
66 | steer = self.pure_pursuit.get_control(waypoints, speed)
67 | return a, steer
68 |
69 |
--------------------------------------------------------------------------------
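For intuition, the steering law in get_control above, delta = arctan(2 * L * sin(alpha) / l_d), can be checked with plain numpy on a hypothetical target point (all numbers illustrative):

import numpy as np

wheel_base = 2.65          # L, same default as PurePursuit above
look_ahead_distance = 8.0  # inside the np.clip(..., 3, 20) range used above
track_point = np.array([7.8, 1.8])  # assumed target point on the lookahead circle

alpha = np.arctan2(track_point[1], track_point[0])
steer = np.arctan(2 * wheel_base * np.sin(alpha) / look_ahead_distance)
print(alpha, steer)  # about 0.227 rad and 0.148 rad
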
/code/solutions/lane_detection/README.md:
--------------------------------------------------------------------------------
1 | The following files are provided with the course; students and readers are **not** expected to write them as part of the exercises:
2 |
3 | * `collect_data.py`
4 | * `camera_geometry_numba.py` : You can import the `CameraGeometry` class from this file instead of importing it from `camera_geometry.py`. This will lead to a speed-up due to [numba](http://numba.pydata.org/)
--------------------------------------------------------------------------------
/code/solutions/lane_detection/camera_geometry.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | def get_intrinsic_matrix(field_of_view_deg, image_width, image_height):
4 | # For our Carla camera alpha_u = alpha_v = alpha
5 |     # alpha can be computed given the camera's field of view via
6 | field_of_view_rad = field_of_view_deg * np.pi/180
7 | alpha = (image_width / 2.0) / np.tan(field_of_view_rad / 2.)
8 | Cu = image_width / 2.0
9 | Cv = image_height / 2.0
10 | return np.array([[alpha, 0, Cu],
11 | [0, alpha, Cv],
12 | [0, 0, 1.0]])
13 |
14 | def project_polyline(polyline_world, trafo_world_to_cam, K):
15 | x,y,z = polyline_world[:,0], polyline_world[:,1], polyline_world[:,2]
16 | homvec = np.stack((x,y,z,np.ones_like(x)))
17 | proj_mat = K @ trafo_world_to_cam[:3,:]
18 | pl_uv_cam = (proj_mat @ homvec).T
19 | u = pl_uv_cam[:,0] / pl_uv_cam[:,2]
20 | v = pl_uv_cam[:,1] / pl_uv_cam[:,2]
21 | return np.stack((u,v)).T
22 |
23 |
24 | class CameraGeometry(object):
25 | def __init__(self, height=1.3, yaw_deg=0, pitch_deg=-5, roll_deg=0, image_width=1024, image_height=512, field_of_view_deg=45):
26 | # scalar constants
27 | self.height = height
28 | self.pitch_deg = pitch_deg
29 | self.roll_deg = roll_deg
30 | self.yaw_deg = yaw_deg
31 | self.image_width = image_width
32 | self.image_height = image_height
33 | self.field_of_view_deg = field_of_view_deg
34 |         # camera intrinsics and extrinsics
35 | self.intrinsic_matrix = get_intrinsic_matrix(field_of_view_deg, image_width, image_height)
36 | self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)
37 | ## Note that "rotation_cam_to_road" has the math symbol R_{rc} in the book
38 | yaw = np.deg2rad(yaw_deg)
39 | pitch = np.deg2rad(pitch_deg)
40 | roll = np.deg2rad(roll_deg)
41 | cy, sy = np.cos(yaw), np.sin(yaw)
42 | cp, sp = np.cos(pitch), np.sin(pitch)
43 | cr, sr = np.cos(roll), np.sin(roll)
44 |         rotation_road_to_cam = np.array([[cr*cy+sp*sr*sy, cr*sp*sy-cy*sr, -cp*sy],
45 | [cp*sr, cp*cr, sp],
46 | [cr*sy-cy*sp*sr, -cr*cy*sp -sr*sy, cp*cy]])
47 | self.rotation_cam_to_road = rotation_road_to_cam.T # for rotation matrices, taking the transpose is the same as inversion
48 | self.translation_cam_to_road = np.array([0,-self.height,0])
49 | self.trafo_cam_to_road = np.eye(4)
50 | self.trafo_cam_to_road[0:3,0:3] = self.rotation_cam_to_road
51 | self.trafo_cam_to_road[0:3,3] = self.translation_cam_to_road
52 | # compute vector nc. Note that R_{rc}^T = R_{cr}
53 | self.road_normal_camframe = self.rotation_cam_to_road.T @ np.array([0,1,0])
54 |
55 |
56 | def camframe_to_roadframe(self,vec_in_cam_frame):
57 | return self.rotation_cam_to_road @ vec_in_cam_frame + self.translation_cam_to_road
58 |
59 | def uv_to_roadXYZ_camframe(self,u,v):
60 |         # NOTE: The results depend very much on the pitch angle (a 0.5 degree error already yields a bad result)
61 | # Here is a paper on vehicle pitch estimation:
62 | # https://refubium.fu-berlin.de/handle/fub188/26792
63 | uv_hom = np.array([u,v,1])
64 | Kinv_uv_hom = self.inverse_intrinsic_matrix @ uv_hom
65 | denominator = self.road_normal_camframe.dot(Kinv_uv_hom)
66 | return self.height*Kinv_uv_hom/denominator
67 |
68 | def uv_to_roadXYZ_roadframe(self,u,v):
69 | r_camframe = self.uv_to_roadXYZ_camframe(u,v)
70 | return self.camframe_to_roadframe(r_camframe)
71 |
72 | def uv_to_roadXYZ_roadframe_iso8855(self,u,v):
73 | X,Y,Z = self.uv_to_roadXYZ_roadframe(u,v)
74 |         return np.array([Z,-X,-Y]) # see the book's section on coordinate systems to understand this
75 |
76 | def precompute_grid(self,dist=60):
77 | cut_v = int(self.compute_minimum_v(dist=dist)+1)
78 | xy = []
79 | for v in range(cut_v, self.image_height):
80 | for u in range(self.image_width):
81 | X,Y,Z= self.uv_to_roadXYZ_roadframe_iso8855(u,v)
82 | xy.append(np.array([X,Y]))
83 | xy = np.array(xy)
84 | return cut_v, xy
85 |
86 | def compute_minimum_v(self, dist):
87 | """
88 |         Find cut_v such that pixels with v < cut_v ...
--------------------------------------------------------------------------------
/code/solutions/lane_detection/lane_detector.py:
--------------------------------------------------------------------------------
43 |         mask = probs_flat > 0.3
44 | if mask.sum() > 0:
45 | coeffs = np.polyfit(self.grid[:,0][mask], self.grid[:,1][mask], deg=3, w=probs_flat[mask])
46 | else:
47 | coeffs = np.array([0.,0.,0.,0.])
48 | return np.poly1d(coeffs)
49 |
50 | def __call__(self, image):
51 | if isinstance(image, str):
52 | image = self.read_imagefile_to_array(image)
53 | left_poly, right_poly, _, _ = self.get_fit_and_probs(image)
54 | return left_poly, right_poly
55 |
56 | def get_fit_and_probs(self, img):
57 | _, left, right = self.detect(img)
58 | left_poly = self.fit_poly(left)
59 | right_poly = self.fit_poly(right)
60 | return left_poly, right_poly, left, right
61 |
62 |
63 |
--------------------------------------------------------------------------------
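A standalone numeric check of get_intrinsic_matrix from camera_geometry.py above: with the book's defaults (45 degree field of view, 1024x512 image) the focal length alpha comes out to about 1236.08, which matches the K hard-coded in code/tests/control/simulation.py further below.

import numpy as np

field_of_view_rad = np.deg2rad(45)
alpha = (1024 / 2.0) / np.tan(field_of_view_rad / 2.0)
Cu, Cv = 1024 / 2.0, 512 / 2.0
K = np.array([[alpha, 0, Cu],
              [0, alpha, Cv],
              [0, 0, 1.0]])
print(alpha)  # ~1236.0773, i.e. the 1.23607734e+03 in simulation.py
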
/code/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/tests/__init__.py
--------------------------------------------------------------------------------
/code/tests/control/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/tests/control/__init__.py
--------------------------------------------------------------------------------
/code/tests/control/clothoid_generator.py:
--------------------------------------------------------------------------------
1 | import matplotlib.pyplot as plt
2 | from pyclothoids import Clothoid
3 | import numpy as np
4 |
5 | def normalize_angle(theta):
6 | return np.arctan2(np.sin(theta), np.cos(theta))
7 |
8 | def get_random_racetrack():
9 | steps = 5
10 | res = 100
11 | length = 200
12 | radius = 5
13 | theta_jump = np.pi/10
14 | xc, yc, thetac = 0,0,0
15 | rx,ry = np.zeros(steps*res),np.zeros(steps*res)
16 | for i in range(steps):
17 | c,s = np.cos(thetac), np.sin(thetac)
18 | l = np.random.uniform(0.8*length, 1.2*length)
19 | l = length
20 | x = xc + l*c + np.random.uniform(-radius,radius)
21 | y = yc + l*s + np.random.uniform(-radius,radius)
22 | theta = thetac+np.random.uniform(-theta_jump, theta_jump)
23 | clothoid = Clothoid.G1Hermite(xc,yc,thetac, x,y, theta)
24 | rx[i*res:(i+1)*res], ry[i*res:(i+1)*res]= clothoid.SampleXY(res)
25 | xc,yc,thetac = x,y,theta
26 | return rx,ry
--------------------------------------------------------------------------------
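A hypothetical usage sketch for the generator above (not a file from the repository): sample a random track and plot it with equal axis scaling so the stitched clothoid segments are visible.

# assumes pyclothoids is installed and this snippet sits next to clothoid_generator.py
import matplotlib.pyplot as plt
from clothoid_generator import get_random_racetrack

rx, ry = get_random_racetrack()
plt.plot(rx, ry)
plt.axis("equal")
plt.show()
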
/code/tests/control/control.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/tests/control/control.gif
--------------------------------------------------------------------------------
/code/tests/control/simulation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import copy
3 | import cv2
4 | import sys
5 | from track import Track
6 | from vehicle import Vehicle
7 |
8 | sys.path.append("../../util")
9 | from geometry_util import dist_point_linestring
10 | import matplotlib.pyplot as plt
11 |
12 | import PIL.Image
13 | from io import BytesIO
14 | import IPython.display
15 | import time
16 |
17 | # helper functions
18 | def resize(img, scale_percent):
19 |     # scale_percent: percent of the original size
20 | width = int(img.shape[1] * scale_percent / 100)
21 | height = int(img.shape[0] * scale_percent / 100)
22 | dim = (width, height)
23 | # resize image
24 | resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
25 | return resized
26 |
27 |
28 | def show_img(a, fmt="png"):
29 | a = np.uint8(a)
30 | f = BytesIO()
31 | PIL.Image.fromarray(a).save(f, fmt)
32 | IPython.display.display(IPython.display.Image(data=f.getvalue()))
33 |
34 |
35 | def lane_from_centerline(x, y, width):
36 | dx = np.gradient(x)
37 | dy = np.gradient(y)
38 | theta = np.arctan2(dy, dx)
39 | c, s = np.cos(theta), np.sin(theta)
40 |
41 | xl = x - s * 0.5 * width
42 | yl = y + c * 0.5 * width
43 |
44 | xr = x + s * 0.5 * width
45 | yr = y - c * 0.5 * width
46 |
47 | return xl, yl, xr, yr
48 |
49 |
50 | def uv_fix(u, v):
51 | mask = (u > -20) & (u < 1100) & (v > -20) & (v < 600)
52 | return u[mask], v[mask]
53 |
54 |
55 | def xy_to_XYZ(x, y):
56 | return np.stack((y, np.zeros_like(x) + 3, x + 15))
57 |
58 |
59 | def xy_to_uv(x, y, K):
60 | X = xy_to_XYZ(x, y)
61 | uv1 = (K @ X).T
62 | u, v = uv1[:, 0] / uv1[:, 2], uv1[:, 1] / uv1[:, 2]
63 | # return u,v
64 | return uv_fix(u, v)
65 |
66 |
67 | def xy_to_shape(x, y):
68 | theta = np.linspace(0, 2 * np.pi, 8)
69 | c, s = np.cos(theta), np.sin(theta)
70 | r = 0.3
71 | X = x + r * c
72 | Y = y + r * s
73 | return np.stack((X, Y)).T
74 |
75 |
76 | def render_shape_xy(image, x, y, K):
77 | shape = xy_to_shape(x, y)
78 | u, v = xy_to_uv(shape[:, 0], shape[:, 1], K)
79 | pl = np.stack((u, v)).T
80 | cv2.polylines(
81 | image, np.int32([pl]), isClosed=True, color=[255, 0, 0], thickness=2
82 | )
83 |
84 |
85 | class Simulation:
86 | def __init__(self, vehicle, track, controller, desired_velocity=25):
87 | self.controller = controller
88 | self.track = track
89 | self.vehicle = vehicle
90 | self.desired_velocity = desired_velocity
91 | vehicle.x, vehicle.y, vehicle.theta = track.get_start_pose()
92 | self.dt = 0.05
93 | self.traj = []
94 | self.cross_track_errors = []
95 | self.velocities = []
96 | self.waypoints = self.track.get_vehicle_path(
97 | self.vehicle.x, self.vehicle.y, self.vehicle.theta
98 | )
99 | self.obj = self.track.get_obj(
100 | self.vehicle.x, self.vehicle.y, self.vehicle.theta
101 | )
102 | self.K = np.array(
103 | [
104 | [1.23607734e03, 0.00000000e00, 5.12000000e02],
105 | [0.00000000e00, 1.23607734e03, 2.56000000e02],
106 | [0.00000000e00, 0.00000000e00, 1.00000000e00],
107 | ]
108 | )
109 | self.a, self.delta = 0, 0
110 | image_fn = "../../../data/carla_vehicle_bg_2.png"
111 | image_vehicle = cv2.imread(image_fn)
112 | self.image_vehicle = cv2.cvtColor(image_vehicle, cv2.COLOR_BGR2RGB)
113 |
114 | def step(self):
115 | self.waypoints = self.track.get_vehicle_path(
116 | self.vehicle.x, self.vehicle.y, self.vehicle.theta
117 | )
118 | self.obj = self.track.get_obj(
119 | self.vehicle.x, self.vehicle.y, self.vehicle.theta
120 | )
121 | self.a, self.delta = self.controller.get_control(
122 | self.waypoints, self.vehicle.v, self.desired_velocity, self.dt
123 | )
124 | self.a = np.clip(self.a, 0, 3)
125 | self.vehicle.update(self.dt, self.delta, self.a)
126 | self.traj.append([self.vehicle.x, self.vehicle.y])
127 | self.cross_track_errors.append(
128 | dist_point_linestring(np.array([0, 0]), self.waypoints)
129 | )
130 | self.velocities.append(self.vehicle.v)
131 |
132 | def plot_error(self):
133 | plt.plot(self.cross_track_errors)
134 | plt.title("Cross Track Error")
135 | plt.xlabel("Simulation step")
136 | plt.ylabel("error in meters")
137 |
138 | def plot_velocity(self):
139 | plt.plot(self.velocities)
140 | plt.plot([self.desired_velocity] * len(self.velocities), ls="--")
141 | plt.title("Velocity")
142 | plt.xlabel("Simulation step")
143 | plt.ylabel("v (m/s)")
144 |
145 | def cv_plot(self):
146 | wp = self.waypoints
147 | x, y = wp[:, 0], wp[:, 1]
148 | u, v = xy_to_uv(x, y, self.K)
149 | xl, yl, xr, yr = lane_from_centerline(x, y, width=3)
150 | ul, vl = xy_to_uv(xl, yl, self.K)
151 | ur, vr = xy_to_uv(xr, yr, self.K)
152 |
153 | # render lane
154 | arr = copy.deepcopy(self.image_vehicle)
155 | for lb in [np.stack((ul, vl)).T, np.stack((ur, vr)).T]:
156 | cv2.polylines(
157 | arr,
158 | np.int32([lb]),
159 | isClosed=False,
160 | color=[255, 255, 255],
161 | thickness=3,
162 | )
163 |
164 | # render objects beside lane
165 | x, y = self.obj[:, 0], self.obj[:, 1]
166 | point_array = np.stack((x, y)).T
167 | for point in point_array:
168 | x, y = point
169 | render_shape_xy(arr, x, y, self.K)
170 |
171 | # render steering wheel
172 | center = np.array([920, 80])
173 | radius = 40
174 | theta = np.linspace(0, 2 * np.pi, 20)
175 | u = center[0] + radius * np.cos(theta)
176 | v = center[1] + radius * np.sin(theta)
177 | pl = np.stack((u, v)).T
178 | cv2.polylines(
179 | arr, np.int32([pl]), isClosed=True, color=[0, 0, 255], thickness=3
180 | )
181 | u0 = center[0] + radius * np.cos(self.delta * 50)
182 | v0 = center[1] + radius * np.sin(self.delta * 50)
183 | u1 = center[0] - radius * np.cos(self.delta * 50)
184 | v1 = center[1] - radius * np.sin(self.delta * 50)
185 | pl = np.array([[u0, v0], [u1, v1]])
186 | cv2.polylines(
187 | arr, np.int32([pl]), isClosed=True, color=[0, 0, 255], thickness=3
188 | )
189 |
190 | # render text
191 | cte = self.cross_track_errors[-1]
192 | mystring = "cross track error = {:.2f}m, velocity={:.2f}m/s".format(
193 | cte, self.vehicle.v
194 | )
195 |
196 | font = cv2.FONT_HERSHEY_SIMPLEX
197 | org = (50, 50)
198 | fontScale = 1
199 | color = (255, 0, 0)
200 | thickness = 2
201 | arr = cv2.putText(
202 | arr, mystring, org, font, fontScale, color, thickness, cv2.LINE_AA
203 | )
204 |
205 | arr = resize(arr[0:512, 0:1024, :], 50)
206 | return arr
207 |
208 |
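A minimal smoke-test sketch of how the Simulation class above fits together with the Track and Vehicle classes from track.py and vehicle.py below. It assumes the script runs from code/tests/control so the relative image path in __init__ resolves; DummyController is a placeholder with the get_control() signature that Simulation expects, not the book's pure-pursuit/PID controller.

import matplotlib.pyplot as plt

class DummyController:
    # placeholder: steer straight, use a P-term to approach the desired speed
    def get_control(self, waypoints, speed, desired_speed, dt):
        return 0.5 * (desired_speed - speed), 0.0  # (acceleration, steering)

sim = Simulation(Vehicle(wheel_base=2), Track(seed=42), DummyController())
for _ in range(1000):  # 1000 steps of dt = 0.05 s, i.e. 50 simulated seconds
    sim.step()         # the car drifts off the track; this only checks the plumbing
sim.plot_error()
plt.show()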
--------------------------------------------------------------------------------
/code/tests/control/target_point.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Target Point"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "## Setting up Colab"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "colab_nb = 'google.colab' in str(get_ipython())"
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": null,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "if colab_nb:\n",
33 | " from google.colab import drive\n",
34 | " drive.mount('/content/drive')"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "if colab_nb:\n",
44 | " %cd /content/drive/My Drive/aad/code/tests/control"
45 | ]
46 | },
47 | {
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "## Exercise"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "You need to implement the function `get_target_point()` in `code/exercises/control/get_target_point.py`"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "First you can set `run_student_code = False` and see the sample solution at work. After that set `run_student_code = True` and see your implementation at work. It should behave like the sample solution!"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": null,
71 | "metadata": {},
72 | "outputs": [],
73 | "source": [
74 | "run_student_code = False"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "%load_ext autoreload\n",
84 | "%autoreload 2\n",
85 | "import numpy as np\n",
86 | "import matplotlib.pyplot as plt\n",
87 | "import sys\n",
88 | "from pathlib import Path\n",
89 | "sys.path.append(str(Path('../../')))\n",
90 | "if run_student_code:\n",
91 | " from exercises.control.get_target_point import get_target_point\n",
92 | "else:\n",
93 | " from solutions.control.get_target_point import get_target_point"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "from ipywidgets import interact\n",
103 | "\n",
104 | "def test_target_point(lookahead=5):\n",
105 | " # create data\n",
106 | " polyline = np.array([[1,1], [2,3], [3,6], [4,7]])\n",
107 | " # plot data\n",
108 | " fig, ax = plt.subplots()\n",
109 | " ax.plot(polyline[:,0], polyline[:,1], color=\"g\")\n",
110 | " circle = plt.Circle((0, 0), lookahead, color=\"k\", fill=False)\n",
111 | " ax.add_artist(circle)\n",
112 | " # get function output and plot it\n",
113 | " intersec = get_target_point(lookahead, polyline)\n",
114 | " if intersec is not None:\n",
115 | " plt.scatter([intersec[0]], [intersec[1]], color=\"r\")\n",
116 | " plt.axis(\"equal\")\n",
117 | "\n",
118 | " plt.show()\n",
119 | "\n",
120 | "interact(test_target_point, lookahead=(0,10,0.1));"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "from ipywidgets import interact\n",
130 | "\n",
131 | "def test_geometry(lookahead=5):\n",
132 | " # create data\n",
133 | " polyline = np.array([[-4,-7],[-3,-6],[-2,-3],[-1,-1],[1,1], [2,3], [3,6], [4,7]])\n",
134 | " # plot data\n",
135 | " fig, ax = plt.subplots()\n",
136 | " ax.plot(polyline[:,0], polyline[:,1], color=\"g\")\n",
137 | " circle = plt.Circle((0, 0), lookahead, color=\"k\", fill=False)\n",
138 | " ax.add_artist(circle)\n",
139 | " # get function output and plot it\n",
140 | " intersec = get_target_point(lookahead, polyline)\n",
141 | " if intersec is not None:\n",
142 | " plt.scatter([intersec[0]], [intersec[1]], color=\"r\")\n",
143 | " plt.axis(\"equal\")\n",
144 | "\n",
145 | " plt.show()\n",
146 | "\n",
147 | "interact(test_geometry, lookahead=(0,10,0.1));"
148 | ]
149 | },
150 | {
151 | "cell_type": "markdown",
152 | "metadata": {},
153 | "source": []
154 | },
155 | {
156 | "cell_type": "code",
157 | "execution_count": null,
158 | "metadata": {},
159 | "outputs": [],
160 | "source": []
161 | }
162 | ],
163 | "metadata": {},
164 | "nbformat": 4,
165 | "nbformat_minor": 2
166 | }
167 |
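For reference, one standard way to implement the lookahead-circle intersection that both test cells above exercise is to solve a quadratic per polyline segment. This is only a sketch of that approach, not the contents of the exercise or solution file:

import numpy as np

def get_target_point_sketch(lookahead, polyline):
    # intersect the circle of radius `lookahead` around the origin with each
    # segment p1 -> p2, i.e. solve |p1 + t*(p2 - p1)|^2 = lookahead^2 for t
    for p1, p2 in zip(polyline[:-1], polyline[1:]):
        d = p2 - p1
        a = np.dot(d, d)
        if a == 0:
            continue  # degenerate zero-length segment
        b = 2 * np.dot(p1, d)
        c = np.dot(p1, p1) - lookahead**2
        disc = b**2 - 4 * a * c
        if disc < 0:
            continue  # this segment's line misses the circle entirely
        for t in sorted(((-b - np.sqrt(disc)) / (2 * a),
                         (-b + np.sqrt(disc)) / (2 * a))):
            point = p1 + t * d
            if 0 <= t <= 1 and point[0] > 0:  # on the segment, ahead of the car
                return point
    return None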
--------------------------------------------------------------------------------
/code/tests/control/track.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | #from track_generator import get_random_racetrack
4 | from clothoid_generator import get_random_racetrack
5 | import copy
6 |
7 | class Track:
8 | #def __init__(self, rad = 0.2, edgy = 0.01, scale=1000, seed=42):
9 | # x,y = get_random_racetrack(rad, edgy, scale, seed)
10 | # self.waypoints = np.stack((x,y)).T
11 |
12 | def __init__(self, seed=42):
13 | np.random.seed(seed)
14 | x,y = get_random_racetrack()
15 | self.waypoints = np.stack((x,y)).T
16 | track_length = np.linalg.norm(np.diff(self.waypoints,axis=0),axis=1).sum()
17 | obj_dist = 50
18 | num_obj = int(track_length/obj_dist)*2
19 | index_diff = int(len(self.waypoints)*obj_dist/track_length)
20 | self.obj = np.zeros((num_obj,2))
21 | j = 0
22 |         while j+1 < num_obj:
23 |             index = (j // 2) * index_diff
24 |             if index >= len(self.waypoints)-1:
25 |                 index = -2
26 | p0 = self.waypoints[index]
27 | p1 = self.waypoints[index+1]
28 | dx = p1[0]-p0[0]
29 | dy = p1[1]-p0[1]
30 | theta = np.arctan2(dy,dx)
31 | self.obj[j] = p0 + np.array([-np.sin(theta), np.cos(theta)]) * 3
32 | self.obj[j+1] = p0 - np.array([-np.sin(theta), np.cos(theta)]) * 3
33 | j+=2
34 |
35 |
36 |
37 | def transform(self,x,y,theta, data):
38 | xc = data[:,0] - x
39 | yc = data[:,1] - y
40 | c,s = np.cos(theta), np.sin(theta)
41 | xcr = xc * c + yc*s
42 | ycr = -xc*s + yc*c
43 | close = xcr**2 +ycr**2 < 600**2
44 | in_front = xcr > -5
45 | ret_x, ret_y = xcr[close & in_front], ycr[close & in_front]
46 |         path = np.stack((ret_x, ret_y)).T
47 | return path
48 |
49 | def get_vehicle_path(self, x,y,theta):
50 | return self.transform(x,y,theta,self.waypoints)
51 |
52 | def get_obj(self,x,y,theta):
53 | return self.transform(x,y,theta,self.obj)
54 |
55 | def get_start_pose(self):
56 | px, py = self.waypoints[:,0], self.waypoints[:,1]
57 | theta = np.arctan2(py[1]-py[0], px[1]-px[0])
58 | return px[0], py[0], theta
59 |
60 | def plot(self, color="red"):
61 | plt.plot(self.waypoints[:,0], self.waypoints[:,1], color=color)
62 | plt.scatter(self.obj[:,0], self.obj[:,1], color=color)
63 |
64 | if __name__ == "__main__":
65 | track=Track()
66 | track.plot()
67 | plt.axis("equal")
68 | plt.show()
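Track.transform above maps world-frame points into the vehicle frame (x along the heading, y to the left) and keeps only points within 600 m that are not behind the vehicle. A quick sanity check, assuming clothoid_generator is importable so that Track() can be constructed:

import numpy as np

track = Track()
# vehicle at (10, 0) facing +y: the world point (10, 5) lies 5 m straight ahead
path = track.transform(10, 0, np.pi / 2, np.array([[10.0, 5.0]]))
print(path)  # approximately [[5. 0.]]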
--------------------------------------------------------------------------------
/code/tests/control/vehicle.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | #from scipy.integrate import odeint
3 |
4 | def normalize_angle(theta):
5 | return np.arctan2(np.sin(theta), np.cos(theta))
6 |
7 | class Vehicle:
8 | def __init__(self, x=0, y=0, theta=0, v=0, wheel_base=2):
9 | self.x = x
10 | self.y = y
11 | self.v = v
12 | self.theta = normalize_angle(theta)
13 | self.wheel_base = wheel_base
14 |
15 | def update(self, dt, delta, a):
16 | a = max(0.0,a)
17 | self.x += dt*self.v * np.cos(self.theta)
18 | self.y += dt*self.v * np.sin(self.theta)
19 | theta_dot = self.v*np.tan(delta)/self.wheel_base
20 | self.theta = normalize_angle(self.theta + theta_dot*dt)
21 | aeff = a - self.v**2 * 0.001
22 | self.v += dt*aeff
23 |
24 | def print(self):
25 | print("vehicle.x")
26 | print(self.x)
27 | print("vehicle.y")
28 | print(self.y)
29 | print("vehicle.theta")
30 | print(self.theta)
31 |
32 |
33 | if __name__=="__main__":
34 | vehicle = Vehicle(0,0,0,10)
35 | vehicle.update(0.1, 0.2, 1)
36 | vehicle.print()
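Vehicle.update above is one forward-Euler step of the kinematic bicycle model, with a small quadratic drag term and with the acceleration clamped to be non-negative (the model can coast, but never brake):

    x     <- x + v * cos(theta) * dt
    y     <- y + v * sin(theta) * dt
    theta <- theta + (v * tan(delta) / wheel_base) * dt
    v     <- v + (a - 0.001 * v**2) * dt

For the __main__ example (v=10, delta=0.2, a=1, dt=0.1) this yields x = 1.0, y = 0.0, theta ≈ 0.1014 rad and v = 10.09 m/s.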
--------------------------------------------------------------------------------
/code/tests/lane_detection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/tests/lane_detection/__init__.py
--------------------------------------------------------------------------------
/code/tests/lane_detection/camera_geometry_unit_test.py:
--------------------------------------------------------------------------------
1 | # To run this, cd into the parent directory of the code folder and then run
2 | # python -m code.tests.lane_detection.camera_geometry_unit_test 1
3 | import numpy as np
4 | from pathlib import Path
5 | import argparse
6 | import logging
7 | from ...solutions.lane_detection.camera_geometry import CameraGeometry as sln_CameraGeometry
8 |
9 | def compare_arrays(sln, ex, failure_string, success_string):
10 |     # ex is the student's result; None (nothing was returned) and a wrong
11 |     # type are both rejected here with the same failure message
12 |     if ex is None or type(sln) != type(ex):
13 |         print(failure_string, "You did not return a proper numpy array!")
14 |         return False
15 | if sln.shape != ex.shape:
16 | print(failure_string)
17 | print("The numpy array you have returned should have shape {} but its shape is {}!".format(sln.shape, ex.shape))
18 | return False
19 | if np.isclose(sln, ex).all():
20 | print(success_string)
21 | return True
22 | else:
23 | print(failure_string, "You returned:\n {}\n but the solution is:\n {}\n.".format(ex, sln))
24 | return False
25 |
26 | def test_project_polyline(boundary, trafo_world_to_cam, K):
27 | from ...exercises.lane_detection.camera_geometry import project_polyline as ex_project_polyline
28 | from ...solutions.lane_detection.camera_geometry import project_polyline as sln_project_polyline
29 |
30 | res_sln = sln_project_polyline(boundary[:,0:3], trafo_world_to_cam, K)
31 |
32 | try:
33 | res_ex = ex_project_polyline(boundary[:,0:3], trafo_world_to_cam, K)
34 | result = compare_arrays(res_sln, res_ex, "Test for project_polyline failed.",
35 | "Your function project_polyline seems to be correct!")
36 | except NotImplementedError:
37 | print("Test for project_polyline failed. You did not implement the function!")
38 | return False
39 | except BaseException:
40 | logging.exception("Test for project_polyline failed. Your code raised an exception! I will show you the traceback:")
41 | return False
42 | return result
43 |
44 |
45 | def test_get_intrinsic_matrix():
46 | from ...exercises.lane_detection.camera_geometry import get_intrinsic_matrix as ex_get_intrinsic_matrix
47 | from ...solutions.lane_detection.camera_geometry import get_intrinsic_matrix as sln_get_intrinsic_matrix
48 |
49 | res_sln = sln_get_intrinsic_matrix(45, 1024, 512)
50 |
51 | try:
52 | res_ex = ex_get_intrinsic_matrix(45, 1024, 512)
53 | result = compare_arrays(res_sln, res_ex, "Test for get_intrinsic_matrix failed.",
54 | "Your function get_intrinsic_matrix seems to be correct!")
55 | except NotImplementedError:
56 | print("Test for get_intrinsic_matrix failed. You did not implement the function!")
57 | return False
58 | except BaseException:
59 | logging.exception("Test for get_intrinsic_matrix failed. Your code raised an exception! I will show you the traceback:")
60 | return False
61 | return result
62 |
63 |
64 | if __name__ == "__main__":
65 | parser = argparse.ArgumentParser(description='Perform unit tests for the camera_geometry module.')
66 |     parser.add_argument('step', type=int, help='Can be either 1, 2, or 3. You should first pass all tests of step 1, then step 2, and finally step 3')
67 | args = parser.parse_args()
68 | step = args.step
69 | if step in [1,2,3]:
70 | print("-------------------------")
71 | print("Running tests for step ", step)
72 | print("-------------------------")
73 | else:
74 | print("Error! Step argument needs to be 1, 2, or 3. For example you can run\npython -m code.tests.lane_detection.camera_geometry_unit_test 1")
75 |
76 |
77 | sln_cg = sln_CameraGeometry()
78 |
79 | # Load some data
80 | data_path = Path('data/')
81 | boundary_fn = data_path / "Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_boundary.txt"
82 | boundary = np.loadtxt(boundary_fn)
83 |
84 | trafo_fn = data_path / "Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_trafo.txt"
85 | trafo_world_to_cam = np.loadtxt(trafo_fn)
86 |
87 | # Run tests
88 |
89 | # Test exercise 1
90 | if step == 1:
91 | test_project_polyline(boundary, trafo_world_to_cam, sln_cg.intrinsic_matrix)
92 | test_get_intrinsic_matrix()
93 |
94 | # Test exercise 2
95 | if step == 2:
96 | from ...exercises.lane_detection.camera_geometry import CameraGeometry as ex_CameraGeometry
97 |
98 | try:
99 | ex_cg = ex_CameraGeometry()
100 | compare_arrays(sln_cg.rotation_cam_to_road, ex_cg.rotation_cam_to_road,
101 | "You did not calculate rotation_cam_to_road correctly in your CameraGeometry class.",
102 | "It seems that you computed rotation_cam_to_road correctly!")
103 |
104 | compare_arrays(sln_cg.translation_cam_to_road, ex_cg.translation_cam_to_road,
105 | "You did not calculate translation_cam_to_road correctly in your CameraGeometry class.",
106 | "It seems that you computed translation_cam_to_road correctly!")
107 |
108 | compare_arrays(sln_cg.trafo_cam_to_road, ex_cg.trafo_cam_to_road,
109 | "You did not calculate trafo_cam_to_road correctly in your CameraGeometry class.",
110 | "It seems that you computed trafo_cam_to_road correctly!")
111 |
112 | compare_arrays(sln_cg.road_normal_camframe, ex_cg.road_normal_camframe,
113 | "You did not calculate road_normal_camframe correctly in your CameraGeometry class.",
114 | "It seems that you computed road_normal_camframe correctly!")
115 |
116 | for u,v in [(76,982), (444, 711), (2,1022)]:
117 | compare_arrays(sln_cg.uv_to_roadXYZ_camframe(u,v), ex_cg.uv_to_roadXYZ_camframe(u,v),
118 | "Your function uv_to_roadXYZ_camframe() did not compute the correct result for u,v = {},{}".format(u,v),
119 | "Your function uv_to_roadXYZ_camframe() worked correctkly for u,v={},{}!".format(u,v))
120 | except BaseException:
121 | logging.exception("An exception was thrown in your CameraGeometry class! I will show you the traceback:")
122 |
123 | # Test exercise 3
124 | if step == 3:
125 | from ...exercises.lane_detection.camera_geometry import CameraGeometry as ex_CameraGeometry
126 | try:
127 | ex_cg = ex_CameraGeometry()
128 | compare_arrays(sln_cg.precompute_grid()[1], ex_cg.precompute_grid()[1],
129 | "Your function precompute_grid() did not compute the correct grid.",
130 | "Your function precompute_grid() seems to be correct!")
131 | except BaseException:
132 | logging.exception("An exception was thrown in your CameraGeometry class! I will show you the traceback:")
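For orientation, the get_intrinsic_matrix function tested above follows the usual pinhole formula: the focal length in pixels comes from the horizontal field of view, and the principal point sits at the image center. A sketch (not necessarily byte-identical to the solution module, but consistent with the hard-coded K matrix in /code/tests/control above):

import numpy as np

def get_intrinsic_matrix_sketch(field_of_view_deg, image_width, image_height):
    # focal length in pixels from the horizontal field of view
    f = image_width / (2 * np.tan(np.deg2rad(field_of_view_deg) / 2))
    cx, cy = image_width / 2, image_height / 2  # principal point
    return np.array([[f, 0, cx],
                     [0, f, cy],
                     [0, 0,  1]])

# get_intrinsic_matrix_sketch(45, 1024, 512) gives f ~ 1236.08, cx = 512,
# cy = 256, matching the K used by the control simulation.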
--------------------------------------------------------------------------------
/code/util/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/code/util/__init__.py
--------------------------------------------------------------------------------
/code/util/carla_util.py:
--------------------------------------------------------------------------------
1 | import carla
2 | import pygame
3 |
4 | import queue
5 | import numpy as np
6 |
7 | def carla_vec_to_np_array(vec):
8 | return np.array([vec.x,
9 | vec.y,
10 | vec.z])
11 |
12 | class CarlaSyncMode(object):
13 | """
14 | Context manager to synchronize output from different sensors. Synchronous
15 | mode is enabled as long as we are inside this context
16 |
17 | with CarlaSyncMode(world, sensors) as sync_mode:
18 | while True:
19 | data = sync_mode.tick(timeout=1.0)
20 |
21 | """
22 |
23 | def __init__(self, world, *sensors, **kwargs):
24 | self.world = world
25 | self.sensors = sensors
26 | self.frame = None
27 | self.delta_seconds = 1.0 / kwargs.get('fps', 20)
28 | self._queues = []
29 | self._settings = None
30 |
31 | def __enter__(self):
32 | self._settings = self.world.get_settings()
33 | self.frame = self.world.apply_settings(carla.WorldSettings(
34 | no_rendering_mode=False,
35 | synchronous_mode=True,
36 | fixed_delta_seconds=self.delta_seconds))
37 |
38 | def make_queue(register_event):
39 | q = queue.Queue()
40 | register_event(q.put)
41 | self._queues.append(q)
42 |
43 | make_queue(self.world.on_tick)
44 | for sensor in self.sensors:
45 | make_queue(sensor.listen)
46 | return self
47 |
48 | def tick(self, timeout):
49 | self.frame = self.world.tick()
50 | data = [self._retrieve_data(q, timeout) for q in self._queues]
51 | assert all(x.frame == self.frame for x in data)
52 | return data
53 |
54 | def __exit__(self, *args, **kwargs):
55 | self.world.apply_settings(self._settings)
56 |
57 | def _retrieve_data(self, sensor_queue, timeout):
58 | while True:
59 | data = sensor_queue.get(timeout=timeout)
60 | if data.frame == self.frame:
61 | return data
62 |
63 |
64 |
65 | def carla_img_to_array(image):
66 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
67 | array = np.reshape(array, (image.height, image.width, 4))
68 | array = array[:, :, :3]
69 | array = array[:, :, ::-1]
70 | return array
71 |
72 |
73 | def draw_image(surface, image, blend=False):
74 | array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
75 | array = np.reshape(array, (image.height, image.width, 4))
76 | array = array[:, :, :3]
77 | array = array[:, :, ::-1]
78 | image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
79 | if blend:
80 | image_surface.set_alpha(100)
81 | surface.blit(image_surface, (0, 0))
82 |
83 | def draw_image_np(surface, image, blend=False):
84 | array = image
85 | image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
86 | if blend:
87 | image_surface.set_alpha(100)
88 | surface.blit(image_surface, (0, 0))
89 |
90 |
91 | def should_quit():
92 | for event in pygame.event.get():
93 | if event.type == pygame.QUIT:
94 | return True
95 | elif event.type == pygame.KEYUP:
96 | if event.key == pygame.K_ESCAPE:
97 | return True
98 | return False
99 |
100 | def find_weather_presets():
101 | import re
102 | rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
103 | name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
104 | presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
105 | return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
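CARLA delivers raw camera images as BGRA byte buffers, which is why carla_img_to_array and draw_image above first drop the alpha channel and then reverse the channel axis (BGR -> RGB). A tiny numpy demonstration of that slicing:

import numpy as np

bgra = np.zeros((1, 1, 4), dtype=np.uint8)
bgra[0, 0] = [255, 0, 0, 255]      # pure blue, in BGRA order
rgb = bgra[:, :, :3][:, :, ::-1]   # drop alpha, reverse channels to RGB
print(rgb[0, 0])                   # [  0   0 255] -> blue in RGB order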
--------------------------------------------------------------------------------
/code/util/geometry_util.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def dist_point_linestring(p, line_string):
5 | """ Compute distance between a point and a line_string (a.k.a. polyline)
6 | """
7 | a = line_string[:-1, :]
8 | b = line_string[1:, :]
9 | return np.min(linesegment_distances(p, a, b))
10 |
11 | # Function from https://stackoverflow.com/a/58781995/2609987
12 | def linesegment_distances(p, a, b):
13 | """ Cartesian distance from point to line segment
14 |
15 | Edited to support arguments as series, from:
16 | https://stackoverflow.com/a/54442561/11208892
17 |
18 | Args:
19 | - p: np.array of single point, shape (2,) or 2D array, shape (x, 2)
20 | - a: np.array of shape (x, 2)
21 | - b: np.array of shape (x, 2)
22 | """
23 | # normalized tangent vectors
24 | d_ba = b - a
25 | d = np.divide(d_ba, (np.hypot(d_ba[:, 0], d_ba[:, 1]).reshape(-1, 1)))
26 |
27 | # signed parallel distance components
28 | # rowwise dot products of 2D vectors
29 | s = np.multiply(a - p, d).sum(axis=1)
30 | t = np.multiply(p - b, d).sum(axis=1)
31 |
32 | # clamped parallel distance
33 | h = np.maximum.reduce([s, t, np.zeros(len(s))])
34 |
35 | # perpendicular distance component
36 | # rowwise cross products of 2D vectors
37 | d_pa = p - a
38 | c = d_pa[:, 0] * d[:, 1] - d_pa[:, 1] * d[:, 0]
39 |
40 | return np.hypot(h, c)
41 |
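A quick usage check for the two helpers above: the first polyline runs vertically at x = 1, so the origin is exactly 1 m away; for the second, the nearest point is the clamped endpoint (1, 1), giving sqrt(2).

import numpy as np

print(dist_point_linestring(np.array([0, 0]),
                            np.array([[1.0, 0.0], [1.0, 1.0]])))  # 1.0
print(dist_point_linestring(np.array([0, 0]),
                            np.array([[1.0, 1.0], [2.0, 1.0]])))  # ~1.414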
--------------------------------------------------------------------------------
/code/util/seg_data_util.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from tqdm import tqdm
3 | import zipfile
4 | import os
5 | import shutil
6 | from pathlib import Path
7 |
8 | SEG_DATA_FOLDER = "data_lane_segmentation"
9 |
10 | # Function from https://stackoverflow.com/a/62113293/2609987
11 | def download(url: str, fname: str):
12 | resp = requests.get(url, stream=True)
13 | total = int(resp.headers.get("content-length", 0))
14 | with open(fname, "wb") as file, tqdm(
15 | desc=fname, total=total, unit="iB", unit_scale=True, unit_divisor=1024,
16 | ) as bar:
17 | for data in resp.iter_content(chunk_size=1024):
18 | size = file.write(data)
19 | bar.update(size)
20 |
21 |
22 | def download_fresh_segmentation_data():
23 | os.makedirs(SEG_DATA_FOLDER)
24 | URL_data = "https://onedrive.live.com/download?cid=C6E4EE0D11B5A7CA&resid=C6E4EE0D11B5A7CA%21192&authkey=AJuKftLgRyb2PQo"
25 | download(URL_data, "temp_data_lane_segmentation.zip")
26 | print("Unzipping... You might want to go grab a coffee")
27 | with zipfile.ZipFile("temp_data_lane_segmentation.zip", "r") as zip_ref:
28 | zip_ref.extractall(SEG_DATA_FOLDER)
29 | os.remove("temp_data_lane_segmentation.zip")
30 | print("Done")
31 |
32 |
33 | def download_segmentation_data():
34 | if os.path.exists(SEG_DATA_FOLDER):
35 | print(
36 | "You already have a folder 'data_lane_segmentation'. No download necessary."
37 | )
38 | print(
39 | "If you want to enforce the download, delete the folder 'data_lane_segmentation' and run this cell again."
40 | )
41 | else:
42 | download_fresh_segmentation_data()
43 |
44 |
45 | def mkdir_if_not_exist(path):
46 | if not os.path.exists(path):
47 | os.makedirs(path)
48 |
49 |
50 | def sort_collected_data():
51 | """ Copy and sort content of 'data' folder into 'data_lane_segmentation' folder """
52 |
53 | def is_from_valid_set(fn):
54 | return fn.find("validation") != -1
55 |
56 | source_dir = "data"
57 |
58 | x_train_dir = os.path.join(SEG_DATA_FOLDER, "train")
59 | y_train_dir = os.path.join(SEG_DATA_FOLDER, "train_label")
60 | x_valid_dir = os.path.join(SEG_DATA_FOLDER, "val")
61 | y_valid_dir = os.path.join(SEG_DATA_FOLDER, "val_label")
62 |
63 | for direc in [x_train_dir, y_train_dir, x_valid_dir, y_valid_dir]:
64 | mkdir_if_not_exist(direc)
65 |
66 | images = [x for x in os.listdir(source_dir) if x.find("png") >= 0]
67 | inputs = [x for x in images if x.find("label") == -1]
68 | labels = [x for x in images if x.find("label") != -1]
69 |
70 | train_x = [x for x in inputs if not is_from_valid_set(x)]
71 | valid_x = [x for x in inputs if is_from_valid_set(x)]
72 | train_y = [x for x in labels if not is_from_valid_set(x)]
73 | valid_y = [x for x in labels if is_from_valid_set(x)]
74 |
75 | for f in train_x:
76 | shutil.copyfile(os.path.join("data", f), os.path.join(x_train_dir, f))
77 |
78 | for f in train_y:
79 | shutil.copyfile(os.path.join("data", f), os.path.join(y_train_dir, f))
80 |
81 | for f in valid_x:
82 | shutil.copyfile(os.path.join("data", f), os.path.join(x_valid_dir, f))
83 |
84 | for f in valid_y:
85 | shutil.copyfile(os.path.join("data", f), os.path.join(y_valid_dir, f))
86 |
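Typical usage, assuming a notebook or script run from the repository root (mirroring the python -m code.tests... invocation used elsewhere in this repo):

from code.util.seg_data_util import download_segmentation_data, sort_collected_data

download_segmentation_data()  # no-op if data_lane_segmentation/ already exists
sort_collected_data()         # only needed for self-collected images in a local "data" folder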
--------------------------------------------------------------------------------
/data/Image_yaw_2_pitch_-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/Image_yaw_2_pitch_-3.png
--------------------------------------------------------------------------------
/data/Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set.png
--------------------------------------------------------------------------------
/data/Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_label.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_label.png
--------------------------------------------------------------------------------
/data/Town04_Clear_Noon_09_09_2020_14_57_22_frame_625_validation_set_trafo.txt:
--------------------------------------------------------------------------------
1 | -7.088705301284790039e-01 -7.053386569023132324e-01 0.000000000000000000e+00 -1.886166687011718750e+02
2 | 6.147431046479523431e-02 -6.178213347875161077e-02 -9.961947202682495117e-01 -8.595887513399588897e+00
3 | -7.026546460071827482e-01 7.061730794677458789e-01 -8.715573698282241821e-02 1.131672817460803344e+02
4 | 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00
5 |
--------------------------------------------------------------------------------
/data/calibration_video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/calibration_video.mp4
--------------------------------------------------------------------------------
/data/carla_vehicle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/carla_vehicle.png
--------------------------------------------------------------------------------
/data/carla_vehicle_bg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/carla_vehicle_bg.png
--------------------------------------------------------------------------------
/data/carla_vehicle_bg_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/carla_vehicle_bg_2.png
--------------------------------------------------------------------------------
/data/cte_yaw_4_no_calib.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/cte_yaw_4_no_calib.npy
--------------------------------------------------------------------------------
/data/cte_yaw_4_perfect_calib.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/cte_yaw_4_perfect_calib.npy
--------------------------------------------------------------------------------
/data/cte_yaw_4_with_calib.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/cte_yaw_4_with_calib.npy
--------------------------------------------------------------------------------
/data/prob_left.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/thomasfermi/Algorithms-for-Automated-Driving/e1e43140d5ec21d958c719d5e3d1ecba9ccf9f42/data/prob_left.npy
--------------------------------------------------------------------------------