├── .gitattributes
├── .gitignore
├── CITATION.cff
├── ITSC2023_0610_FI.pdf
├── LICENSE
├── README.md
├── doc
│   └── img
│       ├── coherence_motion_froecasting_map_learning.svg
│       └── tl2la_overview.svg
└── lyft_l5_tl2la
    ├── Makefile
    ├── configs
    │   ├── intro_config.yaml
    │   ├── train1_config.yaml
    │   └── trainfull_config.yaml
    ├── dataset
    │   └── preprocessed_info
    │       ├── extended_junction_branch_to_lane.csv
    │       ├── ground_truth.csv
    │       └── minimal_junction_branch_to_lane.csv
    ├── requirements.txt
    ├── res
    │   ├── analysis
    │   │   ├── algorithm
    │   │   │   ├── a-t_diagram_scene2101.png
    │   │   │   ├── tl_status-t_diagram_total_scene2101.png
    │   │   │   ├── tl_status-t_diagram_total_scene474.png
    │   │   │   ├── v-t_diagram_lead_vehicle_total_scene_2101.png
    │   │   │   ├── v-t_diagram_total_scene2101.png
    │   │   │   └── v-t_diagram_total_scene474.png
    │   │   ├── dynamic_data
    │   │   │   ├── branch_travel_distribution_per_junction_trainfull.png
    │   │   │   ├── distribution_branch_trips_trainfull.png
    │   │   │   ├── distribution_junction_trips_trainfull.png
    │   │   │   ├── interactive_visualization.png
    │   │   │   ├── localization_or_map_error.png
    │   │   │   └── tl_state_flicker_signal.png
    │   │   └── semantic_data
    │   │       ├── semantic_satellite.png
    │   │       ├── static_visualization_heatmap.html
    │   │       └── tl_junctions.png
    │   └── evaluation
    │       ├── confusion_matrix
    │       │   ├── heuristic_used_scenes_10000_all_agents.png
    │       │   └── heuristic_used_scenes_10000_ego.png
    │       └── metrics
    │           ├── heuristic_performance_all_agents.png
    │           ├── heuristic_used_scenes_10000_all_agents.md
    │           └── heuristic_used_scenes_10000_ego.md
    ├── scripts
    │   ├── classification
    │   │   ├── pattern_based_contribution.ipynb
    │   │   ├── pattern_based_contribution_experimentation.ipynb
    │   │   └── rejection_method.ipynb
    │   ├── preprocessing
    │   │   ├── agent_features.py
    │   │   ├── agent_mapping.py
    │   │   ├── agent_motion.py
    │   │   ├── ground_truth.py
    │   │   ├── preprocess_dataset.ipynb
    │   │   ├── tl_mapping.py
    │   │   └── traffic_light_scene_state.py
    │   └── visualization
    │       ├── agent_patterns_with_tl_states.ipynb
    │       ├── dataset_statistics.ipynb
    │       ├── visualize.py
    │       └── visualize_static.py
    ├── src
    │   ├── __init__.py
    │   ├── assignment
    │   │   ├── __init__.py
    │   │   ├── assignment_rejection.py
    │   │   ├── assignmet_pattern.py
    │   │   └── evaluation.py
    │   ├── common
    │   │   ├── __init__.py
    │   │   ├── definitions.py
    │   │   ├── helper.py
    │   │   └── utils.py
    │   ├── dataset_operations
    │   │   ├── __init__.py
    │   │   ├── branch_dataset.py
    │   │   ├── extended_map_api.py
    │   │   ├── junction_dataset.py
    │   │   ├── pipeline.py
    │   │   └── preprocessing.py
    │   └── visualization
    │       ├── __init__.py
    │       ├── map_renderer.py
    │       ├── static_visualizer.py
    │       ├── tl_visualization.py
    │       └── visualizer.py
    └── test
        ├── __init__.py
        ├── test_junction_branch_to_lane_dataset.py
        └── test_map_agent_to_lane.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | lyft_l5_tl2la/dataset/preprocessed_info/*.pkl filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | *.pyc
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # poetry
99 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100 | # This is especially recommended for binary packages to ensure reproducibility, and is more
101 | # commonly ignored for libraries.
102 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103 | #poetry.lock
104 |
105 | # pdm
106 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107 | #pdm.lock
108 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109 | # in version control.
110 | # https://pdm.fming.dev/#use-with-ide
111 | .pdm.toml
112 |
113 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
114 | __pypackages__/
115 |
116 | # Celery stuff
117 | celerybeat-schedule
118 | celerybeat.pid
119 |
120 | # SageMath parsed files
121 | *.sage.py
122 |
123 | # Environments
124 | .env
125 | .venv
126 | env/
127 | venv/
128 | ENV/
129 | env.bak/
130 | venv.bak/
131 |
132 | # Spyder project settings
133 | .spyderproject
134 | .spyproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 | # mkdocs documentation
140 | /site
141 |
142 | # mypy
143 | .mypy_cache/
144 | .dmypy.json
145 | dmypy.json
146 |
147 | # Pyre type checker
148 | .pyre/
149 |
150 | # pytype static type analyzer
151 | .pytype/
152 |
153 | # Cython debug symbols
154 | cython_debug/
155 |
156 | # PyCharm
157 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
158 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
159 | # and can be added to the global gitignore or merged into this file. For a more nuclear
160 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
161 | #.idea/
162 |
163 | # Vscode
164 | .vscode
165 |
166 | # Project specific
167 | lyft_l5_tl2la/dataset/aerial_map/
168 | lyft_l5_tl2la/dataset/complete/
169 | lyft_l5_tl2la/dataset/semantic_map/
170 | lyft_l5_tl2la/dataset/preprocessed_info/agent_mapping/
171 | lyft_l5_tl2la/dataset/preprocessed_info/agent_motion/
172 | lyft_l5_tl2la/dataset/preprocessed_info/tl_mapping/
173 | lyft_l5_tl2la/dataset/preprocessed_info/combined_features/
174 | lyft_l5_tl2la/dataset/preprocessed_info/complete_features/
175 | lyft_l5_tl2la/dataset/preprocessed_info/drive_trough/
176 | lyft_l5_tl2la/dataset/preprocessed_info/ego_features/
177 | lyft_l5_tl2la/dataset/preprocessed_info/pipeline/
178 | lyft_l5_tl2la/dataset/preprocessed_info/*.pkl
179 | lyft_l5_tl2la/dataset/results/
180 | lyft_l5_tl2la/logs/
181 | lyft_l5_tl2la/res/bokeh/
182 |
183 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.0.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: "Weber"
5 | given-names: "Andreas"
6 | orcid: "https://orcid.org/0009-0004-1672-430X"
7 | title: "tl2la: Semantic Map Learning of Traffic Light to Lane Assignment based on Motion Data using the Lyft Level 5 Dataset"
8 | date-released: 2023-10-01
9 | url: "https://github.com/map-learning/tl2la"
--------------------------------------------------------------------------------
/ITSC2023_0610_FI.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/ITSC2023_0610_FI.pdf
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright for portions of project tl2la are held by Woven by Toyota, Inc.
2 | 2022 as part of project L5Kit.
3 | All other copyright for project tl2la are held by Andreas Weber 2023.
4 |
5 | ------------------------------------------------------------------------------
6 | tl2la
7 | ------------------------------------------------------------------------------
8 |
9 | MIT License
10 |
11 | Copyright (c) 2023 Andreas Weber
12 |
13 | Permission is hereby granted, free of charge, to any person obtaining a copy
14 | of this software and associated documentation files (the "Software"), to deal
15 | in the Software without restriction, including without limitation the rights
16 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 | copies of the Software, and to permit persons to whom the Software is
18 | furnished to do so, subject to the following conditions:
19 |
20 | The above copyright notice and this permission notice shall be included in all
21 | copies or substantial portions of the Software.
22 |
23 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 | SOFTWARE.
30 |
31 |
32 | ------------------------------------------------------------------------------
33 | L5Kit
34 | ------------------------------------------------------------------------------
35 |
36 | Copyright 2022 Woven by Toyota
37 |
38 | Licensed under the Apache License, Version 2.0 (the "License");
39 | you may not use this file except in compliance with the License.
40 | You may obtain a copy of the License at
41 |
42 | http://www.apache.org/licenses/LICENSE-2.0
43 |
44 | Unless required by applicable law or agreed to in writing, software
45 | distributed under the License is distributed on an "AS IS" BASIS,
46 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
47 | See the License for the specific language governing permissions and
48 | limitations under the License.
49 |
50 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Semantic Map Learning of Traffic Light to Lane Assignment based on Motion Data
2 |
3 | This repository houses the source code of the paper: *Semantic Map Learning of Traffic Light to Lane Assignment based on Motion Data*.
4 |
18 | ## Abstract
19 | Understanding which traffic light controls which lane is crucial to navigate intersections safely.
20 | Autonomous vehicles commonly rely on High Definition (HD) maps that contain information about the assignment of traffic lights to lanes.
21 | The manual provisioning of this information is tedious, expensive, and not scalable.
22 | To remedy these issues, our novel approach derives the assignments from traffic light states and the corresponding motion patterns of vehicle traffic.
23 | This works in an automated way and independently of the geometric arrangement.
24 | We show the effectiveness of basic statistical approaches for this task by implementing and evaluating a pattern-based contribution method.
25 | In addition, our novel rejection method includes accompanying safety considerations by leveraging statistical hypothesis testing.
26 | Finally, we propose a dataset transformation to re-purpose available motion prediction datasets for semantic map learning.
27 | Our publicly available API for the Lyft Level 5 dataset enables researchers to develop and evaluate their own approaches.
28 |
29 |
30 |
31 |
32 | ## Table of Contents
33 | - [Table of Contents](#table-of-contents)
34 | - [💿 Setup](#-setup)
35 | - [🚘 Approach](#-approach)
36 | - [📁 Dataset](#-dataset)
37 | - [⚙️ Preprocessing](#️-preprocessing)
38 | - [Traffic Light Scenes](#traffic-light-scenes)
39 | - [Agent to Lane Mapping](#agent-to-lane-mapping)
40 | - [Agent Motion](#agent-motion)
41 | - [Traffic Light State Mapping](#traffic-light-state-mapping)
42 | - [Junction, Branch and Lane Dataset](#junction-branch-and-lane-dataset)
43 | - [Combined Features](#combined-features)
44 | - [Ground Truth (TL2LA)](#ground-truth-tl2la)
45 | - [👀 Visualisation](#-visualisation)
46 | - [Agent Motion](#agent-motion-1)
47 | - [Traffic Light State](#traffic-light-state)
48 | - [Semantic Map](#semantic-map)
49 | - [Interactive Scene Visualization](#interactive-scene-visualization)
50 | - [📃 Results](#-results)
51 | - [📜 License](#-license)
52 | - [🔗 Citation](#-citation)
53 | - [📰 News](#-news)
54 |
55 | ## 💿 Setup
56 | 1. Install the dependencies of this project, which include the [Lyft Level 5 dev-kit](https://github.com/woven-planet/l5kit), into a new virtual environment using the requirements.txt:
57 |
58 | ```
59 | python -m venv venv
60 | pip install -r requirements.txt
61 | ```
62 | 2. Download the [*Lyft Level 5 Motion Prediction Dataset*](https://woven.toyota/en/prediction-dataset)
63 | 3. Place the dynamic datasets into the folder *dataset/complete/*
64 | 4. Set the Python path to *lyft_l5_tl2la/*
65 | ```
66 | # powershell
67 | $env:PYTHONPATH="./lyft_l5_tl2la/"
68 |
69 | # vscode (settings.json)
70 | "jupyter.notebookFileRoot": "${workspaceFolder}/lyft_l5_tl2la/",
71 | ```
72 |
73 | 5. Explore the data with the visualization tools and the interactive Python notebooks
74 |
75 |
76 | ## 🚘 Approach
77 |
78 |
79 |
80 | Deriving the traffic light to lane association (TL2LA) requires both **dynamic information** and **static information**.
81 |
82 |
83 | - **Dynamic information**:
84 | - Agents' behavior in the form of trajectories. These represent the positional information of agents over time.
85 | - Time-dependent traffic light state
86 | - **Static information**:
87 | - Lane model (provided by the semantic map), which contains the geometric information of lanes and their connectivity
88 |
89 | The output of the TL2LA system is the association of traffic lights and lanes (semantic relations). These associations are themselves static information and can be stored in a high-definition (HD) map.
90 |
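To make this concrete, a semantic relation can be represented as a small record; the sketch below is illustrative only (not the project's API), with field names taken from the ground-truth dataset described further down:

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class SemanticRelation:
    """One TL2LA output: does traffic light `tl_id` control lane `lane_id`?"""
    tl_id: str     # traffic light id from the semantic map
    lane_id: str   # lane segment id from the semantic map
    relation: int  # 1 = traffic light controls the lane, 0 = it does not

# Two example relations in the style of ground_truth.csv
relations = [SemanticRelation("/ggb", "AxmM", 1),
             SemanticRelation("icM8", "m+dt", 0)]
```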
91 |
92 | ## 📁 Dataset
93 |
94 | General information and references to the original Lyft Level 5 Motion Prediction Dataset:
95 | - [Documentation](https://woven-planet.github.io/l5kit/)
96 | - [Repository](https://github.com/woven-planet/l5kit)
97 | - [Paper](https://arxiv.org/abs/1805.04687)
98 | - [License](https://woven-planet.github.io/l5kit/license.html#license)
99 |
100 | Download all dataset files of the [*Lyft Level 5 Motion Prediction Dataset*](https://woven.toyota/en/prediction-dataset) and organize them in the following structure under the *dataset/* directory:
101 |
102 | ```
103 | dataset/
104 | ├──── aerial_map/
105 | │ ├──── nearmap_images/
106 | │ └──── aerial_map.png
107 | ├──── complete/
108 | │ ├──── sample/
109 | │ ├──── train/
110 | │ └──── train_full/
111 | └──── semantic_map/
112 | ├──── meta.json
113 | └──── semantic_map.pb
114 | ```
115 |
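Once the files are in place, the dataset can be opened through the standard L5Kit API. A minimal sketch, assuming paths relative to *lyft_l5_tl2la/* (see the PYTHONPATH setup step above) and the shipped intro config:

```python
import os
from l5kit.configs import load_config_data
from l5kit.data import ChunkedDataset, LocalDataManager

# L5Kit resolves the relative keys in the config against this folder
os.environ["L5KIT_DATA_FOLDER"] = "dataset/"

cfg = load_config_data("configs/intro_config.yaml")
dm = LocalDataManager(None)  # None -> fall back to L5KIT_DATA_FOLDER

# "./complete/sample/sample.zarr" in the intro config
zarr_dataset = ChunkedDataset(dm.require(cfg["val_data_loader"]["key"])).open()
print(zarr_dataset)  # summary of scenes, frames, agents and traffic light faces
```
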
116 | ### ⚙️ Preprocessing
117 |
118 | Preprocessing of the dataset can be done with the notebook [preprocess_dataset.ipynb](lyft_l5_tl2la/scripts/preprocessing/preprocess_dataset.ipynb)
119 |
120 | #### Traffic Light Scenes
121 | - These datasets contain the indices of scenes that show traffic lights
122 | - Can be generated for each dataset type (train1 or trainfull): *tls_scene_indices_train1.pkl*, *tls_scene_indices_trainfull.pkl* (see the loading sketch below)
123 |
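A loading sketch for these indices, assuming the pickle holds a plain sequence of scene indices:

```python
import pickle

with open("dataset/preprocessed_info/tls_scene_indices_trainfull.pkl", "rb") as f:
    tls_scene_indices = pickle.load(f)

# Restrict any further processing to scenes that contain traffic lights
print(f"{len(tls_scene_indices)} traffic light scenes, e.g. {list(tls_scene_indices)[:5]}")
```
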
124 | #### Agent to Lane Mapping
125 | - 138,000 scenes in total (trainfull dataset)
126 | - Split into 1000 pickle files
127 | - Maps agents to lane segments of the Lyft Level 5 semantic map
128 |
129 | > Structure of Agent to Lane Mapping Dataset
130 | > | scene_idx | frame_idx | agent_id | lane_ids |
131 | > | --------: | --------: | -------: | :------- |
132 | > | 1 | 0 | 0 | [pcct] |
133 | > | 1 | 0 | 1 | [pcct] |
134 | > | ... | ... | ... | ... |
135 |
136 | #### Agent Motion
137 | - Adds vehicle kinematics to Agent to Lane Mapping Dataset
138 | - Split into 1000 pickle files
139 |
140 | > Structure of Agent Motion Dataset
141 | >| | scene_idx | frame_idx | agent_id | lane_id | junction_id | branch_id | lane_sequence_id | sequence_head_lane_id | time | v | a | centroid |
142 | >| ---: | --------: | --------: | -------: | :------ | :---------- | :-------- | ---------------: | :-------------------- | ---: | ------: | -------: | :------------------------------ |
143 | >| 0 | 1 | 161 | 0 | 6AUT | y4Ss | Evy5c | 110 | +xmM | 16.1 | 9.53438 | -1.06195 | [ 738.14093018 -1399.18835449] |
144 | >| 1 | 1 | 162 | 0 | 6AUT | y4Ss | Evy5c | 110 | +xmM | 16.2 | 9.58854 | 0.541639 | [ 737.33410645 -1398.67028809] |
145 | >| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
146 |
147 |
148 | #### Traffic Light State Mapping
149 | - The traffic light state is derived from the detected states of the traffic light bulbs.
150 | - Traffic light states are mapped to the following signals: Red: 0, Green: 1, Yellow: 2, Yellow-Red: 3, Unknown: 4
151 |
152 | > Structure of Traffic Light Mapping Dataset
153 | >| scene_idx | frame_idx | tl_id | tl_status | active_faces | inactive_faces | unknown_faces |
154 | >| --------: | --------: | :---- | --------: | :----------- | :------------- | :--------------- |
155 | >| 1 | 95 | /ggb | 4 | [] | [] | [CDDo jutE xbVG] |
156 | >| 1 | 95 | NTTe | 0 | [/NIJ] | [] | [Q11q, xggH] |
157 | >| ... | ... | ... | ... | ... | ... | ... |
158 |
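The status codes can be pinned down in a small enum. The derivation below is a deliberately simplified sketch (a single unambiguous active face color wins, otherwise Unknown), not the project's exact face-voting logic:

```python
from enum import IntEnum

class TLStatus(IntEnum):
    RED = 0
    GREEN = 1
    YELLOW = 2
    YELLOW_RED = 3
    UNKNOWN = 4

def derive_status(active_faces, face_color):
    """Map a traffic light's active bulb faces to one TLStatus.

    face_color: dict from face id to TLStatus, e.g. {"/NIJ": TLStatus.RED}
    """
    colors = {face_color[f] for f in active_faces if f in face_color}
    return colors.pop() if len(colors) == 1 else TLStatus.UNKNOWN

assert derive_status(["/NIJ"], {"/NIJ": TLStatus.RED}) == TLStatus.RED
assert derive_status([], {}) == TLStatus.UNKNOWN  # no active face detected
```
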
159 | #### Junction, Branch and Lane Dataset
160 | - Manually labeled dataset that describes which lane segments (*lane_ids*) correspond to which junction
161 | - Based on this dataset, the extended junction-to-lane dataset was generated by adding the predecessor *lane_ids* within a radius of 20 meters.
162 |
163 | > Structure of Junction, Branch and Lane Dataset (Extended)
164 | >| Unnamed: 0 | junction_id | branch_id | lane_id | lane_sequence_id | sequence_head_lane_id | num_sequence_head_lane_ids |
165 | >| ---------: | :---------- | :-------- | :------ | ---------------: | :-------------------- | -------------------------: |
166 | >| 0 | RYcT | Sy2KY | lXo5 | 0 | 8V6+ | 1 |
167 | >| 1 | RYcT | Sy2KY | 8V6+ | 0 | 8V6+ | 1 |
168 | >| 2 | RYcT | Sy2KY | bW6+ | 1 | bW6+ | 1 |
169 | >| ... | ... | ... | ... | ... | ... | ... |
170 |
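Both the minimal and the extended CSV ship with this repository, so they load directly with pandas; for example:

```python
import pandas as pd

ext = pd.read_csv(
    "dataset/preprocessed_info/extended_junction_branch_to_lane.csv",
    index_col=0,
)

# Number of distinct lanes per branch of junction "RYcT"
print(ext[ext.junction_id == "RYcT"].groupby("branch_id")["lane_id"].nunique())
```
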
171 | #### Combined Features
172 | - Combines Agent Motion with Traffic Light State Mapping
173 | - Adds additional features such as the distance from the agent's current position to the stop line, the elapsed time since the last detected traffic light state change, whether the vehicle is the lead vehicle in front of the stop line, etc.
174 |
175 | > Structure of Combined Feature Dataset
176 | >| scene_idx | frame_idx | agent_id | lane_id | junction_id | branch_id | sequence_head_lane_id | v | a | density | dist | is_lead | tl_id | tl_status | relation | time_in_tl_state | x | y |
177 | >| --------: | --------: | -------: | :------ | :---------- | :-------- | :-------------------- | ------: | -------: | ------: | ------: | :------ | :---- | --------: | -------: | ---------------: | ------: | -------: |
178 | >| 1 | 161 | 0 | 6AUT | y4Ss | Evy5c | +xmM | 9.53438 | -1.06195 | 1 | 78.7516 | True | /ggb | 0 | 1 | 16.1 | 738.141 | -1399.19 |
179 | >| 1 | 161 | 0 | 6AUT | y4Ss | Evy5c | +xmM | 9.53438 | -1.06195 | 1 | 78.7516 | True | NTTe | 0 | 1 | 16.1 | 738.141 | -1399.19 |
180 | >| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
181 |
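Conceptually, the combination is a join of the agent motion table and the traffic light state table on scene and frame, with the derived features added on top. A self-contained pandas sketch with toy rows taken from the tables above (a simplification of the actual pipeline):

```python
import pandas as pd

agent_motion = pd.DataFrame({
    "scene_idx": [1, 1], "frame_idx": [161, 162], "agent_id": [0, 0],
    "lane_id": ["6AUT", "6AUT"], "v": [9.53, 9.59], "a": [-1.06, 0.54],
})
tl_state = pd.DataFrame({
    "scene_idx": [1, 1], "frame_idx": [161, 162],
    "tl_id": ["/ggb", "/ggb"], "tl_status": [0, 0],
})

# Pair every agent observation with every traffic light seen in the same frame
combined = agent_motion.merge(tl_state, on=["scene_idx", "frame_idx"], how="inner")
print(combined)
```
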
182 | #### Ground Truth (TL2LA)
183 | - This dataset contains the ground truth: the class labels to predict
184 | - 279 semantic relations in total (182 with TL2LA / 97 without TL2LA)
185 |
186 | > Structure of Ground Truth Dataset
187 | >| tl_id | lane_id | relation | junction_id | branch_id |
188 | >|:--------|:----------|-----------:|:--------------|:------------|
189 | >| /ggb | AxmM | 1 | y4Ss | Evy5c |
190 | >| /ggb | fxmM | 1 | y4Ss | Evy5c |
191 | >| /ggb | +xmM | 1 | y4Ss | Evy5c |
192 | >| NTTe | AxmM | 1 | y4Ss | Evy5c |
193 | >| ... | ... | ...| ... | ... |
194 |
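The stated class balance can be verified directly against the shipped CSV:

```python
import pandas as pd

gt = pd.read_csv("dataset/preprocessed_info/ground_truth.csv")
print(len(gt), gt["relation"].value_counts().to_dict())
# Expected: 279 relations in total, {1: 182, 0: 97}
```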
195 |
196 | ### 👀 Visualisation
197 |
198 | #### Agent Motion
199 |
200 |
201 | #### Traffic Light State
202 |
203 |
204 | #### Semantic Map
205 |
207 |
208 |
209 | #### Interactive Scene Visualization
210 |
211 |
212 | ### 📃 Results
213 |
214 | The evaluation results of the three methods for solving TL2LA are listed below:
215 | | Method | Scope | Scenes | Vehicles | TL-Lane Pairs | Acc[%] | Prec[%] | Recall[%] | F1[%] |
216 | | ---------------------- | ---------- | ------ | -------- | ------------- | -------- | ------- | --------- | -------- |
217 | | Naive Baseline | ego only | 109k | 109k | 55 | 81.8 | 81.8 | **100** | 90.0 |
218 | | | all agents | 109k | 109k | 271 | 66.4 | 66.4 | **100** | 79.8 |
219 | | Pattern-based ego only | ego only | 64k | 64k | 55 | 83.6 | 84.6 | 97.8 | 90.7 |
220 | | | all agents | 90k | 10M | 271 | 76.4 | 83.2 | 80.4 | 81.8 |
221 | | Rejection | ego only | 13k | 13k | 55 | **85.5** | 84.9 | **100** | **91.8** |
222 | | | all agents | 42k | 124k | 271 | **80.8** | 78.3 | 98.3 | **87.2** |
223 |
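As a sanity check, each F1 value in the table is the harmonic mean of its precision and recall; e.g. for the pattern-based method over all agents:

```python
def f1(precision: float, recall: float) -> float:
    """Harmonic mean of precision and recall (both in percent)."""
    return 2 * precision * recall / (precision + recall)

print(round(f1(83.2, 80.4), 1))  # 81.8, matching the table
```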
224 |
225 |
226 |
227 | ## 📜 License
228 | The source code is released under the MIT license. Please see the [LICENSE](LICENSE) file for more information.
229 |
230 | ## 🔗 Citation
231 | ```
232 | @inproceedings{SemanticMapLearning2023,
233 | title={Semantic Map Learning of Traffic Light to Lane Assignment based on Motion Data},
234 | author={Monninger, Thomas and Weber, Andreas and Staab, Steffen},
235 |   booktitle={ITSC 2023},
236 | year={2023}
237 | }
238 | ```
239 | ## 📰 News
240 | - [2023/05/28]: The paper was submitted to IEEE ITSC 2023.
241 | - [2023/07/14]: The paper was accepted to IEEE ITSC 2023.
242 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/Makefile:
--------------------------------------------------------------------------------
1 | run_tests:
2 | python -m unittest discover -s test/
--------------------------------------------------------------------------------
/lyft_l5_tl2la/configs/intro_config.yaml:
--------------------------------------------------------------------------------
1 | # Config format schema number
2 | format_version: 4
3 |
4 | ###################
5 | ## Input raster parameters
6 | raster_params:
7 | # raster image size [pixels]
8 | raster_size:
9 | - 224
10 | - 224
11 | # raster's spatial resolution [meters per pixel]: the size in the real world one pixel corresponds to.
12 | pixel_size:
13 | - 0.5
14 | - 0.5
15 | # From 0 to 1 per axis, [0.5,0.5] would show the ego centered in the image.
16 | ego_center:
17 | - 0.25
18 | - 0.5
19 | map_type: "py_semantic"
20 |
21 | # the keys are relative to the dataset environment variable
22 | satellite_map_key: "./aerial_map/aerial_map.png"
23 | semantic_map_key: "./semantic_map/semantic_map.pb"
24 | dataset_meta_key: "./semantic_map/meta.json"
25 |
26 | # e.g. 0.0 include every obstacle, 0.5 show those obstacles with >0.5 probability of being
27 | # one of the classes we care about (cars, bikes, peds, etc.)
28 | filter_agents_threshold: 0.5
29 |
30 | # whether to completely disable traffic light faces in the semantic rasterizer
31 | disable_traffic_light_faces: False
32 |
33 | # When set to True, the rasterizer will set the raster origin at bottom left,
34 | # i.e. vehicles are driving on the right side of the road.
35 | set_origin_to_bottom: True
36 |
37 | ###################
38 | ## Data loader options
39 | val_data_loader:
40 | key: "./complete/sample/sample.zarr"
41 | batch_size: 12
42 | shuffle: False
43 | num_workers: 16
44 |
45 | ###################
46 | ## Preprocessed Data loader options
47 | ## Adapt the filepath and name if required
48 | preprocessed_data_loader:
49 | tls_scene_indices: "./dataset/preprocessed_info/tls_scene_indices_train1.pkl"
50 | junction_branch_to_lane: "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
51 | frame_lane_pos: "./dataset/preprocessed_info/frame_lane_df.pkl"
52 | lanegraph: "./dataset/preprocessed_info/lanegraph_df.pkl"
--------------------------------------------------------------------------------
/lyft_l5_tl2la/configs/train1_config.yaml:
--------------------------------------------------------------------------------
1 | # Config format schema number
2 | format_version: 4
3 |
4 | ###################
5 | ## Input raster parameters
6 | raster_params:
7 | # raster image size [pixels]
8 | raster_size:
9 | - 224
10 | - 224
11 | # raster's spatial resolution [meters per pixel]: the size in the real world one pixel corresponds to.
12 | pixel_size:
13 | - 0.5
14 | - 0.5
15 | # From 0 to 1 per axis, [0.5,0.5] would show the ego centered in the image.
16 | ego_center:
17 | - 0.25
18 | - 0.5
19 | map_type: "py_semantic"
20 |
21 | # the keys are relative to the dataset environment variable
22 | satellite_map_key: "./aerial_map/aerial_map.png"
23 | semantic_map_key: "./semantic_map/semantic_map.pb"
24 | dataset_meta_key: "./semantic_map/meta.json"
25 |
26 | # e.g. 0.0 include every obstacle, 0.5 show those obstacles with >0.5 probability of being
27 | # one of the classes we care about (cars, bikes, peds, etc.)
28 | filter_agents_threshold: 0.5
29 |
30 | # whether to completely disable traffic light faces in the semantic rasterizer
31 | disable_traffic_light_faces: False
32 |
33 | # When set to True, the rasterizer will set the raster origin at bottom left,
34 | # i.e. vehicles are driving on the right side of the road.
35 | set_origin_to_bottom: True
36 |
37 |
38 | ###################
39 | ## Data loader options
40 | val_data_loader:
41 | key: "./complete/train/train.zarr"
42 | batch_size: 12
43 | shuffle: False
44 | num_workers: 16
45 |
46 | ###################
47 | ## Preprocessed Data loader options
48 | ## Adapt the filepath and name if required
49 | preprocessed_data_loader:
50 | tls_scene_indices: "./dataset/preprocessed_info/tls_scene_indices_train1.pkl"
51 | lanegraph: "./dataset/preprocessed_info/lanegraph_df.pkl"
52 | junction_branch_to_lane: "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
53 | ego_frame_lane_mapping: "./dataset/preprocessed_info/frame_lane_df_train1.pkl"
54 | agent_frame_lane_mapping_folder: "./dataset/preprocessed_info/agent_mapping_train1/"
55 | agent_motion: "./dataset/preprocessed_info/agent_motion_train1/"
56 | ground_truth: "./dataset/preprocessed_info/target_associations.pkl"
57 | ground_truth_corrected: "./dataset/preprocessed_info/ground_truth.csv"
58 | scene_tl_state: "./dataset/preprocessed_info/traffic_light_state_scene_df.pkl"
--------------------------------------------------------------------------------
/lyft_l5_tl2la/configs/trainfull_config.yaml:
--------------------------------------------------------------------------------
1 | # Config format schema number
2 | format_version: 4
3 |
4 | ###################
5 | ## Input raster parameters
6 | raster_params:
7 | # raster image size [pixels]
8 | raster_size:
9 | - 224
10 | - 224
11 | # raster's spatial resolution [meters per pixel]: the size in the real world one pixel corresponds to.
12 | pixel_size:
13 | - 0.5
14 | - 0.5
15 | # From 0 to 1 per axis, [0.5,0.5] would show the ego centered in the image.
16 | ego_center:
17 | - 0.25
18 | - 0.5
19 | map_type: "py_semantic"
20 |
21 | # the keys are relative to the dataset environment variable
22 | satellite_map_key: "./aerial_map/aerial_map.png"
23 | semantic_map_key: "./semantic_map/semantic_map.pb"
24 | dataset_meta_key: "./semantic_map/meta.json"
25 |
26 | # e.g. 0.0 include every obstacle, 0.5 show those obstacles with >0.5 probability of being
27 | # one of the classes we care about (cars, bikes, peds, etc.)
28 | filter_agents_threshold: 0.5
29 |
30 | # whether to completely disable traffic light faces in the semantic rasterizer
31 | disable_traffic_light_faces: False
32 |
33 | # When set to True, the rasterizer will set the raster origin at bottom left,
34 | # i.e. vehicles are driving on the right side of the road.
35 | set_origin_to_bottom: True
36 |
37 |
38 | ###################
39 | ## Data loader options
40 | val_data_loader:
41 | key: "./complete/train_full/train_full.zarr"
42 | batch_size: 12
43 | shuffle: False
44 | num_workers: 16
45 |
46 |
47 | ###################
48 | ## Preprocessed Data loader options
49 | ## Adapt the filepath and name if required
50 | preprocessed_data_loader:
51 | tls_scene_indices: "./dataset/preprocessed_info/tls_scene_indices_trainfull.pkl"
52 | lanegraph: "./dataset/preprocessed_info/lanegraph_df.pkl"
53 | junction_branch_to_lane: "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
54 | ego_frame_lane_mapping: "./dataset/preprocessed_info/frame_lane_df_trainfull.pkl"
55 | agent_frame_lane_mapping_folder: "./dataset/preprocessed_info/agent_mapping/"
56 | agent_motion: "./dataset/preprocessed_info/agent_motion/"
57 | ground_truth: "./dataset/preprocessed_info/target_associations.pkl"
58 | ground_truth_corrected: "./dataset/preprocessed_info/ground_truth.csv"
59 | scene_tl_state: "./dataset/preprocessed_info/traffic_light_state_scene_df.pkl"
60 |
61 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/dataset/preprocessed_info/extended_junction_branch_to_lane.csv:
--------------------------------------------------------------------------------
1 | ,junction_id,branch_id,lane_id,lane_sequence_id,sequence_head_lane_id,num_sequence_head_lane_ids
2 | 0,RYcT,Sy2KY,lXo5,0,8V6+,1
3 | 1,RYcT,Sy2KY,8V6+,0,8V6+,1
4 | 2,RYcT,Sy2KY,bW6+,1,bW6+,1
5 | 3,RYcT,Sy2KY,EYo5,1,bW6+,1
6 | 4,RYcT,Sy2KY,6W6+,2,6W6+,1
7 | 5,RYcT,Sy2KY,jYo5,2,6W6+,1
8 | 6,RYcT,NPJXi,O09S,6,5cF2,1
9 | 7,RYcT,NPJXi,5cF2,6,5cF2,1
10 | 8,RYcT,NPJXi,hfgF,6,5cF2,1
11 | 9,RYcT,NPJXi,/iRV,7,YdF2,1
12 | 10,RYcT,NPJXi,YdF2,7,YdF2,1
13 | 11,RYcT,NPJXi,AggF,7,YdF2,1
14 | 12,RYcT,Edzt0,xsPT,8,7/jP,1
15 | 13,RYcT,Edzt0,dfr0,8,7/jP,1
16 | 14,RYcT,Edzt0,7/jP,8,7/jP,1
17 | 15,RYcT,Edzt0,9E7T,9,c/jP,1
18 | 16,RYcT,Edzt0,SsPT,9,c/jP,1
19 | 17,RYcT,Edzt0,c/jP,9,c/jP,1
20 | 18,RYcT,WNPJXi,ZAJ6,10,ZAJ6,1
21 | 19,RYcT,WNPJXi,w8g9,10,ZAJ6,1
22 | 20,RYcT,WNPJXi,0ct5,10,ZAJ6,1
23 | 21,RYcT,WNPJXi,N4Gm,10,ZAJ6,1
24 | 22,RYcT,WNPJXi,EwNX,10,ZAJ6,1
25 | 23,RYcT,WNPJXi,CBiG,10,ZAJ6,1
26 | 24,RYcT,WNPJXi,lQSv,10,ZAJ6,1
27 | 27,qnJy,StDjm,GNgT,11,y3uS,1
28 | 28,qnJy,StDjm,8Jjx,11,y3uS,1
29 | 29,qnJy,StDjm,4ZaL,11,y3uS,1
30 | 30,qnJy,StDjm,XQbC,11,y3uS,1
31 | 31,qnJy,StDjm,y3uS,11,y3uS,1
32 | 32,qnJy,StDjm,T3uS,12,T3uS,1
33 | 33,qnJy,StDjm,dJjx,12,T3uS,1
34 | 34,qnJy,StDjm,azKs,12,T3uS,1
35 | 35,qnJy,StDjm,S0Yy,12,T3uS,1
36 | 36,qnJy,StDjm,nMgT,12,T3uS,1
37 | 37,qnJy,StDjm,0c3b,12,T3uS,1
38 | 38,qnJy,StDjm,UhC0,12,T3uS,1
39 | 39,qnJy,NQxfR,YFUz,13,YFUz,1
40 | 40,qnJy,NQxfR,ne+2,13,YFUz,1
41 | 41,qnJy,NQxfR,Ie+2,14,5EUz,1
42 | 42,qnJy,NQxfR,5EUz,14,5EUz,1
43 | 43,qnJy,NQxfR,pd+2,15,aEUz,1
44 | 44,qnJy,NQxfR,aEUz,15,aEUz,1
45 | 45,ctKJ,E6WDG,9x/Q,19,aVLS,1
46 | 46,ctKJ,E6WDG,Yt7t,19,aVLS,1
47 | 52,ctKJ,E6WDG,I0cX,19,aVLS,1
48 | 53,ctKJ,E6WDG,lWJX,19,aVLS,1
49 | 54,ctKJ,E6WDG,RJSO,19,aVLS,1
50 | 55,ctKJ,E6WDG,aVLS,19,aVLS,1
51 | 56,ctKJ,E6WDG,4SLN,20,7ULS,1
52 | 57,ctKJ,E6WDG,oZs2,20,7ULS,1
53 | 58,ctKJ,E6WDG,7ULS,20,7ULS,1
54 | 59,ctKJ,E6WDG,GWJX,20,7ULS,1
55 | 60,ctKJ,E6WDG,yISO,20,7ULS,1
56 | 61,ctKJ,E6WDG,ex/Q,20,7ULS,1
57 | 62,ctKJ,E6WDG,Y4as,21,cULS,1
58 | 63,ctKJ,E6WDG,/w/Q,21,cULS,1
59 | 64,ctKJ,E6WDG,TISO,21,cULS,1
60 | 65,ctKJ,E6WDG,I/7V,21,cULS,1
61 | 66,ctKJ,E6WDG,cULS,21,cULS,1
62 | 67,ctKJ,E6WDG,nVJX,21,cULS,1
63 | 68,Luda,NtUCt,3BET,22,3BET,1
64 | 69,Luda,NtUCt,Ph2V,22,3BET,1
65 | 70,DoQH,SXep7,uTFZ,24,1WHi,1
66 | 71,DoQH,SXep7,vRim,24,1WHi,1
67 | 72,DoQH,SXep7,1WHi,24,1WHi,1
68 | 73,DoQH,SXep7,WWHi,25,WWHi,1
69 | 74,DoQH,SXep7,e+cf,25,WWHi,1
70 | 75,DoQH,SXep7,PTFZ,25,WWHi,1
71 | 76,DoQH,SXep7,+js+,26,3VHi,1
72 | 77,DoQH,SXep7,3VHi,26,3VHi,1
73 | 78,DoQH,SXep7,wSFZ,26,3VHi,1
74 | 79,DoQH,SXep7,YVHi,27,YVHi,1
75 | 80,DoQH,SXep7,RSFZ,27,YVHi,1
76 | 81,DoQH,SXep7,eJ8d,27,YVHi,1
77 | 82,DoQH,NAc+E,hiPr,28,LXvy,1
78 | 83,DoQH,NAc+E,LXvy,28,LXvy,1
79 | 84,DoQH,NAc+E,ORvO,28,LXvy,1
80 | 85,DoQH,NAc+E,qXvy,29,qXvy,1
81 | 86,DoQH,NAc+E,AjPr,29,qXvy,1
82 | 87,DoQH,NAc+E,urfv,29,qXvy,1
83 | 88,DoQH,NAc+E,JYvy,30,JYvy,1
84 | 89,DoQH,NAc+E,fjPr,30,JYvy,1
85 | 90,DoQH,NAc+E,OGQQ,30,JYvy,1
86 | 91,DoQH,NAc+E,ilp5,31,oYvy,1
87 | 92,DoQH,NAc+E,+jPr,31,oYvy,1
88 | 93,DoQH,NAc+E,oYvy,31,oYvy,1
89 | 94,ECHt,E+/YT,Bm9O,32,Bm9O,1
90 | 97,ECHt,E+/YT,LJOb,32,Bm9O,1
91 | 98,ECHt,E+/YT,il9O,33,il9O,1
92 | 99,ECHt,E+/YT,sIOb,33,il9O,1
93 | 100,ECHt,E+/YT,Mud6,34,Dl9O,1
94 | 101,ECHt,E+/YT,i70Z,34,Dl9O,1
95 | 102,ECHt,E+/YT,Dl9O,34,Dl9O,1
96 | 103,ECHt,E+/YT,sTtZ,35,kk9O,1
97 | 104,ECHt,E+/YT,kk9O,35,kk9O,1
98 | 105,ECHt,E+/YT,D70Z,35,kk9O,1
99 | 108,ECHt,W9rSL,oS2N,36,/TlD,1
100 | 109,ECHt,W9rSL,/TlD,36,/TlD,1
101 | 110,ECHt,W9rSL,eUlD,37,eUlD,1
102 | 111,ECHt,W9rSL,HT2N,37,eUlD,1
103 | 112,ECHt,W9rSL,ntmu,38,9UlD,1
104 | 113,ECHt,W9rSL,9UlD,38,9UlD,1
105 | 114,ECHt,W9rSL,myz0,38,9UlD,1
106 | 115,ECHt,W9rSL,Fzz0,39,cVlD,1
107 | 116,ECHt,W9rSL,cVlD,39,cVlD,1
108 | 117,ECHt,W9rSL,HIXP,39,cVlD,1
109 | 118,FToR,EIyZ/,89HH,40,89HH,1
110 | 119,FToR,EIyZ/,cuTw,40,89HH,1
111 | 120,FToR,EIyZ/,Fw8W,40,89HH,1
112 | 121,FToR,EIyZ/,mv8W,41,d9HH,1
113 | 122,FToR,EIyZ/,d9HH,41,d9HH,1
114 | 123,FToR,EIyZ/,+8HH,42,+8HH,1
115 | 124,FToR,EIyZ/,Hv8W,42,+8HH,1
116 | 125,FToR,EIyZ/,f8HH,43,f8HH,1
117 | 126,FToR,EIyZ/,ou8W,43,f8HH,1
118 | 127,FToR,W6VZ4,RYJ5,48,jMm1,1
119 | 128,FToR,W6VZ4,7E5x,48,jMm1,1
120 | 129,FToR,W6VZ4,jMm1,48,jMm1,1
121 | 130,FToR,W6VZ4,bqIR,49,EMm1,1
122 | 131,FToR,W6VZ4,yXJ5,49,EMm1,1
123 | 132,FToR,W6VZ4,EMm1,49,EMm1,1
124 | 135,FToR,W6VZ4,7PYw,50,lLm1,1
125 | 136,FToR,W6VZ4,lLm1,50,lLm1,1
126 | 137,FToR,W6VZ4,cPYw,51,GLm1,1
127 | 138,FToR,W6VZ4,GLm1,51,GLm1,1
128 | 139,FToR,Shq4c,7cfh,52,8OiE,1
129 | 140,FToR,Shq4c,8OiE,52,8OiE,1
130 | 141,FToR,Shq4c,L2BE,53,FHu2,1
131 | 142,FToR,Shq4c,FHu2,53,FHu2,1
132 | 143,FToR,Shq4c,5dfh,54,6PiE,1
133 | 144,FToR,Shq4c,6PiE,54,6PiE,1
134 | 145,FToR,N/JZp,i1zF,55,YkSE,1
135 | 146,FToR,N/JZp,YkSE,55,YkSE,1
136 | 147,FToR,N/JZp,UsOS,55,YkSE,1
137 | 148,FToR,N/JZp,pz9a,55,YkSE,1
138 | 149,FToR,N/JZp,Awwv,55,YkSE,1
139 | 150,FToR,N/JZp,vZz/,55,YkSE,1
140 | 151,FToR,N/JZp,An6,55,YkSE,1
141 | 152,FToR,N/JZp,f/dY,57,pXNd,1
142 | 153,FToR,N/JZp,9A/o,57,pXNd,1
143 | 154,FToR,N/JZp,bmKy,57,pXNd,1
144 | 155,FToR,N/JZp,pXNd,57,pXNd,1
145 | 156,FToR,N/JZp,poec,58,poec,1
146 | 157,FToR,N/JZp,Cjk3,58,poec,1
147 | 158,FToR,N/JZp,HXf/,58,poec,1
148 | 159,FToR,N/JZp,g2zF,58,poec,1
149 | 162,U6Ym,N8W33,UdQ/,60,CPtm,1
150 | 163,U6Ym,N8W33,mc89,60,CPtm,1
151 | 164,U6Ym,N8W33,qgZI,60,CPtm,1
152 | 165,U6Ym,N8W33,veYS,60,CPtm,1
153 | 166,U6Ym,N8W33,kHou,60,CPtm,1
154 | 167,U6Ym,N8W33,K6x8,60,CPtm,1
155 | 168,U6Ym,N8W33,CPtm,60,CPtm,1
156 | 169,U6Ym,N8W33,hKQN,60,CPtm,1
157 | 170,U6Ym,N8W33,TMC8,60,CPtm,1
158 | 171,U6Ym,N8W33,BLzy,60,CPtm,1
159 | 174,U6Ym,N8W33,cJJW,60,CPtm,1
160 | 175,U6Ym,N8W33,RRaM,60,CPtm,1
161 | 176,U6Ym,N8W33,w8hY,60,CPtm,1
162 | 177,U6Ym,N8W33,TNYY,60,CPtm,1
163 | 178,U6Ym,N8W33,9HQY,60,CPtm,1
164 | 179,U6Ym,N8W33,9qCn,61,lPtm,1
165 | 180,U6Ym,N8W33,lPtm,61,lPtm,1
166 | 181,U6Ym,N8W33,+P/J,61,lPtm,1
167 | 182,U6Ym,N8W33,UB+4,61,lPtm,1
168 | 183,U6Ym,N8W33,sien,61,lPtm,1
169 | 184,U6Ym,EqqbQ,YU/N,62,DvGy,1
170 | 185,U6Ym,EqqbQ,DvGy,62,DvGy,1
171 | 186,U6Ym,EqqbQ,5T/N,63,kuGy,1
172 | 187,U6Ym,EqqbQ,kuGy,63,kuGy,1
173 | 188,U6Ym,EqqbQ,FuGy,64,FuGy,1
174 | 189,U6Ym,EqqbQ,aT/N,64,FuGy,1
175 | 190,U6Ym,EqqbQ,mtGy,65,mtGy,1
176 | 191,U6Ym,EqqbQ,7S/N,65,mtGy,1
177 | 192,U6Ym,WuX82,Zcl0,70,+55w,1
178 | 193,U6Ym,WuX82,+55w,70,+55w,1
179 | 194,U6Ym,WuX82,P8yN,70,+55w,1
180 | 195,U6Ym,WuX82,u8yN,71,d65w,1
181 | 196,U6Ym,WuX82,d65w,71,d65w,1
182 | 197,U6Ym,WuX82,4cl0,71,d65w,1
183 | 198,U6Ym,WuX82,Y3VV,72,865w,1
184 | 199,U6Ym,WuX82,N9yN,72,865w,1
185 | 200,U6Ym,WuX82,865w,72,865w,1
186 | 201,U6Ym,WuX82,s9yN,73,b75w,1
187 | 202,U6Ym,WuX82,4RG2,73,b75w,1
188 | 203,U6Ym,WuX82,b75w,73,b75w,1
189 | 208,E0Us,NAQ7m,nGHV,78,k3bp,1
190 | 209,E0Us,NAQ7m,bE4V,78,k3bp,1
191 | 212,E0Us,NAQ7m,k3bp,78,k3bp,1
192 | 215,E0Us,NAQ7m,8D4V,79,F3bp,1
193 | 216,E0Us,NAQ7m,F3bp,79,F3bp,1
194 | 217,E0Us,NAQ7m,IGHV,79,F3bp,1
195 | 218,E0Us,ET60+,CyA0,80,VUqQ,1
196 | 219,E0Us,ET60+,VUqQ,80,VUqQ,1
197 | 220,E0Us,ET60+,qoFo,80,VUqQ,1
198 | 221,E0Us,ET60+,jxA0,81,2TqQ,1
199 | 222,E0Us,ET60+,KOVH,81,2TqQ,1
200 | 223,E0Us,ET60+,2TqQ,81,2TqQ,1
201 | 224,E0Us,ET60+,qzkm,82,XTqQ,1
202 | 227,E0Us,ET60+,XTqQ,82,XTqQ,1
203 | 230,E0Us,ET60+,Lzkm,83,4SqQ,1
204 | 231,E0Us,ET60+,4SqQ,83,4SqQ,1
205 | 236,E0Us,WFGY0,YpFh,85,cKUZ,1
206 | 237,E0Us,WFGY0,ESMC,85,cKUZ,1
207 | 242,E0Us,WFGY0,DwB3,85,cKUZ,1
208 | 243,E0Us,WFGY0,JhvO,85,cKUZ,1
209 | 246,E0Us,WFGY0,cKUZ,85,cKUZ,1
210 | 251,E0Us,WFGY0,9JUZ,86,9JUZ,1
211 | 252,E0Us,WFGY0,4OV,86,9JUZ,1
212 | 253,E0Us,WFGY0,jVRW,86,9JUZ,1
213 | 254,E0Us,WFGY0,lRMC,86,9JUZ,1
214 | 255,E0Us,WFGY0,qgvO,86,9JUZ,1
215 | 256,E0Us,WFGY0,D7g1,87,eJUZ,1
216 | 257,E0Us,WFGY0,eJUZ,87,eJUZ,1
217 | 258,E0Us,WFGY0,GRMC,87,eJUZ,1
218 | 259,E0Us,WFGY0,LgvO,87,eJUZ,1
219 | 260,E0Us,WFGY0,Y0kf,87,eJUZ,1
220 | 261,E0Us,WFGY0,sfvO,88,/IUZ,1
221 | 262,E0Us,WFGY0,jgwU,88,/IUZ,1
222 | 263,E0Us,WFGY0,4Z0+,88,/IUZ,1
223 | 264,E0Us,WFGY0,nQMC,88,/IUZ,1
224 | 265,E0Us,WFGY0,/IUZ,88,/IUZ,1
225 | 266,3Ccg,NFThX,hINg,94,hINg,1
226 | 269,3Ccg,NFThX,1lJr,94,hINg,1
227 | 270,3Ccg,NFThX,uZT/,94,hINg,1
228 | 280,3Ccg,NFThX,J+PS,94,hINg,1
229 | 281,3Ccg,NFThX,//Qg,94,hINg,1
230 | 282,3Ccg,NFThX,CINg,95,CINg,1
231 | 283,3Ccg,NFThX,WlJr,95,CINg,1
232 | 288,3Ccg,NFThX,O/ie,95,CINg,1
233 | 289,3Ccg,NFThX,3kJr,96,jHNg,1
234 | 290,3Ccg,NFThX,v+ie,96,jHNg,1
235 | 291,3Ccg,NFThX,jHNg,96,jHNg,1
236 | 292,3Ccg,EB35n,jqeT,97,ake8,1
237 | 293,3Ccg,EB35n,cUvZ,97,ake8,1
238 | 294,3Ccg,EB35n,p2A2,97,ake8,1
239 | 295,3Ccg,EB35n,ake8,97,ake8,1
240 | 296,3Ccg,EB35n,Zt8F,97,ake8,1
241 | 297,3Ccg,EB35n,7je8,98,7je8,1
242 | 298,3Ccg,EB35n,DQuy,98,7je8,1
243 | 299,3Ccg,EB35n,K2A2,98,7je8,1
244 | 300,3Ccg,EB35n,r1A2,99,cje8,1
245 | 301,3Ccg,EB35n,cje8,99,cje8,1
246 | 302,3Ccg,EB35n,j19R,99,cje8,1
247 | 303,3Ccg,EB35n,DbNx,100,9ie8,1
248 | 304,3Ccg,EB35n,M1A2,100,9ie8,1
249 | 305,3Ccg,EB35n,9ie8,100,9ie8,1
250 | 306,3Ccg,S8XK4,ceFW,101,vLqW,1
251 | 307,3Ccg,S8XK4,tqfl,101,vLqW,1
252 | 308,3Ccg,S8XK4,vLqW,101,vLqW,1
253 | 309,3Ccg,S8XK4,QLqW,102,QLqW,1
254 | 310,3Ccg,S8XK4,pxWe,102,QLqW,1
255 | 311,3Ccg,S8XK4,vb5l,102,QLqW,1
256 | 312,3Ccg,S8XK4,qpj8,102,QLqW,1
257 | 313,3Ccg,S8XK4,9dFW,102,QLqW,1
258 | 314,3Ccg,S8XK4,NQvE,102,QLqW,1
259 | 315,3Ccg,S8XK4,168D,103,vijX,1
260 | 316,3Ccg,S8XK4,ig2B,103,vijX,1
261 | 317,3Ccg,S8XK4,vijX,103,vijX,1
262 | 318,3Ccg,S8XK4,4Y+a,103,vijX,1
263 | 323,3Ccg,WX0Uk,12MV,104,iD65,1
264 | 324,3Ccg,WX0Uk,iD65,104,iD65,1
265 | 325,3Ccg,WX0Uk,BE65,105,BE65,1
266 | 326,3Ccg,WX0Uk,U3MV,105,BE65,1
267 | 327,3Ccg,WX0Uk,5mPX,106,gE65,1
268 | 328,3Ccg,WX0Uk,0R91,106,gE65,1
269 | 329,3Ccg,WX0Uk,gE65,106,gE65,1
270 | 330,3Ccg,WX0Uk,i0tN,106,gE65,1
271 | 331,3Ccg,WX0Uk,SIca,107,/E65,1
272 | 332,3Ccg,WX0Uk,/E65,107,/E65,1
273 | 333,3Ccg,WX0Uk,CPeu,107,/E65,1
274 | 334,3Ccg,WX0Uk,YnPX,107,/E65,1
275 | 335,3Ccg,WX0Uk,UstW,107,/E65,1
276 | 336,y4Ss,Evy5c,8/TT,108,AxmM,1
277 | 337,y4Ss,Evy5c,AxmM,108,AxmM,1
278 | 338,y4Ss,Evy5c,fxmM,109,fxmM,1
279 | 339,y4Ss,Evy5c,bAUT,109,fxmM,1
280 | 340,y4Ss,Evy5c,6AUT,110,+xmM,1
281 | 341,y4Ss,Evy5c,+xmM,110,+xmM,1
282 | 342,y4Ss,WOuys,Lwft,111,H+dt,1
283 | 343,y4Ss,WOuys,dPy4,111,H+dt,1
284 | 344,y4Ss,WOuys,H+dt,111,H+dt,1
285 | 345,y4Ss,WOuys,DfoC,111,H+dt,1
286 | 346,y4Ss,WOuys,TbHk,111,H+dt,1
287 | 347,y4Ss,WOuys,Qvqr,111,H+dt,1
288 | 350,y4Ss,WOuys,8Py4,112,m+dt,1
289 | 351,y4Ss,WOuys,wJbM,112,m+dt,1
290 | 352,y4Ss,WOuys,ybHk,112,m+dt,1
291 | 353,y4Ss,WOuys,m+dt,112,m+dt,1
292 | 354,LrDx,NPYIq,Eijc,113,jZCn,1
293 | 355,LrDx,NPYIq,jZCn,113,jZCn,1
294 | 356,LrDx,NPYIq,WpUL,113,jZCn,1
295 | 357,LrDx,NPYIq,eR/Y,114,eR/Y,1
296 | 358,LrDx,NPYIq,BKVh,114,eR/Y,1
297 | 359,LrDx,NPYIq,LSuc,114,eR/Y,1
298 | 360,LrDx,NPYIq,xFWI,115,/Q/Y,1
299 | 361,LrDx,NPYIq,cja6,115,/Q/Y,1
300 | 362,LrDx,NPYIq,/Q/Y,115,/Q/Y,1
301 | 363,LrDx,S4QRn,2cmH,116,kgCj,1
302 | 364,LrDx,S4QRn,Xbgn,116,kgCj,1
303 | 365,LrDx,S4QRn,kgCj,116,kgCj,1
304 | 366,LrDx,S4QRn,xL/3,117,DhCj,1
305 | 367,LrDx,S4QRn,DhCj,117,DhCj,1
306 | 368,LrDx,S4QRn,bh2y,117,DhCj,1
307 | 369,LrDx,S4QRn,emZY,118,emZY,1
308 | 370,LrDx,S4QRn,qlr2,118,emZY,1
309 | 371,LrDx,S4QRn,wu0x,118,emZY,1
310 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/dataset/preprocessed_info/ground_truth.csv:
--------------------------------------------------------------------------------
1 | tl_id,lane_id,relation,junction_id,branch_id,corrected
2 | /ggb,AxmM,1,y4Ss,Evy5c,0
3 | /ggb,fxmM,1,y4Ss,Evy5c,0
4 | /ggb,+xmM,1,y4Ss,Evy5c,0
5 | NTTe,AxmM,1,y4Ss,Evy5c,0
6 | NTTe,fxmM,1,y4Ss,Evy5c,0
7 | NTTe,+xmM,1,y4Ss,Evy5c,0
8 | LL1i,AxmM,1,y4Ss,Evy5c,1
9 | LL1i,fxmM,1,y4Ss,Evy5c,1
10 | LL1i,+xmM,1,y4Ss,Evy5c,0
11 | VUk/,H+dt,1,y4Ss,WOuys,0
12 | VUk/,m+dt,1,y4Ss,WOuys,0
13 | axuZ,H+dt,1,y4Ss,WOuys,0
14 | axuZ,m+dt,1,y4Ss,WOuys,0
15 | mBh/,H+dt,1,y4Ss,WOuys,0
16 | mBh/,m+dt,1,y4Ss,WOuys,0
17 | icM8,H+dt,1,y4Ss,WOuys,1
18 | icM8,m+dt,0,y4Ss,WOuys,0
19 | 3WL8,vLqW,0,3Ccg,S8XK4,0
20 | 3WL8,QLqW,1,3Ccg,S8XK4,0
21 | 3WL8,vijX,1,3Ccg,S8XK4,0
22 | FlE6,vLqW,0,3Ccg,S8XK4,0
23 | FlE6,QLqW,1,3Ccg,S8XK4,0
24 | FlE6,vijX,1,3Ccg,S8XK4,0
25 | aHR8,vLqW,1,3Ccg,S8XK4,0
26 | aHR8,QLqW,0,3Ccg,S8XK4,0
27 | aHR8,vijX,0,3Ccg,S8XK4,0
28 | umZo,vLqW,0,3Ccg,S8XK4,0
29 | umZo,QLqW,1,3Ccg,S8XK4,0
30 | umZo,vijX,1,3Ccg,S8XK4,0
31 | RDa4,vLqW,1,3Ccg,S8XK4,0
32 | RDa4,QLqW,0,3Ccg,S8XK4,0
33 | RDa4,vijX,0,3Ccg,S8XK4,0
34 | 4Xis,iD65,0,3Ccg,WX0Uk,0
35 | 4Xis,BE65,1,3Ccg,WX0Uk,0
36 | 4Xis,gE65,1,3Ccg,WX0Uk,0
37 | 4Xis,/E65,1,3Ccg,WX0Uk,0
38 | CqSo,iD65,0,3Ccg,WX0Uk,0
39 | CqSo,BE65,1,3Ccg,WX0Uk,0
40 | CqSo,gE65,1,3Ccg,WX0Uk,0
41 | CqSo,/E65,1,3Ccg,WX0Uk,0
42 | DDUl,iD65,0,3Ccg,WX0Uk,0
43 | DDUl,BE65,1,3Ccg,WX0Uk,0
44 | DDUl,gE65,1,3Ccg,WX0Uk,0
45 | DDUl,/E65,1,3Ccg,WX0Uk,0
46 | ciok,iD65,1,3Ccg,WX0Uk,0
47 | ciok,BE65,0,3Ccg,WX0Uk,0
48 | ciok,gE65,0,3Ccg,WX0Uk,0
49 | ciok,/E65,0,3Ccg,WX0Uk,0
50 | nuVl,iD65,0,3Ccg,WX0Uk,0
51 | nuVl,BE65,1,3Ccg,WX0Uk,0
52 | nuVl,gE65,1,3Ccg,WX0Uk,0
53 | nuVl,/E65,1,3Ccg,WX0Uk,0
54 | lO+m,iD65,1,3Ccg,WX0Uk,0
55 | lO+m,BE65,0,3Ccg,WX0Uk,0
56 | lO+m,gE65,0,3Ccg,WX0Uk,0
57 | lO+m,/E65,0,3Ccg,WX0Uk,0
58 | 9sh4,VUqQ,0,E0Us,ET60+,0
59 | 9sh4,2TqQ,0,E0Us,ET60+,0
60 | 9sh4,XTqQ,0,E0Us,ET60+,0
61 | 9sh4,4SqQ,1,E0Us,ET60+,0
62 | hc0L,VUqQ,1,E0Us,ET60+,0
63 | hc0L,2TqQ,1,E0Us,ET60+,0
64 | hc0L,XTqQ,1,E0Us,ET60+,0
65 | hc0L,4SqQ,0,E0Us,ET60+,0
66 | lZcz,VUqQ,1,E0Us,ET60+,0
67 | lZcz,2TqQ,1,E0Us,ET60+,0
68 | lZcz,XTqQ,1,E0Us,ET60+,0
69 | lZcz,4SqQ,0,E0Us,ET60+,0
70 | O++3,VUqQ,1,E0Us,ET60+,0
71 | O++3,2TqQ,1,E0Us,ET60+,0
72 | O++3,XTqQ,1,E0Us,ET60+,0
73 | O++3,4SqQ,0,E0Us,ET60+,0
74 | zwm4,VUqQ,1,E0Us,ET60+,0
75 | zwm4,2TqQ,1,E0Us,ET60+,0
76 | zwm4,XTqQ,1,E0Us,ET60+,0
77 | zwm4,4SqQ,0,E0Us,ET60+,0
78 | LEdD,cKUZ,1,E0Us,WFGY0,0
79 | LEdD,9JUZ,1,E0Us,WFGY0,0
80 | LEdD,eJUZ,1,E0Us,WFGY0,0
81 | LEdD,/IUZ,0,E0Us,WFGY0,0
82 | T4tC,cKUZ,0,E0Us,WFGY0,0
83 | T4tC,9JUZ,0,E0Us,WFGY0,0
84 | T4tC,eJUZ,0,E0Us,WFGY0,0
85 | T4tC,/IUZ,1,E0Us,WFGY0,0
86 | XSAD,cKUZ,1,E0Us,WFGY0,0
87 | XSAD,9JUZ,1,E0Us,WFGY0,0
88 | XSAD,eJUZ,1,E0Us,WFGY0,0
89 | XSAD,/IUZ,0,E0Us,WFGY0,0
90 | jzLD,cKUZ,1,E0Us,WFGY0,0
91 | jzLD,9JUZ,1,E0Us,WFGY0,0
92 | jzLD,eJUZ,1,E0Us,WFGY0,0
93 | jzLD,/IUZ,0,E0Us,WFGY0,0
94 | ox+T,cKUZ,0,E0Us,WFGY0,0
95 | ox+T,9JUZ,0,E0Us,WFGY0,0
96 | ox+T,eJUZ,0,E0Us,WFGY0,0
97 | ox+T,/IUZ,0,E0Us,WFGY0,0
98 | 9X0L,cKUZ,0,E0Us,WFGY0,0
99 | 9X0L,9JUZ,0,E0Us,WFGY0,0
100 | 9X0L,eJUZ,0,E0Us,WFGY0,0
101 | 9X0L,/IUZ,1,E0Us,WFGY0,0
102 | RsPp,89HH,1,FToR,EIyZ/,0
103 | RsPp,d9HH,1,FToR,EIyZ/,0
104 | RsPp,+8HH,1,FToR,EIyZ/,0
105 | RsPp,f8HH,0,FToR,EIyZ/,0
106 | aBqp,89HH,0,FToR,EIyZ/,0
107 | aBqp,d9HH,0,FToR,EIyZ/,0
108 | aBqp,+8HH,0,FToR,EIyZ/,0
109 | aBqp,f8HH,1,FToR,EIyZ/,0
110 | yYeP,89HH,1,FToR,EIyZ/,0
111 | yYeP,d9HH,1,FToR,EIyZ/,0
112 | yYeP,+8HH,1,FToR,EIyZ/,0
113 | yYeP,f8HH,0,FToR,EIyZ/,0
114 | h0Eo,89HH,1,FToR,EIyZ/,0
115 | h0Eo,d9HH,1,FToR,EIyZ/,0
116 | h0Eo,+8HH,1,FToR,EIyZ/,0
117 | h0Eo,f8HH,0,FToR,EIyZ/,0
118 | 2mol,jMm1,0,FToR,W6VZ4,0
119 | 2mol,EMm1,0,FToR,W6VZ4,0
120 | 2mol,lLm1,0,FToR,W6VZ4,0
121 | 2mol,GLm1,1,FToR,W6VZ4,0
122 | AETt,jMm1,1,FToR,W6VZ4,0
123 | AETt,EMm1,1,FToR,W6VZ4,0
124 | AETt,lLm1,1,FToR,W6VZ4,0
125 | AETt,GLm1,0,FToR,W6VZ4,0
126 | Pdat,jMm1,1,FToR,W6VZ4,0
127 | Pdat,EMm1,1,FToR,W6VZ4,0
128 | Pdat,lLm1,1,FToR,W6VZ4,0
129 | Pdat,GLm1,0,FToR,W6VZ4,0
130 | Ya58,jMm1,1,FToR,W6VZ4,0
131 | Ya58,EMm1,1,FToR,W6VZ4,0
132 | Ya58,lLm1,1,FToR,W6VZ4,0
133 | Ya58,GLm1,0,FToR,W6VZ4,0
134 | 4nUd,Bm9O,1,ECHt,E+/YT,0
135 | 4nUd,il9O,1,ECHt,E+/YT,0
136 | 4nUd,Dl9O,1,ECHt,E+/YT,0
137 | 4nUd,kk9O,1,ECHt,E+/YT,0
138 | RjTd,Bm9O,1,ECHt,E+/YT,0
139 | RjTd,il9O,1,ECHt,E+/YT,0
140 | RjTd,Dl9O,1,ECHt,E+/YT,0
141 | RjTd,kk9O,1,ECHt,E+/YT,0
142 | YTDn,Bm9O,1,ECHt,E+/YT,0
143 | YTDn,il9O,1,ECHt,E+/YT,0
144 | YTDn,Dl9O,1,ECHt,E+/YT,0
145 | YTDn,kk9O,1,ECHt,E+/YT,0
146 | 5Ike,/TlD,1,ECHt,W9rSL,0
147 | 5Ike,eUlD,0,ECHt,W9rSL,0
148 | 5Ike,9UlD,0,ECHt,W9rSL,0
149 | 5Ike,cVlD,0,ECHt,W9rSL,0
150 | wBXk,/TlD,0,ECHt,W9rSL,0
151 | wBXk,eUlD,1,ECHt,W9rSL,0
152 | wBXk,9UlD,1,ECHt,W9rSL,0
153 | wBXk,cVlD,1,ECHt,W9rSL,0
154 | STkk,/TlD,0,ECHt,W9rSL,0
155 | STkk,eUlD,1,ECHt,W9rSL,0
156 | STkk,9UlD,1,ECHt,W9rSL,0
157 | STkk,cVlD,1,ECHt,W9rSL,0
158 | VEcT,/TlD,0,ECHt,W9rSL,0
159 | VEcT,eUlD,1,ECHt,W9rSL,0
160 | VEcT,9UlD,1,ECHt,W9rSL,0
161 | VEcT,cVlD,1,ECHt,W9rSL,0
162 | 3VcD,aVLS,1,ctKJ,E6WDG,0
163 | 3VcD,7ULS,1,ctKJ,E6WDG,0
164 | 3VcD,cULS,1,ctKJ,E6WDG,0
165 | aLv7,aVLS,1,ctKJ,E6WDG,0
166 | aLv7,7ULS,1,ctKJ,E6WDG,0
167 | aLv7,cULS,1,ctKJ,E6WDG,0
168 | LhR9,aVLS,1,ctKJ,E6WDG,0
169 | LhR9,7ULS,1,ctKJ,E6WDG,0
170 | LhR9,cULS,1,ctKJ,E6WDG,0
171 | pmaD,aVLS,1,ctKJ,E6WDG,0
172 | pmaD,7ULS,1,ctKJ,E6WDG,0
173 | pmaD,cULS,1,ctKJ,E6WDG,0
174 | qWWK,YFUz,0,qnJy,NQxfR,0
175 | qWWK,5EUz,0,qnJy,NQxfR,0
176 | qWWK,aEUz,1,qnJy,NQxfR,0
177 | tuOM,YFUz,1,qnJy,NQxfR,0
178 | tuOM,5EUz,1,qnJy,NQxfR,0
179 | tuOM,aEUz,0,qnJy,NQxfR,0
180 | APRT,YFUz,0,qnJy,NQxfR,0
181 | APRT,5EUz,0,qnJy,NQxfR,0
182 | APRT,aEUz,1,qnJy,NQxfR,0
183 | 9j54,3BET,1,Luda,NtUCt,0
184 | R584,3BET,1,Luda,NtUCt,0
185 | ai54,3BET,1,Luda,NtUCt,0
186 | 4AM3,LXvy,0,DoQH,NAc+E,1
187 | 4AM3,qXvy,1,DoQH,NAc+E,0
188 | 4AM3,JYvy,1,DoQH,NAc+E,0
189 | 4AM3,oYvy,1,DoQH,NAc+E,0
190 | UWVz,LXvy,0,DoQH,NAc+E,1
191 | UWVz,qXvy,0,DoQH,NAc+E,0
192 | UWVz,JYvy,0,DoQH,NAc+E,0
193 | UWVz,oYvy,0,DoQH,NAc+E,0
194 | b8F3,LXvy,0,DoQH,NAc+E,1
195 | b8F3,qXvy,1,DoQH,NAc+E,0
196 | b8F3,JYvy,1,DoQH,NAc+E,0
197 | b8F3,oYvy,1,DoQH,NAc+E,0
198 | cvA3,LXvy,0,DoQH,NAc+E,1
199 | cvA3,qXvy,1,DoQH,NAc+E,0
200 | cvA3,JYvy,1,DoQH,NAc+E,0
201 | cvA3,oYvy,1,DoQH,NAc+E,0
202 | QEpZ,jZCn,1,LrDx,NPYIq,0
203 | QEpZ,eR/Y,1,LrDx,NPYIq,0
204 | QEpZ,/Q/Y,0,LrDx,NPYIq,1
205 | h4rU,jZCn,0,LrDx,NPYIq,1
206 | h4rU,eR/Y,0,LrDx,NPYIq,1
207 | h4rU,/Q/Y,1,LrDx,NPYIq,0
208 | 3YQd,jZCn,0,LrDx,NPYIq,1
209 | 3YQd,eR/Y,0,LrDx,NPYIq,1
210 | 3YQd,/Q/Y,1,LrDx,NPYIq,0
211 | 0E3j,kgCj,1,LrDx,S4QRn,0
212 | 0E3j,DhCj,1,LrDx,S4QRn,0
213 | 0E3j,emZY,1,LrDx,S4QRn,0
214 | o39e,kgCj,1,LrDx,S4QRn,0
215 | o39e,DhCj,1,LrDx,S4QRn,0
216 | o39e,emZY,1,LrDx,S4QRn,0
217 | a7+8,+55w,0,U6Ym,WuX82,0
218 | a7+8,d65w,1,U6Ym,WuX82,0
219 | a7+8,865w,1,U6Ym,WuX82,0
220 | a7+8,b75w,1,U6Ym,WuX82,0
221 | gwNT,+55w,1,U6Ym,WuX82,0
222 | gwNT,d65w,0,U6Ym,WuX82,0
223 | gwNT,865w,0,U6Ym,WuX82,0
224 | gwNT,b75w,0,U6Ym,WuX82,0
225 | j9IT,+55w,0,U6Ym,WuX82,0
226 | j9IT,d65w,1,U6Ym,WuX82,0
227 | j9IT,865w,1,U6Ym,WuX82,0
228 | j9IT,b75w,1,U6Ym,WuX82,0
229 | jI/X,+55w,0,U6Ym,WuX82,0
230 | jI/X,d65w,1,U6Ym,WuX82,0
231 | jI/X,865w,1,U6Ym,WuX82,0
232 | jI/X,b75w,1,U6Ym,WuX82,0
233 | lU4X,+55w,0,U6Ym,WuX82,0
234 | lU4X,d65w,1,U6Ym,WuX82,0
235 | lU4X,865w,1,U6Ym,WuX82,0
236 | lU4X,b75w,1,U6Ym,WuX82,0
237 | m558,DvGy,0,U6Ym,EqqbQ,0
238 | m558,kuGy,0,U6Ym,EqqbQ,0
239 | m558,FuGy,0,U6Ym,EqqbQ,0
240 | m558,mtGy,1,U6Ym,EqqbQ,0
241 | CkJa,DvGy,1,U6Ym,EqqbQ,0
242 | CkJa,kuGy,1,U6Ym,EqqbQ,0
243 | CkJa,FuGy,1,U6Ym,EqqbQ,0
244 | CkJa,mtGy,0,U6Ym,EqqbQ,0
245 | EnjI,DvGy,1,U6Ym,EqqbQ,0
246 | EnjI,kuGy,1,U6Ym,EqqbQ,0
247 | EnjI,FuGy,1,U6Ym,EqqbQ,0
248 | EnjI,mtGy,0,U6Ym,EqqbQ,0
249 | cyCW,DvGy,1,U6Ym,EqqbQ,0
250 | cyCW,kuGy,1,U6Ym,EqqbQ,0
251 | cyCW,FuGy,1,U6Ym,EqqbQ,0
252 | cyCW,mtGy,0,U6Ym,EqqbQ,0
253 | ls3Z,DvGy,0,U6Ym,EqqbQ,0
254 | ls3Z,kuGy,0,U6Ym,EqqbQ,0
255 | ls3Z,FuGy,0,U6Ym,EqqbQ,0
256 | ls3Z,mtGy,1,U6Ym,EqqbQ,0
257 | uz2Z,DvGy,1,U6Ym,EqqbQ,0
258 | uz2Z,kuGy,1,U6Ym,EqqbQ,0
259 | uz2Z,FuGy,1,U6Ym,EqqbQ,0
260 | uz2Z,mtGy,0,U6Ym,EqqbQ,0
261 | RR43,1WHi,0,DoQH,SXep7,1
262 | RR43,WWHi,0,DoQH,SXep7,1
263 | RR43,3VHi,0,DoQH,SXep7,1
264 | RR43,YVHi,1,DoQH,SXep7,0
265 | ePt3,1WHi,1,DoQH,SXep7,0
266 | ePt3,WWHi,1,DoQH,SXep7,0
267 | ePt3,3VHi,1,DoQH,SXep7,0
268 | ePt3,YVHi,0,DoQH,SXep7,1
269 | yg28,1WHi,1,DoQH,SXep7,0
270 | yg28,WWHi,1,DoQH,SXep7,0
271 | yg28,3VHi,1,DoQH,SXep7,0
272 | yg28,YVHi,0,DoQH,SXep7,1
273 | HW8C,1WHi,1,DoQH,SXep7,0
274 | HW8C,WWHi,1,DoQH,SXep7,0
275 | HW8C,3VHi,1,DoQH,SXep7,0
276 | HW8C,YVHi,0,DoQH,SXep7,1
277 | MfzU,7/jP,0,RYcT,Edzt0,1
278 | MfzU,c/jP,1,RYcT,Edzt0,0
279 | vW1d,ZAJ6,1,RYcT,WNPJXi,0
280 | 5bhl,ZAJ6,1,RYcT,WNPJXi,0
281 |
282 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/dataset/preprocessed_info/minimal_junction_branch_to_lane.csv:
--------------------------------------------------------------------------------
1 | junction_id,branch_id,lane_id
2 | RYcT,Sy2KY,8V6+
3 | RYcT,Sy2KY,bW6+
4 | RYcT,Sy2KY,6W6+
5 | RYcT,NPJXi,5cF2
6 | RYcT,NPJXi,YdF2
7 | RYcT,Edzt0,7/jP
8 | RYcT,Edzt0,c/jP
9 | RYcT,WNPJXi,ZAJ6
10 | qnJy,StDjm,y3uS
11 | qnJy,StDjm,T3uS
12 | qnJy,NQxfR,YFUz
13 | qnJy,NQxfR,5EUz
14 | qnJy,NQxfR,aEUz
15 | ctKJ,E6WDG,aVLS
16 | ctKJ,E6WDG,7ULS
17 | ctKJ,E6WDG,cULS
18 | Luda,NtUCt,3BET
19 | DoQH,SXep7,1WHi
20 | DoQH,SXep7,WWHi
21 | DoQH,SXep7,3VHi
22 | DoQH,SXep7,YVHi
23 | DoQH,NAc+E,LXvy
24 | DoQH,NAc+E,qXvy
25 | DoQH,NAc+E,JYvy
26 | DoQH,NAc+E,oYvy
27 | ECHt,E+/YT,Bm9O
28 | ECHt,E+/YT,il9O
29 | ECHt,E+/YT,Dl9O
30 | ECHt,E+/YT,kk9O
31 | ECHt,W9rSL,/TlD
32 | ECHt,W9rSL,eUlD
33 | ECHt,W9rSL,9UlD
34 | ECHt,W9rSL,cVlD
35 | FToR,EIyZ/,89HH
36 | FToR,EIyZ/,d9HH
37 | FToR,EIyZ/,+8HH
38 | FToR,EIyZ/,f8HH
39 | FToR,W6VZ4,jMm1
40 | FToR,W6VZ4,EMm1
41 | FToR,W6VZ4,lLm1
42 | FToR,W6VZ4,GLm1
43 | FToR,Shq4c,8OiE
44 | FToR,Shq4c,FHu2
45 | FToR,Shq4c,6PiE
46 | FToR,N/JZp,YkSE
47 | FToR,N/JZp,pXNd
48 | FToR,N/JZp,poec
49 | U6Ym,N8W33,CPtm
50 | U6Ym,N8W33,lPtm
51 | U6Ym,EqqbQ,DvGy
52 | U6Ym,EqqbQ,kuGy
53 | U6Ym,EqqbQ,FuGy
54 | U6Ym,EqqbQ,mtGy
55 | U6Ym,WuX82,+55w
56 | U6Ym,WuX82,d65w
57 | U6Ym,WuX82,865w
58 | U6Ym,WuX82,b75w
59 | E0Us,NAQ7m,k3bp
60 | E0Us,NAQ7m,F3bp
61 | E0Us,ET60+,VUqQ
62 | E0Us,ET60+,2TqQ
63 | E0Us,ET60+,XTqQ
64 | E0Us,ET60+,4SqQ
65 | E0Us,WFGY0,cKUZ
66 | E0Us,WFGY0,9JUZ
67 | E0Us,WFGY0,eJUZ
68 | E0Us,WFGY0,/IUZ
69 | 3Ccg,NFThX,hINg
70 | 3Ccg,NFThX,CINg
71 | 3Ccg,NFThX,jHNg
72 | 3Ccg,EB35n,ake8
73 | 3Ccg,EB35n,7je8
74 | 3Ccg,EB35n,cje8
75 | 3Ccg,EB35n,9ie8
76 | 3Ccg,S8XK4,vLqW
77 | 3Ccg,S8XK4,QLqW
78 | 3Ccg,S8XK4,vijX
79 | 3Ccg,WX0Uk,iD65
80 | 3Ccg,WX0Uk,BE65
81 | 3Ccg,WX0Uk,gE65
82 | 3Ccg,WX0Uk,/E65
83 | y4Ss,Evy5c,AxmM
84 | y4Ss,Evy5c,fxmM
85 | y4Ss,Evy5c,+xmM
86 | y4Ss,WOuys,H+dt
87 | y4Ss,WOuys,m+dt
88 | LrDx,NPYIq,jZCn
89 | LrDx,NPYIq,eR/Y
90 | LrDx,NPYIq,/Q/Y
91 | LrDx,S4QRn,kgCj
92 | LrDx,S4QRn,DhCj
93 | LrDx,S4QRn,emZY
94 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/requirements.txt:
--------------------------------------------------------------------------------
1 | bokeh==2.3.3
2 | wheel==0.38.4
3 | ipykernel==5.5.6
4 | ipython==7.16.3
5 | ipython-genutils==0.2.0
6 | ipywidgets==7.7.0
7 | jupyter==1.0.0
8 | jupyter-console==6.4.3
9 | jupyter-core==4.9.2
10 | jupyterlab-pygments==0.1.2
11 | jupyterlab-widgets==1.1.0
12 | jupytext==1.13.8
13 | l5kit==1.5.0
14 | loguru==0.6.0
15 | numpy==1.19.5
16 | pandas==1.4.4
17 | plotly==5.7.0
18 | prettytable==2.5.0
19 | prompt-toolkit==3.0.30
20 | protobuf==3.19.4
21 | PTable==0.9.2
22 | scipy==1.5.4
23 | seaborn==0.11.2
24 | Shapely==1.8.1.post1
25 | toml==0.10.2
26 | tqdm==4.64.0
27 | zarr==2.8.3
28 | pydotplus
29 | pyinterval==1.2.0
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/a-t_diagram_scene2101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/a-t_diagram_scene2101.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/tl_status-t_diagram_total_scene2101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/tl_status-t_diagram_total_scene2101.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/tl_status-t_diagram_total_scene474.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/tl_status-t_diagram_total_scene474.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_lead_vehicle_total_scene_2101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_lead_vehicle_total_scene_2101.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_total_scene2101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_total_scene2101.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_total_scene474.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/algorithm/v-t_diagram_total_scene474.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/branch_travel_distribution_per_junction_trainfull.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/branch_travel_distribution_per_junction_trainfull.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/distribution_branch_trips_trainfull.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/distribution_branch_trips_trainfull.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/distribution_junction_trips_trainfull.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/distribution_junction_trips_trainfull.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/interactive_visualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/interactive_visualization.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/localization_or_map_error.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/localization_or_map_error.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/dynamic_data/tl_state_flicker_signal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/dynamic_data/tl_state_flicker_signal.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/semantic_data/semantic_satellite.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/semantic_data/semantic_satellite.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/analysis/semantic_data/tl_junctions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/analysis/semantic_data/tl_junctions.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/evaluation/confusion_matrix/heuristic_used_scenes_10000_all_agents.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/evaluation/confusion_matrix/heuristic_used_scenes_10000_all_agents.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/evaluation/confusion_matrix/heuristic_used_scenes_10000_ego.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/evaluation/confusion_matrix/heuristic_used_scenes_10000_ego.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/evaluation/metrics/heuristic_performance_all_agents.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/res/evaluation/metrics/heuristic_performance_all_agents.png
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/evaluation/metrics/heuristic_used_scenes_10000_all_agents.md:
--------------------------------------------------------------------------------
1 | +-----------+-------------+
2 | | Metrics | Results [%] |
3 | +-----------+-------------+
4 | | ACCURACY | 76.4 |
5 | | PRECISION | 83.2 |
6 | | RECALL | 80.4 |
7 | | F_1 | 81.8 |
8 | | F_beta | 80.5 |
9 | +-----------+-------------+
10 | Number of scenes: 89573
11 | Number of agents: 1002873
12 | Number of tl-lane pairs: 271
--------------------------------------------------------------------------------
/lyft_l5_tl2la/res/evaluation/metrics/heuristic_used_scenes_10000_ego.md:
--------------------------------------------------------------------------------
1 | +-----------+-------------+
2 | | Metrics | Results [%] |
3 | +-----------+-------------+
4 | | ACCURACY | 83.6 |
5 | | PRECISION | 84.6 |
6 | | RECALL | 97.8 |
7 | | F_1 | 90.7 |
8 | | F_beta | 97.5 |
9 | +-----------+-------------+
10 | Number of scenes: 63510
11 | Number of agents: 63510
12 | Number of tl-lane pairs: 55
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/agent_features.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Preprocessing script to merge the preprocessed datasets
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | import os
10 | import re
11 | from time import perf_counter
12 |
13 | import pandas as pd
14 | from loguru import logger
15 | from src.common.definitions import DATASET_SIZE
16 | from src.common.helper import get_filenames, init_dataset, pickle_df
17 | from src.common.utils import (calculate_elapsed_time_in_tl_state,
18 | calculate_lead_vehicle, calculate_tl_distance,
19 | calculate_traffic_density,
20 | detect_active_status_change,
21 | generate_tl_lane_relation_lookup,
22 | get_relation_by_lookuptable)
23 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
24 | from src.dataset_operations.junction_dataset import JunctionDataset
25 | from tqdm import tqdm
26 |
27 |
28 | def get_start_idx(path):
29 | """Get current index to preprocess datasets
30 | """
31 | f_names = get_filenames(path)
32 | if len(f_names) == 0:
33 | return 0
34 | # index of last saved scene group +1
35 | return int(re.findall(r'\d+', f_names[-1])[0]) + 1
36 |
37 |
38 | def get_features(junctionDataset: JunctionDataset) -> None:
39 | """Merge the preprocessed agent motion and traffic light mappings into the combined feature dataset
40 |
41 | Args:
42 | junctionDataset (JunctionDataset): junction dataset object
43 |
44 | """
45 | path = "dataset/preprocessed_info/combined_features/"
46 | start_idx = get_start_idx(path)
47 | logger.info(f"Starting to process dataset: {start_idx}")
48 | # Generate traffic light to lane lookup
49 | relation_lookup = generate_tl_lane_relation_lookup(junctionDataset.get_ground_truth())
50 |
51 | for dataset_index in tqdm(range(start_idx, DATASET_SIZE), desc="Iterating through preprocessed datasets."):
52 | # Load agent motion and traffic light mapping
53 | motion_df = junctionDataset.get_agent_motion(dataset_index).reset_index(drop=True)
54 | tl_state_df = junctionDataset.get_tl_mapping(dataset_index).reset_index(drop=True)
55 | tl_state_df = tl_state_df.drop(["active_faces", "inactive_faces", "unknown_faces"], axis=1)
56 |
57 |
58 | status_change_df = detect_active_status_change(tl_state_df)
59 |
60 | motion_df = calculate_traffic_density(motion_df)
61 | motion_df = calculate_tl_distance(junctionDataset.mapAPI, motion_df)
62 | motion_df = calculate_lead_vehicle(motion_df)
63 |
64 | merged_df = motion_df.merge(tl_state_df.set_index(
65 | ["scene_idx", "frame_idx"]), on=["scene_idx", "frame_idx"], how="inner")
66 |
67 |
68 | merged_df["relation"] = merged_df.apply(lambda row: get_relation_by_lookuptable(
69 | row["tl_id"], row["sequence_head_lane_id"], relation_lookup), axis=1)
70 | merged_df = merged_df[merged_df["relation"] != -2]  # remove non-existing relations
71 |
72 |
73 | merged_df["time_in_tl_state"] = merged_df.apply(lambda row: calculate_elapsed_time_in_tl_state(
74 | row["scene_idx"], row["frame_idx"], status_change_df, row["tl_id"]), axis=1)
75 |
76 | merged_df = merged_df.dropna()
77 | merged_df[["x", "y"]] = pd.DataFrame(merged_df["centroid"].tolist(), index=merged_df.index)
78 |
79 | merged_df = merged_df.drop(["lane_sequence_id", "time", "centroid"], axis=1)
80 |
81 | pickle_df(
82 | f"./dataset/preprocessed_info/combined_features/{dataset_index}_df_trainfull.pkl", merged_df)
83 |
84 |
85 | @logger.catch
86 | def main():
87 | # -------------------------- Logging -------------------------- #
88 | log_file_path: str = os.path.join(
89 | 'logs', 'feature_generation', 'file_{time}.log')
90 | logger.add(log_file_path, retention=2,
91 | encoding="utf8", rotation="500 MB", level="TRACE")
92 |
93 | # ----------------- Initialisation of Objects ------------------ #
94 | logger.info("Initializing Dataset")
95 | dataset_type = "trainfull"
96 | cfg, dm, zarr_dataset = init_dataset(config_name=dataset_type)
97 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
98 | junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
99 |
100 | # ----------------- Get Features------------------ #
101 | logger.info("Get combined features")
102 | t_start = perf_counter()
103 |
104 | get_features(junctionDataset)
105 |
106 | t_stop = perf_counter()
107 | logger.info(f"Elapsed time: {(t_stop-t_start)/60}min.")
108 |
109 |
110 | if __name__ == "__main__":
111 | main()
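112 |
113 | # Illustrative note (an assumption about the helper's shape, not verified against
114 | # src.common.utils): generate_tl_lane_relation_lookup is treated here as producing
115 | # a dict keyed by (tl_id, lane_id), with -2 encoding "no labeled relation", which
116 | # is why rows with relation == -2 are dropped above. The ids are hypothetical:
117 | # lookup = {("tl_A", "lane_X"): 1, ("tl_A", "lane_Y"): 0}
118 | # get_relation_by_lookuptable("tl_A", "lane_X", lookup)  # -> 1
119 | # get_relation_by_lookuptable("tl_B", "lane_X", lookup)  # -> -2 (filtered out)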
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/agent_mapping.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Preprocessing script to map agents to their corresponding lanes
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | import os
10 | import pickle
11 | import re
12 | from time import perf_counter
13 |
14 | import numpy as np
15 | import pandas as pd
16 | from loguru import logger
17 | from src.common.helper import get_filenames, init_dataset, pickle_df
18 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
19 | from src.dataset_operations.junction_dataset import JunctionDataset
20 |
21 |
22 | def get_scene_entry(path):
23 | """Returns entry index of scene group for processing
24 | """
25 | f_names = get_filenames(path)
26 | if len(f_names) == 0:
27 | return 0
28 | # index of last saved scene group +1
29 | return int(re.findall(r'\d+', f_names[-1])[0]) + 1
30 |
31 |
32 | @logger.catch
33 | def main():
34 | # configure logging
35 | log_file_path: str = os.path.join(
36 | 'logs', 'agent_mapping', 'file_{time}.log')
37 | logger.add(log_file_path, retention=2,
38 | encoding="utf8", rotation="500 MB", level="TRACE") # Debug
39 |
40 | logger.info("Initializing Dataset")
41 | # dataset_type = "train1"
42 | dataset_type = "trainfull"
43 | cfg, dm, zarr_dataset = init_dataset(config_name=dataset_type)
44 |
45 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
46 | junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
47 |
48 | logger.info("Load Traffic Light Scene Indices")
49 | with open(f'./dataset/preprocessed_info/tls_scene_indices_{dataset_type}.pkl', 'rb') as f:
50 | tls_scene_indices = pickle.load(f)
51 |
52 | logger.info("Start agent mapping")
53 | t_start = perf_counter()
54 |
55 | splitted_scenes = np.array_split(tls_scene_indices, 1000)
56 |
57 | path = './dataset/preprocessed_info/agent_mapping/'
58 | entry_idx = get_scene_entry(path)
59 |
60 | for i, scenes in enumerate(splitted_scenes[entry_idx:]):
61 | results = []
62 | for scene_idx in scenes:
63 | # Map Agents in Frames to Lane
64 | current_results = junctionDataset.map_agents_to_lanes(scene_idx)
65 | results.extend(current_results)
66 |
67 | df = pd.DataFrame(results, columns=[
68 | "scene_idx", "frame_idx", "agent_id", "lane_ids"])
69 | path = f'./dataset/preprocessed_info/agent_mapping/{i + entry_idx}_df_{dataset_type}.pkl'  # offset by entry_idx so resumed runs do not overwrite earlier chunks
70 | pickle_df(path, df)
71 |
72 | t_stop = perf_counter()
73 | logger.info(f"Elapsed time: {t_stop-t_start}")
74 |
75 |
76 | if __name__ == "__main__":
77 | main()
78 |
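79 | # Resumption sketch: the chunked outputs make this long-running mapping restartable.
80 | # With e.g. ["0_df_trainfull.pkl", "1_df_trainfull.pkl"] already on disk,
81 | # get_scene_entry returns 2 and the loop above continues with the third of the
82 | # 1000 scene groups instead of recomputing finished chunks.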
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/agent_motion.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Preprocessing script to calculate the velocity and acceleration of traffic agents (kinematics)
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | import os
10 | import re
11 | from time import perf_counter
12 |
13 | import pandas as pd
14 | from l5kit.data import ChunkedDataset
15 | from l5kit.data.filter import filter_agents_by_frames
16 | from loguru import logger
17 | from src.common.definitions import DATASET_SIZE, DATASET_TYPE
18 | from src.common.helper import get_filenames, init_dataset, pickle_df
19 | from src.common.utils import (get_movement_frame,
20 | merge_agent_lane_mapping_and_branch_frame)
21 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
22 | from src.dataset_operations.junction_dataset import JunctionDataset
23 | from tqdm import tqdm
24 |
25 |
26 | def get_scene_entry(path):
27 | """Returns entry index of scene group for processing
28 | """
29 | f_names = get_filenames(path)
30 | if len(f_names) == 0:
31 | return 0
32 | # index of last saved scene group +1
33 | return int(re.findall(r'\d+', f_names[-1])[0]) + 1
34 |
35 |
36 | def get_motion_data(junctionDataset: JunctionDataset, zarr_dataset: ChunkedDataset) -> None:
37 | """Derive per-agent motion data by merging the agent-to-lane mapping with frame-wise movement information
38 |
39 | Args:
40 | junctionDataset (JunctionDataset): junction dataset object
41 | zarr_dataset (ChunkedDataset): zarr dataset
42 |
43 | """
44 | extended_branch_df = junctionDataset.get_branch_dataset(DATASET_TYPE.EXTENDED)
45 | start_idx = 0
46 |
47 | for dataset_index in tqdm(range(start_idx, DATASET_SIZE), desc="Iterating through preprocessed datasets."):
48 | # Load agent and traffic light mapping
49 | agent_mapping_df = junctionDataset.get_agent_mapping(dataset_index)
50 | agent_mapping_branch_df = merge_agent_lane_mapping_and_branch_frame(
51 | agent_mapping_df, extended_branch_df)
52 |
53 | scene_indices = agent_mapping_df["scene_idx"].unique()
54 | motion_dataset_scenes = []
55 | for scene_idx in scene_indices:
56 | logger.info(
57 | f"Motion data for scene: {scene_idx} in dataset {dataset_index}")
58 |
59 | # Get Scene Data
60 | current_scene = zarr_dataset.get_scene_dataset(
61 | scene_index=scene_idx)
62 | current_frames = current_scene.frames
63 | current_agents = current_scene.agents
64 | current_agent_frames = filter_agents_by_frames(
65 | current_frames, current_agents)
66 |
67 | movement_df = get_movement_frame(
68 | scene_idx, current_frames, current_agent_frames)
69 |
70 | scene_agent_mapping = agent_mapping_branch_df[agent_mapping_branch_df["scene_idx"] == scene_idx]
71 |
72 | scene_motion_df = scene_agent_mapping.merge(
73 | movement_df.set_index(['scene_idx', 'frame_idx', 'agent_id']),  # set_index expects a list of keys
74 | on=['scene_idx', 'frame_idx', 'agent_id'],
75 | how='outer'
76 | )
77 | scene_motion_df = scene_motion_df.dropna(
78 | subset=["lane_sequence_id"])
79 |
80 | motion_dataset_scenes.append(scene_motion_df)
81 |
82 | results = pd.concat(motion_dataset_scenes)
83 | pickle_df(
84 | f"./dataset/preprocessed_info/agent_motion/{dataset_index}_df_trainfull.pkl", results)
85 |
86 |
87 | @logger.catch
88 | def main():
89 | # -------------------------- Logging -------------------------- #
90 | log_file_path: str = os.path.join(
91 | 'logs', 'agent_motion', 'file_{time}.log')
92 | logger.add(log_file_path, retention=2,
93 | encoding="utf8", rotation="500 MB", level="TRACE")
94 |
95 | # ----------------- Initialisation of Objects ------------------ #
96 | logger.info("Initializing Dataset")
97 | dataset_type = "trainfull"
98 | cfg, dm, zarr_dataset = init_dataset(config_name=dataset_type)
99 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
100 | junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
101 |
102 | # ----------------- Get Motion Data ------------------ #
103 | logger.info("Get motion data")
104 | t_start = perf_counter()
105 |
106 | get_motion_data(junctionDataset, zarr_dataset)
107 |
108 | t_stop = perf_counter()
109 | logger.info(f"Elapsed time: {t_stop-t_start}")
110 |
111 |
112 | if __name__ == "__main__":
113 | main()
114 |
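115 | # Kinematics sketch (an assumption mirroring what get_movement_frame is expected
116 | # to derive, not its actual implementation): velocity and acceleration follow
117 | # from centroid positions sampled at 10 Hz via finite differences.
118 | #
119 | # import numpy as np
120 | # dt = 0.1  # seconds, cf. SAMPLE_FREQUENCY in src.common.definitions
121 | # xy = np.array([[0.0, 0.0], [1.0, 0.0], [2.1, 0.0]])   # toy centroids
122 | # v = np.linalg.norm(np.diff(xy, axis=0), axis=1) / dt  # [10.0, 11.0] m/s
123 | # a = np.diff(v) / dt                                   # [10.0] m/s^2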
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/ground_truth.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Preprocessing of the ground truth dataset (afterwards, 20 traffic light
3 | # labels that were incorrectly labeled by Lyft were corrected manually)
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 |
10 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
11 | from src.common.helper import init_dataset, pickle_df
12 | from src.dataset_operations.junction_dataset import JunctionDataset
13 | from src.dataset_operations.preprocessing import generate_target_association
14 |
15 |
16 | # Load Dataset
17 | cfg, dm, zarr_dataset = init_dataset("trainfull")
18 |
19 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
20 | junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
21 |
22 | target_df = generate_target_association(mapAPI, junctionDataset)
23 | path = "./dataset/preprocessed_info/target_associations.pkl"
24 | pickle_df(path, target_df)
25 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/tl_mapping.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Map tl status to tl for each frame in a scene
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 | import os
9 | import pickle
10 | import re
11 | from time import perf_counter
12 |
13 | import numpy as np
14 | import pandas as pd
15 | from loguru import logger
16 | from src.common.helper import get_filenames, init_dataset, pickle_df
17 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
18 | from src.dataset_operations.junction_dataset import JunctionDataset
19 |
20 |
21 | def get_scene_entry(path):
22 | """Returns entry index of scene group for processing
23 | """
24 | f_names = get_filenames(path)
25 | if len(f_names) == 0:
26 | return 0
27 | # index of last saved scene group +1
28 | return int(re.findall(r'\d+', f_names[-1])[0]) + 1
29 |
30 |
31 | @logger.catch
32 | def main():
33 | # configure logging
34 | log_file_path: str = os.path.join(
35 | 'logs', 'tl_mapping', 'file_{time}.log')
36 | logger.add(log_file_path, retention=2,
37 | encoding="utf8", rotation="500 MB", level="TRACE") # Debug
38 |
39 | logger.info("Initializing Dataset")
40 | # dataset_type = "train1"
41 | dataset_type = "trainfull"
42 | cfg, dm, zarr_dataset = init_dataset(config_name=dataset_type)
43 |
44 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
45 | junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
46 |
47 | logger.info("Load Traffic Light Scene Indices")
48 | with open(f'./dataset/preprocessed_info/tls_scene_indices_{dataset_type}.pkl', 'rb') as f:
49 | tls_scene_indices = pickle.load(f)
50 |
51 | logger.info("Start agent mapping")
52 | t_start = perf_counter()
53 |
54 | splitted_scenes = np.array_split(tls_scene_indices, 1000)
55 |
56 | base_path = "./dataset/preprocessed_info/tl_mapping/"
57 | entry_idx = get_scene_entry(base_path)
58 |
59 | for i, scenes in enumerate(splitted_scenes[entry_idx:]):
60 | results = []
61 | for scene_idx in scenes:
62 | # Map traffic light status for each frame
63 | current_results = junctionDataset.map_tl_status(scene_idx)
64 | results.extend(current_results)
65 |
66 | df = pd.DataFrame(results, columns=[
67 | "scene_idx", "frame_idx", "tl_id", "tl_status", "active_faces", "inactive_faces", "unknown_faces"])
68 | file_path = os.path.join(base_path, f"{i+entry_idx}_df_{dataset_type}.pkl")
69 | pickle_df(file_path, df)
70 | logger.info(f"Processed: {i/len(splitted_scenes)*100}%")
71 |
72 | t_stop = perf_counter()
73 | logger.info(f"Elapsed time: {t_stop-t_start}")
74 |
75 |
76 | if __name__ == "__main__":
77 | main()
78 |
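79 | # Reading a processed chunk back (the path below is an example; pickle_df is
80 | # assumed to write pandas-compatible pickles):
81 | # df = pd.read_pickle("./dataset/preprocessed_info/tl_mapping/0_df_trainfull.pkl")
82 | # df.query("tl_status != 4").head()  # drop UNKNOWN states, cf. TL_STATUS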
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/preprocessing/traffic_light_scene_state.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Process script that assigns a traffic light in a scene a specific state
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 | from src.dataset_operations.preprocessing import generate_traffic_light_scene_table
9 | from src.common.helper import pickle_df
10 |
11 | save_path = "dataset/preprocessed_info/traffic_light_state_scene_df.pkl"
12 | df = generate_traffic_light_scene_table()
13 | df = df.reset_index(drop=True)
14 | pickle_df(save_path, df)
15 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/visualization/visualize.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Run Visualizer to render a single scene of the Lyft Level 5 dataset
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 | # Modified from L5Kit (https://github.com/woven-planet/l5kit)
8 | # Copyright (c) 2022 Woven by Toyota. All Rights Reserved.
9 | # ------------------------------------------------------------------------
10 |
11 | from src.visualization.visualizer import Visualizer
12 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
13 | from src.common.helper import init_dataset
14 | from bokeh.io import save
15 |
16 | # Load Dataset
17 | cfg, dm, zarr_dataset = init_dataset("trainfull")
18 |
19 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
20 |
21 | # Adapt Parameters to visualize a different scene or a bigger radius
22 | scene_idx = 2101
23 | visualizer = Visualizer(cfg, dm, radius=50)
24 |
25 | out = visualizer.zarr_to_visualizer_scene(
26 | zarr_dataset.get_scene_dataset(scene_idx), mapAPI)
27 | out_vis = visualizer.visualize(scene_idx, out)
28 |
29 | save(out_vis, f"./res/bokeh/visualization_{scene_idx}.html")
30 |
31 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/scripts/visualization/visualize_static.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Run Visualizer to render a static environment of the Lyft Level 5 dataset
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 | # Modified from L5Kit (https://github.com/woven-planet/l5kit)
8 | # Copyright (c) 2022 Woven by Toyota. All Rights Reserved.
9 | # ------------------------------------------------------------------------
10 |
11 | from src.visualization.static_visualizer import StaticVisualizer
12 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
13 | from src.common.helper import init_dataset
14 | from bokeh.io import save
15 |
16 | # Load Dataset
17 | d_type = "trainfull"
18 | cfg, dm, zarr_dataset = init_dataset(d_type)
19 |
20 | mapAPI = ExtendedMapAPI.from_config(dm, cfg)
21 | visualizer = StaticVisualizer(cfg, dm)
22 |
23 | out = visualizer.get_semantic_visualisation(mapAPI, heatmap=True)
24 | out_vis = visualizer.visualize(out, heatmap=True)
25 |
26 | save(out_vis, "./res/bokeh/static_visualization_heatmap.html")
27 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/src/__init__.py
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/assignment/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/src/assignment/__init__.py
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/assignment/assignment_rejection.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Implementation of Rejection Method
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | import pandas as pd
10 | from scipy import stats
11 |
12 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
13 | from src.common.definitions import TL_STATUS, THRESHOLDS_CONSERVATIVE, SCENE_TL_STATUS
14 |
15 |
16 | def detect_drive_trough(frame_idx: int, velocity: float, dist: float) -> int:
17 | """Detect if the agent drives throug the intersection based on its velocity and distance to the stop line for the given frame
18 |
19 | Args:
20 | frame_idx (int): current frame in the scene
21 | velocity (float): velocity of agent
22 | dist (float): distance towards stop line
23 |
24 | Returns:
25 | int: frame idx of drive through. (-1) if no drive through detected
26 | """
27 | if velocity >= THRESHOLDS_CONSERVATIVE.V_DRIVE_THROUGH.value/3.6 and dist < THRESHOLDS_CONSERVATIVE.STOP_LINE_DIST.value:
28 | return frame_idx
29 | return -1
30 |
31 |
32 | def is_unallowed_drive_trough(velocity: float, dist: float, tl_state: int, lane_id: str, map_API: ExtendedMapAPI) -> bool:
33 | """Returns the frame of drive through. -1 if agent does not pass the intersection
34 |
35 | Special case: right turn on red -> only unallowed if drive through at high speed
36 |
37 | Args:
38 | v (float): velocity
39 | dist (float): distance
40 |
41 | Returns:
42 | int: frame idx
43 | """
44 |
45 | assert tl_state != TL_STATUS.UNKNOWN.value, 'Unknown traffic light states should have been filtered out in a previous step'
46 |
47 | # tl state not red but known: allowed to pass intersection
48 | if tl_state != TL_STATUS.RED.value:
49 | return False
50 |
51 | # tl state red: forbidden to pass, except right turn on red!
52 | is_rightmost_lane = not map_API.has_adjacent_lane_change_right(lane_id)
53 | if velocity >= THRESHOLDS_CONSERVATIVE.V_DRIVE_THROUGH.value/3.6 and dist < THRESHOLDS_CONSERVATIVE.STOP_LINE_DIST.value:
54 | # not right turn
55 | if not is_rightmost_lane:
56 | return True
57 | # is right turn -> check for higher speed
58 | if velocity > THRESHOLDS_CONSERVATIVE.V_DRIVE_THROUGH_RIGHT_TURN.value/3.6 and dist < THRESHOLDS_CONSERVATIVE.STOP_LINE_DIST.value:
59 | return True
60 |
61 | # right turn on red at low speed allowed
62 | return False
63 |
64 |
65 | def conservative_data_preprocessing(combined_features: pd.DataFrame, scene_tl_state: pd.DataFrame, extended_branch_df: pd.DataFrame):
66 | """Data preprocessing for rejection method
67 |
68 | Args:
69 | combined_features (pd.DataFrame): dataset containing all features of agents, traffic lights and lanes
70 | scene_tl_state (pd.DataFrame): traffic light state
71 | extended_branch_df (pd.DataFrame): junction branch with lane sequence information
72 |
73 | Returns:
74 | pd.DataFrame: cleaned and combined dataset
75 | """
76 | combined_features = combined_features.query("dist <= 5")
77 | combined_features = combined_features.query("v >= 0.5")
78 | combined_features = combined_features.query(f"tl_status != {TL_STATUS.UNKNOWN.value}")
79 |
80 | # filter out traffic lights with invalid state
81 | combined_features = combined_features.merge(scene_tl_state, on=["scene_idx", "tl_id"], how="left")
82 | combined_features = combined_features[~combined_features.isnull().any(axis=1)]
83 | combined_features = combined_features[combined_features["interpreted_state"]!=SCENE_TL_STATUS.UNDEFINED.value]
84 |
85 | # filter out round-trip bug
86 | combined_features = combined_features.merge(extended_branch_df, on=["junction_id", "branch_id", "lane_id", "sequence_head_lane_id"], how="left")
87 | combined_features = combined_features.dropna(subset="num_sequence_head_lane_ids")
88 |
89 | # filter out specific lanes
90 | ignore_lane_ids = ["qgZI", # roundtrip
91 | "GRMC", "nQMC", "IRMC", # overlapping lanes infront of junction: E0Us
92 | "yISO", "TISO", "RJSO", "/w/Q", "9x/Q", "ex/Q" # overlapping lanes infront of junction: ctKJ
93 | "OLcZ", "TbHk", "ybHk", "v7zs", "o7Zz" # overlapping lanes west arm of junction y4ss
94 | ]
95 | combined_features = combined_features[~combined_features["lane_id"].isin(ignore_lane_ids)]
96 | combined_features = combined_features.astype({"state_known_start_frame": 'int', "state_known_end_frame": 'int', "state_change_frame": 'int', "interpreted_state": "int"})
97 | return combined_features
98 |
99 |
100 |
101 | def select_all_drive_throughs(combined_features: pd.DataFrame) -> pd.DataFrame:
102 | """Selects all agents that performed a drive through in a scene for a specif tl-lane combination.
103 | Take the last drive through index since the detection delivers multiple drive throughs for the interval [0 , STOP_LINE_DIST)
104 |
105 | Args:
106 | combined_features (pd.DataFrame): dataset with all features
107 |
108 | Returns:
109 | pd.DataFrame: dataset containing drive-throughs
110 | """
111 | select_columns = ["scene_idx", "frame_idx", "agent_id", "v", "dist", "junction_id", "branch_id", "sequence_head_lane_id", "tl_id", "tl_status", "state_known_start_frame", "state_known_end_frame", "state_change_frame", "interpreted_state", "drive_trough_idx", "is_unallowed_drive_trough"]
112 | return combined_features.query("drive_trough_idx >= 0").drop_duplicates(subset=["scene_idx", "agent_id", "tl_id", "sequence_head_lane_id"], keep="last")[select_columns]
113 |
114 |
115 | def select_unallowed_drive_throughs_const_red(drive_troughs: pd.DataFrame) -> pd.DataFrame:
116 | """Select all unallowed drive throughs on a const. red traffic light
117 |
118 | Args:
119 | drive_troughs (pd.DataFrame): dataset with vehicles driving throug an intersection
120 |
121 | Returns:
122 | pd.DataFrame: dataset with vehicles driving throug an intersection at red light
123 | """
124 | drive_trough_on_constant_red = drive_troughs.query(f"tl_status == {TL_STATUS.RED.value} and drive_trough_idx >= 0 and interpreted_state == {SCENE_TL_STATUS.CONST_RED.value} and is_unallowed_drive_trough == True")
125 | return drive_trough_on_constant_red
126 |
127 |
128 | def select_unallowed_drive_throughs_before_red_green_change(drive_troughs: pd.DataFrame) -> pd.DataFrame:
129 | """Select all unallowed drive throughs on before a red to green change (on a red traffic light)
130 |
131 | Args:
132 | drive_troughs (pd.DataFrame): dataset with vehicles driving throug an intersection
133 |
134 | Returns:
135 | pd.DataFrame: dataset with vehicles driving throug an intersection at red light
136 | """
137 | drive_trough_before_red_green = drive_troughs.query(f"tl_status == {TL_STATUS.RED.value} and drive_trough_idx >= 0 and interpreted_state == {SCENE_TL_STATUS.RED_GREEN.value} and drive_trough_idx <= state_change_frame and is_unallowed_drive_trough == True")
138 | return drive_trough_before_red_green
139 |
140 | def select_unallowed_drive_throughs_after_green_red_change(drive_troughs: pd.DataFrame) -> pd.DataFrame:
141 | """Select all unallowed drive throughs on after a green to red change (on a red traffic light)
142 |
143 | Args:
144 | drive_troughs (pd.DataFrame): dataset with vehicles driving throug an intersection
145 |
146 | Returns:
147 | pd.DataFrame: dataset with vehicles driving throug an intersection at red light
148 | """
149 |
150 | drive_trough_after_green_red = drive_troughs.query(f"tl_status == {TL_STATUS.RED.value} and drive_trough_idx >= 0 and interpreted_state == {SCENE_TL_STATUS.GREEN_RED.value} and drive_trough_idx >= state_change_frame + 15 and is_unallowed_drive_trough == True")
151 | return drive_trough_after_green_red
152 |
153 | def select_unallowed_red_drive_throughs(drive_troughs: pd.DataFrame) -> pd.DataFrame:
154 | """Select all unallowed drive throughs on red light
155 |
156 | Args:
157 | drive_troughs (pd.DataFrame): dataset with vehicles driving throug an intersection
158 |
159 | Returns:
160 | pd.DataFrame: dataset with vehicles driving throug an intersection at red light
161 | """
162 | drive_trough_on_constant_red = select_unallowed_drive_throughs_const_red(drive_troughs)
163 | drive_trough_before_red_green = select_unallowed_drive_throughs_before_red_green_change(drive_troughs)
164 | drive_trough_after_green_red = select_unallowed_drive_throughs_after_green_red_change(drive_troughs)
165 |
166 | red_cases = [drive_trough_on_constant_red, drive_trough_before_red_green, drive_trough_after_green_red]
167 | # red_cases = [case.drop_duplicates(subset=["scene_idx", "agent_id", "tl_id", "sequence_head_lane_id"], keep="last") for case in red_cases]
168 | drive_trough_on_red = pd.concat(red_cases)
169 | return drive_trough_on_red
170 |
171 | def count_drive_throughs(drive_throughs: pd.DataFrame, count_column_name: str) -> pd.DataFrame:
172 | """Count the number of vehicles driving through an intersection
173 |
174 | Args:
175 | drive_throughs (pd.DataFrame): dataset with vehicles driving through an intersection
176 | count_column_name (str): name of the resulting count column
177 |
178 | Returns:
179 | pd.DataFrame: drive through count
180 | """
181 | return drive_throughs.groupby(by=["sequence_head_lane_id", "tl_id"])["agent_id"].count().reset_index().rename(columns={"agent_id": count_column_name})
182 |
183 |
184 | def perform_binom_test(all_drives: pd.DataFrame, red_passes: pd.DataFrame, alpha = 0.001, p_red_drive_through = 0.05) -> pd.DataFrame:
185 | """Perform binomial test to identify based on red-passes if traffic light is assigned to lane.
186 |
187 | Args:
188 | all_drives (pd.DataFrame): contains number of all drives for a traffic light lane pair
189 | red_passes (pd.DataFrame): contains number of red-passes for a traffic light lane pair
190 | alpha (float): significance level for rejecting H0
191 | p_red_drive_through (float): assumed upper bound on the probability of crossing the intersection unallowed at a red traffic light
192 |
193 | Returns:
194 | pd.DataFrame: result of binomial test assigning each traffic light lane pair an association (assignment/no assignment)
195 | """
196 |
197 | tl_lane_stats = all_drives.merge(red_passes, on=["sequence_head_lane_id", "tl_id"], how="left")
198 | tl_lane_stats = tl_lane_stats.fillna(0).astype({"num_unallowed_red_drive_throughts": int})
199 | tl_lane_stats["num_red_drive_throughts_percentage"] = tl_lane_stats["num_unallowed_red_drive_throughts"] / tl_lane_stats["num_drive_throughs"]
200 |
201 | # if pvalue <=alpha -> reject H0 -> no assignment between tl-lane pair
202 | tl_lane_stats["invalid_assoc"] = tl_lane_stats.apply(lambda row: stats.binomtest(k=row["num_unallowed_red_drive_throughts"], n=row["num_drive_throughs"], p=p_red_drive_through, alternative="greater").pvalue <= alpha, axis=1)
203 | return tl_lane_stats
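204 |
205 | # Worked example of the rejection idea (toy numbers, not taken from the dataset):
206 | # under H0 a true tl-lane pair sees unallowed red passes with probability at most
207 | # p = 0.05. Observing k = 8 red passes in n = 20 drive-throughs gives
208 | # stats.binomtest(k=8, n=20, p=0.05, alternative="greater").pvalue  # ~3e-6
209 | # which is far below alpha = 0.001, so H0 is rejected and the candidate
210 | # tl-lane association is marked invalid.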
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/assignment/assignmet_pattern.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Implementation of Pattern-based contribution Method
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | from concurrent.futures import ThreadPoolExecutor
10 | from typing import List, Optional
11 |
12 | import numpy as np
13 | import pandas as pd
14 | from src.common.definitions import (AGENT_ACCELERATION_STATES, AGENT_PATTERN,
15 | AGENT_VELOCITY_STATES, DATASET_TYPE,
16 | SAMPLE_FREQUENCY, SCENE_TL_STATUS,
17 | THRESHOLDS, TL_STATUS, TURN)
18 | from src.common.helper import get_pickled_dataset_index
19 | from src.common.utils import detect_active_status_change
20 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
21 | from src.dataset_operations.junction_dataset import JunctionDataset
22 |
23 |
24 | def get_elapsed_time_in_tl_state(frame_idx: int, tl_mapping: pd.DataFrame) -> float:
25 | """Calcualtes the time elapsed after a state change of a traffic light
26 |
27 | Args:
28 | frame_idx (int): index of current frame
29 | tl_mapping (pd.DataFrame): tl mapping with state information for whole frame
30 |
31 | Returns:
32 | float: elapsed time
33 | """
34 | status_change_df = detect_active_status_change(tl_mapping)
35 | status_change_before_current_frame = status_change_df[status_change_df["frame_idx"] < frame_idx]
36 | if len(status_change_before_current_frame) == 0:
37 | return frame_idx * SAMPLE_FREQUENCY
38 | last_status_change = status_change_before_current_frame.iloc[-1]
39 | return (frame_idx - last_status_change["frame_idx"]) * SAMPLE_FREQUENCY
40 |
41 |
42 | def calculate_contribution(pattern: int, tl_status: int, tl_status_time: float, is_lead: bool, dist: float, turn_type: int, v: float) -> float:
43 | """Calculate the contribution value for a detected agent pattern and tl state
44 |
45 | Args:
46 | pattern (int): detected agent pattern
47 | tl_status (int): state of traffic light
48 | tl_status_time (float): time elapsed in the current tl state
49 |
50 | Raises:
51 | ValueError: if the given pattern is not defined
52 |
53 | Returns:
54 | float: contribution value
55 | """
56 | # TODO: may be useful to weight the contribution values by distance
57 |
58 | if pattern == AGENT_PATTERN.STATONARY.value:
59 | if tl_status == TL_STATUS.RED.value and dist < THRESHOLDS.T_STOP_ZONE.value:
60 | return 2
61 | elif is_lead and tl_status_time > THRESHOLDS.T_GREEN_REACTION.value and tl_status == TL_STATUS.GREEN.value:
62 | return -3
63 | elif is_lead and tl_status == TL_STATUS.GREEN.value:
64 | return -1
65 | return 0 # no contribution
66 | elif pattern == AGENT_PATTERN.MOVING.value:
67 | if tl_status == TL_STATUS.RED.value and tl_status_time > THRESHOLDS.T_RED_REACTION.value and dist < THRESHOLDS.T_STOP_ZONE.value:
68 | return -3
69 | elif tl_status == TL_STATUS.RED.value and dist < THRESHOLDS.T_SD_ZONE.value:
70 | return -1
71 | elif tl_status == TL_STATUS.GREEN.value and dist < THRESHOLDS.T_SD_ZONE.value:
72 | return 3
73 | elif tl_status == TL_STATUS.GREEN.value and dist < THRESHOLDS.T_STOP_ZONE.value:
74 | return 5
75 | return 0
76 | elif pattern == AGENT_PATTERN.ACCELERATION.value:
77 | # from moving
78 | if v in AGENT_VELOCITY_STATES.V_MOVING.value:
79 | if tl_status == TL_STATUS.RED.value and dist < THRESHOLDS.T_STOP_ZONE.value and tl_status_time > THRESHOLDS.T_RED_REACTION.value:
80 | return -3
81 | elif tl_status == TL_STATUS.RED.value and dist < THRESHOLDS.T_SD_ZONE.value:
82 | return -1
83 | elif tl_status == TL_STATUS.GREEN.value and dist < THRESHOLDS.T_STOP_ZONE.value:
84 | return 1
85 | return 0
86 | else: # from stationary
87 | if tl_status == TL_STATUS.RED.value and dist < THRESHOLDS.T_STOP_ZONE.value and turn_type != TURN.RIGHT.value:
88 | return -2
89 | elif tl_status == TL_STATUS.GREEN.value and dist < THRESHOLDS.T_SD_ZONE.value and tl_status_time > THRESHOLDS.T_GREEN_REACTION.value:
90 | return 3
91 | return 0
92 | elif pattern == AGENT_PATTERN.DECELERATION.value:
93 | if tl_status == TL_STATUS.RED.value and dist > THRESHOLDS.T_STOP_ZONE.value and dist < THRESHOLDS.T_SD_ZONE.value:
94 | return 2
95 | elif tl_status == TL_STATUS.GREEN.value and is_lead and dist < THRESHOLDS.T_STOP_ZONE.value and turn_type == TURN.LEFT.value:
96 | return -1
97 | elif tl_status == TL_STATUS.GREEN.value and is_lead and dist < THRESHOLDS.T_STOP_ZONE.value and turn_type == TURN.NORMAL.value:
98 | return -2
99 | return 0
100 | elif pattern == AGENT_PATTERN.UNDEFINED.value:
101 | return 0
102 | raise ValueError(f"Unknown agent pattern: {pattern}")
103 |
104 |
105 | def detect_pattern(dist: float, velocity: float, acceleration: float) -> AGENT_PATTERN:
106 | """Determine the agent pattern based on velocity, acceleration and distance of the agent to the stop line
107 |
108 | Args:
109 | dist (float): distance between agent and stop line
110 | velocity (float): velocity of agent
111 | acceleration (float): acceleration of agent
112 |
113 | Returns:
114 | AGENT_PATTERN: the detected pattern
115 | """
116 | if dist > THRESHOLDS.T_DIST.value:
117 | return AGENT_PATTERN.UNDEFINED
118 | if np.isnan(velocity) or np.isnan(acceleration):
119 | return AGENT_PATTERN.UNDEFINED
120 |
121 | # Check for pattern
122 | if velocity in AGENT_VELOCITY_STATES.V_STANDSTIL.value:
123 | if acceleration in AGENT_ACCELERATION_STATES.ACCELERATING.value:
124 | return AGENT_PATTERN.ACCELERATION
125 | elif acceleration in AGENT_ACCELERATION_STATES.DECELERATION.value:
126 | return AGENT_PATTERN.STATONARY
127 | elif acceleration in AGENT_ACCELERATION_STATES.NO_ACCELERATION.value:
128 | return AGENT_PATTERN.STATONARY
129 | return AGENT_PATTERN.UNDEFINED
130 | elif velocity in AGENT_VELOCITY_STATES.V_MOVING.value:
131 | if acceleration in AGENT_ACCELERATION_STATES.ACCELERATING.value:
132 | return AGENT_PATTERN.MOVING
133 | if acceleration in AGENT_ACCELERATION_STATES.DECELERATION.value:
134 | return AGENT_PATTERN.DECELERATION
135 | elif acceleration in AGENT_ACCELERATION_STATES.NO_ACCELERATION.value:
136 | return AGENT_PATTERN.MOVING
137 | return AGENT_PATTERN.UNDEFINED
138 | return AGENT_PATTERN.UNDEFINED # other states are undefined -> no pattern detected!
139 |
140 |
141 | def fetch_contribution(dist: float, velocity: float, acceleration: float, tl_state: int, time_in_tl_state: float, is_lead: bool, turn_type: int):
142 | """After grouping by: scene_idx, frame_idx, sequence_head_lane_id, tl_id, agent_id
143 |
144 | Args:
145 | combined_features (pd.DataFrame): complete dataset with all features
146 | mapAPI (ExtendedMapAPI): map API Object to interact with HD map
147 | """
148 | agent_pattern = detect_pattern(dist, velocity, acceleration)
149 | return calculate_contribution(agent_pattern.value, tl_state, time_in_tl_state, is_lead, dist, turn_type, velocity)
150 |
151 |
152 | def heuristic_data_preprocessing(combined_features: pd.DataFrame, map_api: ExtendedMapAPI, scene_tl_state: pd.DataFrame, extended_branch_df: pd.DataFrame, only_tl_lane_pairs: Optional[pd.DataFrame] = None, only_ego: bool = False) -> pd.DataFrame:
153 | """Data preprocessing for heuristic approach
154 |
155 | Args:
156 | combined_features (pd.DataFrame): uncleaned dataset
157 | map_api (ExtendedMapAPI): map API
158 | scene_tl_state (pd.DataFrame): dataset containing interpreted traffic light scene state
159 | extended_branch_df (pd.DataFrame): dataset containing junction, branches and lanes
160 | only_tl_lane_pairs (Optional[pd.DataFrame]): optional set of tl-lane pairs to restrict the dataset to; only_ego (bool): consider only the ego vehicle or all agents
161 |
162 | Returns:
163 | pd.DataFrame: cleaned and combined dataset
164 | """
165 | if only_ego:
166 | combined_features = combined_features.query("agent_id == 0")
167 |
168 | combined_features = combined_features.query("v >= 0.5")
169 | combined_features = combined_features.query(f"tl_status != {TL_STATUS.UNKNOWN.value}")
170 |
171 | # filter out traffic lights with invalid state
172 | combined_features = combined_features.merge(scene_tl_state, on=["scene_idx", "tl_id"], how="left")
173 | combined_features = combined_features[~combined_features.isnull().any(axis=1)]
174 | combined_features = combined_features[combined_features["interpreted_state"]!=SCENE_TL_STATUS.UNDEFINED.value]
175 |
176 | # filter out round-trip bug
177 | combined_features = combined_features.merge(extended_branch_df, on=["junction_id", "branch_id", "lane_id", "sequence_head_lane_id"], how="left")
178 | combined_features = combined_features.dropna(subset="num_sequence_head_lane_ids")
179 |
180 | # filter out specific lanes
181 | ignore_lane_ids = ["qgZI", # roundtrip
182 | "GRMC", "nQMC", "IRMC", # overlapping lanes infront of junction: E0Us
183 | "yISO", "TISO", "RJSO", "/w/Q", "9x/Q", "ex/Q" # overlapping lanes infront of junction: ctKJ
184 | "OLcZ", "TbHk", "ybHk", "v7zs", "o7Zz" # overlapping lanes west arm of junction y4ss
185 | ]
186 | combined_features = combined_features[~combined_features["lane_id"].isin(ignore_lane_ids)]
187 |
188 | # only consider specific tl lane pairs
189 | if only_tl_lane_pairs is not None:
190 | only_tl_lane_pairs = only_tl_lane_pairs.rename(columns={"lane_id": "sequence_head_lane_id"})
191 | combined_features = combined_features.merge(only_tl_lane_pairs, on=["sequence_head_lane_id", "tl_id"])
192 |
193 | # Add lane type
194 | combined_features["turn_type"] = combined_features["lane_id"].apply(map_api.get_turn_type)
195 |
196 | combined_features = combined_features.astype({"state_known_start_frame": 'int', "state_known_end_frame": 'int', "state_change_frame": 'int', "interpreted_state": "int"})
197 | return combined_features
198 |
199 | def apply_frame_contribution(combined_features: pd.DataFrame) -> pd.DataFrame:
200 | """Calculate contribution for every traffic light - agent - lane combination frame-wise.
201 |
202 | Args:
203 | combined_features (pd.DataFrame): dataset with all features
204 |
205 | Returns:
206 | pd.DataFrame: dataset with the contribution for TL2LA between each tl-lane pair
207 | """
208 | group_labels = ["scene_idx", "frame_idx", "sequence_head_lane_id", "tl_id", "agent_id"]
209 | sample_scene_grouped = combined_features.groupby(by=group_labels, as_index=True)
210 |
211 | contributions = sample_scene_grouped[["dist", "v", "a", "tl_status", "time_in_tl_state", "is_lead", "turn_type"]].apply(lambda group: fetch_contribution(*group.values[0]))
212 | contributions_df = contributions.reset_index().rename(columns={0: "contribution"})
213 | combined_features = combined_features.merge(contributions_df, on=group_labels)
214 | combined_features = combined_features.dropna(subset="contribution")
215 | return combined_features
216 |
217 | def generalize_over_frames(frame_contribution: pd.DataFrame) -> pd.DataFrame:
218 | """Generalization of contribution over frames -> predict TL2LA per scene by aggregating the contribution scene-wise
219 |
220 | Args:
221 | frame_contribution (pd.DataFrame): dataset with frame contribution
222 |
223 | Returns:
224 | pd.DataFrame: dataset with scene prediction
225 | """
226 | contribution_scenes = frame_contribution.groupby(["scene_idx", "sequence_head_lane_id", "tl_id"], as_index=False).aggregate({"contribution": "sum"}).astype({"contribution": "int"})
227 | contribution_scenes["scene_relation"] = contribution_scenes["contribution"].apply(lambda x: 1 if x > 0 else 0)
228 | return contribution_scenes
229 |
230 | def generalize_over_scenes_by_relation(scene_contribution: pd.DataFrame, significance: float = 0.5) -> pd.DataFrame:
231 | """Generalization of predictions over scenes by average relation -> predict TL2LA by aggregating the prediction of single scenes
232 |
233 | Args:
234 | scene_contribution (pd.DataFrame): dataset with scene predictions; significance (float): fraction of the median scene relation required for a positive assignment
235 |
236 | Returns:
237 | pd.DataFrame: dataset with TL2LA based on the provided scenes
238 | """
239 | contribution_scenes_agg = scene_contribution.groupby(by=["sequence_head_lane_id", "tl_id"], as_index=False).aggregate({"scene_relation": "sum"})
240 | average_scene_relations = contribution_scenes_agg.groupby(by=["sequence_head_lane_id"], as_index=False).aggregate({"scene_relation": "median"}).rename(columns={"scene_relation": "avg_scene_relation"})
241 | contribution_scenes_agg = contribution_scenes_agg.merge(average_scene_relations, on=["sequence_head_lane_id"])
242 | contribution_scenes_agg["predicted_relation"] = contribution_scenes_agg.apply(lambda row: 1 if (row["scene_relation"] > significance * row["avg_scene_relation"]) else 0, axis=1)
243 | return contribution_scenes_agg
244 |
245 | def generalize_over_scenes_by_class_majority(scene_contribution: pd.DataFrame, significance: float = 0.5) -> pd.DataFrame:
246 | """Generalization of predictions over scenes by class majority -> predict TL2LA by aggregating the prediction of single scenes
247 |
248 | Args:
249 | scene_contribution (pd.DataFrame): dataset with scene predictions; significance (float): majority threshold for a positive assignment
250 |
251 | Returns:
252 | pd.DataFrame: dataset with TL2LA based on the provided scenes
253 | """
254 | pos_assignment_counts = scene_contribution.groupby(by=["sequence_head_lane_id", "tl_id"], as_index=False).aggregate({"scene_relation": "sum"}).rename(columns={"scene_relation": "pos_assignment_counts"})
255 | all_assignments_counts = scene_contribution.groupby(by=["sequence_head_lane_id", "tl_id"], as_index=False).aggregate({"scene_relation": "count"}).rename(columns={"scene_relation": "all_assignments_counts"})
256 | contribution_scenes_agg = all_assignments_counts.merge(pos_assignment_counts, on=["sequence_head_lane_id", "tl_id"])
257 | contribution_scenes_agg["predicted_relation"] = contribution_scenes_agg["pos_assignment_counts"] >= significance * contribution_scenes_agg["all_assignments_counts"]
258 | contribution_scenes_agg["predicted_relation"] = contribution_scenes_agg["predicted_relation"].map({True: 1, False: 0})
259 | return contribution_scenes_agg
260 |
261 | def heristic_prediction_for_signle_scene(scene_idx: int, cfg: dict, junctionDataset: JunctionDataset, mapAPI: ExtendedMapAPI) -> pd.DataFrame:
262 | """Apply the heuristic approach on given scenes
263 |
264 | Args:
265 | scene_idx (int): Scene to predict TL2LA
266 | cfg (dict): config
267 |
268 | Returns:
269 | pd.DataFrame: prediction based on the scene
270 | """
271 |
272 | features = junctionDataset.get_combined_features(get_pickled_dataset_index(scene_idx, cfg)).query("scene_idx == @scene_idx")
273 | features["turn_type"] = features["lane_id"].apply(mapAPI.get_turn_type)
274 |
275 | # calculate contribution
276 | frame_contribution = apply_frame_contribution(features)
277 |
278 | scene_prediction = generalize_over_frames(frame_contribution)
279 | prediction = generalize_over_scenes_by_class_majority(scene_prediction)
280 |
281 | # Append intersection information
282 | prediction = prediction.rename(columns={"sequence_head_lane_id": "lane_id"})
283 | prediction = prediction.merge(junctionDataset.get_branch_dataset(DATASET_TYPE.MINIMAL), on="lane_id", how="left")
284 | return prediction
285 |
286 |
287 | def heristic_prediction_for_scenes(scene_indices: List[int], cfg: dict, junctionDataset: JunctionDataset, mapAPI: ExtendedMapAPI) -> pd.DataFrame:
288 | """Apply the heuristic approach on given scenes
289 |
290 | Args:
291 | scene_indices (List[int]): scenes to predict TL2LA for
292 | cfg (dict): config
293 |
294 | Returns:
295 | pd.DataFrame: prediction based on the given scenes
296 | """
297 | dataset_indices_to_load = set([get_pickled_dataset_index(scene_idx, cfg) for scene_idx in scene_indices])
298 |
299 | with ThreadPoolExecutor(max_workers=4) as executor:
300 | loaded_combined_features = executor.map(junctionDataset.get_combined_features, dataset_indices_to_load)
301 | loaded_scenes = [df.query(f"scene_idx in {scene_indices}") for df in loaded_combined_features]
302 |
303 | if not loaded_scenes:
304 | raise Exception("Nothing to analyze!")
305 |
306 | features = pd.concat(loaded_scenes)
307 | features["turn_type"] = features["lane_id"].apply(mapAPI.get_turn_type)
308 |
309 | # calculate contribution
310 | frame_contribution = apply_frame_contribution(features)
311 |
312 | scene_prediction = generalize_over_frames(frame_contribution)
313 | prediction = generalize_over_scenes_by_class_majority(scene_prediction)
314 |
315 | # Append intersection information
316 | prediction = prediction.rename(columns={"sequence_head_lane_id": "lane_id"})
317 | prediction = prediction.merge(junctionDataset.get_branch_dataset(DATASET_TYPE.MINIMAL), on="lane_id", how="left")
318 | return prediction
319 |
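320 | # Hypothetical end-to-end usage (object construction mirrors the preprocessing
321 | # scripts; the scene indices are placeholders):
322 | #
323 | # from src.common.helper import init_dataset
324 | # cfg, dm, zarr_dataset = init_dataset("trainfull")
325 | # mapAPI = ExtendedMapAPI.from_config(dm, cfg)
326 | # junctionDataset = JunctionDataset(cfg, mapAPI, zarr_dataset)
327 | # prediction = heristic_prediction_for_scenes([474, 2101], cfg, junctionDataset, mapAPI)
328 | # print(prediction[["lane_id", "tl_id", "predicted_relation"]])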
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/assignment/evaluation.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Functions to evaluate classification results
3 | # Lyft Lvl 5 Dataset
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 |
10 | from typing import Optional, Tuple
11 |
12 | import matplotlib.pyplot as plt
13 | import numpy as np
14 | import pandas as pd
15 | from prettytable import PrettyTable
16 | from sklearn import metrics
17 |
18 |
19 | def get_metrics(y_target: pd.Series, y_pred: pd.Series) -> Tuple[float,...]:
20 | """Visualize confusion matrix
21 |
22 | Args:
23 | y_target (pd.Series): target label of classes
24 | y_pred (pd.Series): predicted label of clases
25 |
26 | Returns:
27 | (Tuple[flaot, ...]): acc, prec, rec, f1
28 | """
29 | accuracy = metrics.accuracy_score(y_target, y_pred)
30 | precision = metrics.precision_score(y_target, y_pred)
31 | recall = metrics.recall_score(y_target, y_pred)
32 | f1_score = metrics.f1_score(y_target, y_pred)
33 | return accuracy, precision, recall, f1_score
34 |
35 |
36 | def visualize_confusion_matrix(y_target: pd.Series, y_pred: pd.Series, title: str, save_path: Optional[str] = None):
37 | """Visualize confusion matrix
38 |
39 | Args:
40 | y_target (pd.Series): target label of classes
41 | y_pred (pd.Series): predicted label of classes
42 | title (str): Title of plot
43 | save_path (str): save plot to given path
44 | """
45 | confusion_matrix = metrics.confusion_matrix(y_target, y_pred)
46 | cm_display = metrics.ConfusionMatrixDisplay(confusion_matrix=confusion_matrix, display_labels=[False, True])
47 |
48 | cm_display.plot()
49 | plt.title(title)
50 | if save_path:
51 | plt.savefig(save_path)
52 | plt.show()
53 |
54 |
55 | def visualize_metrics(y_target: pd.Series, y_pred: pd.Series, title: str, save_path: Optional[str] = None):
56 | """Visualize metrics in a table
57 |
58 | Args:
59 | y_target (pd.Series): target label of classes
60 | y_pred (pd.Series): predicted label of classes
61 | title (str): Title of the table
62 | save_path (str): save table to file under given path
63 | """
64 | accuracy = metrics.accuracy_score(y_target, y_pred)
65 | precision = metrics.precision_score(y_target, y_pred)
66 | recall = metrics.recall_score(y_target, y_pred)
67 | f1_score = metrics.f1_score(y_target, y_pred)
68 |
69 | beta = 8 # beta > 1 weights recall higher than precision (relevant classes)
70 | fbeta_score = metrics.fbeta_score(y_target, y_pred, beta=beta)
71 |
72 | table = PrettyTable(field_names=["Metrics", "Results [%]"])
73 | table.add_row(["ACCURACY", round(accuracy*100, 1)])
74 | table.add_row(["PRECISION", round(precision*100,1)])
75 | table.add_row(["RECALL", round(recall*100,1)])
76 |
77 | table.add_row(["F_1", round(f1_score*100, 1)])
78 | table.add_row(["F_beta", round(fbeta_score*100,1)])
79 |
80 | # print(title)
81 | print(table)
82 |
83 | if not save_path:
84 | return
85 |
86 | with open(save_path, 'w') as f:
87 | print(title, file=f)
88 | print(table, file=f)
89 |
90 |
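91 | # F_beta sanity check: F_beta = (1 + beta^2) * P * R / (beta^2 * P + R), so with
92 | # beta = 8 the score is dominated by recall. For the ego results reported under
93 | # res/evaluation/metrics (P = 0.846, R = 0.978) this gives roughly 0.976, in line
94 | # with the tabulated 97.5% up to rounding of the inputs.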
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/src/common/__init__.py
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/common/definitions.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Contains definition of LABELS, TRAFFIC LIGHT STATUS and more
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 | import enum
9 |
10 | from interval import interval, inf
11 |
12 | # Sample period between single snapshots (dataset recorded at 10 Hz)
13 | SAMPLE_FREQUENCY = 100e-3 # 100 ms
14 |
15 | # Dataset size of preprocessed datasets: agent mapping, agent motion
16 | DATASET_SIZE = 1000
17 |
18 | # Traffic light states
19 | class TL_STATUS(enum.Enum):
20 | RED = 0
21 | GREEN = 1
22 | YELLOW = 2
23 | YELLOW_RED = 3
24 | UNKNOWN = 4
25 |
26 |
27 | # Interpreted traffic light state of a scene (constant or changing)
28 | class SCENE_TL_STATUS(enum.Enum):
29 | CONST_RED = 0
30 | CONST_GREEN = 1
31 | RED_GREEN = 2
32 | GREEN_RED = 3
33 | UNDEFINED = 4
34 |
35 |
36 | # Possible maneuvers of a lane depending on lane-connectivity
37 | class TURN(enum.Enum):
38 | LEFT = 0
39 | RIGHT = 1
40 | NORMAL = 2
41 |
42 |
43 | # Possible state changes
44 | STATUS_CHANGE = {
45 | "RED_TO_GREEN": 1,
46 | "GREEN_TO_RED": -1
47 | }
48 |
49 |
50 |
51 | ############################################################
52 | #                  Dataset type definitions               #
53 | ############################################################
54 | class DATASET_TYPE(enum.Enum):
55 | # contains all lane segments that belong to a common lane sequence of an intersection branch
56 | EXTENDED = 0
57 |     # contains only the first lane segment (sequence head lane id) that belongs to a lane sequence of an intersection branch
58 |     MINIMAL = 1
59 |     # if you plan to use a model-based approach, the dataset can be split into train and test junctions
60 | TRAIN = 2
61 | TEST = 3
62 | UNDEFINED = 4
63 |
64 |
65 | ############################################################
66 | # Parameters for pattern-based contribution method #
67 | ############################################################
68 |
69 | class AGENT_VELOCITY_STATES(enum.Enum):
70 | # Agent velocity states
71 |
72 |     # Velocity interval for standstill in m/s
73 |     V_STANDSTIL = interval[0, 1]
74 |     # Velocity interval for moving in m/s
75 | V_MOVING = interval[1, inf]
76 |
77 |
78 | class AGENT_ACCELERATION_STATES(enum.Enum):
79 | # Agent acceleration states
80 | ACCELERATING = interval[1, inf]
81 | NO_ACCELERATION = interval[-1, 1]
82 | DECELERATION = interval[-inf, -1]
83 |
84 |
85 | class AGENT_PATTERN(enum.Enum):
86 | # motion patterns are classified into 5 different classes
87 | STATONARY = 0
88 | MOVING = 1
89 | DECELERATION = 2
90 | ACCELERATION = 3
91 | UNDEFINED = 4
92 |
93 |
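# Illustrative sketch of how these intervals can drive a pattern lookup.
# Assumption: pyinterval supports membership tests via `in`; the helper
# below is a hypothetical example, not part of the original module:
#
#   def classify_motion(v: float, a: float) -> AGENT_PATTERN:
#       if v in AGENT_VELOCITY_STATES.V_STANDSTIL.value:
#           return AGENT_PATTERN.STATONARY
#       if a in AGENT_ACCELERATION_STATES.DECELERATION.value:
#           return AGENT_PATTERN.DECELERATION
#       if a in AGENT_ACCELERATION_STATES.ACCELERATING.value:
#           return AGENT_PATTERN.ACCELERATION
#       if v in AGENT_VELOCITY_STATES.V_MOVING.value:
#           return AGENT_PATTERN.MOVING
#       return AGENT_PATTERN.UNDEFINED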
94 | class THRESHOLDS(enum.Enum):
95 |     # Maximum distance from the stop line to a traffic agent's current position to consider motion patterns
96 |     T_DIST = 80 # meter
97 | 
98 |     # A lane sequence can be divided into a slow-down zone and a stop zone (in meter)
99 |     T_STOP_ZONE = 8 # meter
100 |     T_SD_ZONE = 20 # meter
101 | 
102 |     # response time of a vehicle reacting to a green traffic signal change
103 |     T_GREEN_REACTION = 3 # seconds
104 | 
105 |     # response time of a vehicle reacting to a red traffic signal change (some people might cross the intersection at a red turning light)
106 |     T_RED_REACTION = 1 # seconds
107 |
108 |     # Traffic lights to consider from the ego vehicle's POV
109 | T_TL_DIST = 40 # meter
110 |
111 |
112 | ############################################################
113 | # Parameters for rejection method #
114 | ############################################################
115 |
116 | class THRESHOLDS_CONSERVATIVE(enum.Enum):
117 | # Distance to the stop line to detect drive through
118 | STOP_LINE_DIST = 1 # meter
119 | # Velocity combined with the stop line dist to detect drive through
120 | V_DRIVE_THROUGH = 15 # kph
121 | # Velocity combined with the stop line dist to detect drive through on red-turn
122 | V_DRIVE_THROUGH_RIGHT_TURN = 25 # kph
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/common/helper.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Helper functions of any kind
3 | # Lyft Lvl 5 Dataset
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 | import itertools
10 | import os
11 | import pickle
12 | import re
13 | import matplotlib.pyplot as plt
14 | import numpy as np
15 | import pandas as pd
16 |
17 | from typing import Dict, Tuple, TypeVar
18 | from l5kit.configs import load_config_data
19 | from l5kit.data import ChunkedDataset, LocalDataManager
20 | from l5kit.data.filter import filter_tl_faces_by_frames
21 | from scipy.spatial import distance
22 | from src.common.definitions import DATASET_SIZE
23 |
24 |
25 | DATASET_CONFIG = {
26 | "dataset_root_path": "./dataset/",
27 | "intro": "./configs/intro_config.yaml",
28 | "train1": "./configs/train1_config.yaml",
29 | "trainfull": "./configs/trainfull_config.yaml",
30 | "validate": "./configs/validate_config.yaml"
31 | }
32 |
33 |
34 | def init_dataset(config_name: str) -> Tuple[dict, LocalDataManager, ChunkedDataset]:
35 | """Initialize zarr_dataset, Local Datamanager and config
36 |
37 |     Available config names:
38 | * intro
39 | * train1
40 | * trainfull
41 | * validate
42 |
43 | Args:
44 | config_name (str): Config name
45 |
46 | Returns:
47 | Tuple[dict, LocalDataManager, ChunkedDataset]: config, Data Manager, Zarr Dataset
48 | """
49 | os.environ["L5KIT_DATA_FOLDER"] = DATASET_CONFIG["dataset_root_path"]
50 |
51 | cfg = load_config_data(f"./{DATASET_CONFIG[config_name]}")
52 |
53 | dm = LocalDataManager()
54 | dataset_path = dm.require(cfg["val_data_loader"]["key"])
55 | zarr_dataset = ChunkedDataset(dataset_path)
56 | zarr_dataset.open()
57 | return (cfg, dm, zarr_dataset)
58 |
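# Usage sketch (assumes the Lyft zarr splits are stored under ./dataset/
# as expected by DATASET_CONFIG):
#
#   cfg, dm, zarr_dataset = init_dataset("trainfull")
#   print(len(zarr_dataset.scenes))   # number of scenes in the split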
59 |
60 | def load_pickled_df(path: str) -> pd.DataFrame:
61 | """Loads a pickled Dataframe
62 |
63 | Args:
64 | path (str): path to pkl-file
65 | Returns:
66 |         pd.DataFrame: unpickled DataFrame
67 | """
68 | with open(path, 'rb') as f:
69 | return pickle.load(f)
70 |
71 |
72 | def pickle_df(path: str, df: pd.DataFrame):
73 | """Save a Dataframe as pkl-file
74 |
75 | Args:
76 | path (str): store pkl-file to path
77 | """
78 | with open(path, 'wb') as f:
79 | pickle.dump(df, f)
80 |
81 |
82 | T = TypeVar('T')
83 |
84 |
85 | def load_from_pickled_file(path: str) -> T:
86 | """Loads a pickled file
87 |
88 | Args:
89 | path (str): path to pkl-file
90 | Returns:
91 | T: loaded object
92 | """
93 | with open(path, 'rb') as f:
94 | return pickle.load(f)
95 |
96 |
97 | def save_as_pickled_file(path: str, obj: T):
98 | """Save to pickled file
99 |
100 | Args:
101 | path (str): store pkl-file to path
102 | obj (T): object to pickle
103 | """
104 | with open(path, 'wb') as f:
105 | pickle.dump(obj, f)
106 |
107 |
108 | def get_filenames(folder_path: str) -> list:
109 | """Gets the filenames in a specific folder
110 |
111 | Args:
112 | folder_path (str): path to folder
113 |
114 | Returns:
115 | list: filenames of files in folder
116 | """
117 | f_names = []
118 | for (dirpath, dirnames, filenames) in os.walk(folder_path):
119 | f_names.extend(filenames)
120 | break
121 | f_names = sorted(f_names, key=lambda x: int(
122 | re.findall(r'\d+', x)[0]), reverse=False)
123 | return f_names
124 |
125 |
126 | def concat_preprocessed_dataframes(path: str, start_idx: int = 0, end_idx: int = DATASET_SIZE) -> pd.DataFrame:
127 | """Concatenate preprocessed dataframes
128 |
129 | Args:
130 | path (str): dataset path
131 | start_idx (int, optional): start index. Defaults to 0.
132 | end_idx (int, optional): end index. Defaults to DATASET_SIZE.
133 |
134 | Returns:
135 | pd.DataFrame: concatenated dataframe
136 | """
137 | dfs = []
138 | files = get_filenames(path)
139 |
140 | for fname in files[start_idx:end_idx]:
141 | df = load_pickled_df(path+fname)
142 | dfs.append(df)
143 |
144 | return pd.concat(dfs).reset_index(drop=True)
145 |
146 |
147 | def tl_face_status_is_same(face_a: np.ndarray, face_b: np.ndarray):
148 | """Compares the traffic light face states for two given tl faces.
149 |
150 | Args:
151 |         face_a, face_b (np.ndarray): the two traffic light faces to compare
152 | 
153 |     Returns:
154 |         (bool): True if the two face states are identical
155 | """
156 | return np.array_equal(face_a, face_b)
157 |
158 |
159 | def count_conflicting_tl_face_conditions(tl_faces: np.ndarray) -> int:
160 | """Counts conflicting traffic light faces for a a given number of traffic light faces
161 |
162 | Args:
163 | tl_faces (np.ndarray): given traffic light faces
164 |
165 | Returns:
166 | int: number of conflicting tl_faces
167 | """
168 | tl_ids: set = set(tl_faces["traffic_light_id"])
169 | conflicts = 0
170 |
171 | # iterate over traffic lights
172 | for tl_id in tl_ids:
173 |
174 | # filter for specific traffic light
175 | current_tl_faces = tl_faces[tl_faces["traffic_light_id"] == tl_id]
176 | current_face_ids = set(current_tl_faces["face_id"])
177 |
178 | for face_id in current_face_ids:
179 | same_tl_faces = current_tl_faces[current_tl_faces["face_id"] == face_id]
180 | same_tl_face_idx = []
181 | for a_idx, b_idx in itertools.combinations(range(len(same_tl_faces)), 2):
182 | if not tl_face_status_is_same(same_tl_faces[a_idx]["traffic_light_face_status"], same_tl_faces[b_idx]["traffic_light_face_status"]):
183 | # conflicts += 2
184 | same_tl_face_idx.extend([a_idx, b_idx])
185 |
186 | # calc number of conflicts for current tl-face
187 | conflicts += len(same_tl_face_idx)
188 | return conflicts
189 |
190 |
191 | def get_specific_tl_faces(dataset: ChunkedDataset, scene_idx: int, frame_idx: int) -> np.ndarray:
192 | """Get the traffic light faces for a specific frame of a scene
193 |
194 | Args:
195 | scene_idx (int): scene which contains specific frame
196 | frame_idx (int): frame of scene
197 |
198 | Returns:
199 | np.ndarray: tl faces in specific frame
200 | """
201 |
202 | current_scene = dataset.get_scene_dataset(scene_index=scene_idx)
203 | frames = current_scene.frames
204 | tls_frames = filter_tl_faces_by_frames(frames, current_scene.tl_faces)
205 | return tls_frames[frame_idx]
206 |
207 |
208 | def get_closest_point(point: np.ndarray, comparison_points: list) -> np.ndarray:
209 | """Get the closest coparison point to another point
210 |
211 | Args:
212 | point (np.ndarray): origin point
213 | comparison_points (list): list of comparison points
214 |
215 | Returns:
216 | np.ndarray: closest comparison point
217 | """
218 |
219 | dist_list = []
220 |
221 | for p in comparison_points:
222 | dist = distance.euclidean(point, p)
223 | dist_list.append(dist)
224 |
225 | min_dist_index = dist_list.index(min(dist_list))
226 | return comparison_points[min_dist_index]
227 |
228 |
229 | def indices_in_bound(center: np.ndarray, elements_center: np.ndarray, half_extent: float) -> np.ndarray:
230 | """
231 | Get indices of elements for which the bounding box described by bounds intersects the one defined around
232 | center (square with side 2*half_side)
233 |
234 | (element_x - center_x)² + (element_y - center_y)² < half_extent²
235 |
236 | Args:
237 |         center (np.ndarray): XY of the center
238 | elements_center (np.ndarray): array of shape Nx2 [[x1,y1], ...,[xn,yn]]
239 | half_extent (float): half the side of the bounding box centered around center
240 |
241 | Returns:
242 | np.ndarray: indices of elements inside radius from center
243 | """
244 | center_x, center_y = center
245 |
246 | mask = (elements_center[:, 0] - center_x)**2 + \
247 | (elements_center[:, 1] - center_y)**2 <= half_extent**2
248 | return np.nonzero(mask)
249 |
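# Toy example of the radius check (made-up coordinates):
#
#   centers = np.array([[0.0, 0.0], [3.0, 4.0], [10.0, 0.0]])
#   indices_in_bound(np.array([0.0, 0.0]), centers, 5.0)
#   # -> (array([0, 1]),): (3, 4) lies exactly on the radius, (10, 0) is outside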
250 |
251 | def calculate_center(xs: np.ndarray, ys: np.ndarray) -> np.ndarray:
252 | """Calculate center of two given points
253 |
254 | Args:
255 | xs (np.ndarray): x-pos of points
256 | ys (np.ndarray): y-pos of points
257 |
258 | Returns:
259 | np.ndarray: center point
260 | """
261 | c1 = xs[0] + 0.5 * (xs[1]-xs[0])
262 | c2 = ys[0] + 0.5 * (ys[1]-ys[0])
263 |
264 | return np.array([c1, c2])
265 |
266 |
267 | def calculate_distance(p1: np.ndarray, p2: np.ndarray) -> float:
268 |     """Calculates the Euclidean distance between two given points
269 |
270 | Args:
271 | p1 (np.ndarray): coordinates point 1
272 | p2 (np.ndarray): coordinates point 2
273 |
274 | Returns:
275 |         float: distance
276 | """
277 | return np.linalg.norm(p2-p1)
278 |
279 |
280 | def filter_unique_labels(ax: plt.Axes) -> Tuple:
281 | """make sure only unique labels show up in matplotlib legend
282 |
283 | Args:
284 | ax (plt.Axes): plot axes
285 |
286 | Returns:
287 | Tuple: handles and labels
288 | """
289 | handles, labels = ax.get_legend_handles_labels()
290 | i = np.arange(len(labels))
291 | filter = np.array([]) # set up a filter
292 |
293 | unique_labels = list(set(labels))
294 | for ul in unique_labels:
295 | # find the first instance of this label and add its index to the filter
296 | filter = np.append(filter, [i[np.array(labels) == ul][0]])
297 |
298 | handles = [handles[int(f)] for f in filter]
299 | labels = [labels[int(f)] for f in filter]
300 | return handles, labels
301 |
302 |
303 | def get_pickled_dataset_index(scene_idx: int, cfg: dict) -> int:
304 | """Get index for traffic light and agent mapping dataset
305 | that needs to be loaded for a specific scene.
306 |
307 | Args:
308 | scene_idx (int): index of a scene
309 | cfg (dict): dataset config
310 |
311 | Returns:
312 | int: common index of pickle file
313 | """
314 | DATASET_FILES = 1000
315 | tl_scenes_path = cfg["preprocessed_data_loader"]["tls_scene_indices"]
316 | with open(tl_scenes_path, 'rb') as f:
317 | tls_scene_indices = pickle.load(f)
318 |
319 | splitted_scenes = np.array_split(tls_scene_indices, DATASET_FILES)
320 | for idx, group in enumerate(splitted_scenes):
321 | if scene_idx >= group[0] and scene_idx <= group[-1]:
322 | return idx
323 |
324 | raise ValueError(f"Index for scene {scene_idx} not found!")
325 |
326 |
327 | def reverse_dict_of_sets(dict_to_reverse: Dict[T, set]) -> Dict[T, set]:
328 | """Revese key value pairs of dictionary that stand in a specific relation
329 |
330 | Args:
331 | dict_to_reverse (Dict["str", set]): dictonary of sets that needs to be reversed
332 |
333 | Returns:
334 | Dict["str", set]: reversed dict
335 | """
336 | reversed_dict = dict()
337 | for lane_id, tl_set in dict_to_reverse.items():
338 | for tl_id in tl_set:
339 | if tl_id not in reversed_dict.keys():
340 | reversed_dict[tl_id] = {lane_id}
341 | else:
342 | reversed_dict[tl_id] = reversed_dict[tl_id].union({
343 | lane_id})
344 |
345 | return reversed_dict
346 |
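# Example with hypothetical ids:
#
#   reverse_dict_of_sets({"lane_1": {"tl_a", "tl_b"}, "lane_2": {"tl_a"}})
#   # -> {"tl_a": {"lane_1", "lane_2"}, "tl_b": {"lane_1"}}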
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/common/utils.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Utility functions to handle Aggregate, Filter, Fetch Data from
3 | # Lyft Lvl 5 Dataset
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 | from typing import List, Optional
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import pandas as pd
13 | import seaborn as sns
14 |
15 | from l5kit.data import ChunkedDataset
16 | from l5kit.data.zarr_dataset import AGENT_DTYPE
17 | from l5kit.geometry import rotation33_as_yaw
18 | from l5kit.rasterization.box_rasterizer import (EGO_EXTENT_HEIGHT,
19 | EGO_EXTENT_LENGTH,
20 | EGO_EXTENT_WIDTH,
21 | get_box_world_coords)
22 | from l5kit.rasterization.semantic_rasterizer import indices_in_bounds
23 | from loguru import logger
24 | from shapely.geometry import Polygon
25 | from src.common.definitions import (SAMPLE_FREQUENCY, STATUS_CHANGE)
26 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
27 | from src.common.helper import calculate_distance
28 | from tqdm import tqdm
29 |
30 |
31 |
32 | def get_ego_as_agent(ego_pos: np.ndarray, ego_rotation: np.ndarray) -> np.ndarray:
33 | """Transfroms ego numpy array into agents numpy array.
34 |
35 | Args:
36 | ego_pos (np.ndarray): ego position
37 | ego_rotation (np.ndarray): ego rotation
38 |
39 | Returns:
40 | np.ndarray: of type agent
41 | """
42 | ego_agent = np.zeros(1, dtype=AGENT_DTYPE)
43 | ego_agent[0]["centroid"] = ego_pos[:2]
44 | ego_agent[0]["yaw"] = rotation33_as_yaw(ego_rotation)
45 | ego_agent[0]["extent"] = np.asarray(
46 | (EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
47 | return ego_agent
48 |
49 |
50 | def get_ego_rect(ego_pos: np.ndarray, ego_rotation: np.ndarray) -> Polygon:
51 | """Returns the Polygon of the ego AV
52 |
53 | Args:
54 | ego_pos (np.ndarray): ego position
55 | ego_rotation (np.ndarray): ego rotation
56 |
57 | Returns:
58 | Polygon: bounding box
59 | """
60 | ego_agent = get_ego_as_agent(ego_pos, ego_rotation)
61 | box_world_coords = get_box_world_coords(ego_agent)
62 | points = box_world_coords[0, :, :]
63 | return Polygon((points[0], points[1], points[2], points[3]))
64 |
65 |
66 | def get_agent_rect(agent: np.ndarray) -> Polygon:
67 | """Returns the Polygon of the ego an agent rect
68 | Args:
69 | agent (np.ndarray): agent array
70 |
71 | Returns:
72 | Polygon: agent bounding box
73 | """
74 | box_world_coords = get_box_world_coords(np.array([agent]))
75 | points = box_world_coords[0, :, :]
76 | return Polygon((points[0], points[1], points[2], points[3]))
77 |
78 |
79 | def get_lane_poly(lane_id: str, mapAPI: ExtendedMapAPI) -> Polygon:
80 | """Generates a shapely Polygon out of the lane coordinates of a given lane
81 |
82 | Args:
83 | lane_id (str): element id of lane
84 | mapAPI (ExtendedMapAPI): map API
85 |
86 | Returns:
87 | Polygon: Lane Polygon
88 | """
89 | lane_coords = mapAPI.get_lane_coords(lane_id)
90 |
91 | lane_poly = np.concatenate(
92 | (lane_coords["xyz_left"][:, :2], lane_coords["xyz_right"][::-1, :2]))
93 | return Polygon(lane_poly)
94 |
95 |
96 | def visualize_mapping(ego_rect: Polygon, lane_poly: Polygon):
97 | """Visualise the mapping of a eg vehicle and a lane
98 |
99 | Args:
100 | point (Point): ego xy-pos
101 | poly (Polygon): lane shape
102 | """
103 |
104 | plt.plot(*lane_poly.exterior.xy)
105 | plt.plot(*ego_rect.exterior.xy)
106 | plt.title("Check if ego on lane")
107 | plt.show()
108 |
109 |
110 | def visualize_lane(lane_id: str, mapAPI: ExtendedMapAPI):
111 | """Visualise a lane
112 |
113 | Args:
114 | lane_id (str): id of lane element
115 | """
116 | lane_poly: Polygon = get_lane_poly(lane_id, mapAPI)
117 | plt.plot(*lane_poly.exterior.xy)
118 | plt.title(f"Lane id: {lane_id}")
119 | plt.show()
120 |
121 | def visualize_tl_status(tl_status: pd.DataFrame, scene_idx: int, specific_ids: Optional[list] = None, path: Optional[str] = None):
122 | fig, ax = plt.subplots(figsize=(10, 4))
123 |
124 | tl_mapping_scene_df = tl_status[tl_status["scene_idx"]==scene_idx]
125 | if specific_ids is None:
126 | traffic_light_ids = tl_mapping_scene_df["tl_id"].unique()
127 | else:
128 | traffic_light_ids = specific_ids
129 |
130 | for tl_id in traffic_light_ids:
131 | current_tl_mapping_df = tl_mapping_scene_df[tl_mapping_scene_df["tl_id"] == tl_id]
132 | sns.lineplot(x="frame_idx", y="tl_status", legend=True,
133 | ax=ax, data=current_tl_mapping_df, label=tl_id)
134 |
135 | plt.title(f"Tl State in Scene: {scene_idx}")
136 | plt.xlabel("Frame")
137 | plt.ylabel("State")
138 | plt.legend(title="Tl id")
139 | plt.grid()
140 | plt.ylim((-0.5,4.5))
141 | if path: plt.savefig(path, format="png", dpi=1000)
142 | plt.show()
143 |
144 | def key_with_max_val(d: dict):
145 | """Returns the key with the max value
146 | """
147 | v = list(d.values())
148 | k = list(d.keys())
149 | return k[v.index(max(v))]
150 |
151 |
152 | def map_agent_to_lanes(ego_or_agent: np.ndarray, mapAPI: ExtendedMapAPI, scene_idx: int, frame_idx: int, is_ego: bool) -> List:
153 | """Maps given position of agents to a lane. Currently only implemented for ego. TODO: implement for other agents with thei bounding box
154 |
155 | Args:
156 | agent_pos (np.ndarray): position of agent
157 | mapAPI (ExtendedMapAPI): API for semantic map interaction
158 |
159 | Returns:
160 | str: lane id of current AV position
161 | """
162 | SEARCH_RADIUS = 2
163 |
164 | if is_ego:
165 | agent_pos = ego_or_agent["ego_translation"]
166 | agent_rotation = ego_or_agent["ego_rotation"]
167 | agent_rect = get_ego_rect(agent_pos, agent_rotation)
168 | else:
169 |         # transform centroid to XYZ format
170 | agent_pos = np.append(ego_or_agent["centroid"], 0)
171 | agent_rect = get_agent_rect(ego_or_agent)
172 |
173 | lane_ids: list = mapAPI.bounds_info["lanes"]["ids"]
174 | lane_indices = indices_in_bounds(
175 | agent_pos[:2], mapAPI.bounds_info["lanes"]["bounds"], SEARCH_RADIUS)
176 | on_lanes = []
177 | intersection_area = dict() # key: lane_id, value: intersection areas
178 | for lane_idx in lane_indices:
179 | lane_id = lane_ids[lane_idx]
180 | lane_coords = mapAPI.get_lane_coords(lane_id)
181 |
182 | lane_poly_coords = np.concatenate(
183 | (lane_coords["xyz_left"][:, :2], lane_coords["xyz_right"][::-1, :2]))
184 | lane_poly = Polygon(lane_poly_coords)
185 |
186 | if agent_rect.centroid.within(lane_poly):
187 | on_lanes.append(lane_id)
188 |
189 | elif agent_rect.intersects(lane_poly):
190 | try:
191 | intersection_area[lane_id] = agent_rect.intersection(
192 | lane_poly).area
193 | except ValueError as e:
194 |                 logger.info(f"Intersection not possible: {e}")
195 |
196 | if len(on_lanes):
197 | return on_lanes
198 | if len(on_lanes) == 0 and intersection_area: # dict not empty and intersections found
199 | return list(intersection_area.keys())
200 |
201 | logger.debug(
202 | f"Mapping for Agent Pos: {agent_pos[0:2]} was not possible in Scene: {scene_idx}, Frame: {frame_idx}!")
203 | return []
204 |
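# Geometry sketch of the two-stage mapping above (shapely, made-up
# coordinates): lanes containing the agent's centroid win; otherwise every
# lane with a non-empty overlap is returned.
#
#   lane = Polygon([(0, 0), (4, 0), (4, 2), (0, 2)])
#   agent = Polygon([(3.5, 0.5), (5.5, 0.5), (5.5, 2.5), (3.5, 2.5)])
#   agent.centroid.within(lane)     # False -> fall back to the overlap test
#   agent.intersects(lane)          # True
#   agent.intersection(lane).area   # 0.75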
205 |
206 | def filter_tl_scenes(zarr_dataset: ChunkedDataset) -> list:
207 | """Filters out the traffic light scenes and returns the scene idx
208 |
209 | Args:
210 | zarr_dataset (ChunkedDataset): given dataset
211 |
212 | Returns:
213 | list: indices of traffic light scenes
214 | """
215 |
216 | scene_idxs_with_tfls = []
217 | for scene_idx in tqdm(range(len(zarr_dataset.scenes)), desc="Iterate through scenes to get scene idxs with tfls"):
218 | current_scene = zarr_dataset.get_scene_dataset(scene_index=scene_idx)
219 |
220 | # add scenes with tfls
221 | if len(current_scene.tl_faces) > 0:
222 | scene_idxs_with_tfls.append(scene_idx)
223 |
224 | return scene_idxs_with_tfls
225 |
226 |
227 | def filter_tl_status_change(tl_mapping: pd.DataFrame, status_change: str) -> pd.DataFrame:
228 | """Filters out the changed rows of a specific traffic light status change
229 |
230 | Args:
231 | tl_mapping (pd.DataFrame): dataframe with traffic light mapping
232 | status_change (str): the relevant status change
233 |
234 | Returns:
235 |         pd.DataFrame: filtered dataframe with scene and frame index of the status change
236 | """
237 |
238 | return tl_mapping[tl_mapping["tl_status"].diff() == STATUS_CHANGE[status_change]]
239 |
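# Sketch of the diff() trick on a toy status series (0 = RED, 1 = GREEN):
#
#   s = pd.Series([0, 0, 1, 1, 0])
#   s.diff()         # NaN, 0.0, 1.0, 0.0, -1.0
#   s.diff() == 1    # True exactly at the RED -> GREEN transition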
240 |
241 | def detect_active_status_change(tl_mapping: pd.DataFrame) -> pd.DataFrame:
242 | """Returns the index of a frame when the traffic light status changes
243 |
244 | Args:
245 |         tl_mapping (pd.DataFrame): dataframe with traffic light mapping
246 | 
247 |     Returns:
248 |         pd.DataFrame: state changes with scene and frame index per traffic light
249 | """
250 | tl_mapping = tl_mapping.sort_values(["tl_id", "scene_idx", "frame_idx"])
251 | unique_tl = tl_mapping["tl_id"].unique()
252 | status_change = []
253 |
254 | for tl_id in unique_tl:
255 | current_tl_mapping = tl_mapping[tl_mapping["tl_id"] == tl_id]
256 | red_to_green_change = current_tl_mapping[current_tl_mapping["tl_status"].diff(
257 | ) == 1]
258 | green_to_red_change = current_tl_mapping[current_tl_mapping["tl_status"].diff(
259 | ) == -1]
260 |
261 | for idx, row in red_to_green_change.iterrows():
262 | status_change.append([row["scene_idx"], row["frame_idx"],
263 | row["tl_id"], STATUS_CHANGE["RED_TO_GREEN"], "RED_TO_GREEN"])
264 |
265 | for idx, row in green_to_red_change.iterrows():
266 | status_change.append([row["scene_idx"], row["frame_idx"],
267 | row["tl_id"], STATUS_CHANGE["GREEN_TO_RED"], "GREEN_TO_RED"])
268 |
269 | return pd.DataFrame(
270 | status_change,
271 | columns=["scene_idx", "frame_idx", "tl_id",
272 | "status_change", "status_change_str"]
273 | )
274 |
275 |
276 | def calculate_ego_velocity(xyz: pd.DataFrame, time: pd.Series) -> pd.Series:
277 | """Calculate velocity for given xyz position between timestamps
278 |
279 | Args:
280 |         xyz (pd.DataFrame): xyz position
281 |         time (pd.Series): timestamps in seconds
282 | Returns:
283 |         pd.Series: velocity vector, first element NaN!
284 | """
285 |
286 | X1 = xyz.to_numpy()
287 | X0 = xyz.shift(1).to_numpy()
288 | X_d = (X1 - X0)
289 | X_d = X_d[1:, ]
290 | dist = np.sum(X_d**2, axis=1)**0.5
291 | dist = np.insert(dist, 0, np.NaN)
292 | return dist / (time-time.shift(1))
293 |
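# Toy check of the finite-difference velocity (10 Hz samples, made-up
# positions):
#
#   xyz = pd.DataFrame({"x": [0.0, 1.0, 3.0], "y": 0.0, "z": 0.0})
#   t = pd.Series([0.0, 0.1, 0.2])
#   calculate_ego_velocity(xyz, t)   # NaN, 10.0, 20.0 (m/s)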
294 |
295 | def calculate_agent_velocity(agent_velocity_xy: np.ndarray) -> np.ndarray:
296 | """Calculates the velocity of N agent
297 |
298 | Args:
299 | agent_velocity_xy (np.ndarray): velocity vector of N agent in x and y direction
300 |
301 | Returns:
302 | float: scalar velocity of agents
303 | """
304 | return np.sum(agent_velocity_xy**2, axis=1)**0.5
305 |
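# Example: the row-wise Euclidean norm of the xy velocity components.
#
#   calculate_agent_velocity(np.array([[3.0, 4.0], [0.0, 2.0]]))   # array([5., 2.])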
306 |
307 | def calcualte_agent_acceleration(agent_velocity_frame: pd.DataFrame) -> pd.Series:
308 | """Calcualte the acceleration of the agents
309 |
310 | Args:
311 | agent_velocity_frame (pd.DataFrame): velcity frame with time information of traffic agents
312 |
313 | Returns:
314 | pd.Series: acceleration of traffic agents
315 | """
316 | unique_agent_ids = agent_velocity_frame["agent_id"].unique()
317 | acceleration_list = []
318 | for agent_id in unique_agent_ids:
319 | current_agent_velocity_df = agent_velocity_frame[agent_velocity_frame["agent_id"] == agent_id]
320 | accel_series = (current_agent_velocity_df["v"]-current_agent_velocity_df["v"].shift(1)) / \
321 | ((current_agent_velocity_df["time"] -
322 | current_agent_velocity_df["time"].shift(1))) # .rolling(25, center=True).mean() # & apply moving average
323 | acceleration_list.append(accel_series)
324 | return pd.concat(acceleration_list)
325 |
326 |
327 | def get_ego_movement_frame(ego_translation: np.ndarray, scene_idx: int, num_frames: int) -> pd.DataFrame:
328 | """Generate Dataframe with velocity and time information of ego vehicle
329 |
330 | Args:
331 |         ego_translation (np.ndarray): array of xyz positions of the ego vehicle
332 | scene_idx (int): index of current scene
333 | num_frames (int): number of frames in scene
334 |
335 | Returns:
336 | (pd.Dataframe): velocity dataframe of ego vehicle
337 | """
338 | ego_velocity_df = pd.DataFrame(ego_translation, columns=["x", "y", "z"])
339 | time = np.arange(
340 | 0, num_frames*SAMPLE_FREQUENCY, SAMPLE_FREQUENCY)
341 | ego_velocity_df["time"] = time[:num_frames]
342 | ego_velocity_df.insert(0, "scene_idx", [scene_idx]*num_frames)
343 | ego_velocity_df.insert(1, "frame_idx", np.arange(0, num_frames))
344 | ego_velocity_df.insert(2, "agent_id", 0)
345 | ego_velocity_df["v"] = calculate_ego_velocity(ego_velocity_df[["x", "y", "z"]],
346 | ego_velocity_df["time"]
347 | )
348 | ego_velocity_df["a"] = (ego_velocity_df["v"]-ego_velocity_df["v"].shift(1)) / \
349 | ((ego_velocity_df["time"]-ego_velocity_df["time"].shift(1))
350 | ) # .rolling(25, center=True).mean() # & apply moving average
351 | ego_velocity_df["centroid"] = ego_velocity_df.apply(
352 | lambda row: np.array([row["x"], row["y"]]), axis=1)
353 | ego_velocity_df = ego_velocity_df.drop(["x", "y", "z"], axis=1)
354 | return ego_velocity_df
355 |
356 |
357 | def get_agents_velocity_frame(agent_frames: np.ndarray, scene_idx: int) -> pd.DataFrame:
358 | """Generate Dataframe with velocity and time information of surrounding traffic agents
359 |
360 | Args:
361 | agent_frames (np.ndarray): agent frame data with velocity information
362 | scene_idx (int): index of current scene
363 |
364 | Returns:
365 | (pd.Dataframe): velocity dataframe of agent vehicles
366 | """
367 | agent_velocity_df = pd.DataFrame(
368 | columns=["scene_idx", "frame_idx", "agent_id", "time", "v", "centroid"])
369 | AGENT_FRAMES = len(agent_frames)
370 |
371 | for agent_frame_idx in range(AGENT_FRAMES):
372 | agents = agent_frames[agent_frame_idx]
373 |
374 | velocity = calculate_agent_velocity(agents["velocity"])
375 |
376 | time = agent_frame_idx*SAMPLE_FREQUENCY
377 | agent_velocity = np.column_stack(([scene_idx]*len(agents), [agent_frame_idx]*len(agents),
378 | agents["track_id"].tolist(), [time]*len(agents), velocity.tolist(), [0]*len(agents)))
379 |
380 | helper_df = pd.DataFrame(
381 | agent_velocity, columns=["scene_idx", "frame_idx", "agent_id", "time", "v", "centroid"])
382 | helper_df["centroid"] = pd.Series(agents["centroid"].tolist())
383 |
384 |         agent_velocity_df = pd.concat(
385 |             [agent_velocity_df, helper_df], ignore_index=True)
386 |
387 | agent_velocity_df = agent_velocity_df.astype(
388 | {"scene_idx": "int", "frame_idx": "int", "agent_id": "int"})
389 | return agent_velocity_df
390 |
391 |
392 | def get_movement_frame(scene_idx: int, frames: np.ndarray, agent_frames: np.ndarray) -> pd.DataFrame:
393 | """Get for a specific scene the a agent and ego movement. This includes, velocity, acceleration and centroid
394 | for each frame in the scene.
395 |
396 | Args:
397 | scene_idx (int): index of scene
398 | frames (np.ndarray): frames
399 | agent_frames (np.ndarray): agent frames
400 |
401 | Returns:
402 | pd.DataFrame: movement dataframe
403 | """
404 |
405 | ego_movement_df = get_ego_movement_frame(frames["ego_translation"],
406 | scene_idx,
407 | len(frames)
408 | )
409 | agent_movement_df = get_agents_velocity_frame(agent_frames, scene_idx)
410 | agent_movement_df["a"] = calcualte_agent_acceleration(agent_movement_df)
411 |
412 | movement_df = pd.concat(
413 | [ego_movement_df, agent_movement_df], ignore_index=True)
414 | return movement_df
415 |
416 |
417 | def merge_agent_lane_mapping_and_branch_frame(agent_lane_mapping_df: pd.DataFrame, extended_branch_df: pd.DataFrame) -> pd.DataFrame:
418 | """Merge agent lane mapping dataframe with extended branch dataframe
419 |
420 | Args:
421 | agent_lane_mapping_df (pd.DataFrame): agent to lane mapping dataframe
422 | extended_branch_df (pd.DataFrame): branch dataframe with lane sequences
423 |
424 | Returns:
425 | pd.DataFrame: merged dataframe
426 | """
427 | agent_lane_mapping_df = agent_lane_mapping_df.explode('lane_ids')
428 | agent_lane_mapping_df.rename(columns={'lane_ids': 'lane_id'}, inplace=True)
429 |
430 | agent_mapping_branch_df = agent_lane_mapping_df.join(
431 | extended_branch_df.set_index('lane_id'), on=['lane_id'])
432 | agent_mapping_branch_df = agent_mapping_branch_df.dropna()
433 | agent_mapping_branch_df = agent_mapping_branch_df.drop_duplicates()
434 | return agent_mapping_branch_df
435 |
436 |
437 | def agent_on_branch(current_agent_mapping: pd.DataFrame, branch_id: str) -> bool:
438 | """Check if agent on branch with given id
439 |
440 | Args:
441 |         current_agent_mapping (pd.Dataframe): mapping of current agent to lane
442 |         branch_id (str): branch id
443 |
444 | Returns:
445 | bool: checks if on branch
446 | """
447 | return len(current_agent_mapping[current_agent_mapping["branch_id"] == branch_id]) > 0
448 |
449 |
450 | def on_lane_sequence(current_agent_mapping: pd.DataFrame) -> bool:
451 | """Check if the agent drives on a lane that is part of a lane sequence of a branch.
452 | -1 indicates that the lane segment is not part of a sequence!
453 |
454 | Args:
455 | current_agent_mapping (pd.Dataframe): mapping of current agent to lane
456 |
457 | Returns:
458 | bool: true if it has common lane idx
459 | """
460 | return len(current_agent_mapping[current_agent_mapping["lane_sequence_id"] != -1.0]) > 0
461 |
462 |
463 | def above_given_track_length(agent_vel: pd.DataFrame, threshold: int) -> bool:
464 | """Check if agents track length is above a given threshold
465 |
466 | Args:
467 |         agent_vel (pd.DataFrame): agent velocity dataframe with time information
468 | threshold (int): minimum given track length
469 |
470 | Returns:
471 | bool: is track length over threshold?
472 | """
473 | start_time = agent_vel["time"].iloc[0]
474 | end_time = agent_vel["time"].iloc[-1]
475 | return (end_time-start_time) >= threshold
476 |
477 |
478 | def above_given_speed(agent_vel: pd.DataFrame, threshold: int) -> bool:
479 | """Check if agents speed is above a given threshold
480 |
481 | Args:
482 |         agent_vel (pd.DataFrame): agent velocity dataframe
483 |         threshold (int): speed threshold
484 | 
485 |     Returns:
486 |         bool: is the agent's speed above the threshold?
487 | """
488 | return (agent_vel["v"] >= threshold).any()
489 |
490 |
491 | def get_tl_distance(mapAPI: ExtendedMapAPI, lane_head_id: str, centroid: np.ndarray) -> float:
492 | """Calculate for each agent the distance towards the stopping line depending on its current lane sequence
493 |
494 | Args:
495 | mapAPI (ExtendedMapAPI): map Api
496 | lane_head_id (str): head lane id of current lane sequence
497 | centroid (np.ndarray): agent's current centroid
498 |
499 | Returns:
500 |         float: distance to stopping line (NaN if no head lane id is given)
501 | """
502 | if not isinstance(lane_head_id, str):
503 | return np.NaN
504 | return calculate_distance(mapAPI.get_lane_stopping_point(lane_head_id), centroid)
505 |
506 |
507 | def calculate_tl_distance(mapAPI: ExtendedMapAPI, motion_df: pd.DataFrame) -> pd.DataFrame:
508 |     """Append each agent's distance to the stopping line of its lane sequence head"""
509 | motion_df["dist"] = motion_df.apply(lambda x: get_tl_distance(
510 | mapAPI, x.get("sequence_head_lane_id"), x.get("centroid")), axis=1)
511 | return motion_df
512 |
513 |
514 |
515 | def calculate_lead_vehicle(motion_df: pd.DataFrame) -> pd.DataFrame:
516 | """Checks if vehicle is the first vehicle infront of the stopping line.
517 |
518 | Args:
519 | motion_df (pd.DataFrame): dataframe with motion data and distance information towards lead vehicle
520 |
521 | Returns:
522 | pd.DataFrame: _description_
523 | """
524 | # calculate lead indices
525 | lead_indices = motion_df.groupby(["scene_idx", "frame_idx", "sequence_head_lane_id"])["dist"].idxmin()
526 | motion_df["is_lead"] = False
527 | motion_df.loc[lead_indices.values.tolist(), "is_lead"] = True
528 | return motion_df
529 |
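# Minimal sketch (hypothetical rows): per scene, frame and approach lane,
# the agent with the smallest distance to the stop line gets the is_lead flag.
#
#   df = pd.DataFrame({"scene_idx": [0, 0], "frame_idx": [0, 0],
#                      "sequence_head_lane_id": ["ln1", "ln1"],
#                      "agent_id": [1, 2], "dist": [12.0, 5.0]})
#   calculate_lead_vehicle(df)["is_lead"].tolist()   # [False, True]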
530 |
531 | def calculate_traffic_density(motion_df: pd.DataFrame) -> pd.DataFrame:
532 | """Calculate traffic density of a lane sequence
533 |
534 | Args:
535 | motion_df (pd.DataFrame): motion dataframe
536 |
537 | Returns:
538 | pd.DataFrame: motion dataframe with appended density
539 | """
540 | motion_df["density"] = np.NaN
541 | density = motion_df.groupby(["scene_idx", "frame_idx", "sequence_head_lane_id"])['agent_id'].transform('count')
542 | motion_df["density"] = density
543 | return motion_df.astype({"density":"int"})
544 |
545 |
546 | def calculate_elapsed_time_in_tl_state(scene_idx: int, frame_idx: int, status_change_df: pd.DataFrame, tl_id: str) -> float:
547 | """Calcualtes the time elapsed after a state change of a traffic light
548 |
549 | Args:
550 | frame_idx (int): index of current frame
551 | status_change_df (pd.DataFrame): tl state change information
552 |
553 | Returns:
554 | float: elapsed time
555 | """
556 | change_frame_indices = status_change_df[(status_change_df["scene_idx"] == scene_idx) & (
557 | status_change_df["tl_id"] == tl_id)]["frame_idx"].values
558 | change_frame_distance = frame_idx-change_frame_indices
559 | positive_change_frame_distance = change_frame_distance[change_frame_distance >= 0]
560 |
561 | # minimum elapsed time in traffic light state since the beginning of the scene if no state change was seen
562 | if len(positive_change_frame_distance) == 0:
563 | return frame_idx * SAMPLE_FREQUENCY
564 | # otherwise calculate time between last state change
565 | idx = positive_change_frame_distance.argmin()
566 | distance = positive_change_frame_distance[idx]
567 | return distance * SAMPLE_FREQUENCY
568 |
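# Worked example with made-up numbers: a change of the queried light logged
# at frame 50 and a current frame of 80 yield (80 - 50) * 0.1 s = 3.0 s.
# Without any preceding change in the scene, the frame index alone bounds
# the state age from below: frame 80 -> 80 * 0.1 s = 8.0 s.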
569 |
570 | def generate_tl_lane_relation_lookup(ground_truth: pd.DataFrame) -> dict:
571 |     """Generate a fast lookup table for the traffic light to lane relation using the ground truth relation
572 | 
573 |     Args:
574 |         ground_truth (pd.DataFrame): ground truth relation of traffic lights and lanes
575 | 
576 |     Returns:
577 |         dict: lookup
578 |     """
579 |     ground_truth_dict = dict()
580 |     for _, row in ground_truth.iterrows():
581 |         key = row["tl_id"] + row["lane_id"]
582 |         ground_truth_dict[key] = row["relation"]
583 |     return ground_truth_dict
584 |
585 |
586 | def get_relation_by_lookuptable(tl_id: str, lane_id: str, relation_lookup: dict) -> int:
587 | """Get the traffic light to lane relation (1 or 0) of a specific traffic light and a lane by using a lookup table.
588 |
589 | Args:
590 | tl_id (str): traffic light id
591 | lane_id (str): lane id
592 | relation_lookup (dict): lookup
593 |
594 | Returns:
595 | int: relation of traffic light and lane
596 | """
597 |     key = tl_id + lane_id
598 |     if key in relation_lookup:
599 |         return relation_lookup[key]
600 | return -2
601 |
602 |
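# Usage sketch with hypothetical ids; pairs missing from the ground truth
# fall through to -2:
#
#   gt = pd.DataFrame({"tl_id": ["tl_a"], "lane_id": ["ln1"], "relation": [1]})
#   lookup = generate_tl_lane_relation_lookup(gt)
#   get_relation_by_lookuptable("tl_a", "ln1", lookup)   # 1
#   get_relation_by_lookuptable("tl_a", "ln2", lookup)   # -2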
603 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/dataset_operations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/src/dataset_operations/__init__.py
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/dataset_operations/branch_dataset.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Helper Class to Load Junction, branch and Lane Dataset
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 | from typing import Optional
9 |
10 | import pandas as pd
11 | from src.common.definitions import DATASET_TYPE
12 |
13 |
14 | class BranchDataset:
15 | def __init__(self) -> None:
16 | self.dataset_type = DATASET_TYPE.UNDEFINED
17 | self.dataset: Optional[pd.DataFrame] = None
18 |
19 | def get_branch_dataset(self, dataset_type: DATASET_TYPE) -> pd.DataFrame:
20 | """Loads specific branch dataset from file system.
21 |
22 | Args:
23 |             dataset_type (DATASET_TYPE): which type of dataset to load
24 | """
25 | # Check if dataset already loaded -> then it can directly be returned
26 | if self.dataset is not None and dataset_type.value == self.dataset_type.value:
27 | return self.dataset
28 |
29 | if dataset_type.value == DATASET_TYPE.EXTENDED.value:
30 | path = "./dataset/preprocessed_info/extended_junction_branch_to_lane.csv"
31 | return pd.read_csv(path, encoding="ascii")
32 | elif dataset_type.value == DATASET_TYPE.MINIMAL.value:
33 | path = "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
34 | return pd.read_csv(path, encoding="ascii")
35 | elif dataset_type.value == DATASET_TYPE.TRAIN.value:
36 | path = "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
37 | minimal_branch_df = pd.read_csv(path, encoding="ascii")
38 | TRAIN_JUNCTIONS = minimal_branch_df["junction_id"].unique()[:8]
39 | return minimal_branch_df[minimal_branch_df["junction_id"].isin(TRAIN_JUNCTIONS)]
40 | elif dataset_type.value == DATASET_TYPE.TEST.value:
41 | path = "./dataset/preprocessed_info/minimal_junction_branch_to_lane.csv"
42 | minimal_branch_df = pd.read_csv(path, encoding="ascii")
43 | TEST_JUNCTIONS = minimal_branch_df["junction_id"].unique()[8:]
44 | return minimal_branch_df[minimal_branch_df["junction_id"].isin(TEST_JUNCTIONS)]
45 | else:
46 |             raise ValueError(
47 |                 f"Please check given branch type to load: {dataset_type.name}")
48 |
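# Usage sketch (assumes the preprocessed CSVs exist under
# ./dataset/preprocessed_info/):
#
#   from src.common.definitions import DATASET_TYPE
#   from src.dataset_operations.branch_dataset import BranchDataset
#
#   branches = BranchDataset().get_branch_dataset(DATASET_TYPE.MINIMAL)
#   print(branches["junction_id"].nunique())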
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/dataset_operations/preprocessing.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Helper functions to perform dataset transformation and aggregation of
3 | # Lyft Dataset
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 | import pandas as pd
10 | import numpy as np
11 | import itertools
12 | from tqdm import tqdm
13 |
14 | from src.dataset_operations.junction_dataset import JunctionDataset
15 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
16 | from src.common.definitions import DATASET_SIZE
17 | from src.common.helper import load_pickled_df, pickle_df
18 | from src.dataset_operations.pipeline import load_preprocessed_datasets
19 |
20 | from src.common.definitions import SCENE_TL_STATUS, DATASET_TYPE
21 |
22 | def generate_lane_tl_relation(mapAPI: ExtendedMapAPI) -> dict:
23 | """Generate the lane to traffic light relation
24 |
25 | Args:
26 | mapAPI (ExtendedMapAPI): map api
27 |
28 | Returns:
29 | dict: lane to traffic light association
30 | """
31 | lane_tls_dict = dict()
32 | lane_ids: list = mapAPI.bounds_info["lanes"]["ids"]
33 |
34 | for lane_id in lane_ids:
35 |         lane_traffic_control_ids = mapAPI.get_lane_traffic_control_ids(
36 |             lane_id)
37 | 
38 |         # set of traffic control elements of type traffic light next to a lane
39 |         lane_traffic_lights = set()
40 | 
41 |         for control_id in lane_traffic_control_ids:
42 |             control_element = mapAPI.get_element_by_id(control_id)
43 |             if mapAPI.is_traffic_light(control_element):
44 |                 lane_traffic_lights.add(control_id)
45 | 
46 |
47 | if len(lane_traffic_lights) != 0:
48 | lane_tls_dict[lane_id] = lane_traffic_lights
49 |
50 | return lane_tls_dict
51 |
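# The lane -> traffic light mapping can be flipped into a traffic light ->
# lanes view with the helper from src.common.helper, e.g.:
#
#   from src.common.helper import reverse_dict_of_sets
#   lane_to_tls = generate_lane_tl_relation(mapAPI)   # {"ln1": {"tl_a", ...}}
#   tl_to_lanes = reverse_dict_of_sets(lane_to_tls)   # {"tl_a": {"ln1", ...}}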
52 |
53 | def generate_predecessor_successor_lookup(mapAPI: ExtendedMapAPI) -> pd.DataFrame:
54 | """Generate Lane Frame Lookup
55 |
56 | Args:
57 |         mapAPI (ExtendedMapAPI): map api
58 | 
59 |     Returns:
60 |         pd.DataFrame: lookup with predecessor and successor lane ids per lane
61 | """
62 | lanes = mapAPI.get_elements_from_layer("lane")
63 | lane_ids = mapAPI.get_element_ids_from_layer("lane")
64 |
65 | # initialize dict
66 | lane_dict = dict()
67 | for lane_id in lane_ids:
68 | lane_dict[lane_id] = {
69 | 'successor': set(), "predecessor": set()} # initialize keys
70 |
71 | for lane in lanes:
72 | current_lane_id = mapAPI.id_as_str(lane.id)
73 | lane_ids_ahead = [mapAPI.id_as_str(id)
74 | for id in lane.element.lane.lanes_ahead]
75 |
76 | lane_dict[current_lane_id]["successor"] = set(
77 | lane_ids_ahead) # set successing lanes
78 |
79 | for ahead_lane_id in lane_ids_ahead: # set predecessing lanes
80 | lane_dict[ahead_lane_id]["predecessor"] = lane_dict[ahead_lane_id]["predecessor"].union(
81 | set([current_lane_id]))
82 |
83 |     predecessors = []
84 |     successors = []
85 |     lane_dict_values = lane_dict.values()
86 |     for v in lane_dict_values:
87 |         predecessors.append(list(v["predecessor"]))
88 |         successors.append(list(v["successor"]))
89 | 
90 |     return pd.DataFrame({"lane_id": lane_dict.keys(), "predecessor": predecessors, "successor": successors})
91 |
92 |
93 | def generate_target_label(junctionDataset: JunctionDataset, combinations: pd.DataFrame) -> list:
94 | """Generate target vector
95 |
96 | Args:
97 | junctionDataset (JunctionDataset): junction dataset
98 | combinations (pd.DataFrame): traffic light to sequence head lane ids
99 |
100 | Returns:
101 |         list: classification labels
102 | """
103 | target_vector = []
104 | for idx, row in combinations.iterrows():
105 | tl_id = row["tl_id"]
106 | lane_id = row["lane_id"]
107 |
108 | if tl_id not in junctionDataset.tl_lane_relations.keys():
109 | target_vector.append(-1) # relation not available in semantic map
110 | elif lane_id in junctionDataset.tl_lane_relations[tl_id]:
111 | target_vector.append(1)
112 | else:
113 | target_vector.append(0)
114 | return target_vector
115 |
116 |
117 | def generate_target_association(mapAPI: ExtendedMapAPI, junctionDataset: JunctionDataset) -> pd.DataFrame:
118 | """Generate target classification dataset of all active traffic lights that have been recorded in all scenes.
119 |
120 | Args:
121 | mapAPI (ExtendedMapAPI): map api
122 | junctionDataset (JunctionDataset): junction dataset
123 |
124 | Returns:
125 |         pd.DataFrame: target dataset with traffic light to lane relations and classification label (1/0/-1)
126 | """
127 | extended_branch_df = junctionDataset.get_branch_dataset(DATASET_TYPE.EXTENDED)
128 | targets = []
129 | for dataset_index in tqdm(range(DATASET_SIZE), desc="Iterating through preprocessed datasets."):
130 | tl_mapping_df = junctionDataset.get_tl_mapping(dataset_index)
131 | agent_mapping_df = junctionDataset.get_agent_mapping(dataset_index)
132 |
133 | ego_mapping_df = agent_mapping_df[agent_mapping_df["agent_id"] == 0].explode(
134 | "lane_ids").rename(columns={"lane_ids": "lane_id"}).drop_duplicates()
135 |
136 | # filter ego agent mapping for agent on branch lane sequences
137 | ego_mapping_branch_df = ego_mapping_df.join(
138 | extended_branch_df.set_index('lane_id'), on=['lane_id'])
139 | ego_mapping_branch_df = ego_mapping_branch_df.dropna()
140 | ego_mapping_branch_df = ego_mapping_branch_df.drop_duplicates()
141 | scene_indices = ego_mapping_branch_df["scene_idx"].unique()
142 |
143 | for scene_idx in scene_indices:
144 | ego_mapping_current_scene = ego_mapping_branch_df[
145 | ego_mapping_branch_df["scene_idx"] == scene_idx]
146 | tl_mapping_current_scene = tl_mapping_df[tl_mapping_df["scene_idx"] == scene_idx]
147 | unique_branches = ego_mapping_current_scene["branch_id"].unique()
148 |
149 | for branch_id in unique_branches:
150 | ego_mapping_current_scene_and_branch = ego_mapping_current_scene[
151 | ego_mapping_current_scene["branch_id"] == branch_id]
152 | sequence_head_lane_id_of_branch = extended_branch_df[extended_branch_df[
153 | "branch_id"] == branch_id]["sequence_head_lane_id"].tolist()
154 |
155 |                 # frame span in which the ego was mapped to this branch
156 |                 start_frame = ego_mapping_current_scene_and_branch["frame_idx"].min()
157 |                 end_frame = ego_mapping_current_scene_and_branch["frame_idx"].max()
158 |                 frame_interval = [start_frame, end_frame]
159 | 
160 | 
161 |
162 | active_tl_mapping_current_scene = tl_mapping_current_scene[((
163 | tl_mapping_current_scene["tl_status"] == 0) | (tl_mapping_current_scene["tl_status"] == 1)) & (tl_mapping_current_scene["frame_idx"].isin(
164 | list(range(*frame_interval))))]
165 |
166 | if len(active_tl_mapping_current_scene) < 1:
167 | continue
168 |
169 | active_tl_ids = active_tl_mapping_current_scene["tl_id"].unique(
170 | )
171 | junction_id = junctionDataset.get_junction_of_branch(branch_id)
172 | active_tl_ids = [
173 | id for id in active_tl_ids if mapAPI.tl_of_junction(id, junction_id)]
174 |
175 | combinations = list(itertools.product(
176 | active_tl_ids, sequence_head_lane_id_of_branch))
177 |
178 | for c in combinations:
179 | if c not in targets:
180 | targets.append(c)
181 |
182 | target_df = pd.DataFrame(
183 | targets, columns=["tl_id", "lane_id"])
184 | target_df["relation"] = generate_target_label(junctionDataset, target_df)
185 |
186 | return target_df
187 |
188 | def generate_drive_trough_table(mapAPI: ExtendedMapAPI, junctionDataset: JunctionDataset) -> pd.DataFrame:
189 | """Generate table with drive trough junction description.
190 | For every scene and agent.
191 |
192 | Args:
193 | mapAPI (ExtendedMapAPI): map api
194 | junctionDataset (JunctionDataset): junction dataset
195 |
196 | Returns:
197 |         pd.DataFrame: drive-through table
198 | """
199 | minimal_branch_df = junctionDataset.get_branch_dataset(DATASET_TYPE.MINIMAL)
200 | branch_lanes = minimal_branch_df["lane_id"]
201 | drive_trough_dfs = []
202 | for dataset_index in tqdm(range(DATASET_SIZE), desc="Iterating through preprocessed datasets."):
203 | agent_mapping = junctionDataset.get_agent_mapping(dataset_index)
204 |
205 |
206 | agent_mapping = agent_mapping.explode("lane_ids").rename(columns={"lane_ids":"lane_id"})
207 | agent_mapping = agent_mapping.drop_duplicates()
208 | mapped_lanes = agent_mapping.groupby(["scene_idx", "agent_id"])["lane_id"].unique()
209 | no_mapping_indices = mapped_lanes[mapped_lanes.apply(lambda x: len(x) == 1 and pd.isna(x).any())].index
210 | reduced_mapped_lanes = mapped_lanes.drop(index=no_mapping_indices)
211 | reduced_mapped_lanes = reduced_mapped_lanes.reset_index()
212 | 
213 | 
214 | intersecting_mapped_lanes = reduced_mapped_lanes["lane_id"].apply(lambda x: set(x).intersection(set(branch_lanes)))
215 | successor_intersecting_mapped_lanes = intersecting_mapped_lanes.apply(lambda x: [mapAPI.get_lane_successors(lane) for lane in x])
216 |
217 | reduced_mapped_lanes["successors"] = successor_intersecting_mapped_lanes
218 | reduced_mapped_lanes["successors"] = reduced_mapped_lanes["successors"].apply(lambda x: list(itertools.chain.from_iterable(x)))
219 | reduced_mapped_lanes["drive_trough"] = reduced_mapped_lanes.apply(lambda x: any(item in x["lane_id"] for item in x["successors"]), axis=1)
220 | drive_trough_dfs.append(reduced_mapped_lanes)
221 |
222 | pickle_df(f"dataset/preprocessed_info/drive_trough/{dataset_index}_df_trainfull.pkl", reduced_mapped_lanes)
223 | 
224 |     return pd.concat(drive_trough_dfs, ignore_index=True)
225 |
226 | def generate_traffic_light_scene_table() -> pd.DataFrame:
227 | traffic_light_mapping_dfs = []
228 | path = "dataset/preprocessed_info/tl_mapping/"
229 | start_idx = 0
230 | step = 20
231 | batch_size = 5
232 | for dataset_index in tqdm(range(start_idx, DATASET_SIZE, step), desc="Iterating through preprocessed datasets."):
233 | tl_mapping_df = load_preprocessed_datasets(path, dataset_index, step, batch_size).drop(columns=["active_faces", "inactive_faces", "unknown_faces"])
234 | tl_mapping_df = tl_mapping_df.sort_values(["tl_id", "scene_idx", "frame_idx"])
235 |
236 | # 1. Aggregate
237 | tl_mapping_scene = tl_mapping_df.groupby(["tl_id", "scene_idx"], as_index=False).agg({'tl_status': 'mean'}).copy()
238 |
239 |         # 2. Trivial assignment: traffic light state unknown over the complete scene -> UNDEFINED
240 | tl_mapping_scene_const_undefined = tl_mapping_scene[tl_mapping_scene["tl_status"]==4]
241 | tl_mapping_scene.loc[tl_mapping_scene_const_undefined.index, "interpreted_state"] = SCENE_TL_STATUS.UNDEFINED.value
242 |
243 |         # 3. Assignment of constant red/green states over the complete scene (unknown frames ignored)
244 | tl_mapping_df_without_unknown = tl_mapping_df[tl_mapping_df["tl_status"]!=4].copy()
245 | tl_mapping_df_without_unknown = tl_mapping_df_without_unknown.sort_values(["tl_id", "scene_idx", "frame_idx"])
246 |
247 | tl_mapping_scene_without_unknown = tl_mapping_df_without_unknown.groupby(["tl_id", "scene_idx"], as_index=False).agg({'tl_status': 'mean'})
248 | tl_mapping_scene_without_unknown["interpreted_state"] = tl_mapping_scene_without_unknown["tl_status"].apply(lambda x: int(x) if (x == SCENE_TL_STATUS.CONST_RED.value or x==SCENE_TL_STATUS.CONST_GREEN.value) else np.nan)
249 |
250 | start_frame_df = tl_mapping_df_without_unknown.groupby(["tl_id", "scene_idx"], as_index=False)["frame_idx"].min().rename({"frame_idx":"state_known_start_frame"}, axis=1)
251 | end_frame_df = tl_mapping_df_without_unknown.groupby(["tl_id", "scene_idx"], as_index=False)["frame_idx"].max().rename({"frame_idx":"state_known_end_frame"}, axis=1)
252 |
253 | tl_mapping_scene_without_unknown = tl_mapping_scene_without_unknown.merge(start_frame_df, on=["tl_id", "scene_idx"], how="left")
254 | tl_mapping_scene_without_unknown = tl_mapping_scene_without_unknown.merge(end_frame_df, on=["tl_id", "scene_idx"], how="left")
255 |
256 |         red_green_change = tl_mapping_df[tl_mapping_df["tl_status"].diff().abs() == 1]  # detect red/green change
257 |         red_green_change = red_green_change.drop_duplicates(subset=["scene_idx", "tl_id"], keep="first")  # keep only the first red/green change per traffic light and scene
258 | red_green_change["interpreted_state"] = red_green_change["tl_status"].apply(lambda x: SCENE_TL_STATUS.RED_GREEN.value if x==1 else SCENE_TL_STATUS.GREEN_RED.value)
259 | missing_states = tl_mapping_scene_without_unknown[tl_mapping_scene_without_unknown["interpreted_state"].isna()]
260 | red_green_change_reduced = red_green_change[(red_green_change["scene_idx"].isin(missing_states["scene_idx"].unique())) & (red_green_change["tl_id"].isin(missing_states["tl_id"].unique()))]
261 | red_green_change_reduced = red_green_change_reduced.rename(columns={"frame_idx":"state_change_frame"}).drop("tl_status", axis=1)
262 |
263 | tl_mapping_scene_without_unknown = tl_mapping_scene_without_unknown.merge(red_green_change_reduced, on=["tl_id", "scene_idx"], how="left")
264 | tl_mapping_scene_without_unknown["interpreted_state"] = tl_mapping_scene_without_unknown["interpreted_state_x"].fillna(tl_mapping_scene_without_unknown["interpreted_state_y"])
265 | tl_mapping_scene_without_unknown = tl_mapping_scene_without_unknown.drop(columns=["interpreted_state_x", "interpreted_state_y"], axis=1)
266 |
267 | tl_mapping_scene_unknown = tl_mapping_scene[tl_mapping_scene["tl_status"]==4 ].copy()
268 | tl_mapping_scene_unknown["state_known_start_frame"] =-1
269 | tl_mapping_scene_unknown["state_known_end_frame"] =-1
270 | tl_mapping_scene_unknown["state_change_frame"] =-1
271 | tl_mapping_scene_unknown = tl_mapping_scene_unknown[["tl_id", "scene_idx", "tl_status","state_known_start_frame", "state_known_end_frame", "state_change_frame", "interpreted_state"]]
272 | tl_mapping_scene_total = pd.concat([tl_mapping_scene_without_unknown,tl_mapping_scene_unknown] , ignore_index=True)
273 | del tl_mapping_scene_unknown
274 |
275 | tl_mapping_scene_total = tl_mapping_scene_total.sort_values(["tl_id", "scene_idx"])
276 | tl_mapping_scene_total["state_change_frame"] = tl_mapping_scene_total["state_change_frame"].fillna(-1)
277 | tl_mapping_scene_total = tl_mapping_scene_total[ (tl_mapping_scene_total["interpreted_state"].notna())]
278 | tl_mapping_scene_total = tl_mapping_scene_total.astype({"scene_idx":"int", "tl_status": "int", "state_change_frame": "int", "interpreted_state":"int"})
279 | tl_mapping_scene_total = tl_mapping_scene_total.drop(columns=["tl_status"])
280 | traffic_light_mapping_dfs.append(tl_mapping_scene_total)
281 |
282 | return pd.concat(traffic_light_mapping_dfs)
283 |
284 |
285 |
286 | def filter_ego_features():
287 | """Filter complete features of all agents by ego vehicle
288 | """
289 | res = []
290 | for i, dataset_index in tqdm(enumerate(range(DATASET_SIZE)), desc="Iterating through preprocessed datasets."):
291 | features_df = load_pickled_df(f"dataset/preprocessed_info/combined_features/{dataset_index}_df_trainfull.pkl")
292 | ego_features_df = features_df[features_df["agent_id"] == 0]
293 | res.append(ego_features_df)
294 | if (i % 100) == 99:
295 | df = pd.concat(res)
296 | df = df.drop_duplicates()
297 | res = []
298 | pickle_df(f"dataset/preprocessed_info/ego_features/{dataset_index}_df_trainfull.pkl", df)
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/map-learning/tl2la/784b32b758a8d6f2250bb08265ef32dc7f1cc1bf/lyft_l5_tl2la/src/visualization/__init__.py
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/visualization/map_renderer.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Visualizer based on Matplotlib to render Map features of
3 | # Lyft's HD map
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 |
9 |
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | from matplotlib.patches import Rectangle
13 |
14 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
15 |
16 |
17 | class StaticMapRenderer:
18 | """Fetch semantic attributes over the map api and visualize it"""
19 |
20 | def __init__(self, map_api):
21 | self._color_mapping = dict(drivable_area='#afd1e3',
22 | road_segment='#1871ad',
23 | road_block='#b2df8a',
24 | lane='#474747',
25 | traffic_control_element='#ff0000',
26 | traffic_light="#00db04",
27 | junctions='#fcba03')
28 |
29 | self._map_api: ExtendedMapAPI = map_api
30 | self.fig = plt.figure(figsize=(10, 10))
31 |
32 | def add_layer(self, name: str):
33 | ax = self.fig.gca()
34 | #print("Layer name:", name)
35 | return ax
36 |
37 | def render_lanes(self):
38 | lanes = self._map_api.get_elements_from_layer("lane")
39 | ax = self.add_layer("lanes")
40 | for lane in lanes:
41 | self.render_lane_segment(ax, lane)
42 | return self.fig, ax
43 |
44 | def render_lane_segment(self, ax, lane):
45 | coords = self._map_api.get_lane_coords(ExtendedMapAPI.id_as_str(lane.id))
46 | self.plot_coords(ax, "lane", coords, "xyz_left")
47 | self.plot_coords(ax, "lane", coords, "xyz_right")
48 |
49 | def plot_coords(self, ax, attribute_type: str, coords: np.ndarray, access_key: str):
50 | xs = coords[access_key][:, 0]
51 | ys = coords[access_key][:, 1]
52 | ax.plot(
53 | xs, ys, color=self._color_mapping[attribute_type], label=attribute_type)
54 |
55 | def plot_bounding_box(self, ax, attribute_type: str, bounds: np.ndarray):
56 | p_south_west = bounds
57 |
58 | width = 50
59 |         height = 50
60 |         ax.add_patch(Rectangle((p_south_west[0]-25, p_south_west[1]-25), width,
61 |                      height, edgecolor="blue", facecolor=self._color_mapping[attribute_type], fill=True, label=attribute_type, zorder=10))
62 |
63 | def render_traffic_control_elements(self):
64 | traffic_elements = self._map_api.get_elements_from_layer("traffic_control_element")
65 | ax = self.add_layer("traffic_control_elements")
66 | for traffic_element in traffic_elements:
67 | self.render_traffic_control_element(ax, traffic_element)
68 | return self.fig, ax
69 |
70 | def render_traffic_control_element(self, ax, traffic_element):
71 | coords = self._map_api.get_traffic_control_element_coords(
72 | ExtendedMapAPI.id_as_str(traffic_element.id)
73 | )
74 | self.plot_coords(ax, "traffic_control_element", coords, "xyz")
75 |
76 | def render_traffic_lights(self):
77 | traffic_elements = self._map_api.get_elements_from_layer("traffic_control_element")
78 |
79 | # Filter for traffic lights
80 | traffic_lights = [
81 | elem for elem in traffic_elements if self._map_api.is_traffic_light(elem)]
82 | ax = self.add_layer("traffic_control_elements")
83 | for traffic_light in traffic_lights:
84 | self.render_traffic_light(ax, traffic_light)
85 | return self.fig, ax
86 |
87 | def render_traffic_light(self, ax, traffic_element):
88 | coords = self._map_api.get_traffic_light_coords(
89 | ExtendedMapAPI.id_as_str(traffic_element.id)
90 | )
91 | self.plot_coords(ax, "traffic_light", coords, "xyz")
92 |
93 | def render_junctions(self, with_tl_only=True):
94 | all_junctions = self._map_api.get_elements_from_layer("junction")
95 | ax = self.add_layer("junctions")
96 |
97 | if with_tl_only:
98 | all_junctions = self._map_api.filter_tl_junctions(all_junctions)
99 |
100 | for junction in all_junctions:
101 | self.render_junction(ax, junction)
102 | return self.fig, ax
103 |
104 | def render_junction(self, ax, junction):
105 | bounds = self._map_api.get_junction_coords(
106 | ExtendedMapAPI.id_as_str(junction.id))
107 | self.plot_bounding_box(ax, "junctions", bounds)
108 |
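
Usage sketch (not from the repository): a minimal way to drive StaticMapRenderer, assuming init_dataset and ExtendedMapAPI.from_config behave as in the tests at the end of this repository; the output path is hypothetical.

    from src.common.helper import init_dataset
    from src.dataset_operations.extended_map_api import ExtendedMapAPI
    from src.visualization.map_renderer import StaticMapRenderer

    cfg, dm, zarr_dataset = init_dataset("train1")
    map_api = ExtendedMapAPI.from_config(dm, cfg)

    renderer = StaticMapRenderer(map_api)
    fig, ax = renderer.render_lanes()             # lane boundaries
    renderer.render_traffic_lights()              # traffic light geometries
    renderer.render_junctions(with_tl_only=True)  # signalized junctions as 50 m boxes
    fig.savefig("static_map.png", dpi=150)        # hypothetical output path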
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/visualization/static_visualizer.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Visualizer based on Bokeh to render Map features and semantics of
3 | # Lyft's HD map
4 | #
5 | # tl2la
6 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
7 | # ------------------------------------------------------------------------
8 | # Modified from L5Kit (https://github.com/woven-planet/l5kit)
9 | # Copyright (c) 2022 Woven by Toyota. All Rights Reserved.
10 | #
11 | # Based on files:
12 | # - l5kit.visualization.visualizer.py
13 | # - l5kit.visualization.zarr_utils.py
14 | # ------------------------------------------------------------------------
15 |
16 |
17 | import pickle
18 | from collections import defaultdict
19 | from typing import Any, DefaultDict, Dict, List, Set
20 |
21 | import bokeh.io
22 | import bokeh.plotting
23 | import matplotlib.cm as cmx
24 | import matplotlib.colors as colors
25 | import matplotlib.pyplot as plt
26 | import numpy as np
27 | import pandas as pd
28 | from bokeh.layouts import LayoutDOM
29 | from bokeh.models import BasicTicker, ColorBar, HoverTool, LinearColorMapper
30 | from bokeh.palettes import Plasma
31 | from bokeh.plotting import ColumnDataSource
32 | from l5kit.data import DataManager
33 | from l5kit.visualization.visualizer.common import CWVisualization
34 | from src.common.helper import load_pickled_df, reverse_dict_of_sets
35 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
36 | from src.visualization.tl_visualization import (JunctionVisualization,
37 | LaneTLRelationVisualization,
38 | LaneVisualization,
39 | NodeVisualization,
40 | StaticVisualization,
41 | TLVisualization)
42 |
43 |
44 | class StaticVisualizer:
45 | def __init__(self, cfg: dict, dm: DataManager) -> None:
46 | self.cfg = cfg
47 | self.dm = dm
48 |
49 | self.frame_lane_df: pd.DataFrame = None
50 |
51 | def _calculate_center(self, xs: np.ndarray, ys: np.ndarray) -> np.ndarray:
52 |         """Calculate the midpoint of two given points
53 |
54 | Args:
55 | xs (np.ndarray): x-pos of points
56 | ys (np.ndarray): y-pos of points
57 |
58 | Returns:
59 | np.ndarray: center point
60 | """
61 | c1 = xs[0] + 0.5 * (xs[1]-xs[0])
62 | c2 = ys[0] + 0.5 * (ys[1]-ys[0])
63 |
64 | return np.array([c1, c2])
65 |
66 | def load_df(self, path: str) -> pd.DataFrame:
67 |         """Return the dataframe stored in a pickled file
68 |
69 | Args:
70 | path (str): path to pickle file
71 |
72 | Returns:
73 |             pd.DataFrame: the unpickled dataframe
74 | """
75 | with open(path, 'rb') as f:
76 | return pickle.load(f)
77 |
78 | def get_semantic_visualisation(self, mapAPI: ExtendedMapAPI, **kwargs) -> StaticVisualization:
79 | """Get visualisation objects for static map.
80 |
81 | Keyword Args:
82 | * heatmap (bool): Visualization of the frequency of lane usage by the ego vehicle
83 |             * predicted_assocs (bool): Visualization of predicted and ground truth lane assignments"""
84 | #################
85 | # plot lane
86 | lane_ids: list = mapAPI.bounds_info["lanes"]["ids"]
87 | lane_id_to_scene_ids_df = None
88 |
89 |         if kwargs.get("heatmap"):
90 | self.frame_lane_df = self.load_df(
91 | # path=self.cfg["preprocessed_data_loader"]["ego_frame_lane_mapping"])
92 | path="dataset/preprocessed_info/frame_lane_df_trainfull.pkl")
93 | self.branches_df = pd.read_csv(
94 | self.cfg["preprocessed_data_loader"]["junction_branch_to_lane"], encoding="ascii")
95 |
96 | merged_df = self.frame_lane_df.join(
97 | self.branches_df.set_index('lane_id'), on=['lane_id'])
98 | merged_df = merged_df.fillna("unknown")
99 |
100 | scene_ids_per_lane = merged_df.groupby(
101 | "lane_id")["scene_idx"].unique()
102 |             lane_id_to_scene_ids_df = pd.DataFrame(
103 |                 columns=["lane_id", "scene_indices"])
104 |             lane_id_to_scene_ids_df["scene_indices"] = (
105 |                 lane_id_to_scene_ids_df["scene_indices"].astype("object"))
106 |             lane_id_to_scene_ids_df["scene_indices"] = (
107 |                 scene_ids_per_lane.values.tolist())
108 |             # take lane ids from the groupby index so they stay aligned with scene_indices
109 |             lane_id_to_scene_ids_df["lane_id"] = scene_ids_per_lane.index.tolist()
110 |             lane_id_to_scene_ids_df["trips"] = (
111 |                 lane_id_to_scene_ids_df["scene_indices"].apply(len))
112 |
113 |             cmap = plt.get_cmap("plasma")
114 |             cNorm = colors.Normalize(
115 |                 vmin=0, vmax=lane_id_to_scene_ids_df["trips"].max())
116 |             scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
117 |
118 | lanes_vis: List[LaneVisualization] = []
119 |
120 | for lane_id in lane_ids:
121 | lane_colour = "gray"
122 | if lane_id_to_scene_ids_df is not None:
123 | current_row = lane_id_to_scene_ids_df[lane_id_to_scene_ids_df["lane_id"] == lane_id]
124 | if len(current_row) > 0:
125 | lane_colour = colors.to_hex(list(scalarMap.to_rgba(
126 | current_row["trips"].iloc[0])))
127 |
128 | lane_coords = mapAPI.get_lane_coords(lane_id)
129 | left_lane = lane_coords["xyz_left"][:, :2]
130 | right_lane = lane_coords["xyz_right"][::-1, :2]
131 |
132 | lanes_vis.append(LaneVisualization(xs=np.hstack((left_lane[:, 0], right_lane[:, 0])),
133 | ys=np.hstack(
134 | (left_lane[:, 1], right_lane[:, 1])),
135 | color=lane_colour,
136 | id=lane_id))
137 |
138 | #################
139 | # plot traffic light to lane relation:
140 | lane_tl_vis: List[LaneTLRelationVisualization] = []
141 |
142 | # get corresponding traffic lights of lanes
143 | lane_tls_dict = mapAPI.get_lane_tl_relation()
144 | tl_lanes_dict = reverse_dict_of_sets(lane_tls_dict)
145 |
146 | for lane_id in lane_tls_dict:
147 | corresponding_tl_ids = lane_tls_dict[lane_id]
148 |
149 | predecessor_lanes = mapAPI.get_lane_predecessors(lane_id)
150 |             # The predecessor check is applied per traffic light below, so
151 |             # a relation is skipped only when the same light is already
152 |             # attached to one of the lane's predecessors (this avoids
153 |             # drawing duplicate relation lines across consecutive segments).
154 |
155 | for tl_id in corresponding_tl_ids:
156 | has_predecessor = any(predecessor in tl_lanes_dict[tl_id]
157 | for predecessor in predecessor_lanes)
158 | if has_predecessor:
159 | continue
160 |
161 | # calculate points of lane portals (start and end)
162 |
163 | # start points
164 | tl_coords = mapAPI.get_traffic_light_coords(tl_id)
165 | p_start = self._calculate_center(
166 | tl_coords["xyz"][:, 0], tl_coords["xyz"][:, 1])
167 |
168 | # end point
169 | lane_coords = mapAPI.get_lane_coords(lane_id)
170 | portal_1 = np.array(
171 | [lane_coords["xyz_left"][-1, 0:2], lane_coords["xyz_right"][-1, 0:2]])
172 |
173 | portal1_center = self._calculate_center(np.array(
174 | [portal_1[0, 0], portal_1[1, 0]]), np.array([portal_1[0, 1], portal_1[1, 1]]))
175 | p_end = portal1_center
176 |
177 | lane_tl_vis.append(
178 | LaneTLRelationVisualization(xs=np.array([p_start[0], p_end[0]]),
179 | ys=np.array([p_start[1], p_end[1]]))
180 | )
181 |
182 | #################
183 | # plot predicted traffic light to lane relation:
184 | predicted_lane_tl_vis: List[LaneTLRelationVisualization] = []
185 | gt_lane_tl_vis: List[LaneTLRelationVisualization] = []
186 |
187 |         if kwargs.get("predicted_assocs"):
188 | # get corresponding traffic lights of lanes
189 |             path = "dataset/results/conservative/groundtruth_vs_prediction_notna.pkl"
190 | groundtruth_vs_prediction_notna = load_pickled_df(path)
191 |
192 | predicted_lane_tls_dict = {lane_id: set() for lane_id in groundtruth_vs_prediction_notna["lane_id"].unique()}
193 | gt_lane_tls_dict = {lane_id: set() for lane_id in groundtruth_vs_prediction_notna["lane_id"].unique()}
194 | for idx, row in groundtruth_vs_prediction_notna.iterrows():
195 | if row["predicted_relation"] > 0:
196 | predicted_lane_tls_dict[row["lane_id"]].add(row["tl_id"])
197 | if row["relation"] > 0:
198 | gt_lane_tls_dict[row["lane_id"]].add(row["tl_id"])
199 |
200 | for lane_id in predicted_lane_tls_dict:
201 | corresponding_tl_ids = predicted_lane_tls_dict[lane_id]
202 |
203 | predecessor_lanes = mapAPI.get_lane_predecessors(lane_id)
204 |
205 | for tl_id in corresponding_tl_ids:
206 | has_predecessor = any(predecessor in tl_lanes_dict[tl_id]
207 | for predecessor in predecessor_lanes)
208 | if has_predecessor:
209 | continue
210 |
211 | # calculate points of lane portals (start and end)
212 |
213 | # start points
214 | tl_coords = mapAPI.get_traffic_light_coords(tl_id)
215 | p_start = self._calculate_center(
216 | tl_coords["xyz"][:, 0], tl_coords["xyz"][:, 1])
217 |
218 | # end point
219 | lane_coords = mapAPI.get_lane_coords(lane_id)
220 | portal_1 = np.array(
221 | [lane_coords["xyz_left"][-1, 0:2], lane_coords["xyz_right"][-1, 0:2]])
222 |
223 | portal1_center = self._calculate_center(np.array(
224 | [portal_1[0, 0], portal_1[1, 0]]), np.array([portal_1[0, 1], portal_1[1, 1]]))
225 | p_end = portal1_center
226 |
227 | offset = -0.1
228 | predicted_lane_tl_vis.append(
229 | LaneTLRelationVisualization(xs=np.array([p_start[0]+offset, p_end[0]+offset]),
230 | ys=np.array([p_start[1]+offset, p_end[1]+offset]))
231 | )
232 | for lane_id in gt_lane_tls_dict:
233 | corresponding_tl_ids = gt_lane_tls_dict[lane_id]
234 |
235 | predecessor_lanes = mapAPI.get_lane_predecessors(lane_id)
236 |
237 | for tl_id in corresponding_tl_ids:
238 | has_predecessor = any(predecessor in tl_lanes_dict[tl_id]
239 | for predecessor in predecessor_lanes)
240 | if has_predecessor:
241 | continue
242 |
243 | # calculate points of lane portals (start and end)
244 |
245 | # start points
246 | tl_coords = mapAPI.get_traffic_light_coords(tl_id)
247 | p_start = self._calculate_center(
248 | tl_coords["xyz"][:, 0], tl_coords["xyz"][:, 1])
249 |
250 | # end point
251 | lane_coords = mapAPI.get_lane_coords(lane_id)
252 | portal_1 = np.array(
253 | [lane_coords["xyz_left"][-1, 0:2], lane_coords["xyz_right"][-1, 0:2]])
254 |
255 | portal1_center = self._calculate_center(np.array(
256 | [portal_1[0, 0], portal_1[1, 0]]), np.array([portal_1[0, 1], portal_1[1, 1]]))
257 | p_end = portal1_center
258 |
259 | offset = 0.1
260 | gt_lane_tl_vis.append(
261 | LaneTLRelationVisualization(xs=np.array([p_start[0]+offset, p_end[0]+offset]),
262 | ys=np.array([p_start[1]+offset, p_end[1]+offset]))
263 | )
264 |
265 | #################
266 | # plot traffic lights:
267 |
268 |         # reverse lane_tls_dict to map tl_id -> set of lane ids (mirrors
269 |         # the reverse_dict_of_sets result built earlier in this method)
270 |         tl_lanes_dict = dict()
271 |         for lane_id, tl_set in lane_tls_dict.items():
272 |             for tl_id in tl_set:
273 |                 if tl_id not in tl_lanes_dict:
274 |                     tl_lanes_dict[tl_id] = {lane_id}
275 |                 else:
276 |                     tl_lanes_dict[tl_id].add(lane_id)
277 |
278 | tl_ids = mapAPI.get_tl_bounds()["ids"]
279 |
280 | tl_vis: List[TLVisualization] = []
281 |
282 | for tl_id in tl_ids:
283 | tl_color = "grey"
284 | tl_color_state = "unknown"
285 |
286 | tl_coords = mapAPI.get_traffic_light_coords(tl_id)
287 | lane_ids = list(tl_lanes_dict.get(tl_id, []))
288 | tl_element = mapAPI.get_element_by_id(tl_id)
289 | tl_face_ids = mapAPI.get_tl_face_ids(tl_element)
290 |
291 | center = self._calculate_center(
292 | tl_coords["xyz"][:, 0], tl_coords["xyz"][:, 1])
293 | tl_vis.append(TLVisualization(xs=tl_coords["xyz"][:, 0],
294 | ys=tl_coords["xyz"][:, 1],
295 | center_x=center[0],
296 | center_y=center[1],
297 | state_color=tl_color,
298 | state=tl_color_state,
299 | traffic_light_id=tl_id,
300 | face_ids=np.array(
301 | list(tl_face_ids)),
302 | corresponding_lanes=lane_ids)) # lane_ids
303 |
304 | #################
305 | # plot traffic nodes:
306 | node_vis: List[NodeVisualization] = []
307 | nodes_coords = np.array(mapAPI.get_nodes_coordinates()["coords"])
308 | nodes_ids = np.array(mapAPI.get_nodes_coordinates()["ids"])
309 |
310 | for idx, node_coords in enumerate(nodes_coords):
311 | node_vis.append(
312 | NodeVisualization(
313 | x=node_coords[0], y=node_coords[1], id=nodes_ids[idx])
314 | )
315 |
316 | #################
317 | # plot junction points:
318 | junction_vis: List[JunctionVisualization] = []
319 | junctions_coords = np.array(mapAPI.get_junctions_coords()["coords"])
320 | junctions_ids = mapAPI.get_junctions_coords()["ids"]
321 |
322 | for idx, junction_coords in enumerate(junctions_coords):
323 | junction_vis.append(
324 | JunctionVisualization(
325 | x=junction_coords[0], y=junction_coords[1], id=junctions_ids[idx])
326 | )
327 |
328 | #################
329 | # plot crosswalks
330 | crosswalks_coords = mapAPI.bounds_info["crosswalks"]["bounds"]
331 | crosswalks_vis: List[CWVisualization] = []
332 |
333 | for coords in crosswalks_coords:
334 | crosswalks_vis.append(CWVisualization(xs=coords[:, 0],
335 | ys=coords[:, 1],
336 | color="yellow"))
337 |
338 | return StaticVisualization(lanes=lanes_vis,
339 | crosswalks=crosswalks_vis,
340 | trafficLights=tl_vis,
341 | lane_tl_relations=lane_tl_vis,
342 | predicted_lane_tl_relations=predicted_lane_tl_vis,
343 | gt_lane_tl_relations=gt_lane_tl_vis,
344 | nodes=node_vis,
345 | tl_junctions=junction_vis
346 | )
347 |
348 | def _visualization_list_to_dict(self, visualisation_list: List[Any], null_el: Any) -> Dict[str, Any]:
349 | """Convert a list of NamedTuple into a dict, where:
350 | - the NamedTuple fields are the dict keys;
351 | - the dict value are lists;
352 |
353 | :param visualisation_list: a list of NamedTuple
354 | :param null_el: an element to be used as null if the list is empty (it can crash visualisation)
355 | :return: a dict with the same information
356 | """
357 | visualisation_list = visualisation_list if len(
358 | visualisation_list) else [null_el]
359 | visualisation_dict: DefaultDict[str, Any] = defaultdict(list)
360 |
361 | keys_set: Set[str] = set(visualisation_list[0]._asdict().keys())
362 | for el in visualisation_list:
363 | for k, v in el._asdict().items():
364 | if k not in keys_set:
365 | raise ValueError(
366 | "keys set is not consistent between elements in the list")
367 | visualisation_dict[k].append(v)
368 | return dict(visualisation_dict)
369 |
370 | def visualize(self, static_vis: StaticVisualization, **kwargs) -> LayoutDOM:
371 | """Visualise static environment using Bokeh.
372 | """
373 |
374 | tl_hover = HoverTool(
375 | mode="mouse",
376 | names=["trafficLights"],
377 | tooltips=[
378 | ("xPos", "@center_x{0.00}"),
379 | ("yPos", "@center_y{0.00}"),
380 | ("State", "@state"),
381 | ("TL Id", "@traffic_light_id"),
382 | ("Face Ids", "@face_ids"),
383 | ("Lanes", "@corresponding_lanes")
384 | ],
385 | )
386 |
387 | lane_hover = HoverTool(
388 | mode="mouse",
389 | names=["lanes"],
390 | tooltips=[
391 | ("(x,y)", "($x, $y)"),
392 | ("Lane Id", "@id")
393 | ],
394 | )
395 |
396 | junction_hover = HoverTool(
397 | mode="mouse",
398 | names=["tl_junctions"],
399 | tooltips=[
400 | ("xPos", "@x{0.00}"),
401 | ("yPos", "@y{0.00}"),
402 | ("Id", "@id"),
403 | ],
404 | )
405 |
406 | node_hover = HoverTool(
407 | mode="mouse",
408 | names=["nodes"],
409 | tooltips=[
410 | ("xPos", "@x{0.00}"),
411 | ("yPos", "@y{0.00}"),
412 | ("Id", "@id"),
413 | ],
414 | )
415 |
416 |         out: Dict[str, ColumnDataSource] = {}
417 |
418 | # we need to ensure we have something otherwise js crashes
419 |         # ensure every source holds at least one element, otherwise the JS side crashes
420 | lanes_dict = self._visualization_list_to_dict(static_vis.lanes, LaneVisualization(xs=np.empty(0), ys=np.empty(0),
421 | color="black", id="none"))
422 |
423 | crosswalk_dict = self._visualization_list_to_dict(static_vis.crosswalks, CWVisualization(xs=np.empty(0), ys=np.empty(0),
424 | color="black"))
425 | tl_dict = self._visualization_list_to_dict(static_vis.trafficLights, TLVisualization(xs=np.empty(0), ys=np.empty(0),
426 | center_x=0, center_y=0,
427 | state_color="grey", state="unknown",
428 | traffic_light_id="none", face_ids=np.empty(0),
429 | corresponding_lanes=["unknown"]))
430 |
431 | lane_tl_dict = self._visualization_list_to_dict(
432 | static_vis.lane_tl_relations, LaneTLRelationVisualization(xs=np.zeros((2,), dtype=float), ys=np.zeros((2,), dtype=float)))
433 |
434 | predicted_lane_tl_dict = self._visualization_list_to_dict(
435 | static_vis.predicted_lane_tl_relations, LaneTLRelationVisualization(xs=np.zeros((2,), dtype=float), ys=np.zeros((2,), dtype=float)))
436 |
437 | gt_lane_tl_dict = self._visualization_list_to_dict(
438 | static_vis.gt_lane_tl_relations, LaneTLRelationVisualization(xs=np.zeros((2,), dtype=float), ys=np.zeros((2,), dtype=float)))
439 |
440 | node_dict = self._visualization_list_to_dict(
441 | static_vis.nodes, NodeVisualization(x=0., y=0., id="none"))
442 |
443 | junction_dict = self._visualization_list_to_dict(
444 | static_vis.tl_junctions, JunctionVisualization(x=0., y=0., id="unknown"))
445 |
446 | out = dict(lanes=ColumnDataSource(lanes_dict),
447 | crosswalks=ColumnDataSource(crosswalk_dict),
448 | traffic_lights=ColumnDataSource(tl_dict),
449 | lane_tl_relations=ColumnDataSource(lane_tl_dict),
450 | predicted_lane_tl_relations=ColumnDataSource(predicted_lane_tl_dict),
451 | gt_lane_tl_relations=ColumnDataSource(gt_lane_tl_dict),
452 | nodes=ColumnDataSource(node_dict),
453 | tl_junctions=ColumnDataSource(junction_dict))
454 |
455 | scene_fig = bokeh.plotting.figure(
456 | title="Static Environment of Dataset",
457 | match_aspect=True,
458 | tools=["pan", "wheel_zoom",
459 | tl_hover,
460 | lane_hover,
461 | junction_hover,
462 | node_hover,
463 | "save", "reset"],
464 | active_scroll="wheel_zoom",
465 | align='center',
466 | plot_width=1000,
467 | plot_height=1000
468 | )
469 |
470 | scene_fig.xgrid.grid_line_color = None
471 | scene_fig.ygrid.grid_line_color = None
472 |
473 | scene_fig.patches(line_width=0, alpha=0.5, name="lanes",
474 | color="color", source=out["lanes"])
475 | scene_fig.patches(line_width=0, alpha=0.5, color="#B5B50D",
476 | source=out["crosswalks"])
477 |
478 | # visualize tls state
479 | scene_fig.ellipse(x="center_x", y="center_y", width=2, height=2, line_width=2, color="state_color",
480 | source=out["traffic_lights"], name="trafficLights")
481 |
482 | # visualize relation
483 | scene_fig.multi_line(xs="xs", ys="ys",
484 | line_width=2, color="#8073ac", source=out["lane_tl_relations"], legend_label="lane_tl_relations")
485 |
486 | # visualize predicted relation
487 |         if kwargs.get("predicted_assocs"):
488 | scene_fig.multi_line(xs="xs", ys="ys",
489 | line_width=2, color="#212591", source=out["predicted_lane_tl_relations"], legend_label="predicted_lane_tl_relations")
490 | # visualize gt relation
491 | scene_fig.multi_line(xs="xs", ys="ys",
492 | line_width=2, color="#a80d10", source=out["gt_lane_tl_relations"], legend_label="ground_truth_lane_tl_relations")
493 |
494 | # visualize tls geometry
495 | scene_fig.patches(line_width=2, color="black",
496 | source=out["traffic_lights"])
497 |
498 | # visualize nodes
499 | scene_fig.ellipse(x="x", y="y", width=2, height=2, line_width=2, color="black",
500 | source=out["nodes"], name="nodes", legend_label="nodes")
501 |
502 | # visualize junctions
503 | scene_fig.ellipse(x="x", y="y", width=3, height=3, line_width=2, color="orange",
504 | source=out["tl_junctions"], name="tl_junctions", legend_label="tl_junctions")
505 |
506 | scene_fig.legend.location = "top_left"
507 | scene_fig.legend.click_policy = "hide"
508 |
509 |         if kwargs.get("heatmap"):
510 | color_mapper = LinearColorMapper(
511 | palette=Plasma[11], low=0, high=8187)
512 | color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(),
513 | location=(0, 0))
514 | scene_fig.add_layout(color_bar, 'right')
515 |
516 | return scene_fig
517 |
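
Usage sketch (not from the repository): driving StaticVisualizer end to end under the same init_dataset / from_config assumptions as above. Without the heatmap or predicted_assocs keyword arguments, only the map-derived relations are drawn; the output filename is hypothetical.

    import bokeh.io

    from src.common.helper import init_dataset
    from src.dataset_operations.extended_map_api import ExtendedMapAPI
    from src.visualization.static_visualizer import StaticVisualizer

    cfg, dm, _ = init_dataset("train1")
    map_api = ExtendedMapAPI.from_config(dm, cfg)

    visualizer = StaticVisualizer(cfg, dm)
    static_vis = visualizer.get_semantic_visualisation(map_api)
    bokeh.io.output_file("static_visualization.html")  # hypothetical output name
    bokeh.io.show(visualizer.visualize(static_vis))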
--------------------------------------------------------------------------------
/lyft_l5_tl2la/src/visualization/tl_visualization.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Extend existing visualization Classes from L5Kit to visualize
3 | # traffic light geometries, traffic light states, traffic light
4 | # to lane assignments, etc.
5 | #
6 | # tl2la
7 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
8 | # ------------------------------------------------------------------------
9 | # Modified from L5Kit (https://github.com/woven-planet/l5kit)
10 | # Copyright (c) 2022 Woven by Toyota. All Rights Reserved.
11 | #
12 | # Based on files:
13 | # - l5kit.visualization.visualizer.common
14 | # ------------------------------------------------------------------------
15 |
16 |
17 | from typing import List, NamedTuple
18 |
19 | import numpy as np
20 | from l5kit.visualization.visualizer.common import (AgentVisualization,
21 | CWVisualization,
22 | EgoVisualization,
23 | TrajectoryVisualization)
24 |
25 |
26 | class TLVisualization(NamedTuple):
27 |     """Hold information about a single traffic light
28 | 
29 |     :param xs: 1D array of x coordinates
30 |     :param ys: 1D array of y coordinates
31 |     :param center_x: x coordinate of the traffic light center
32 |     :param center_y: y coordinate of the traffic light center
33 |     :param state_color: display color of the traffic light state
34 |     :param state: state of the traffic light
35 |     :param traffic_light_id: id of the traffic light
36 |     :param face_ids: 1D array of traffic light face ids
37 |     :param corresponding_lanes: list of corresponding lane ids"""
38 | xs: np.ndarray
39 | ys: np.ndarray
40 |     center_x: float
41 |     center_y: float
42 | state_color: str
43 | state: str
44 | traffic_light_id: str
45 | face_ids: np.ndarray
46 | corresponding_lanes: list
47 |
48 |
49 | class LaneTLRelationVisualization(NamedTuple):
50 |     """Hold information about the relation between a lane and a traffic light
51 |
52 | :param xs: 1D array of x coordinates
53 | :param ys: 1D array of y coordinates
54 |
55 | """
56 | xs: np.ndarray
57 | ys: np.ndarray
58 |
59 |
60 | class LaneVisualization(NamedTuple):
61 | """Hold information about a single lane
62 |
63 | :param xs: 1D array of x coordinates
64 | :param ys: 1D array of y coordinates
65 | :param color: color of the lane as a string (both hex or text)
66 | :param id: id of the lane as a string
67 | """
68 | xs: np.ndarray
69 | ys: np.ndarray
70 | color: str
71 | id: str
72 |
73 |
74 | class NodeVisualization(NamedTuple):
75 |     """Hold information of a node between two road segments
76 | 
77 |     :param x: x coordinate of the node
78 |     :param y: y coordinate of the node
79 |     :param id: id of the node"""
80 |     x: float
81 |     y: float
82 | id: str
83 |
84 |
85 | class JunctionVisualization(NamedTuple):
86 | """Hold information of a junction
87 |
88 |     :param x: x coordinate of the junction
89 |     :param y: y coordinate of the junction
90 |     :param id: id of the junction"""
91 |     x: float
92 |     y: float
93 | id: str
94 |
95 |
96 | class FrameVisualization(NamedTuple):
97 | """Hold information about a frame (the state of a scene at a given time)
98 |
99 | :param ego: a single ego annotation
100 | :param agents: a list of agents
101 | :param lanes: a list of lanes
102 | :param crosswalks: a list of crosswalks
103 | :param trajectories: a list of trajectories
104 | :param trafficLights: a list of traffic lights
105 | :param lane_tl_relations: a list of lane traffic light relations
106 |     :param nodes: a list of nodes"""
107 | ego: EgoVisualization
108 | agents: List[AgentVisualization]
109 | lanes: List[LaneVisualization]
110 | crosswalks: List[CWVisualization]
111 | trajectories: List[TrajectoryVisualization]
112 | trafficLights: List[TLVisualization]
113 | lane_tl_relations: List[LaneTLRelationVisualization]
114 | nodes: List[NodeVisualization]
115 |
116 |
117 | class StaticVisualization(NamedTuple):
118 |     """Hold information about the static environment (the time-independent map content)
119 |
120 | :param lanes: a list of lanes
121 | :param crosswalks: a list of crosswalks
122 | :param trafficLights: a list of traffic lights
123 | :param lane_tl_relations: a list of lane traffic light relations
124 | :param tl_junctions: a list of junctions that get controlled by traffic lights
125 | """
126 | lanes: List[LaneVisualization]
127 | crosswalks: List[CWVisualization]
128 | trafficLights: List[TLVisualization]
129 | lane_tl_relations: List[LaneTLRelationVisualization]
130 | predicted_lane_tl_relations: List[LaneTLRelationVisualization]
131 | gt_lane_tl_relations: List[LaneTLRelationVisualization]
132 | nodes: List[NodeVisualization]
133 | tl_junctions: List[JunctionVisualization]
134 |
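
Since these are plain NamedTuples, every field maps to one Bokeh column. A small sketch of that conversion (mirroring what StaticVisualizer._visualization_list_to_dict does), with made-up coordinates and a lane id borrowed from the tests:

    import numpy as np

    from src.visualization.tl_visualization import LaneVisualization

    # A list of NamedTuples becomes a dict of per-field lists, which is the
    # shape bokeh.plotting.ColumnDataSource expects.
    lanes = [LaneVisualization(xs=np.array([0.0, 1.0]), ys=np.array([0.0, 0.5]),
                               color="gray", id="qoFo")]
    columns = {field: [getattr(lane, field) for lane in lanes]
               for field in LaneVisualization._fields}
    print(columns["id"])  # ['qoFo']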
--------------------------------------------------------------------------------
/lyft_l5_tl2la/test/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | PROJECT_PATH = os.getcwd()
4 | SOURCE_PATH = os.path.join(
5 |     PROJECT_PATH, "src"
6 | )
7 | sys.path.append(SOURCE_PATH)
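
With this path shim in place, a programmatic equivalent of "python -m unittest discover" run from the repository root might look like this (assumed invocation):

    import unittest

    # Discover and run all test_*.py modules below test/.
    suite = unittest.defaultTestLoader.discover("test", pattern="test_*.py")
    unittest.TextTestRunner(verbosity=2).run(suite)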
--------------------------------------------------------------------------------
/lyft_l5_tl2la/test/test_junction_branch_to_lane_dataset.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Junction Dataset Sanity Checks
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 |
9 | import unittest
10 |
11 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
12 | import pandas as pd
13 | from src.common.helper import init_dataset
14 |
15 |
16 | class TestJunctionBranchToLaneMapping(unittest.TestCase):
17 |
18 | def __init__(self, *args, **kwargs):
19 | super(TestJunctionBranchToLaneMapping, self).__init__(*args, **kwargs)
20 |
21 | preprocessed_data_path = "./dataset/preprocessed_info/junction_branch_to_lane.csv"
22 | self.branches_df = pd.read_csv(
23 | preprocessed_data_path, encoding="ascii")
24 |
25 | cfg, dm, self.zarr_dataset = init_dataset("train1")
26 |
27 | self.mapAPI = ExtendedMapAPI.from_config(dm, cfg)
28 |
29 | def testIfLaneIdsExist(self):
30 | for _, lane_id in self.branches_df["lane_id"].items():
31 | lane_element = self.mapAPI.get_element_by_id(lane_id)
32 |
33 | is_lane_element = self.mapAPI.element_of_type(lane_element, "lane")
34 | self.assertTrue(is_lane_element, msg=f"Element id: {lane_id}")
35 |
36 | def testIfJunctionIdsExist(self):
37 | for _, junction_id in self.branches_df["junction_id"].items():
38 | junction_element = self.mapAPI.get_element_by_id(junction_id)
39 |
40 | is_junction_element = self.mapAPI.element_of_type(
41 | junction_element, "junction")
42 | self.assertTrue(is_junction_element,
43 | msg=f"Element id: {junction_id}")
44 |
45 |
46 | if __name__ == '__main__':
47 | unittest.main()
48 |
--------------------------------------------------------------------------------
/lyft_l5_tl2la/test/test_map_agent_to_lane.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Test of Mapping Agent to lane
3 | #
4 | # tl2la
5 | # Copyright (c) 2023 Andreas Weber. All Rights Reserved.
6 | # ------------------------------------------------------------------------
7 |
8 | import unittest
9 |
10 | from src.dataset_operations.extended_map_api import ExtendedMapAPI
11 | from src.common.helper import init_dataset
12 | from src.common.utils import map_agent_to_lanes
13 |
14 |
15 | class TestAgentToLaneMapping(unittest.TestCase):
16 |
17 | def __init__(self, *args, **kwargs):
18 | super(TestAgentToLaneMapping, self).__init__(*args, **kwargs)
19 |
20 | cfg, dm, self.zarr_dataset = init_dataset("train1")
21 |
22 | self.mapAPI = ExtendedMapAPI.from_config(dm, cfg)
23 |
24 | def test_mapping_1(self):
25 | """Mapping test for Scene 604 Frame 0.
26 | """
27 | scene_idx = 604
28 | frame_idx = 0
29 | lane_id_results = ["qoFo"]
30 |
31 | sample_scene = self.zarr_dataset.get_scene_dataset(scene_idx)
32 | ego_frame = sample_scene.frames[frame_idx]
33 | mapped_lane_ids = map_agent_to_lanes(
34 | ego_frame, self.mapAPI, scene_idx, frame_idx, is_ego=True)
35 |
36 | self.assertEqual(mapped_lane_ids, lane_id_results)
37 |
38 | def test_mapping_2(self):
39 |         """Negative mapping test for Scene 604 Frame 0: the ego must not map to lane "/Eof".
40 | """
41 | scene_idx = 604
42 | frame_idx = 0
43 | lane_id_results = ["/Eof"]
44 |
45 | sample_scene = self.zarr_dataset.get_scene_dataset(scene_idx)
46 | ego_frame = sample_scene.frames[frame_idx]
47 | mapped_lane_ids = map_agent_to_lanes(
48 | ego_frame, self.mapAPI, scene_idx, frame_idx, is_ego=True)
49 |
50 | self.assertNotEqual(mapped_lane_ids, lane_id_results)
51 |
52 | def test_mapping_3(self):
53 | """Mapping test for Scene 23 Frame 99.
54 | """
55 | scene_idx = 23
56 | frame_idx = 99
57 | lane_id_results = ['QtPT', 'b+hl', 'xsPT', '89hl']
58 |
59 | sample_scene = self.zarr_dataset.get_scene_dataset(scene_idx)
60 | ego_frame = sample_scene.frames[frame_idx]
61 | mapped_lane_ids = map_agent_to_lanes(
62 | ego_frame, self.mapAPI, scene_idx, frame_idx, is_ego=True)
63 |
64 | self.assertEqual(mapped_lane_ids, lane_id_results)
65 |
66 | def test_mapping_4(self):
67 |         """Mapping test for Scene 10945 Frame 221. This scene shows a localization or map error,
68 |         so no mapping is possible.
69 | """
70 | scene_idx = 10945
71 | frame_idx = 221
72 | lane_id_results = []
73 |
74 | sample_scene = self.zarr_dataset.get_scene_dataset(scene_idx)
75 | ego_frame = sample_scene.frames[frame_idx]
76 | mapped_lane_ids = map_agent_to_lanes(
77 | ego_frame, self.mapAPI, scene_idx, frame_idx, is_ego=True)
78 |
79 | self.assertEqual(mapped_lane_ids, lane_id_results)
80 |
81 | def test_mapping_5(self):
82 |         """Mapping test for Scene 18 Frame 47. The AV should be localized on the small lane tile.
83 | """
84 | scene_idx = 18
85 | frame_idx = 47
86 | lane_id_results = ["DvGy"]
87 |
88 | sample_scene = self.zarr_dataset.get_scene_dataset(scene_idx)
89 | ego_frame = sample_scene.frames[frame_idx]
90 | mapped_lane_ids = map_agent_to_lanes(
91 | ego_frame, self.mapAPI, scene_idx, frame_idx, is_ego=True)
92 |
93 | self.assertEqual(mapped_lane_ids, lane_id_results)
94 |
95 |
96 | if __name__ == '__main__':
97 | unittest.main()
98 |
--------------------------------------------------------------------------------