├── .gitignore
├── LICENSE.md
├── PrepareVicuna.md
├── README.md
├── click2chat
├── README.md
├── click2chat_llava.py
└── click2chat_minigpt4.py
├── data
├── calibration.json
├── format.md
├── generate_base.py
├── generate_captions.py
├── scratch.md
├── stp3
│ ├── config.py
│ ├── configs
│ │ └── carla
│ │ │ ├── Perception.yml
│ │ │ ├── Planning.yml
│ │ │ └── Prediction.yml
│ ├── cost.py
│ ├── datas
│ │ ├── CarlaData.py
│ │ ├── CenterlinesNuscenesData.py
│ │ ├── NuscenesData.py
│ │ ├── __NuscenesData.py
│ │ ├── dataloaders.py
│ │ ├── fullNuscenesData.py
│ │ ├── reducedNuscenesData.py
│ │ └── trainNuscenesData.py
│ ├── layers
│ │ ├── convolutions.py
│ │ └── temporal.py
│ ├── losses.py
│ ├── metrics.py
│ ├── models
│ │ ├── astar.cpp
│ │ ├── decoder.py
│ │ ├── distributions.py
│ │ ├── encoder.py
│ │ ├── future_prediction.py
│ │ ├── orig.cpp
│ │ ├── planning_model.py
│ │ ├── stp3.py
│ │ └── temporal_model.py
│ ├── trainer.py
│ └── utils
│ │ ├── Optnode.py
│ │ ├── Optnode_naive.py
│ │ ├── Optnode_obs_batched.py
│ │ ├── Optnode_obs_unbatched.py
│ │ ├── Optnode_waypoint.py
│ │ ├── data.py
│ │ ├── geometry.py
│ │ ├── go_save.py
│ │ ├── instance.py
│ │ ├── network.py
│ │ ├── sampler.py
│ │ ├── spline.py
│ │ ├── tools.py
│ │ └── visualisation.py
└── utils.py
├── docs
├── README.md
├── assets
│ └── pdf
│ │ └── talk2bev.pdf
├── index.html
└── static
│ ├── css
│ ├── bulma-carousel.min.css
│ ├── bulma-slider.min.css
│ ├── bulma.css.map.txt
│ ├── bulma.min.css
│ ├── fontawesome.all.min.css
│ └── index.css
│ ├── images
│ ├── favicon.svg
│ ├── icons8-chat-32.png
│ ├── icons8-chat-96.png
│ ├── interpolate_end.jpg
│ ├── interpolate_start.jpg
│ ├── pipeline1.png
│ ├── spatial1.png
│ ├── spatial2.png
│ ├── steve.webm
│ └── talk2bev_teaser-1.png
│ ├── interpolation
│ └── stacked
│ │ ├── ._Icon
│ │ ├── 000000.jpg
│ │ ├── 000001.jpg
│ │ ├── 000002.jpg
│ │ ├── 000003.jpg
│ │ ├── 000004.jpg
│ │ ├── 000005.jpg
│ │ ├── 000006.jpg
│ │ ├── 000007.jpg
│ │ ├── 000008.jpg
│ │ ├── 000009.jpg
│ │ ├── 000010.jpg
│ │ ├── 000011.jpg
│ │ ├── 000012.jpg
│ │ ├── 000013.jpg
│ │ ├── 000014.jpg
│ │ ├── 000015.jpg
│ │ ├── 000016.jpg
│ │ ├── 000017.jpg
│ │ ├── 000018.jpg
│ │ ├── 000019.jpg
│ │ ├── 000020.jpg
│ │ ├── 000021.jpg
│ │ ├── 000022.jpg
│ │ ├── 000023.jpg
│ │ ├── 000024.jpg
│ │ ├── 000025.jpg
│ │ ├── 000026.jpg
│ │ ├── 000027.jpg
│ │ ├── 000028.jpg
│ │ ├── 000029.jpg
│ │ ├── 000030.jpg
│ │ ├── 000031.jpg
│ │ ├── 000032.jpg
│ │ ├── 000033.jpg
│ │ ├── 000034.jpg
│ │ ├── 000035.jpg
│ │ ├── 000036.jpg
│ │ ├── 000037.jpg
│ │ ├── 000038.jpg
│ │ ├── 000039.jpg
│ │ ├── 000040.jpg
│ │ ├── 000041.jpg
│ │ ├── 000042.jpg
│ │ ├── 000043.jpg
│ │ ├── 000044.jpg
│ │ ├── 000045.jpg
│ │ ├── 000046.jpg
│ │ ├── 000047.jpg
│ │ ├── 000048.jpg
│ │ ├── 000049.jpg
│ │ ├── 000050.jpg
│ │ ├── 000051.jpg
│ │ ├── 000052.jpg
│ │ ├── 000053.jpg
│ │ ├── 000054.jpg
│ │ ├── 000055.jpg
│ │ ├── 000056.jpg
│ │ ├── 000057.jpg
│ │ ├── 000058.jpg
│ │ ├── 000059.jpg
│ │ ├── 000060.jpg
│ │ ├── 000061.jpg
│ │ ├── 000062.jpg
│ │ ├── 000063.jpg
│ │ ├── 000064.jpg
│ │ ├── 000065.jpg
│ │ ├── 000066.jpg
│ │ ├── 000067.jpg
│ │ ├── 000068.jpg
│ │ ├── 000069.jpg
│ │ ├── 000070.jpg
│ │ ├── 000071.jpg
│ │ ├── 000072.jpg
│ │ ├── 000073.jpg
│ │ ├── 000074.jpg
│ │ ├── 000075.jpg
│ │ ├── 000076.jpg
│ │ ├── 000077.jpg
│ │ ├── 000078.jpg
│ │ ├── 000079.jpg
│ │ ├── 000080.jpg
│ │ ├── 000081.jpg
│ │ ├── 000082.jpg
│ │ ├── 000083.jpg
│ │ ├── 000084.jpg
│ │ ├── 000085.jpg
│ │ ├── 000086.jpg
│ │ ├── 000087.jpg
│ │ ├── 000088.jpg
│ │ ├── 000089.jpg
│ │ ├── 000090.jpg
│ │ ├── 000091.jpg
│ │ ├── 000092.jpg
│ │ ├── 000093.jpg
│ │ ├── 000094.jpg
│ │ ├── 000095.jpg
│ │ ├── 000096.jpg
│ │ ├── 000097.jpg
│ │ ├── 000098.jpg
│ │ ├── 000099.jpg
│ │ ├── 000100.jpg
│ │ ├── 000101.jpg
│ │ ├── 000102.jpg
│ │ ├── 000103.jpg
│ │ ├── 000104.jpg
│ │ ├── 000105.jpg
│ │ ├── 000106.jpg
│ │ ├── 000107.jpg
│ │ ├── 000108.jpg
│ │ ├── 000109.jpg
│ │ ├── 000110.jpg
│ │ ├── 000111.jpg
│ │ ├── 000112.jpg
│ │ ├── 000113.jpg
│ │ ├── 000114.jpg
│ │ ├── 000115.jpg
│ │ ├── 000116.jpg
│ │ ├── 000117.jpg
│ │ ├── 000118.jpg
│ │ ├── 000119.jpg
│ │ ├── 000120.jpg
│ │ ├── 000121.jpg
│ │ ├── 000122.jpg
│ │ ├── 000123.jpg
│ │ ├── 000124.jpg
│ │ ├── 000125.jpg
│ │ ├── 000126.jpg
│ │ ├── 000127.jpg
│ │ ├── 000128.jpg
│ │ ├── 000129.jpg
│ │ ├── 000130.jpg
│ │ ├── 000131.jpg
│ │ ├── 000132.jpg
│ │ ├── 000133.jpg
│ │ ├── 000134.jpg
│ │ ├── 000135.jpg
│ │ ├── 000136.jpg
│ │ ├── 000137.jpg
│ │ ├── 000138.jpg
│ │ ├── 000139.jpg
│ │ ├── 000140.jpg
│ │ ├── 000141.jpg
│ │ ├── 000142.jpg
│ │ ├── 000143.jpg
│ │ ├── 000144.jpg
│ │ ├── 000145.jpg
│ │ ├── 000146.jpg
│ │ ├── 000147.jpg
│ │ ├── 000148.jpg
│ │ ├── 000149.jpg
│ │ ├── 000150.jpg
│ │ ├── 000151.jpg
│ │ ├── 000152.jpg
│ │ ├── 000153.jpg
│ │ ├── 000154.jpg
│ │ ├── 000155.jpg
│ │ ├── 000156.jpg
│ │ ├── 000157.jpg
│ │ ├── 000158.jpg
│ │ ├── 000159.jpg
│ │ ├── 000160.jpg
│ │ ├── 000161.jpg
│ │ ├── 000162.jpg
│ │ ├── 000163.jpg
│ │ ├── 000164.jpg
│ │ ├── 000165.jpg
│ │ ├── 000166.jpg
│ │ ├── 000167.jpg
│ │ ├── 000168.jpg
│ │ ├── 000169.jpg
│ │ ├── 000170.jpg
│ │ ├── 000171.jpg
│ │ ├── 000172.jpg
│ │ ├── 000173.jpg
│ │ ├── 000174.jpg
│ │ ├── 000175.jpg
│ │ ├── 000176.jpg
│ │ ├── 000177.jpg
│ │ ├── 000178.jpg
│ │ ├── 000179.jpg
│ │ ├── 000180.jpg
│ │ ├── 000181.jpg
│ │ ├── 000182.jpg
│ │ ├── 000183.jpg
│ │ ├── 000184.jpg
│ │ ├── 000185.jpg
│ │ ├── 000186.jpg
│ │ ├── 000187.jpg
│ │ ├── 000188.jpg
│ │ ├── 000189.jpg
│ │ ├── 000190.jpg
│ │ ├── 000191.jpg
│ │ ├── 000192.jpg
│ │ ├── 000193.jpg
│ │ ├── 000194.jpg
│ │ ├── 000195.jpg
│ │ ├── 000196.jpg
│ │ ├── 000197.jpg
│ │ ├── 000198.jpg
│ │ ├── 000199.jpg
│ │ ├── 000200.jpg
│ │ ├── 000201.jpg
│ │ ├── 000202.jpg
│ │ ├── 000203.jpg
│ │ ├── 000204.jpg
│ │ ├── 000205.jpg
│ │ ├── 000206.jpg
│ │ ├── 000207.jpg
│ │ ├── 000208.jpg
│ │ ├── 000209.jpg
│ │ ├── 000210.jpg
│ │ ├── 000211.jpg
│ │ ├── 000212.jpg
│ │ ├── 000213.jpg
│ │ ├── 000214.jpg
│ │ ├── 000215.jpg
│ │ ├── 000216.jpg
│ │ ├── 000217.jpg
│ │ ├── 000218.jpg
│ │ ├── 000219.jpg
│ │ ├── 000220.jpg
│ │ ├── 000221.jpg
│ │ ├── 000222.jpg
│ │ ├── 000223.jpg
│ │ ├── 000224.jpg
│ │ ├── 000225.jpg
│ │ ├── 000226.jpg
│ │ ├── 000227.jpg
│ │ ├── 000228.jpg
│ │ ├── 000229.jpg
│ │ ├── 000230.jpg
│ │ ├── 000231.jpg
│ │ ├── 000232.jpg
│ │ ├── 000233.jpg
│ │ ├── 000234.jpg
│ │ ├── 000235.jpg
│ │ ├── 000236.jpg
│ │ ├── 000237.jpg
│ │ ├── 000238.jpg
│ │ ├── 000239.jpg
│ │ └── Icon
│ ├── js
│ ├── bulma-carousel.js
│ ├── bulma-carousel.min.js
│ ├── bulma-slider.js
│ ├── bulma-slider.min.js
│ ├── fontawesome.all.min.js
│ ├── index.js
│ └── jquery.jslatex.js
│ └── videos
│ ├── ._Icon
│ ├── methodology.mp4
│ ├── overtaking.mp4
│ ├── parked.mp4
│ ├── spatial_1_cropped.mp4
│ ├── spatial_2_cropped.mp4
│ └── teaser_presentation_compressed.mp4
└── evaluation
├── chatgptinterface.py
├── eval_mcq.py
├── eval_spops.py
├── spatial_operators.py
└── talk2bev-bench
└── .gitkeep
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pt
2 | *.pth
3 | *.ckpt
4 | *.npy
5 | *.swp
6 | __pycache__
7 | __pycache__/*
8 | nuscenes/
9 | FastSAM/
10 | fastsam/__pycache__/
11 | llava/
12 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright 2023 Deyao Zhu
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
9 |
10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
11 |
12 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
--------------------------------------------------------------------------------
/PrepareVicuna.md:
--------------------------------------------------------------------------------
1 | ## How to Prepare Vicuna Weight
2 | Vicuna is an open-source LLAMA-based LLM with performance close to ChatGPT's.
3 | We currently use the v0 version of Vicuna-13B.
4 |
5 | To prepare Vicuna’s weight, first download Vicuna’s **delta** weight from [https://huggingface.co/lmsys/vicuna-13b-delta-v0](https://huggingface.co/lmsys/vicuna-13b-delta-v0).
6 | If you have git-lfs installed (https://git-lfs.com), this can be done by:
7 |
8 | ```
9 | git lfs install
10 | git clone https://huggingface.co/lmsys/vicuna-13b-delta-v0  # more powerful, needs at least 24 GB of GPU memory
11 | # or
12 | git clone https://huggingface.co/lmsys/vicuna-7b-delta-v0  # smaller, needs 12 GB of GPU memory
13 | ```
14 |
15 | Note that this is not directly the working weight, but the difference between the working weight and the original weight of LLAMA-13B. (Due to LLAMA’s rules, we cannot distribute the weight of LLAMA.)
16 |
17 | Then, you need to obtain the original LLAMA-7B or LLAMA-13B weights in the HuggingFace format,
18 | either by following the instructions provided by HuggingFace
19 | [here](https://huggingface.co/docs/transformers/main/model_doc/llama) or from the Internet.
20 |
21 | When these two weights are ready, we can use tools from Vicuna’s team to create the real working weight.
22 | First, install their library that is compatible with v0 Vicuna by
23 |
24 | ```
25 | pip install git+https://github.com/lm-sys/FastChat.git@v0.1.10
26 | ```
27 |
28 | Then, run the following command to create the final working weight:
29 |
30 | ```
31 | python -m fastchat.model.apply_delta --base /path/to/llama-13bOR7b-hf/ --target /path/to/save/working/vicuna/weight/ --delta /path/to/vicuna-13bOR7b-delta-v0/
32 | ```
33 |
34 | Now you are good to go!
35 |
36 |
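37 | As a quick sanity check (a minimal sketch, not part of the official steps), the merged weights should now load with HuggingFace transformers:
38 | 
39 | ```
40 | from transformers import AutoModelForCausalLM, AutoTokenizer
41 | 
42 | path = "/path/to/save/working/vicuna/weight/"  # the --target path used above
43 | tokenizer = AutoTokenizer.from_pretrained(path)
44 | model = AutoModelForCausalLM.from_pretrained(path)
45 | print(model.config.model_type)  # expect "llama"
46 | ```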
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Talk2BEV: Language-Enhanced Bird's Eye View Maps
2 |
3 | [**Project Page**](https://llmbev.github.io/talk2bev/) |
4 | [**ArXiv**](https://arxiv.org/abs/2310.02251) |
5 | [**Video**](https://www.youtube.com/watch?v=TvMeekCAfRs)
6 |
7 | [Vikrant Dewangan](https://vikr-182.github.io/)\* **1**,
8 | [Tushar Choudhary](https://tusharc31.github.io/)\* **1**,
9 | [Shivam Chandhok](https://scholar.google.com/citations?user=ZER2BeIAAAAJ&hl=en)\* **2**,
10 | [Shubham Priyadarshan](https://rudeninja.github.io/) **1**,
11 | [Anushka Jain](https://anushkaj1.github.io) **1**,
12 | [Arun K. Singh](https://scholar.google.co.in/citations?user=0zgDoIEAAAAJ&hl=en) **3**,
13 | [Siddharth Srivastava](https://siddharthsrivastava.github.io/) **4**,
14 | [Krishna Murthy Jatavallabhula](https://krrish94.github.io/) $^\dagger$ **5**,
15 | [K. Madhava Krishna](https://scholar.google.co.in/citations?user=QDuPGHwAAAAJ&hl=en) $^\dagger$ **1**
16 |
17 | **1** International Institute of Information Technology Hyderabad,
18 | **2** University of British Columbia,
19 | **3** University of Tartu
20 | **4** TensorTour Inc
21 | **5** MIT-CSAIL
22 |
23 | > *denotes equal contribution,
24 | $^\dagger$ denotes equal advising
25 |
26 | > [ICRA 2024](https://2024.ieee-icra.org/)
27 |
28 |
29 | https://github.com/llmbev/talk2bev/assets/44745884/251ffffd-2bd7-4838-895a-83939ec5b87f
30 |
31 | ## Abstract
32 |
33 | We introduce Talk2BEV, a large vision-language model (LVLM) interface for bird’s-eye view (BEV) maps commonly used in autonomous driving.
34 |
35 | While existing perception systems for autonomous driving scenarios have largely focused on a pre-defined (closed) set of object categories and driving scenarios, Talk2BEV eliminates the need for BEV-specific training, relying instead on performant pre-trained LVLMs. This enables a single system to cater to a variety of autonomous driving tasks encompassing visual and spatial reasoning, predicting the intents of traffic actors, and decision-making based on visual cues.
36 |
37 | We extensively evaluate Talk2BEV on a large number of scene understanding tasks that rely both on the ability to interpret free-form natural language queries and on grounding these queries to the visual context embedded into the language-enhanced BEV map. To enable further research in LVLMs for autonomous driving scenarios, we develop and release Talk2BEV-Bench, a benchmark encompassing 1000 human-annotated BEV scenarios, with more than 20,000 questions and ground-truth responses from the NuScenes dataset.
38 |
39 | ## Data Preparation
40 |
41 | Please download the [NuScenes v1.0-trainval](https://www.nuscenes.org/download) dataset. Our dataset consists of 2 parts: Talk2BEV-Base, containing the base data (crops, perspective images, BEV area centroids), and Talk2BEV-Captions, containing the crop captions.
42 |
43 | ### Download links
44 |
45 | Links to the 2 versions of the Talk2BEV dataset, _Talk2BEV-Mini_ (captions only) and _Talk2BEV-Full_, are provided below. The dataset is hosted on Google Drive. Please download the dataset and extract the files to the `data` folder.
46 |
47 | | Name | Base | Captions | Bench | Link |
48 | | --- | --- | --- | --- | --- |
49 | | Talk2BEV-_Mini_ | ✓ | ✗ | ✗ | [link](https://drive.google.com/file/d/1B5Uong8xYGRDkufR33T9sCNyNdRzPxc4/view?usp=sharing) |
50 | | Talk2BEV-_Full_ | ✗ | ✗ | ✗ | _TODO_ |
51 |
52 | If you want to generate the dataset from scratch, please follow the process [here](./data/scratch.md). The format for each of the data parts is described in [format](./data/format.md).
53 |
54 | ## Evaluation
55 |
56 | Evaluation on Talk2BEV happens via 2 methods: MCQs (from Talk2BEV-Bench) and Spatial Operators. We use GPT-4 for our evaluation. Please follow the instructions at [GPT-4](https://platform.openai.com/) to obtain an API key, and set the key and organization in your OS environment:
57 |
58 | ```bash
59 | export ORGANIZATION=
60 | export API_KEY=
61 | ```
62 |
63 | ### Evaluating - MCQs
64 |
65 | To obtain the accuracy for the MCQs, please run the following command:
66 |
67 | ```bash
68 | cd evaluation
69 | python eval_mcq.py
70 | ```
71 |
72 | This will yield the accuracy for the MCQs.
73 |
74 | ### Evaluating Spatial Operators
75 |
76 | To obtain the distance error and IoU for the spatial operators, please run the following command:
77 |
78 | ```bash
79 | cd evaluation
80 | python eval_spops.py
81 | ```
82 |
83 | ## Click2Chat
84 |
85 | We also allow free-form conversation with the BEV. Please follow the instructions in [Click2Chat](./click2chat/README.md) to chat with the BEV.
86 |
87 | ### Talk2BEV-Bench
88 |
89 | TO BE RELEASED
90 |
91 | ## 👉 TODO
92 |
93 | - [x] Spatial operators evaluation pipeline
94 | - [ ] Add links to BEV crops -- Release Talk2BEV-Full
95 | - [ ] Release Talk2BEV-Bench
96 |
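97 | For reference, a minimal sketch of how the GPT-4 credentials set in the Evaluation section can be consumed (assuming the pre-v1 `openai` Python SDK; the scripts under `evaluation/`, e.g. `chatgptinterface.py`, wrap the actual calls):
98 | 
99 | ```python
100 | import os
101 | import openai
102 | 
103 | # Read the credentials exported in the Evaluation section above.
104 | openai.organization = os.environ["ORGANIZATION"]
105 | openai.api_key = os.environ["API_KEY"]
106 | 
107 | response = openai.ChatCompletion.create(
108 |     model="gpt-4",
109 |     messages=[{"role": "user", "content": "Answer with the option letter only: ..."}],
110 | )
111 | print(response["choices"][0]["message"]["content"])
112 | ```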
--------------------------------------------------------------------------------
/click2chat/README.md:
--------------------------------------------------------------------------------
1 | # Talk2BEV Click2Chat Interface
2 |
3 | Code for:
4 |
5 | 1. Click2Chat interface
6 |
7 | 2. JSON generation
8 |
9 | ## Installation
10 | Please run the following commands.
11 | ### Setup Talk2BEV
12 |
13 | ```
14 | git clone https://github.com/llm-bev/talk2bev
15 | ```
16 |
17 | ## Usage (for Click2Chat Interface)
18 | If using LLaVA:
19 | ```
20 | python click2chat/click2chat_llava.py --sam-checkpoint --conv-mode --model-path --gpu-id
21 | ```
22 |
23 | If using MiniGPT-4:
24 | ```
25 | python click2chat/click2chat_minigpt4.py --sam-checkpoint --conv-mode --model-path --gpu-id
26 | ```
27 |
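28 | For example, with illustrative values (your local SAM checkpoint, conversation template, model path, and GPU index):
29 | 
30 | ```
31 | python click2chat/click2chat_llava.py \
32 |     --sam-checkpoint ./sam_vit_h_4b8939.pth \
33 |     --conv-mode llava_v1 \
34 |     --model-path ./llava-v1-0719-336px-lora-vicuna-13b-v1.3 \
35 |     --gpu-id 0
36 | ```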
--------------------------------------------------------------------------------
/data/calibration.json:
--------------------------------------------------------------------------------
1 | {
2 | "CAM_FRONT_LEFT":{
3 | "translation": [-1.57525595, -0.50051938, -1.50696033],
4 | "rotation": [[ 0.82254604, -0.56868433, -0.00401771], [ 0.00647832, 0.01643407, -0.99984396], [ 0.56866162, 0.82239167, 0.01720189]],
5 | "camera_intrinsic": [[1257.8625342125129, 0.0, 827.2410631095686], [0.0, 1257.8625342125129, 450.915498205774], [0.0, 0.0, 1.0]]
6 | },
7 | "CAM_FRONT":{
8 | "translation": [-1.72200568, -0.00475453, -1.49491292],
9 | "rotation": [[ 0.01026021, -0.99987258, -0.01222952], [ 0.00843345, 0.01231626, -0.99988859], [ 0.9999118 , 0.01015593, 0.00855874]],
10 | "camera_intrinsic": [[1252.8131021185304, 0.0, 826.588114781398], [0.0, 1252.8131021185304, 469.9846626224581], [0.0, 0.0, 1.0]]
11 | },
12 | "CAM_FRONT_RIGHT":{
13 | "translation": [-1.58082566, 0.49907871, -1.51749368],
14 | "rotation": [[-0.84397973, -0.53614138, -0.01583178], [ 0.01645551, 0.00362107, -0.99985804], [ 0.5361226 , -0.84412044, 0.00576637]],
15 | "camera_intrinsic": [[1256.7485116440405, 0.0, 817.7887570959712], [0.0, 1256.7485116440403, 451.9541780095127], [0.0, 0.0, 1.0]]
16 | },
17 | "CAM_BACK_LEFT":{
18 | "translation": [-1.035691 , -0.48479503, -1.59097015],
19 | "rotation": [[ 0.94776036, 0.31896113, 0.00375564], [ 0.00866572, -0.0139763 , -0.99986478], [-0.31886551, 0.94766474, -0.01601021]],
20 | "camera_intrinsic": [[1256.7414812095406, 0.0, 792.1125740759628], [0.0, 1256.7414812095406, 492.7757465151356], [0.0, 0.0, 1.0]]
21 | },
22 | "CAM_BACK":{
23 | "translation": [-0.02832603, -0.00345137, -1.57910346],
24 | "rotation": [[ 0.00242171, 0.99998907, -0.00400023], [-0.01675361, -0.00395911, -0.99985181], [-0.99985672, 0.00248837, 0.01674384]],
25 | "camera_intrinsic": [[809.2209905677063, 0.0, 829.2196003259838], [0.0, 809.2209905677063, 481.77842384512485], [0.0, 0.0, 1.0]]
26 | },
27 | "CAM_BACK_RIGHT":{
28 | "translation": [-1.0148781 , 0.48056822, -1.56239545],
29 | "rotation": [[-0.93477554, 0.35507456, -0.01080503], [ 0.01587584, 0.0113705 , -0.99980932], [-0.35488399, -0.93476883, -0.01626597]],
30 | "camera_intrinsic": [[1259.5137405846733, 0.0, 807.2529053838625], [0.0, 1259.5137405846733, 501.19579884916527], [0.0, 0.0, 1.0]]
31 | }
32 | }
--------------------------------------------------------------------------------
/data/format.md:
--------------------------------------------------------------------------------
1 | # Talk2BEV-Base Format
2 |
3 | `Talk2BEV` consists of 3 parts: `crops`, `cam_imgs`, and `scene`. The folder structure should look like this:
4 |
5 | ```bash
6 | - Talk2BEV/
7 | - TOKEN_NUM
8 | - cam_imgs/
9 | - 1_cimg.npy: perspective 1
10 | - 2_cimg.npy: perspective 2
11 | - ...
12 | - crops/
13 | - 1_matched_imgs.npy: object 1 crop
14 | - 2_matched_imgs.npy: object 2 crop
15 | - ...
16 | - scene/
17 | - answer_blip2_gt.json: ground truth scene object with captions using blip2
18 |         - answer_blip2_pred.json: predicted scene object with captions using blip2
19 | - answer_minigpt4_gt.json: ground truth scene object with captions using minigpt4
20 | - answer_minigpt4_pred.json: predicted scene object with captions using minigpt4
21 | - answer_instructblip2_gt.json: ground truth scene object with captions using instructblip2
22 |         - answer_instructblip2_pred.json: predicted scene object with captions using instructblip2
23 | - bev_gt.png: ground truth bev
24 | - bev_pred.png: predicted bev
25 | ```
26 | Here, `TOKEN` is the NuScenes scene token ID and `NUM` is the scene number.
27 | The folder `crops` contains the crop images of the objects. The folder `cam_imgs` contains the perspective images. The folder `scene` contains the ground truth and predicted scene objects. The files `bev_gt.png` and `bev_pred.png` are the ground truth and predicted BEV images. The BEV images are RGB images, with blue (0, 0, 255) as the background.
28 |
29 | ## Scene-Object
30 |
31 | This is how a scene is encoded within an object:
32 |
33 | ```python
34 | [
35 |   {
36 |     "object_id": 1, # ID of this object
37 |     "bev_centroid": [5, -5], # BEV centroid of this object
38 |     "bev_area": 10, # BEV area of this object in pixels
39 |     "matched_coords": [[-5, -5], [6, 6], ..., [12, 10]], # matched coordinates of this object
40 |     "matched_cam": "CAM_FRONT", # matched camera for this object
41 |     "matched_point": [800, 900], # matched point in the matched camera
42 |     "annotation": {...}, # nuscenes annotation for this object - containing token ID, category etc.
43 |     "MODEL_crop_lights_1": "...", # caption for this object from captioning model MODEL (e.g. minigpt4)
44 |   },
45 |   ...
46 | ]
47 | ```
48 | 
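49 | For example, a minimal sketch of reading one scene's predicted objects (path illustrative):
50 | 
51 | ```python
52 | import json
53 | 
54 | # TOKEN_NUM is the scene folder described above, i.e. Talk2BEV/<token>_<num>/
55 | with open("Talk2BEV/TOKEN_NUM/scene/answer_minigpt4_pred.json") as f:
56 |     scene = json.load(f)
57 | 
58 | for obj in scene:
59 |     print(obj["object_id"], obj["bev_centroid"], obj["matched_cam"])
60 | ```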
--------------------------------------------------------------------------------
/data/scratch.md:
--------------------------------------------------------------------------------
1 | # Talk2BEV-Dataset from Scratch
2 |
3 | ## Installation
4 |
5 | To generate captions, set up the baselines using the following commands:
6 |
7 | ### LLaVA
8 |
9 | ```bash
10 | git clone https://github.com/haotian-liu/LLaVA parent-folder
11 | mv parent-folder/llava ./
12 | rm -rf parent-folder
13 | ```
14 |
15 | Please download the preprocessed weights for [vicuna-13b](https://huggingface.co/liuhaotian/llava-v1-0719-336px-lora-vicuna-13b-v1.3)
16 |
17 | ### MiniGPT-4 (optional)
18 |
19 | ```bash
20 | git clone https://github.com/Vision-CAIR/MiniGPT-4 parent-folder
21 | mv parent-folder/minigpt4 ./
22 | rm -rf parent-folder
23 | ```
24 |
25 | Please download the preprocessed weights for Vicuna. After downloading the weights, change the following line in `minigpt4/configs/models/minigpt4.yaml`:
26 |
27 | ```bash
28 | 16: llama_model: "path-to-llama-preprocessed-weights"
29 | ```
30 |
31 | Please download the MiniGPT-4 weights [here](https://drive.google.com/file/d/1RY9jV0dyqLX-o38LrumkKRh6Jtaop58R/view) and change the path in `eval_configs/minigpt4_eval.yaml`:
32 |
33 | ```bash
34 | 11: ckpt: 'path-to-prerained_minigpt4_7b-weights'
35 | ```
36 |
37 | ### FastSAM
38 |
39 | ```bash
40 | git clone https://github.com/CASIA-IVA-Lab/FastSAM parent-folder
41 | mv parent-folder/FastSAM/fastsam ./
42 | rm -rf parent-folder
43 | ```
44 |
45 | Download the FastSAM weights from [here](https://drive.google.com/file/d/1m1sjY4ihXBU1fZXdQ-Xdj-mDltW-2Rqv/view).
46 |
47 | ### Install SAM (optional)
48 |
49 | ```bash
50 | pip3 install segment-anything
51 | ```
52 |
53 | Download the SAM weights from [here](https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth).
54 |
55 |
56 | ## Base
57 |
58 | To generate the base, please run the following commands:
59 |
60 | ```bash
61 | cd data
62 | python3 generate_base.py --data_path --save_path --bev pred/gt
63 | ```
64 |
65 | ## Captioning
66 |
67 | To generate the captions for each scene object, please run the following commands:
68 |
69 | ```bash
70 | python3 generate_captions.py --model --data_path --json_name pred/gt --start --end
71 | ```
72 |
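73 | For example, a full run over the first 100 scenes might look like this (paths and ranges illustrative):
74 | 
75 | ```bash
76 | cd data
77 | python3 generate_base.py --data_path ../nuscenes --save_path ./Talk2BEV --bev gt
78 | python3 generate_captions.py --model minigpt4 --data_path ./Talk2BEV --json_name gt --start 0 --end 100
79 | ```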
--------------------------------------------------------------------------------
/data/stp3/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from fvcore.common.config import CfgNode as _CfgNode
3 |
4 |
5 | def convert_to_dict(cfg_node, key_list=[]):
6 | """Convert a config node to dictionary."""
7 | _VALID_TYPES = {tuple, list, str, int, float, bool}
8 | if not isinstance(cfg_node, _CfgNode):
9 | if type(cfg_node) not in _VALID_TYPES:
10 | print(
11 | 'Key {} with value {} is not a valid type; valid types: {}'.format(
12 | '.'.join(key_list), type(cfg_node), _VALID_TYPES
13 | ),
14 | )
15 | return cfg_node
16 | else:
17 | cfg_dict = dict(cfg_node)
18 | for k, v in cfg_dict.items():
19 | cfg_dict[k] = convert_to_dict(v, key_list + [k])
20 | return cfg_dict
21 |
22 |
23 | class CfgNode(_CfgNode):
24 | """Remove once https://github.com/rbgirshick/yacs/issues/19 is merged."""
25 |
26 | def convert_to_dict(self):
27 | return convert_to_dict(self)
28 |
29 |
30 | CN = CfgNode
31 |
32 | _C = CN()
33 | _C.LOG_DIR = 'tensorboard_logs'
34 | _C.TAG = 'default'
35 |
36 | _C.GPUS = [0, 1, 2] # which gpus to use
37 | _C.PRECISION = 32 # 16bit or 32bit
38 | _C.BATCHSIZE = 1
39 | _C.EPOCHS = 20
40 | _C.DEBUG = True
41 | _C.DENOISE = False # to denoise the GT or not
42 | _C.LOGGER = 'wandb'
43 | _C.COMPARE_WITH_ORIG = True # to compare with original or not
44 |
45 | _C.N_WORKERS = 0
46 | _C.VIS_INTERVAL = 5000
47 | _C.LOGGING_INTERVAL = 500
48 |
49 | _C.PRETRAINED = CN()
50 | _C.PRETRAINED.LOAD_WEIGHTS = True
51 | _C.PRETRAINED.PATH = 'last.ckpt'
52 |
53 | _C.DATASET = CN()
54 | _C.DATASET.DATAROOT = '/mnt/e/datasets/nuscenes'
55 | _C.DATASET.VERSION = 'trainval'
56 | _C.DATASET.NAME = 'nuscenes'
57 | _C.DATASET.MAP_FOLDER = "/scratch/tushar.choudhary/forecasting/v1.0-trainval"
58 | _C.DATASET.IGNORE_INDEX = 255 # Ignore index when creating flow/offset labels
59 | _C.DATASET.FILTER_INVISIBLE_VEHICLES = True # Filter vehicles that are not visible from the cameras
60 | _C.DATASET.SAVE_DIR = 'datas'
61 | _C.DATASET.THRESHOLD = 25
62 |
63 | _C.TIME_RECEPTIVE_FIELD = 3 # how many frames of temporal context (1 for single timeframe)
64 | _C.N_FUTURE_FRAMES = 6 # how many time steps into the future to predict
65 |
66 | _C.IMAGE = CN()
67 | _C.IMAGE.FINAL_DIM = (224, 480)
68 | _C.IMAGE.RESIZE_SCALE = 0.3
69 | _C.IMAGE.TOP_CROP = 46
70 | _C.IMAGE.ORIGINAL_HEIGHT = 900 # Original input RGB camera height
71 | _C.IMAGE.ORIGINAL_WIDTH = 1600 # Original input RGB camera width
72 | _C.IMAGE.NAMES = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
73 |
74 | _C.LIFT = CN() # image to BEV lifting
75 | _C.LIFT.X_BOUND = [-50.0, 50.0, 0.5] # Forward
76 | _C.LIFT.Y_BOUND = [-50.0, 50.0, 0.5] # Sides
77 | _C.LIFT.Z_BOUND = [-10.0, 10.0, 20.0] # Height
78 | _C.LIFT.D_BOUND = [2.0, 50.0, 1.0]
79 | _C.LIFT.GT_DEPTH = False
80 | _C.LIFT.DISCOUNT = 0.5
81 |
82 | _C.EGO = CN()
83 | _C.EGO.WIDTH = 1.85
84 | _C.EGO.HEIGHT = 4.084
85 |
86 | _C.MODEL = CN()
87 |
88 | _C.MODEL.ENCODER = CN()
89 | _C.MODEL.ENCODER.DOWNSAMPLE = 8
90 | _C.MODEL.ENCODER.NAME = 'efficientnet-b4'
91 | _C.MODEL.ENCODER.OUT_CHANNELS = 64
92 | _C.MODEL.ENCODER.USE_DEPTH_DISTRIBUTION = True
93 |
94 | _C.MODEL.TEMPORAL_MODEL = CN()
95 | _C.MODEL.TEMPORAL_MODEL.NAME = 'temporal_block' # type of temporal model
96 | _C.MODEL.TEMPORAL_MODEL.START_OUT_CHANNELS = 64
97 | _C.MODEL.TEMPORAL_MODEL.EXTRA_IN_CHANNELS = 0
98 | _C.MODEL.TEMPORAL_MODEL.INBETWEEN_LAYERS = 0
99 | _C.MODEL.TEMPORAL_MODEL.PYRAMID_POOLING = True
100 | _C.MODEL.TEMPORAL_MODEL.INPUT_EGOPOSE = True
101 |
102 | _C.MODEL.DISTRIBUTION = CN()
103 | _C.MODEL.DISTRIBUTION.LATENT_DIM = 32
104 | _C.MODEL.DISTRIBUTION.MIN_LOG_SIGMA = -5.0
105 | _C.MODEL.DISTRIBUTION.MAX_LOG_SIGMA = 5.0
106 |
107 | _C.MODEL.FUTURE_PRED = CN()
108 | _C.MODEL.FUTURE_PRED.N_GRU_BLOCKS = 2
109 | _C.MODEL.FUTURE_PRED.N_RES_LAYERS = 1
110 | _C.MODEL.FUTURE_PRED.MIXTURE = True
111 |
112 | _C.MODEL.DECODER = CN()
113 |
114 | _C.MODEL.BN_MOMENTUM = 0.1
115 |
116 | _C.SEMANTIC_SEG = CN()
117 |
118 | _C.SEMANTIC_SEG.VEHICLE = CN()
119 | _C.SEMANTIC_SEG.VEHICLE.WEIGHTS = [1.0, 2.0]
120 | _C.SEMANTIC_SEG.VEHICLE.USE_TOP_K = True # backprop only top-k hardest pixels
121 | _C.SEMANTIC_SEG.VEHICLE.TOP_K_RATIO = 0.25
122 |
123 | _C.SEMANTIC_SEG.PEDESTRIAN = CN()
124 | _C.SEMANTIC_SEG.PEDESTRIAN.ENABLED = True
125 | _C.SEMANTIC_SEG.PEDESTRIAN.WEIGHTS = [1.0, 10.0]
126 | _C.SEMANTIC_SEG.PEDESTRIAN.USE_TOP_K = True
127 | _C.SEMANTIC_SEG.PEDESTRIAN.TOP_K_RATIO = 0.25
128 |
129 | _C.SEMANTIC_SEG.HDMAP = CN()
130 | _C.SEMANTIC_SEG.HDMAP.ENABLED = True
131 | _C.SEMANTIC_SEG.HDMAP.ELEMENTS = ['lane_divider', 'drivable_area']
132 | _C.SEMANTIC_SEG.HDMAP.WEIGHTS = [[1.0, 5.0], [1.0, 1.0]]
133 | _C.SEMANTIC_SEG.HDMAP.TRAIN_WEIGHT = [1, 1]
134 | _C.SEMANTIC_SEG.HDMAP.USE_TOP_K = [True, False]
135 | _C.SEMANTIC_SEG.HDMAP.TOP_K_RATIO = [0.25, 0.25]
136 |
137 | _C.INSTANCE_SEG = CN()
138 | _C.INSTANCE_SEG.ENABLED = False
139 |
140 | _C.INSTANCE_FLOW = CN()
141 | _C.INSTANCE_FLOW.ENABLED = False
142 |
143 | _C.PROBABILISTIC = CN()
144 | _C.PROBABILISTIC.ENABLED = True # learn a distribution over futures
145 | _C.PROBABILISTIC.METHOD = 'GAUSSIAN' # [BERNOULLI, GAUSSIAN, MIXGAUSSIAN]
146 |
147 | _C.PLANNING = CN()
148 | _C.PLANNING.ENABLED = True
149 | _C.PLANNING.GRU_ENABLED = False
150 | _C.PLANNING.GRU_STATE_SIZE = 256
151 | _C.PLANNING.SAMPLE_NUM = 150
152 | _C.PLANNING.COMMAND = ['LEFT', 'FORWARD', 'RIGHT']
153 |
154 | # dense configs
155 | _C.PLANNING.DENSE = CN()
156 | _C.PLANNING.DENSE.ENABLED = True # to consider dense centerlines
157 | _C.PLANNING.DENSE.OBS = False # avoid obstacles
158 | _C.PLANNING.DENSE.NUM_OBS = 5 # avoid obstacles
159 | _C.PLANNING.DENSE.PATH = '../' # centerlines path
160 |
161 | _C.FUTURE_DISCOUNT = 0.95
162 |
163 | _C.OPTIMIZER = CN()
164 | _C.OPTIMIZER.LR = 1e-3
165 | _C.OPTIMIZER.WEIGHT_DECAY = 1e-7
166 | _C.GRAD_NORM_CLIP = 5
167 |
168 | _C.COST_FUNCTION = CN()
169 | _C.COST_FUNCTION.SAFETY = 0.1
170 | _C.COST_FUNCTION.LAMBDA = 1.
171 | _C.COST_FUNCTION.HEADWAY = 1.
172 | _C.COST_FUNCTION.LRDIVIDER = 10.
173 | _C.COST_FUNCTION.COMFORT = 0.1
174 | _C.COST_FUNCTION.PROGRESS = 0.5
175 | _C.COST_FUNCTION.VOLUME = 100.
176 |
177 | def get_parser():
178 | parser = argparse.ArgumentParser(description='Fiery training')
179 | parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file')
180 | parser.add_argument(
181 | 'opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER,
182 | )
183 | return parser
184 |
185 |
186 | def get_cfg(args=None, cfg_dict=None):
187 | """ First get default config. Then merge cfg_dict. Then merge according to args. """
188 |
189 | cfg = _C.clone()
190 |
191 | if cfg_dict is not None:
192 | tmp = CfgNode(cfg_dict)
193 | for i in tmp.COST_FUNCTION:
194 | tmp.COST_FUNCTION.update({i: float(tmp.COST_FUNCTION.get(i))})
195 | cfg.merge_from_other_cfg(tmp)
196 |
197 | if args is not None:
198 | if args.config_file:
199 | cfg.merge_from_file(args.config_file)
200 | cfg.merge_from_list(args.opts)
201 | # cfg.freeze()
202 | return cfg
203 |
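204 | 
205 | # Example usage:
206 | #   args = get_parser().parse_args(['--config-file', 'stp3/configs/carla/Perception.yml'])
207 | #   cfg = get_cfg(args)
208 | #   print(cfg.TAG)  # -> 'CARLA_perception'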
--------------------------------------------------------------------------------
/data/stp3/configs/carla/Perception.yml:
--------------------------------------------------------------------------------
1 | TAG: 'CARLA_perception'
2 |
3 | GPUS: [0, 1, 2, 3]
4 |
5 | BATCHSIZE: 6
6 | PRECISION: 16
7 | EPOCHS: 20
8 |
9 | N_WORKERS: 8
10 |
11 | DATASET:
12 | NAME: 'carla'
13 |
14 | TIME_RECEPTIVE_FIELD: 3
15 | N_FUTURE_FRAMES: 0
16 |
17 |
18 | IMAGE:
19 | FINAL_DIM: (256, 256)
20 | ORIGINAL_HEIGHT: 300
21 | ORIGINAL_WIDTH: 400
22 |
23 | LIFT:
24 | X_BOUND: [-20.0, 20.0, 0.2]
25 | Y_BOUND: [-20.0, 20.0, 0.2]
26 | GT_DEPTH: False
27 |
28 | MODEL:
29 | ENCODER:
30 | NAME: 'efficientnet-b4'
31 | USE_DEPTH_DISTRIBUTION: True
32 | TEMPORAL_MODEL:
33 | NAME: 'temporal_block'
34 | INPUT_EGOPOSE: True
35 | BN_MOMENTUM: 0.05
36 |
37 | SEMANTIC_SEG:
38 | PEDESTRIAN:
39 | ENABLED: True
40 | HDMAP:
41 | ENABLED: True
42 |
43 | INSTANCE_SEG:
44 | ENABLED: False
45 |
46 | INSTANCE_FLOW:
47 | ENABLED: False
48 |
49 | PROBABILISTIC:
50 | ENABLED: False
51 |
52 | PLANNING:
53 | ENABLED: False
54 |
55 | OPTIMIZER:
56 | LR: 1e-3
57 |
58 |
59 |
--------------------------------------------------------------------------------
/data/stp3/configs/carla/Planning.yml:
--------------------------------------------------------------------------------
1 | TAG: 'CARLA_planning'
2 |
3 | GPUS: [0, 1, 2, 3]
4 |
5 | BATCHSIZE: 2
6 | PRECISION: 16
7 | EPOCHS: 20
8 |
9 | N_WORKERS: 8
10 |
11 | DATASET:
12 | NAME: 'carla'
13 |
14 | TIME_RECEPTIVE_FIELD: 3
15 | N_FUTURE_FRAMES: 4
16 |
17 | IMAGE:
18 | FINAL_DIM: (256, 256)
19 | ORIGINAL_HEIGHT: 300
20 | ORIGINAL_WIDTH: 400
21 |
22 | LIFT:
23 | X_BOUND: [-20.0, 20.0, 0.2]
24 | Y_BOUND: [-20.0, 20.0, 0.2]
25 | GT_DEPTH: False
26 |
27 | EGO:
28 | WIDTH: 2.12
29 | HEIGHT: 4.90
30 |
31 | MODEL:
32 | ENCODER:
33 | NAME: 'efficientnet-b4'
34 | USE_DEPTH_DISTRIBUTION: True
35 | TEMPORAL_MODEL:
36 | NAME: 'temporal_block'
37 | INPUT_EGOPOSE: True
38 | BN_MOMENTUM: 0.05
39 |
40 | SEMANTIC_SEG:
41 | PEDESTRIAN:
42 | ENABLED: True
43 | HDMAP:
44 | ENABLED: True
45 |
46 | INSTANCE_SEG:
47 | ENABLED: False
48 |
49 | INSTANCE_FLOW:
50 | ENABLED: False
51 |
52 | PROBABILISTIC:
53 | ENABLED: True
54 | METHOD: 'GAUSSIAN'
55 |
56 | PLANNING:
57 | ENABLED: True
58 | GRU_STATE_SIZE: 128
59 | SAMPLE_NUM: 2400
60 |
61 | FUTURE_DISCOUNT: 0.95
62 |
63 | OPTIMIZER:
64 | LR: 2e-4
65 |
66 | COST_FUNCTION:
67 | SAFETY: 1.
68 | HEADWAY: 1.
69 | LRDIVIDER: 10.
70 | COMFORT: 1.
71 | PROGRESS: 1.
72 | VOLUME: 100.
73 |
74 |
75 | PRETRAINED:
76 | LOAD_WEIGHTS: True
77 |
--------------------------------------------------------------------------------
/data/stp3/configs/carla/Prediction.yml:
--------------------------------------------------------------------------------
1 | TAG: 'CARLA_prediction'
2 |
3 | GPUS: [0, 1, 2, 3]
4 |
5 | BATCHSIZE: 2
6 | PRECISION: 16
7 | EPOCHS: 20
8 |
9 | N_WORKERS: 8
10 |
11 | DATASET:
12 | NAME: 'carla'
13 |
14 | TIME_RECEPTIVE_FIELD: 3
15 | N_FUTURE_FRAMES: 4
16 |
17 | IMAGE:
18 | FINAL_DIM: (256, 256)
19 | ORIGINAL_HEIGHT: 300
20 | ORIGINAL_WIDTH: 400
21 |
22 | LIFT:
23 | X_BOUND: [-20.0, 20.0, 0.2]
24 | Y_BOUND: [-20.0, 20.0, 0.2]
25 | GT_DEPTH: False
26 |
27 | MODEL:
28 | ENCODER:
29 | NAME: 'efficientnet-b4'
30 | USE_DEPTH_DISTRIBUTION: True
31 | TEMPORAL_MODEL:
32 | NAME: 'temporal_block'
33 | INPUT_EGOPOSE: True
34 | BN_MOMENTUM: 0.05
35 |
36 | SEMANTIC_SEG:
37 | PEDESTRIAN:
38 | ENABLED: True
39 | HDMAP:
40 | ENABLED: True
41 |
42 | INSTANCE_SEG:
43 | ENABLED: False
44 |
45 | INSTANCE_FLOW:
46 | ENABLED: False
47 |
48 | PROBABILISTIC:
49 | ENABLED: True
50 | METHOD: 'GAUSSIAN'
51 |
52 | PLANNING:
53 | ENABLED: False
54 |
55 | FUTURE_DISCOUNT: 0.95
56 |
57 | OPTIMIZER:
58 | LR: 2e-4
59 |
60 |
61 | PRETRAINED:
62 | LOAD_WEIGHTS: True
63 |
--------------------------------------------------------------------------------
/data/stp3/datas/dataloaders.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.utils.data
3 | from nuscenes.nuscenes import NuScenes
4 | from stp3.datas.trainNuscenesData import FuturePredictionDataset
5 | from stp3.datas.CarlaData import CarlaDataset
6 |
7 | def prepare_dataloaders(cfg, return_dataset=False):
8 | if cfg.DATASET.NAME == 'nuscenes':
9 | # 28130 train and 6019 val
10 | dataroot = cfg.DATASET.DATAROOT
11 | nusc = NuScenes(version='v1.0-{}'.format(cfg.DATASET.VERSION), dataroot=dataroot, verbose=False)
12 | traindata = FuturePredictionDataset(nusc, 0, cfg)
13 | valdata = FuturePredictionDataset(nusc, 1, cfg)
14 |
15 | if cfg.DATASET.VERSION == 'mini':
16 | pass
17 | # traindata.indices = traindata.indices[:10]
18 | # valdata.indices = valdata.indices[:10]
19 |
20 | nworkers = cfg.N_WORKERS
21 |         trainloader = torch.utils.data.DataLoader(
22 |             traindata, batch_size=cfg.BATCHSIZE, shuffle=False, num_workers=nworkers, pin_memory=True, drop_last=True
23 |         )
24 |         valloader = torch.utils.data.DataLoader(
25 |             valdata, batch_size=cfg.BATCHSIZE, shuffle=True, num_workers=nworkers, pin_memory=True, drop_last=False
26 |         )
27 | elif cfg.DATASET.NAME == 'carla':
28 | dataroot = cfg.DATASET.DATAROOT
29 | traindata = CarlaDataset(dataroot, True, cfg)
30 | valdata = CarlaDataset(dataroot, False, cfg)
31 | nworkers = cfg.N_WORKERS
32 | trainloader = torch.utils.data.DataLoader(
33 | traindata, batch_size=cfg.BATCHSIZE, shuffle=True, num_workers=nworkers, pin_memory=True, drop_last=True
34 | )
35 | valloader = torch.utils.data.DataLoader(
36 | valdata, batch_size=cfg.BATCHSIZE, shuffle=False, num_workers=nworkers, pin_memory=True, drop_last=False)
37 | else:
38 | raise NotImplementedError
39 |
40 | if return_dataset:
41 | return trainloader, valloader, traindata, valdata
42 | else:
43 | return trainloader, valloader
44 |
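45 | 
46 | # Example usage:
47 | #   from stp3.config import get_cfg
48 | #   trainloader, valloader = prepare_dataloaders(get_cfg())
49 | #   trainloader, valloader, traindata, valdata = prepare_dataloaders(get_cfg(), return_dataset=True)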
--------------------------------------------------------------------------------
/data/stp3/losses.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | DEBUG = False
5 |
6 | class SpatialRegressionLoss(nn.Module):
7 | def __init__(self, norm, ignore_index=255, future_discount=1.0):
8 | super(SpatialRegressionLoss, self).__init__()
9 | self.norm = norm
10 | self.ignore_index = ignore_index
11 | self.future_discount = future_discount
12 |
13 | if norm == 1:
14 | self.loss_fn = F.l1_loss
15 | elif norm == 2:
16 | self.loss_fn = F.mse_loss
17 | else:
18 | raise ValueError(f'Expected norm 1 or 2, but got norm={norm}')
19 |
20 | def forward(self, prediction, target, n_present=3):
21 | assert len(prediction.shape) == 5, 'Must be a 5D tensor'
22 | # ignore_index is the same across all channels
23 | mask = target[:, :, :1] != self.ignore_index
24 | if mask.sum() == 0:
25 | return prediction.new_zeros(1)[0].float()
26 | loss = self.loss_fn(prediction, target, reduction='none')
27 | # Sum channel dimension
28 | loss = torch.sum(loss, dim=-3, keepdim=True)
29 | seq_len = loss.shape[1]
30 | assert seq_len >= n_present
31 | future_len = seq_len - n_present
32 | future_discounts = self.future_discount ** torch.arange(1, future_len+1, device=loss.device, dtype=loss.dtype)
33 | discounts = torch.cat([torch.ones(n_present, device=loss.device, dtype=loss.dtype), future_discounts], dim=0)
34 | discounts = discounts.view(1, seq_len, 1, 1, 1)
35 | loss = loss * discounts
36 |
37 | return loss[mask].mean()
38 |
39 | class SegmentationLoss(nn.Module):
40 | def __init__(self, class_weights, ignore_index=255, use_top_k=False, top_k_ratio=1.0, future_discount=1.0):
41 | super().__init__()
42 | self.class_weights = class_weights
43 | self.ignore_index = ignore_index
44 | self.use_top_k = use_top_k
45 | self.top_k_ratio = top_k_ratio
46 | self.future_discount = future_discount
47 |
48 | def forward(self, prediction, target, n_present=3):
49 | if target.shape[-3] != 1:
50 | raise ValueError('segmentation label must be an index-label with channel dimension = 1.')
51 | b, s, c, h, w = prediction.shape
52 | prediction = prediction.view(b * s, c, h, w)
53 | target = target.view(b * s, h, w)
54 | loss = F.cross_entropy(
55 | prediction,
56 | target,
57 | ignore_index=self.ignore_index,
58 | reduction='none',
59 | weight=self.class_weights.to(target.device),
60 | )
61 |
62 | loss = loss.view(b, s, h, w)
63 |
64 | assert s >= n_present
65 | future_len = s - n_present
66 | future_discounts = self.future_discount ** torch.arange(1, future_len+1, device=loss.device, dtype=loss.dtype)
67 | discounts = torch.cat([torch.ones(n_present, device=loss.device, dtype=loss.dtype), future_discounts], dim=0)
68 | discounts = discounts.view(1, s, 1, 1)
69 | loss = loss * discounts
70 |
71 | loss = loss.view(b, s, -1)
72 | if self.use_top_k:
73 | # Penalises the top-k hardest pixels
74 |
75 | k = int(self.top_k_ratio * loss.shape[2])
76 |
77 | loss, _ = torch.sort(loss, dim=2, descending=True)
78 |
79 | loss = loss[:, :, :k]
80 |
81 |
82 | return torch.mean(loss)
83 |
84 | class HDmapLoss(nn.Module):
85 | def __init__(self, class_weights, training_weights, use_top_k, top_k_ratio, ignore_index=255):
86 | super(HDmapLoss, self).__init__()
87 | self.class_weights = class_weights
88 | self.training_weights = training_weights
89 | self.ignore_index = ignore_index
90 | self.use_top_k = use_top_k
91 | self.top_k_ratio = top_k_ratio
92 |
93 | def forward(self, prediction, target):
94 | loss = 0
95 | for i in range(target.shape[-3]):
96 | cur_target = target[:, 0, i]
97 | b, h, w = cur_target.shape
98 | cur_prediction = prediction[:, 2*i:2*(i+1)]
99 | cur_loss = F.cross_entropy(
100 | cur_prediction,
101 | cur_target,
102 | ignore_index=self.ignore_index,
103 | reduction='none',
104 | weight=self.class_weights[i].to(target.device),
105 | )
106 | cur_loss = cur_loss.view(b, -1)
107 | if self.use_top_k[i]:
108 | k = int(self.top_k_ratio[i] * cur_loss.shape[1])
109 | cur_loss, _ = torch.sort(cur_loss, dim=1, descending=True)
110 | cur_loss = cur_loss[:, :k]
111 | loss += torch.mean(cur_loss) * self.training_weights[i]
112 | return loss
113 |
114 | class DepthLoss(nn.Module):
115 | def __init__(self, class_weights=None, ignore_index=255):
116 | super(DepthLoss, self).__init__()
117 | self.class_weights = class_weights
118 | self.ignore_index = ignore_index
119 |
120 | def forward(self, prediction, target):
121 | b, s, n, d, h, w = prediction.shape
122 |
123 | prediction = prediction.view(b*s*n, d, h, w)
124 | target = target.view(b*s*n, h, w)
125 | loss = F.cross_entropy(
126 | prediction,
127 | target,
128 | ignore_index=self.ignore_index,
129 | reduction='none',
130 | weight=self.class_weights
131 | )
132 | return torch.mean(loss)
133 |
134 |
135 | class ProbabilisticLoss(nn.Module):
136 | def __init__(self, method):
137 | super(ProbabilisticLoss, self).__init__()
138 | self.method = method
139 |
140 | def kl_div(self, present_mu, present_log_sigma, future_mu, future_log_sigma):
141 | var_future = torch.exp(2 * future_log_sigma)
142 | var_present = torch.exp(2 * present_log_sigma)
143 | kl_div = (
144 | present_log_sigma - future_log_sigma - 0.5 + (var_future + (future_mu - present_mu) ** 2) / (
145 | 2 * var_present)
146 | )
147 |
148 | kl_loss = torch.mean(torch.sum(kl_div, dim=-1))
149 | return kl_loss
150 |
151 | def forward(self, output):
152 | if self.method == 'GAUSSIAN':
153 |
154 | present_mu = output['present_mu']
155 |
156 | present_log_sigma = output['present_log_sigma']
157 |
158 | future_mu = output['future_mu']
159 |
160 | future_log_sigma = output['future_log_sigma']
161 |
162 |
163 | kl_loss = self.kl_div(present_mu, present_log_sigma, future_mu, future_log_sigma)
164 |
165 | elif self.method == 'MIXGAUSSIAN':
166 | present_mu = output['present_mu']
167 | present_log_sigma = output['present_log_sigma']
168 | future_mu = output['future_mu']
169 | future_log_sigma = output['future_log_sigma']
170 |
171 | kl_loss = 0
172 | for i in range(len(present_mu)):
173 | kl_loss += self.kl_div(present_mu[i], present_log_sigma[i], future_mu[i], future_log_sigma[i])
174 | elif self.method == 'BERNOULLI':
175 | present_log_prob = output['present_log_prob']
176 | future_log_prob = output['future_log_prob']
177 |
178 | kl_loss = F.kl_div(present_log_prob, future_log_prob, reduction='batchmean', log_target=True)
179 | else:
180 | raise NotImplementedError
181 |
182 |
183 | return kl_loss
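184 | 
185 | # Example usage (weights/ratios mirror the defaults in stp3/config.py):
186 | #   criterion = SegmentationLoss(class_weights=torch.tensor([1.0, 2.0]),
187 | #                                use_top_k=True, top_k_ratio=0.25, future_discount=0.95)
188 | #   loss = criterion(prediction, target, n_present=3)
189 | # where prediction holds logits of shape (b, s, 2, h, w) and target index labels of shape (b, s, 1, h, w).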
--------------------------------------------------------------------------------
/data/stp3/models/decoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torchvision.models.resnet import resnet18
4 |
5 | from stp3.layers.convolutions import UpsamplingAdd, DeepLabHead
6 |
7 |
8 | class Decoder(nn.Module):
9 | def __init__(self, in_channels, n_classes, n_present, n_hdmap, predict_gate):
10 | super().__init__()
11 | self.perceive_hdmap = predict_gate['perceive_hdmap']
12 | self.predict_pedestrian = predict_gate['predict_pedestrian']
13 | self.predict_instance = predict_gate['predict_instance']
14 | self.predict_future_flow = predict_gate['predict_future_flow']
15 | self.planning = predict_gate['planning']
16 |
17 | self.n_classes = n_classes
18 | self.n_present = n_present
19 | if self.predict_instance is False and self.predict_future_flow is True:
20 | raise ValueError('flow cannot be True when not predicting instance')
21 |
22 | backbone = resnet18(pretrained=False, zero_init_residual=True)
23 |
24 | self.first_conv = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
25 | self.bn1 = backbone.bn1
26 | self.relu = backbone.relu
27 |
28 | self.layer1 = backbone.layer1
29 | self.layer2 = backbone.layer2
30 | self.layer3 = backbone.layer3
31 |
32 | shared_out_channels = in_channels
33 | self.up3_skip = UpsamplingAdd(256, 128, scale_factor=2)
34 | self.up2_skip = UpsamplingAdd(128, 64, scale_factor=2)
35 | self.up1_skip = UpsamplingAdd(64, shared_out_channels, scale_factor=2)
36 |
37 | self.segmentation_head = nn.Sequential(
38 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
39 | nn.BatchNorm2d(shared_out_channels),
40 | nn.ReLU(inplace=True),
41 | nn.Conv2d(shared_out_channels, self.n_classes, kernel_size=1, padding=0),
42 | )
43 |
44 | if self.predict_pedestrian:
45 | self.pedestrian_head = nn.Sequential(
46 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
47 | nn.BatchNorm2d(shared_out_channels),
48 | nn.ReLU(inplace=True),
49 | nn.Conv2d(shared_out_channels, self.n_classes, kernel_size=1, padding=0),
50 | )
51 |
52 | if self.perceive_hdmap:
53 | self.hdmap_head = nn.Sequential(
54 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
55 | nn.BatchNorm2d(shared_out_channels),
56 | nn.ReLU(inplace=True),
57 | nn.Conv2d(shared_out_channels, 2 * n_hdmap, kernel_size=1, padding=0),
58 | )
59 |
60 | if self.predict_instance:
61 | self.instance_offset_head = nn.Sequential(
62 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
63 | nn.BatchNorm2d(shared_out_channels),
64 | nn.ReLU(inplace=True),
65 | nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
66 | )
67 | self.instance_center_head = nn.Sequential(
68 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
69 | nn.BatchNorm2d(shared_out_channels),
70 | nn.ReLU(inplace=True),
71 | nn.Conv2d(shared_out_channels, 1, kernel_size=1, padding=0),
72 | nn.Sigmoid(),
73 | )
74 |
75 | if self.predict_future_flow:
76 | self.instance_future_head = nn.Sequential(
77 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
78 | nn.BatchNorm2d(shared_out_channels),
79 | nn.ReLU(inplace=True),
80 | nn.Conv2d(shared_out_channels, 2, kernel_size=1, padding=0),
81 | )
82 |
83 | if self.planning:
84 | self.costvolume_head = nn.Sequential(
85 | nn.Conv2d(shared_out_channels, shared_out_channels, kernel_size=3, padding=1, bias=False),
86 | nn.BatchNorm2d(shared_out_channels),
87 | nn.ReLU(inplace=True),
88 | nn.Conv2d(shared_out_channels, 1, kernel_size=1, padding=0),
89 | )
90 |
91 | def forward(self, x):
92 | b, s, c, h, w = x.shape
93 | x = x.view(b * s, c, h, w)
94 | # (H, W)
95 | skip_x = {'1': x}
96 |
97 | # (H/2, W/2)
98 | x = self.first_conv(x)
99 | x = self.bn1(x)
100 | x = self.relu(x)
101 | x = self.layer1(x)
102 | skip_x['2'] = x
103 |
104 | # (H/4 , W/4)
105 | x = self.layer2(x)
106 | skip_x['3'] = x
107 |
108 | # (H/8, W/8)
109 | x = self.layer3(x) # (b*s, 256, 25, 25)
110 |
111 | # First upsample to (H/4, W/4)
112 | x = self.up3_skip(x, skip_x['3'])
113 |
114 | # Second upsample to (H/2, W/2)
115 | x = self.up2_skip(x, skip_x['2'])
116 |
117 | # Third upsample to (H, W)
118 | x = self.up1_skip(x, skip_x['1'])
119 |
120 | segmentation_output = self.segmentation_head(x)
121 | pedestrian_output = self.pedestrian_head(x) if self.predict_pedestrian else None
122 | hdmap_output = self.hdmap_head(x.view(b, s, *x.shape[1:])[:,self.n_present-1]) if self.perceive_hdmap else None
123 | instance_center_output = self.instance_center_head(x) if self.predict_instance else None
124 | instance_offset_output = self.instance_offset_head(x) if self.predict_instance else None
125 | instance_future_output = self.instance_future_head(x) if self.predict_future_flow else None
126 | costvolume = self.costvolume_head(x).squeeze(1) if self.planning else None
127 | return {
128 | 'segmentation': segmentation_output.view(b, s, *segmentation_output.shape[1:]),
129 | 'pedestrian': pedestrian_output.view(b, s, *pedestrian_output.shape[1:])
130 | if pedestrian_output is not None else None,
131 | 'hdmap' : hdmap_output,
132 | 'instance_center': instance_center_output.view(b, s, *instance_center_output.shape[1:])
133 | if instance_center_output is not None else None,
134 | 'instance_offset': instance_offset_output.view(b, s, *instance_offset_output.shape[1:])
135 | if instance_offset_output is not None else None,
136 | 'instance_flow': instance_future_output.view(b, s, *instance_future_output.shape[1:])
137 | if instance_future_output is not None else None,
138 | 'costvolume': costvolume.view(b, s, *costvolume.shape[1:])
139 | if costvolume is not None else None,
140 | }
141 |
--------------------------------------------------------------------------------
/data/stp3/models/distributions.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from stp3.layers.convolutions import Bottleneck
5 |
6 |
7 | class DistributionModule(nn.Module):
8 | """
9 | A convolutional net that parametrises a diagonal Gaussian distribution.
10 | """
11 |
12 | def __init__(
13 | self, in_channels, latent_dim, method="GAUSSIAN"):
14 | super().__init__()
15 | self.compress_dim = in_channels // 2
16 | self.latent_dim = latent_dim
17 | self.method = method
18 |
19 | if method == 'GAUSSIAN':
20 | self.encoder = DistributionEncoder(in_channels, self.compress_dim)
21 | self.decoder = nn.Sequential(
22 | nn.AdaptiveAvgPool2d(1), nn.Conv2d(self.compress_dim, out_channels=2 * self.latent_dim, kernel_size=1)
23 | )
24 | elif method == 'MIXGAUSSIAN':
25 | self.encoder = DistributionEncoder(in_channels, self.compress_dim)
26 | self.decoder = nn.Sequential(
27 | nn.AdaptiveAvgPool2d(1), nn.Conv2d(self.compress_dim, out_channels=6 * self.latent_dim + 3, kernel_size=1)
28 | )
29 | elif method == 'BERNOULLI':
30 | self.encoder = nn.Sequential(
31 | Bottleneck(in_channels, self.latent_dim)
32 | )
33 | self.decoder = nn.LogSigmoid()
34 | else:
35 | raise NotImplementedError
36 |
37 | def forward(self, s_t):
38 | b, s = s_t.shape[:2]
39 | assert s == 1
40 | encoding = self.encoder(s_t[:, 0])
41 |
42 | if self.method == 'GAUSSIAN':
43 | decoder = self.decoder(encoding).view(b, 1, 2 * self.latent_dim)
44 | elif self.method == 'MIXGAUSSIAN':
45 | decoder = self.decoder(encoding).view(b, 1, 6 * self.latent_dim + 3)
46 | elif self.method == 'BERNOULLI':
47 | decoder = self.decoder(encoding)
48 | else:
49 | raise NotImplementedError
50 |
51 | return decoder
52 |
53 |
54 | class DistributionEncoder(nn.Module):
55 | """Encodes s_t or (s_t, y_{t+1}, ..., y_{t+H}).
56 | """
57 | def __init__(self, in_channels, out_channels):
58 | super().__init__()
59 |
60 | self.model = nn.Sequential(
61 | Bottleneck(in_channels, out_channels=out_channels, downsample=True),
62 | Bottleneck(out_channels, out_channels=out_channels, downsample=True),
63 | Bottleneck(out_channels, out_channels=out_channels, downsample=True),
64 | Bottleneck(out_channels, out_channels=out_channels, downsample=True),
65 | )
66 |
67 | def forward(self, s_t):
68 | return self.model(s_t)
69 |
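70 | 
71 | # Shape note: with method='GAUSSIAN' and latent_dim L, DistributionModule maps s_t of shape
72 | # (b, 1, c, h, w) to a (b, 1, 2*L) tensor holding the concatenated mu and log_sigma of the
73 | # diagonal Gaussian.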
--------------------------------------------------------------------------------
/data/stp3/models/encoder.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import numpy as np
3 | from efficientnet_pytorch import EfficientNet
4 |
5 | from stp3.layers.convolutions import UpsamplingConcat, DeepLabHead
6 |
7 |
8 |
9 | class Encoder(nn.Module):
10 | def __init__(self, cfg, D):
11 | super().__init__()
12 | self.D = D
13 | self.C = cfg.OUT_CHANNELS
14 | self.use_depth_distribution = cfg.USE_DEPTH_DISTRIBUTION
15 | self.downsample = cfg.DOWNSAMPLE
16 | self.version = cfg.NAME.split('-')[1]
17 |
18 | self.backbone = EfficientNet.from_pretrained(cfg.NAME)
19 | self.delete_unused_layers()
20 | if self.version == 'b4':
21 | self.reduction_channel = [0, 24, 32, 56, 160, 448]
22 | elif self.version == 'b0':
23 | self.reduction_channel = [0, 16, 24, 40, 112, 320]
24 | else:
25 | raise NotImplementedError
26 | self.upsampling_out_channel = [0, 48, 64, 128, 512]
27 |
28 | # index = np.log2(self.downsample).astype(np.int)
29 | index = np.log2(self.downsample).astype(int)
30 |
31 |
32 | if self.use_depth_distribution:
33 | self.depth_layer_1 = DeepLabHead(self.reduction_channel[index+1], self.reduction_channel[index+1], hidden_channel=64)
34 | self.depth_layer_2 = UpsamplingConcat(self.reduction_channel[index+1] + self.reduction_channel[index], self.D)
35 |
36 | self.feature_layer_1 = DeepLabHead(self.reduction_channel[index+1], self.reduction_channel[index+1], hidden_channel=64)
37 | self.feature_layer_2 = UpsamplingConcat(self.reduction_channel[index+1] + self.reduction_channel[index], self.C)
38 |
39 |
40 |
41 | def delete_unused_layers(self):
42 | indices_to_delete = []
43 | for idx in range(len(self.backbone._blocks)):
44 | if self.downsample == 8:
45 | if self.version == 'b0' and idx > 10:
46 | indices_to_delete.append(idx)
47 | if self.version == 'b4' and idx > 21:
48 | indices_to_delete.append(idx)
49 |
50 | for idx in reversed(indices_to_delete):
51 | del self.backbone._blocks[idx]
52 |
53 | del self.backbone._conv_head
54 | del self.backbone._bn1
55 | del self.backbone._avg_pooling
56 | del self.backbone._dropout
57 | del self.backbone._fc
58 |
59 | def get_features_depth(self, x):
60 | # Adapted from https://github.com/lukemelas/EfficientNet-PyTorch/blob/master/efficientnet_pytorch/model.py#L231
61 | endpoints = dict()
62 |
63 | # Stem
64 | x = self.backbone._swish(self.backbone._bn0(self.backbone._conv_stem(x)))
65 | prev_x = x
66 |
67 | # Blocks
68 | for idx, block in enumerate(self.backbone._blocks):
69 | drop_connect_rate = self.backbone._global_params.drop_connect_rate
70 | if drop_connect_rate:
71 | drop_connect_rate *= float(idx) / len(self.backbone._blocks)
72 | x = block(x, drop_connect_rate=drop_connect_rate)
73 | if prev_x.size(2) > x.size(2):
74 | endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
75 | prev_x = x
76 |
77 | if self.downsample == 8:
78 | if self.version == 'b0' and idx == 10:
79 | break
80 | if self.version == 'b4' and idx == 21:
81 | break
82 |
83 | # Head
84 | endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
85 |
86 |         index = np.log2(self.downsample).astype(int)
87 | input_1 = endpoints['reduction_{}'.format(index + 1)]
88 | input_2 = endpoints['reduction_{}'.format(index)]
89 |
90 | feature = self.feature_layer_1(input_1)
91 | feature = self.feature_layer_2(feature, input_2)
92 |
93 | if self.use_depth_distribution:
94 | depth = self.depth_layer_1(input_1)
95 | depth = self.depth_layer_2(depth, input_2)
96 | else:
97 | depth = None
98 |
99 | return feature, depth
100 |
101 | def forward(self, x):
102 | feature, depth = self.get_features_depth(x) # get feature vector
103 |
104 | # if self.use_depth_distribution:
105 | # depth_prob = depth.softmax(dim=1)
106 | # feature = depth_prob.unsqueeze(1) * feature.unsqueeze(2) # outer product depth and features
107 | # else:
108 | # feature = feature.unsqueeze(2).repeat(1, 1, self.D, 1, 1)
109 |
110 | return feature, depth
111 |
--------------------------------------------------------------------------------
/data/stp3/models/future_prediction.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from stp3.layers.convolutions import Bottleneck, Block, DeepLabHead
5 | from stp3.layers.temporal import SpatialGRU, Dual_GRU, BiGRU
6 |
7 | class FuturePrediction(nn.Module):
8 | def __init__(self, in_channels, latent_dim, n_future, mixture=True, n_gru_blocks=2, n_res_layers=1):
9 | super(FuturePrediction, self).__init__()
10 | self.n_spatial_gru = n_gru_blocks
11 |
12 | gru_in_channels = latent_dim
13 | self.dual_grus = Dual_GRU(gru_in_channels, in_channels, n_future=n_future, mixture=mixture)
14 | self.res_blocks1 = nn.Sequential(*[Block(in_channels) for _ in range(n_res_layers)])
15 |
16 | self.spatial_grus = []
17 | self.res_blocks = []
18 | for i in range(self.n_spatial_gru):
19 | self.spatial_grus.append(SpatialGRU(in_channels, in_channels))
20 | if i < self.n_spatial_gru - 1:
21 | self.res_blocks.append(nn.Sequential(*[Block(in_channels) for _ in range(n_res_layers)]))
22 | else:
23 | self.res_blocks.append(DeepLabHead(in_channels, in_channels, 128))
24 |
25 | self.spatial_grus = torch.nn.ModuleList(self.spatial_grus)
26 | self.res_blocks = torch.nn.ModuleList(self.res_blocks)
27 |
28 |
29 | def forward(self, x, state):
30 | # x has shape (b, 1, c, h, w), state: torch.Tensor [b, n_present, hidden_size, h, w]
31 | x = self.dual_grus(x, state)
32 |
33 | b, n_future, c, h, w = x.shape
34 | x = self.res_blocks1(x.view(b * n_future, c, h, w))
35 | x = x.view(b, n_future, c, h, w)
36 |
37 | x = torch.cat([state, x], dim=1)
38 |
39 | hidden_state = x[:, 0]
40 | for i in range(self.n_spatial_gru):
41 | x = self.spatial_grus[i](x, hidden_state)
42 |
43 | b, s, c, h, w = x.shape
44 | x = self.res_blocks[i](x.view(b*s, c, h, w))
45 | x = x.view(b, s, c, h, w)
46 |
47 | return x
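A hedged usage sketch of `FuturePrediction`'s shape contract, assuming the `stp3` package from this repo is importable; sizes are arbitrary, and the expected output shape follows from the `torch.cat` with `state` in `forward()` above.

```python
import torch
from stp3.models.future_prediction import FuturePrediction

b, c, h, w, n_present, n_future = 1, 64, 50, 50, 3, 4
model = FuturePrediction(in_channels=c, latent_dim=c, n_future=n_future)

x = torch.randn(b, 1, c, h, w)              # sampled latent
state = torch.randn(b, n_present, c, h, w)  # present BEV states
out = model(x, state)
print(out.shape)  # expected: (b, n_present + n_future, c, h, w)
```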
--------------------------------------------------------------------------------
/data/stp3/models/planning_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | import numpy as np
5 |
6 | from stp3.layers.convolutions import Bottleneck
7 | from stp3.layers.temporal import SpatialGRU, Dual_GRU, BiGRU
8 | from stp3.cost import Cost_Function
9 |
10 | class Planning(nn.Module):
11 | def __init__(self, cfg, feature_channel, gru_input_size=6, gru_state_size=256):
12 | super(Planning, self).__init__()
13 | self.cost_function = Cost_Function(cfg)
14 |
15 | self.sample_num = cfg.PLANNING.SAMPLE_NUM
16 | self.commands = cfg.PLANNING.COMMAND
17 | assert self.sample_num % 3 == 0
18 | self.num = int(self.sample_num / 3)
19 |
20 | self.reduce_channel = nn.Sequential(
21 | Bottleneck(feature_channel, feature_channel, downsample=True),
22 | Bottleneck(feature_channel, int(feature_channel/2), downsample=True),
23 | Bottleneck(int(feature_channel/2), int(feature_channel/2), downsample=True),
24 | Bottleneck(int(feature_channel/2), int(feature_channel/8))
25 | )
26 |
27 | self.GRU = nn.GRUCell(gru_input_size, gru_state_size)
28 | self.decoder = nn.Sequential(
29 | nn.Linear(gru_state_size, gru_state_size),
30 | nn.ReLU(inplace=True),
31 | nn.Linear(gru_state_size, 2)
32 | )
33 |
34 |
35 | def compute_L2(self, trajs, gt_traj):
36 | '''
37 | trajs: torch.Tensor (B, N, n_future, 3)
38 | gt_traj: torch.Tensor (B,1, n_future, 3)
39 | '''
40 | if trajs.ndim == 4 and gt_traj.ndim == 4:
41 | return ((trajs[:,:,:,:2] - gt_traj[:,:,:,:2]) ** 2).sum(dim=-1)
42 | if trajs.ndim == 3 and gt_traj.ndim == 3:
43 | return ((trajs[:, :, :2] - gt_traj[:, :, :2]) ** 2).sum(dim=-1)
44 |
45 | raise ValueError('trajs ndim != gt_traj ndim')
46 |
47 | def select(self, trajs, cost_volume, semantic_pred, lane_divider, drivable_area, target_points, k=1):
48 | '''
49 | trajs: torch.Tensor (B, N, n_future, 3)
50 | cost_volume: torch.Tensor (B, n_future, 200, 200)
51 | semantic_pred: torch.Tensor(B, n_future, 200, 200)
52 | lane_divider: torch.Tensor(B, 1/2, 200, 200)
53 | drivable_area: torch.Tensor(B, 1/2, 200, 200)
54 | target_points: torch.Tensor (B, 2)
55 | '''
56 | sm_cost_fc, sm_cost_fo = self.cost_function(cost_volume, trajs[:,:,:,:2], semantic_pred, lane_divider, drivable_area, target_points)
57 |
58 | CS = sm_cost_fc + sm_cost_fo.sum(dim=-1)
59 | CC, KK = torch.topk(CS, k, dim=-1, largest=False)
60 |
61 | ii = torch.arange(len(trajs))
62 | select_traj = trajs[ii[:,None], KK].squeeze(1) # (B, n_future, 3)
63 |
64 | return select_traj, CS
65 |
66 | def loss(self, trajs, gt_trajs, cost_volume, semantic_pred, lane_divider, drivable_area, target_points):
67 | '''
68 | trajs: torch.Tensor (B, N, n_future, 3)
69 | gt_trajs: torch.Tensor (B, n_future, 3)
70 | cost_volume: torch.Tensor (B, n_future, 200, 200)
71 | semantic_pred: torch.Tensor(B, n_future, 200, 200)
72 | lane_divider: torch.Tensor(B, 1/2, 200, 200)
73 | drivable_area: torch.Tensor(B, 1/2, 200, 200)
74 | target_points: torch.Tensor (B, 2)
75 | '''
76 | sm_cost_fc, sm_cost_fo = self.cost_function(cost_volume, trajs[:, :, :, :2], semantic_pred, lane_divider, drivable_area, target_points)
77 | print(sm_cost_fc.shape, sm_cost_fo.shape, "sm_costs")
78 |
79 | if gt_trajs.ndim == 3:
80 | gt_trajs = gt_trajs[:, None]
81 |
82 | gt_cost_fc, gt_cost_fo = self.cost_function(cost_volume, gt_trajs[:, :, :, :2], semantic_pred, lane_divider, drivable_area, target_points)
83 |
84 | L, _ = F.relu(
85 | F.relu(gt_cost_fo - sm_cost_fo).sum(-1) + (gt_cost_fc - sm_cost_fc) + self.compute_L2(trajs, gt_trajs).mean(
86 | dim=-1)).max(dim=-1)
87 |
88 | return torch.mean(L), gt_cost_fc, gt_cost_fo, sm_cost_fc, sm_cost_fo
89 |
90 | def forward(self,cam_front, trajs, gt_trajs, cost_volume, semantic_pred, hd_map, commands, target_points):
91 | '''
92 | cam_front: torch.Tensor (B, 64, 60, 28)
93 | trajs: torch.Tensor (B, N, n_future, 3)
94 | gt_trajs: torch.Tensor (B, n_future, 3)
95 | cost_volume: torch.Tensor (B, n_future, 200, 200)
96 | semantic_pred: torch.Tensor(B, n_future, 200, 200)
97 | hd_map: torch.Tensor (B, 2/4, 200, 200)
98 | commands: List (B)
99 | target_points: (B, 2)
100 | '''
101 |
102 | cur_trajs = []
103 | for i in range(len(commands)):
104 | command = commands[i]
105 | traj = trajs[i]
106 | if command == 'LEFT':
107 | cur_trajs.append(traj[:self.num].repeat(3, 1, 1))
108 | elif command == 'FORWARD':
109 | cur_trajs.append(traj[self.num:self.num * 2].repeat(3, 1, 1))
110 | elif command == 'RIGHT':
111 | cur_trajs.append(traj[self.num * 2:].repeat(3, 1, 1))
112 | else:
113 | cur_trajs.append(traj)
114 | cur_trajs = torch.stack(cur_trajs)
115 |         print(cur_trajs.shape, "cur_trajs", trajs.shape, "trajs")
116 |         cur_trajs = trajs  # NOTE: overrides the command-conditioned selection built above; remove to re-enable it
117 |
118 |
119 | if hd_map.shape[1] == 2:
120 | lane_divider = hd_map[:, 0:1]
121 | drivable_area = hd_map[:, 1:2]
122 | elif hd_map.shape[1] == 4:
123 | lane_divider = hd_map[:, 0:2]
124 | drivable_area = hd_map[:, 2:4]
125 | else:
126 | raise NotImplementedError
127 |
128 |         loss, gt_cost_fc, gt_cost_fo, sm_cost_fc, sm_cost_fo = self.loss(cur_trajs, gt_trajs, cost_volume, semantic_pred, lane_divider, drivable_area, target_points)
129 |         if not self.training:
130 |             # keep the cost diagnostics but do not backpropagate a planning loss at eval time
131 |             loss = 0
132 |
133 |
134 |
135 | cam_front = self.reduce_channel(cam_front)
136 | h0 = cam_front.flatten(start_dim=1) # (B, 256/128)
137 | final_traj, CS = self.select(cur_trajs, cost_volume, semantic_pred, lane_divider, drivable_area, target_points) # (B, n_future, 3)
138 | target_points = target_points.to(dtype=h0.dtype)
139 | b, s, _ = final_traj.shape
140 | x = torch.zeros((b, 2), device=h0.device)
141 | output_traj = []
142 | for i in range(s):
143 | x = torch.cat([x, final_traj[:,i,:2], target_points], dim=-1) # (B, 6)
144 | h0 = self.GRU(x, h0)
145 | x = self.decoder(h0) # (B, 2)
146 | output_traj.append(x)
147 | output_traj = torch.stack(output_traj, dim=1) # (B, 4, 2)
148 |
149 | output_traj = torch.cat(
150 | [output_traj, torch.zeros((*output_traj.shape[:-1],1), device=output_traj.device)], dim=-1
151 | )
152 |
153 | if self.training:
154 |             loss = loss*0.5 + (F.smooth_l1_loss(output_traj[:,:,:2], gt_trajs[:,:,:2], reduction='none')*torch.tensor([10., 1.], device=loss.device)).mean()  # 'final' was undefined; the refined output_traj is what is supervised here
155 |
156 |
157 | return loss, output_traj, CS, cur_trajs, gt_cost_fc, gt_cost_fo, final_traj, sm_cost_fc, sm_cost_fo
158 |
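The GRU refinement at the end of `forward()` is easy to miss; below is a self-contained restatement of just that step in plain `torch`, with hypothetical sizes (in the module itself, the initial hidden state is the flattened `cam_front` feature and `final_traj` comes from `select()`).

```python
import torch
import torch.nn as nn

B, n_future, state_size = 2, 4, 256
gru = nn.GRUCell(6, state_size)            # 6 = prev offset (2) + waypoint (2) + target (2)
decoder = nn.Sequential(nn.Linear(state_size, state_size), nn.ReLU(inplace=True),
                        nn.Linear(state_size, 2))

h = torch.randn(B, state_size)             # stands in for the flattened cam_front feature
final_traj = torch.randn(B, n_future, 3)   # stands in for the trajectory from select()
target_points = torch.randn(B, 2)

x = torch.zeros(B, 2)
outputs = []
for i in range(n_future):
    inp = torch.cat([x, final_traj[:, i, :2], target_points], dim=-1)  # (B, 6)
    h = gru(inp, h)
    x = decoder(h)                         # (B, 2): refined waypoint
    outputs.append(x)
output_traj = torch.stack(outputs, dim=1)  # (B, n_future, 2)
```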
--------------------------------------------------------------------------------
/data/stp3/models/temporal_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from stp3.layers.temporal import Bottleneck3D, TemporalBlock
5 | from stp3.layers.convolutions import ConvBlock, Bottleneck, DeepLabHead
6 |
7 | class TemporalModel(nn.Module):
8 | def __init__(
9 | self, in_channels, receptive_field, input_shape, start_out_channels=64, extra_in_channels=0,
10 | n_spatial_layers_between_temporal_layers=0, use_pyramid_pooling=True):
11 | super().__init__()
12 | self.receptive_field = receptive_field
13 | n_temporal_layers = receptive_field - 1
14 |
15 | h, w = input_shape
16 | modules = []
17 |
18 | block_in_channels = in_channels
19 | block_out_channels = start_out_channels
20 |
21 | for _ in range(n_temporal_layers):
22 |             # pool over pairs of frames at full spatial resolution when pyramid pooling is enabled
23 |             if use_pyramid_pooling:
24 |                 pool_sizes = [(2, h, w)]
25 |             else:
26 |                 pool_sizes = None
27 |
28 | temporal = TemporalBlock(
29 | block_in_channels,
30 | block_out_channels,
31 | use_pyramid_pooling=use_pyramid_pooling,
32 | pool_sizes=pool_sizes,
33 | )
34 | spatial = [
35 | Bottleneck3D(block_out_channels, block_out_channels, kernel_size=(1, 3, 3))
36 | for _ in range(n_spatial_layers_between_temporal_layers)
37 | ]
38 | temporal_spatial_layers = nn.Sequential(temporal, *spatial)
39 | modules.extend(temporal_spatial_layers)
40 |
41 | block_in_channels = block_out_channels
42 | block_out_channels += extra_in_channels
43 |
44 | self.out_channels = block_in_channels
45 |
46 | self.final_conv = DeepLabHead(block_out_channels, block_out_channels, hidden_channel=128)
47 |
48 | self.model = nn.Sequential(*modules)
49 |
50 | def forward(self, x):
51 | # Reshape input tensor to (batch, C, time, H, W)
52 | x = x.permute(0, 2, 1, 3, 4)
53 | x = self.model(x)
54 | x = x.permute(0, 2, 1, 3, 4).contiguous()
55 |
56 | b, s, c, h, w = x.shape
57 | x = x.view(b * s, c, h, w)
58 | x = self.final_conv(x)
59 | x = x.view(b, s, c, h, w)
60 | return x
61 |
62 |
63 | class TemporalModelIdentity(nn.Module):
64 | def __init__(self, in_channels, receptive_field):
65 | super().__init__()
66 | self.receptive_field = receptive_field
67 | self.out_channels = in_channels
68 |
69 | def forward(self, x):
70 | return x
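A hedged usage sketch of the `(batch, time, C, H, W)` contract, assuming the `stp3` package is importable and the default channel settings (with `extra_in_channels=0`, `out_channels` equals `start_out_channels`, so the final head's channel count matches):

```python
import torch
from stp3.models.temporal_model import TemporalModel

b, t, c, h, w = 1, 3, 64, 50, 50
model = TemporalModel(in_channels=c, receptive_field=t, input_shape=(h, w))
x = torch.randn(b, t, c, h, w)   # (batch, time, C, H, W)
y = model(x)
print(y.shape)                   # (batch, time', out_channels, H, W)
```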
--------------------------------------------------------------------------------
/data/stp3/utils/Optnode_naive.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import warnings
4 | warnings.filterwarnings('ignore')
5 |
6 | import torch
7 | import numpy as np
8 | import scipy.special
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 | import matplotlib.pyplot as plt
12 |
13 | from scipy.linalg import block_diag
14 | from torch.utils.data import Dataset, DataLoader
15 |
30 | # from bernstein import bernstein_coeff_order10_new
31 |
32 | def bernstein_coeff_order10_new(n, tmin, tmax, t_actual):
33 | l = tmax - tmin
34 | t = (t_actual - tmin) / l
35 |
36 | P0 = scipy.special.binom(n, 0) * ((1 - t) ** (n - 0)) * t ** 0
37 | P1 = scipy.special.binom(n, 1) * ((1 - t) ** (n - 1)) * t ** 1
38 | P2 = scipy.special.binom(n, 2) * ((1 - t) ** (n - 2)) * t ** 2
39 | P3 = scipy.special.binom(n, 3) * ((1 - t) ** (n - 3)) * t ** 3
40 | P4 = scipy.special.binom(n, 4) * ((1 - t) ** (n - 4)) * t ** 4
41 | P5 = scipy.special.binom(n, 5) * ((1 - t) ** (n - 5)) * t ** 5
42 | P6 = scipy.special.binom(n, 6) * ((1 - t) ** (n - 6)) * t ** 6
43 | P7 = scipy.special.binom(n, 7) * ((1 - t) ** (n - 7)) * t ** 7
44 | P8 = scipy.special.binom(n, 8) * ((1 - t) ** (n - 8)) * t ** 8
45 | P9 = scipy.special.binom(n, 9) * ((1 - t) ** (n - 9)) * t ** 9
46 | P10 = scipy.special.binom(n, 10) * ((1 - t) ** (n - 10)) * t ** 10
47 |
48 | P0dot = -10.0 * (-t + 1) ** 9
49 | P1dot = -90.0 * t * (-t + 1) ** 8 + 10.0 * (-t + 1) ** 9
50 | P2dot = -360.0 * t ** 2 * (-t + 1) ** 7 + 90.0 * t * (-t + 1) ** 8
51 | P3dot = -840.0 * t ** 3 * (-t + 1) ** 6 + 360.0 * t ** 2 * (-t + 1) ** 7
52 | P4dot = -1260.0 * t ** 4 * (-t + 1) ** 5 + 840.0 * t ** 3 * (-t + 1) ** 6
53 | P5dot = -1260.0 * t ** 5 * (-t + 1) ** 4 + 1260.0 * t ** 4 * (-t + 1) ** 5
54 | P6dot = -840.0 * t ** 6 * (-t + 1) ** 3 + 1260.0 * t ** 5 * (-t + 1) ** 4
55 | P7dot = -360.0 * t ** 7 * (-t + 1) ** 2 + 840.0 * t ** 6 * (-t + 1) ** 3
56 | P8dot = 45.0 * t ** 8 * (2 * t - 2) + 360.0 * t ** 7 * (-t + 1) ** 2
57 | P9dot = -10.0 * t ** 9 + 9 * t ** 8 * (-10.0 * t + 10.0)
58 | P10dot = 10.0 * t ** 9
59 |
60 | P0ddot = 90.0 * (-t + 1) ** 8
61 | P1ddot = 720.0 * t * (-t + 1) ** 7 - 180.0 * (-t + 1) ** 8
62 | P2ddot = 2520.0 * t ** 2 * (-t + 1) ** 6 - 1440.0 * t * (-t + 1) ** 7 + 90.0 * (-t + 1) ** 8
63 | P3ddot = 5040.0 * t ** 3 * (-t + 1) ** 5 - 5040.0 * t ** 2 * (-t + 1) ** 6 + 720.0 * t * (-t + 1) ** 7
64 | P4ddot = 6300.0 * t ** 4 * (-t + 1) ** 4 - 10080.0 * t ** 3 * (-t + 1) ** 5 + 2520.0 * t ** 2 * (-t + 1) ** 6
65 | P5ddot = 5040.0 * t ** 5 * (-t + 1) ** 3 - 12600.0 * t ** 4 * (-t + 1) ** 4 + 5040.0 * t ** 3 * (-t + 1) ** 5
66 | P6ddot = 2520.0 * t ** 6 * (-t + 1) ** 2 - 10080.0 * t ** 5 * (-t + 1) ** 3 + 6300.0 * t ** 4 * (-t + 1) ** 4
67 | P7ddot = -360.0 * t ** 7 * (2 * t - 2) - 5040.0 * t ** 6 * (-t + 1) ** 2 + 5040.0 * t ** 5 * (-t + 1) ** 3
68 | P8ddot = 90.0 * t ** 8 + 720.0 * t ** 7 * (2 * t - 2) + 2520.0 * t ** 6 * (-t + 1) ** 2
69 | P9ddot = -180.0 * t ** 8 + 72 * t ** 7 * (-10.0 * t + 10.0)
70 | P10ddot = 90.0 * t ** 8
72 |
73 | P = np.hstack((P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10))
74 | Pdot = np.hstack((P0dot, P1dot, P2dot, P3dot, P4dot, P5dot, P6dot, P7dot, P8dot, P9dot, P10dot)) / l
75 | Pddot = np.hstack((P0ddot, P1ddot, P2ddot, P3ddot, P4ddot, P5ddot, P6ddot, P7ddot, P8ddot, P9ddot, P10ddot)) / (l ** 2)
76 | return P, Pdot, Pddot
77 |
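A quick usage sketch for the basis function above (with `bernstein_coeff_order10_new` in scope): evaluate the degree-10 Bernstein basis and its first two derivatives on a uniformly sampled horizon, as the optimizers below do.

```python
import numpy as np

num, t_fin = 30, 2.0
tot_time = np.linspace(0.0, t_fin, num).reshape(num, 1)
P, Pdot, Pddot = bernstein_coeff_order10_new(10, tot_time[0], tot_time[-1], tot_time)
print(P.shape, Pdot.shape, Pddot.shape)   # (30, 11) each: num samples x 11 basis functions
```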
78 |
79 | # ---------------------------------------------------------------------------
80 | # NEW OPTIMIZER
81 | # ---------------------------------------------------------------------------
87 |
88 | class OPTNode_batched():
89 | def __init__(self, P, Pddot, A_eq, A_obs, Q_smoothness, x_obs, y_obs, num=12, num_obs=4, nvar=11, a_obs=1.0, b_obs=1.0, rho_obs=0.3, rho_eq=10.0, weight_smoothness=10, maxiter=300, eps=1e-7, num_tot=48, batch_size=30):
90 | super().__init__()
91 | self.device = 'cpu'
92 | device = self.device
93 | self.P = torch.tensor(P, dtype=torch.double).to(device).expand(batch_size, -1, -1)
94 | self.Pddot = torch.tensor(Pddot, dtype=torch.double).to(device).expand(batch_size, -1, -1)
95 | self.A_eq = torch.tensor(A_eq, dtype=torch.double).to(device).expand(batch_size, -1, -1)
96 | self.A_obs = torch.tensor(A_obs, dtype=torch.double).to(device).expand(batch_size, -1, -1)
97 | self.Q_smoothness = torch.tensor(Q_smoothness, dtype=torch.double).to(device).expand(batch_size, -1, -1)
98 | self.x_obs = torch.tensor(x_obs, dtype=torch.double).to(device).expand(batch_size, -1, -1)
99 | self.y_obs = torch.tensor(y_obs, dtype=torch.double).to(device).expand(batch_size, -1, -1)
100 |
101 |
102 | self.num = num
103 | self.num_obs = num_obs
104 | self.eps = eps
105 | self.nvar = nvar
106 | self.a_obs = a_obs
107 | self.b_obs = b_obs
108 | self.rho_eq = rho_eq
110 | self.maxiter = maxiter
111 | self.num_tot = num_tot
112 | self.rho_obs = rho_obs
113 | self.weight_smoothness = weight_smoothness
114 | self.batch_size = batch_size
115 |
116 | def optimize2(self, b, lamda_x, lamda_y):
117 | device = self.device
119 | bx_eq_tensor, by_eq_tensor = torch.split(b, 6, dim=0)
120 |
121 | d_obs = torch.ones(self.batch_size, self.num_obs, self.num, dtype=torch.double).to(device)
122 | alpha_obs = torch.zeros(self.batch_size, self.num_obs, self.num, dtype=torch.double).to(device)
123 | ones_tensor = torch.ones((self.batch_size, self.num_obs, self.num), dtype=torch.double).to(device)
124 |
128 | cost_smoothness = self.weight_smoothness * torch.bmm(self.Pddot.permute(0, 2, 1), self.Pddot)
129 | cost = cost_smoothness + self.rho_obs * torch.bmm(self.A_obs.permute(0, 2, 1), self.A_obs) + self.rho_eq * torch.bmm(self.A_eq.permute(0, 2, 1), self.A_eq)
131 | for i in range(self.maxiter):
132 | temp_x_obs = d_obs * torch.cos(alpha_obs) * self.a_obs
133 | temp_y_obs = d_obs * torch.sin(alpha_obs) * self.b_obs
134 |
136 |             b_obs_x = self.x_obs.reshape(self.batch_size, self.num * self.num_obs) + temp_x_obs.reshape(self.batch_size, self.num * self.num_obs)  # reshape: .view fails on the expanded (stride-0) x_obs
137 |             b_obs_y = self.y_obs.reshape(self.batch_size, self.num * self.num_obs) + temp_y_obs.reshape(self.batch_size, self.num * self.num_obs)
138 |
145 | lincost_x = -lamda_x - self.rho_obs * torch.bmm(self.A_obs.permute(0, 2, 1), b_obs_x.unsqueeze(2)) - self.rho_eq * torch.bmm(self.A_eq.permute(0, 2, 1), bx_eq_tensor.unsqueeze(2).permute(1, 0, 2))
146 | lincost_y = -lamda_y - self.rho_obs * torch.bmm(self.A_obs.permute(0, 2, 1), b_obs_y.unsqueeze(2)) - self.rho_eq * torch.bmm(self.A_eq.permute(0, 2, 1), by_eq_tensor.unsqueeze(2).permute(1, 0, 2))
147 |
153 |             # torch.linalg.inv is batched, so no per-sample Python loop is needed
154 |             # (cost is also loop-invariant and could be inverted once before the iterations)
155 |             cost_inv = torch.linalg.inv(cost)
156 |
157 | sol_x = torch.bmm(-cost_inv, lincost_x)
158 | sol_y = torch.bmm(-cost_inv, lincost_y)
159 |
171 | x = torch.bmm(self.P, sol_x)
172 | y = torch.bmm(self.P, sol_y)
174 |
177 | wc_alpha = x.permute(0, 2, 1) - self.x_obs
178 | ws_alpha = y.permute(0, 2, 1) - self.y_obs
179 | alpha_obs = torch.atan2(ws_alpha * self.a_obs, wc_alpha * self.b_obs)
181 |
182 | c1_d = self.rho_obs * (self.a_obs ** 2 * torch.cos(alpha_obs) ** 2 + self.b_obs ** 2 * torch.sin(alpha_obs) ** 2)
183 | c2_d = self.rho_obs * (self.a_obs * wc_alpha * torch.cos(alpha_obs) + self.b_obs * ws_alpha * torch.sin(alpha_obs))
184 | d_temp = c2_d / c1_d
185 | d_obs = torch.max(d_temp, ones_tensor)
187 |
189 | res_x_obs_vec = wc_alpha - self.a_obs * d_obs * torch.cos(alpha_obs)
190 | res_y_obs_vec = ws_alpha - self.b_obs * d_obs * torch.sin(alpha_obs)
192 |
194 | res_eq_x_vec = torch.bmm(self.A_eq, sol_x) - bx_eq_tensor.unsqueeze(2).permute(1, 0, 2)
195 | res_eq_y_vec = torch.bmm(self.A_eq, sol_y) - by_eq_tensor.unsqueeze(2).permute(1, 0, 2)
197 |
198 |
205 | lamda_x -= self.rho_obs * torch.bmm(self.A_obs.permute(0, 2, 1), res_x_obs_vec.view(self.batch_size, -1).unsqueeze(2)) + self.rho_eq * torch.bmm(self.A_eq.permute(0, 2, 1), res_eq_x_vec)
206 | lamda_y -= self.rho_obs * torch.bmm(self.A_obs.permute(0, 2, 1), res_y_obs_vec.view(self.batch_size, -1).unsqueeze(2)) + self.rho_eq * torch.bmm(self.A_eq.permute(0, 2, 1), res_eq_y_vec)
208 |
209 | sol = torch.cat([sol_x, sol_y], dim=1)
210 | return sol
211 |
212 |
213 | def solve(self, b, lamda_x, lamda_y):
214 | device = self.device
215 | batch_size, _ = b.size()
216 | b = b.transpose(0, 1)
217 | lamda_x = lamda_x.unsqueeze(2)
218 | lamda_y = lamda_y.unsqueeze(2)
219 | sol = self.optimize2(b, lamda_x, lamda_y)
220 | return sol.squeeze(), None
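A hedged construction sketch for `OPTNode_batched.solve()`. The matrices here are placeholders of the right sizes only (in actual use `A_eq` stacks boundary rows of the basis and `A_obs` tiles `P` per obstacle, which the tiling below imitates); it shows the expected shapes of `b`, the multipliers, and the returned spline coefficients, given the `reshape` fix in `optimize2` above.

```python
import numpy as np
import torch

num, nvar, num_obs, batch = 12, 11, 4, 30
P = np.random.randn(num, nvar)
Pddot = np.random.randn(num, nvar)
A_eq = np.random.randn(6, nvar)                  # 6 equality rows per axis (placeholder)
A_obs = np.tile(P, (num_obs, 1))                 # (num_obs * num, nvar)
Q = Pddot.T @ Pddot
x_obs = np.zeros((num_obs, num))
y_obs = np.zeros((num_obs, num))

node = OPTNode_batched(P, Pddot, A_eq, A_obs, Q, x_obs, y_obs, batch_size=batch)
b = torch.randn(batch, 12, dtype=torch.double)   # [bx_eq (6) | by_eq (6)] per sample
lamda_x = torch.zeros(batch, nvar, dtype=torch.double)
lamda_y = torch.zeros(batch, nvar, dtype=torch.double)
sol, _ = node.solve(b, lamda_x, lamda_y)         # (batch, 2 * nvar) spline coefficients
```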
--------------------------------------------------------------------------------
/data/stp3/utils/Optnode_waypoint.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import warnings
4 | warnings.filterwarnings('ignore')
5 |
6 | import torch
7 | import numpy as np
8 | import scipy.special
9 | import torch.nn as nn
10 | import torch.nn.functional as F
11 | import matplotlib.pyplot as plt
12 |
13 | from scipy.linalg import block_diag
14 | from torch.utils.data import Dataset, DataLoader
15 |
30 | # from bernstein import bernstein_coeff_order10_new
31 |
32 | class OPTNode_waypoint():
33 | def __init__(self, rho_eq=1.0, rho_goal=1.0, rho_nonhol=1.0, rho_psi=1.0, maxiter=5000, weight_smoothness=1.0, weight_smoothness_psi=1.0, t_fin=2.0, num=30, bernstein_coeff_order10_new=None, device="cpu"):
34 | super().__init__()
35 | self.rho_eq = rho_eq
36 | self.rho_goal = rho_goal
37 | self.rho_nonhol = rho_nonhol
38 | self.rho_psi = rho_psi
39 | self.maxiter = maxiter
40 | self.weight_smoothness = weight_smoothness
41 | self.weight_smoothness_psi = weight_smoothness_psi
42 |
43 | self.device = device
44 |
45 | self.t_fin = t_fin
46 | self.num = num
47 | self.t = self.t_fin / self.num
48 |
49 | #self.num_batch = 10
50 |
51 | tot_time = np.linspace(0.0, self.t_fin, self.num)
52 | tot_time_copy = tot_time.reshape(self.num, 1)
53 | self.P, self.Pdot, self.Pddot = bernstein_coeff_order10_new(10, tot_time_copy[0], tot_time_copy[-1], tot_time_copy)
54 | self.nvar = np.shape(self.P)[1]
55 |
56 | self.cost_smoothness = self.weight_smoothness * np.dot(self.Pddot.T, self.Pddot)
57 | self.cost_smoothness_psi = self.weight_smoothness_psi * np.dot(self.Pddot.T, self.Pddot)
58 | self.lincost_smoothness_psi = np.zeros(self.nvar)
59 |
60 | # self.A_eq = np.vstack((self.P[0], self.P[9], self.P[19], self.P[-1]))
61 | # self.A_eq_psi = np.vstack((self.P[0], self.Pdot[0], self.P[-1]))
62 | self.A_eq = np.vstack((self.P[0], self.P[5], self.P[10], self.P[15], self.P[20], self.P[25], self.P[30]))
64 | self.A_eq_psi = np.vstack((self.P[0], self.Pdot[0], self.P[-1]))
65 |
66 |
67 | self.P = torch.tensor(self.P, dtype=torch.double).to(device)
68 | self.Pdot = torch.tensor(self.Pdot, dtype=torch.double).to(device)
69 | self.Pddot = torch.tensor(self.Pddot, dtype=torch.double).to(device)
70 | self.A_eq = torch.tensor(self.A_eq, dtype=torch.double).to(device)
71 | self.A_eq_psi = torch.tensor(self.A_eq_psi, dtype=torch.double).to(device)
72 | self.cost_smoothness = torch.tensor(self.cost_smoothness, dtype=torch.double).to(device)
73 | self.cost_smoothness_psi = torch.tensor(self.cost_smoothness_psi, dtype=torch.double).to(device)
74 | self.lincost_smoothness_psi = torch.tensor(self.lincost_smoothness_psi, dtype=torch.double).to(device)
75 |
76 | self.A_nonhol = self.Pdot
77 | self.A_psi = self.P
78 |
79 | self.lamda_x = None
80 | self.lamda_y = None
81 | self.lamda_psi = None
82 |
83 | def compute_x(self, v, psi, b_eq_x, b_eq_y):
84 | b_nonhol_x = v * torch.cos(psi)
85 | b_nonhol_y = v * torch.sin(psi)
86 |
87 | cost = self.cost_smoothness + self.rho_nonhol * torch.matmul(self.A_nonhol.T, self.A_nonhol) + self.rho_eq * torch.matmul(self.A_eq.T, self.A_eq)
88 | lincost_x = -self.lamda_x - self.rho_nonhol * torch.matmul(self.A_nonhol.T, b_nonhol_x.T).T - self.rho_eq * torch.matmul(self.A_eq.T, b_eq_x.T).T
89 | lincost_y = -self.lamda_y - self.rho_nonhol * torch.matmul(self.A_nonhol.T, b_nonhol_y.T).T - self.rho_eq * torch.matmul(self.A_eq.T, b_eq_y.T).T
90 |
91 | cost_inv = torch.linalg.inv(cost)
92 |
93 | sol_x = torch.matmul(-cost_inv, lincost_x.T).T
94 | sol_y = torch.matmul(-cost_inv, lincost_y.T).T
95 |
96 | x = torch.matmul(self.P, sol_x.T).T
97 | xdot = torch.matmul(self.Pdot, sol_x.T).T
98 |
99 | y = torch.matmul(self.P, sol_y.T).T
100 | ydot = torch.matmul(self.Pdot, sol_y.T).T
101 |
102 | return sol_x, sol_y, x, xdot, y, ydot
103 |
104 | def compute_psi(self, psi, lamda_psi, psi_temp, b_eq_psi):
105 | cost = self.cost_smoothness_psi + self.rho_psi * torch.matmul(self.A_psi.T, self.A_psi) + self.rho_eq * torch.matmul(self.A_eq_psi.T, self.A_eq_psi)
106 | lincost_psi = -self.lamda_psi - self.rho_psi * torch.matmul(self.A_psi.T, psi_temp.T).T - self.rho_eq * torch.matmul(self.A_eq_psi.T, b_eq_psi.T).T
107 |
108 | cost_inv = torch.linalg.inv(cost)
109 |
110 | sol_psi = torch.matmul(-cost_inv, lincost_psi.T).T
111 |
112 | psi = torch.matmul(self.P, sol_psi.T).T
113 |
114 | res_psi = torch.matmul(self.A_psi, sol_psi.T).T - psi_temp
115 | res_eq_psi = torch.matmul(self.A_eq_psi, sol_psi.T).T - b_eq_psi
116 |
117 | self.lamda_psi = self.lamda_psi - self.rho_psi * torch.matmul(self.A_psi.T, res_psi.T).T - self.rho_eq * torch.matmul(self.A_eq_psi.T, res_eq_psi.T).T
118 |
119 | return sol_psi, np.linalg.norm(res_psi), np.linalg.norm(res_eq_psi), psi
120 |
121 |
122 | def solve(self, fixed_params, variable_params):
123 | batch_size, _ = fixed_params.size()
124 | x_init, y_init, v_init, psi_init, psidot_init = torch.chunk(fixed_params, 5, dim=1)
125 | x_fin, y_fin, psi_fin, x_mid1, y_mid1, x_mid2, y_mid2, x_mid3, y_mid3, x_mid4, y_mid4, x_mid5, y_mid5 = torch.chunk(variable_params, 13, dim=1)
126 |
127 | b_eq_x = torch.cat((x_init, x_mid1, x_mid2, x_mid3, x_mid4, x_mid5, x_fin), dim=1)
128 | b_eq_y = torch.cat((y_init, y_mid1, y_mid2, y_mid3, y_mid4, y_mid5, y_fin), dim=1)
129 |
130 | b_eq_psi = torch.cat((psi_init, psidot_init, psi_fin), dim=1)
131 |
132 | v = torch.ones(batch_size, self.num, dtype=torch.double).to(self.device) * v_init
133 | psi = torch.ones(batch_size, self.num, dtype=torch.double).to(self.device) * psi_init
134 | xdot = v * torch.cos(psi)
135 | ydot = v * torch.sin(psi)
136 |
137 | self.lamda_x = torch.zeros(batch_size, self.nvar, dtype=torch.double).to(self.device)
138 | self.lamda_y = torch.zeros(batch_size, self.nvar, dtype=torch.double).to(self.device)
139 | self.lamda_psi = torch.zeros(batch_size, self.nvar, dtype=torch.double).to(self.device)
140 |
141 | res_psi_arr = []
142 | res_eq_psi_arr = []
143 | res_eq_arr = []
144 | res_nonhol_arr = []
145 | for i in range(0, self.maxiter):
146 | psi_temp = torch.atan2(ydot, xdot)
147 | c_psi, res_psi, res_eq_psi, psi = self.compute_psi(psi, self.lamda_psi, psi_temp, b_eq_psi)
148 | c_x, c_y, x, xdot, y, ydot = self.compute_x(v, psi, b_eq_x, b_eq_y)
149 |
150 | res_eq_psi_arr.append(res_eq_psi)
151 | res_psi_arr.append(res_psi)
152 | v = torch.sqrt(xdot ** 2 + ydot ** 2)
153 | #v[:, 0] = v_init[:, 0]
154 |
155 | res_eq_x = torch.matmul(self.A_eq, c_x.T).T - b_eq_x
156 | res_nonhol_x = xdot - v * torch.cos(psi)
157 |
158 | res_eq_y = torch.matmul(self.A_eq, c_y.T).T - b_eq_y
159 | res_nonhol_y = ydot - v * torch.sin(psi)
160 |
161 | res_eq_arr.append(np.linalg.norm(np.sqrt(res_eq_x**2 + res_eq_y**2)))
162 | res_nonhol_arr.append(np.linalg.norm(np.sqrt(res_nonhol_x**2 + res_nonhol_y**2)))
163 |
164 | self.lamda_x = self.lamda_x - self.rho_eq * torch.matmul(self.A_eq.T, res_eq_x.T).T - self.rho_nonhol * torch.matmul(self.A_nonhol.T, res_nonhol_x.T).T
165 | self.lamda_y = self.lamda_y - self.rho_eq * torch.matmul(self.A_eq.T, res_eq_y.T).T - self.rho_nonhol * torch.matmul(self.A_nonhol.T, res_nonhol_y.T).T
166 |
167 | primal_sol = torch.hstack((c_x, c_y, c_psi, v))
168 | return primal_sol, None
169 |
170 | def objective(self, fixed_params, variable_params, y):
171 | c_x = y[:, :self.nvar]
172 | c_y = y[:, self.nvar:2*self.nvar]
173 | c_psi = y[:, 2*self.nvar:3*self.nvar]
174 | v = y[:, 3*self.nvar:]
175 |
176 | x_init, y_init, v_init, psi_init, psidot_init = torch.chunk(fixed_params, 5, dim=1)
177 | x_fin, y_fin, psi_fin, x_mid1, y_mid1, x_mid2, y_mid2, x_mid3, y_mid3, x_mid4, y_mid4, x_mid5, y_mid5 = torch.chunk(variable_params, 13, dim=1)
178 |
179 | x = torch.matmul(self.P, c_x.T).T
180 | y = torch.matmul(self.P, c_y.T).T
181 | psi = torch.matmul(self.P, c_psi.T).T
182 | xdot = torch.matmul(self.Pdot, c_x.T).T
183 | ydot = torch.matmul(self.Pdot, c_y.T).T
184 | psidot = torch.matmul(self.Pdot, c_psi.T).T
185 | xddot = torch.matmul(self.Pddot, c_x.T).T
186 | yddot = torch.matmul(self.Pddot, c_y.T).T
187 | psiddot = torch.matmul(self.Pddot, c_psi.T).T
188 |
189 | cost_nonhol = 0.5*self.rho_nonhol*torch.sum((xdot - v*torch.cos(psi)) ** 2, 1) + 0.5*self.rho_nonhol*torch.sum((ydot - v*torch.sin(psi)) ** 2, 1)
190 | cost_pos = 0.5*self.rho_eq*(torch.sum((x[:, -1] - x_fin) ** 2, 1) + torch.sum((y[:, -1] - y_fin) ** 2, 1) + torch.sum((x[:, 0] - x_init) ** 2, 1) + torch.sum((y[:, 0] - y_init) ** 2, 1))
191 | cost_pos += 0.5*self.rho_eq*(torch.sum((x[:, 5] - x_mid1)**2, 1)
192 | + torch.sum((y[:, 5] - y_mid1)**2, 1)
193 | + torch.sum((x[:, 10] - x_mid2)**2, 1)
194 | + torch.sum((y[:, 10] - y_mid2)**2, 1)
195 | + torch.sum((x[:, 15] - x_mid3)**2, 1)
196 | + torch.sum((y[:, 15] - y_mid3)**2, 1)
197 | + torch.sum((x[:, 20] - x_mid4)**2, 1)
198 | + torch.sum((y[:, 20] - y_mid4)**2, 1)
199 | + torch.sum((x[:, 25] - x_mid5)**2, 1)
200 | + torch.sum((y[:, 25] - y_mid5)**2, 1)
201 | )
202 | cost_psi = 0.5*self.rho_eq*(torch.sum((psi[:, -1] - psi_fin) ** 2, 1) + torch.sum((psi[:, 0] - psi_init) ** 2, 1)
203 | + torch.sum((psidot[:, 0] - psidot_init) ** 2, 1))
204 | #cost_v = 0.5*self.rho_eq*torch.sum((v[:, 0] - v_init) ** 2, 1)
205 | cost_cancel = torch.diagonal(torch.matmul(-self.lamda_x, c_x.T) + torch.matmul(-self.lamda_y, c_y.T) + torch.matmul(-self.lamda_psi, c_psi.T))
206 |
207 | cost_smoothness = 0.5*self.weight_smoothness*(torch.sum(xddot**2, 1) + torch.sum(yddot**2, 1)) + 0.5*self.weight_smoothness_psi*torch.sum(psiddot**2, 1)
208 | return cost_nonhol + cost_pos + cost_psi + cost_smoothness + cost_cancel #+ cost_v
209 |
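A hedged usage sketch for `OPTNode_waypoint.solve()`, with `bernstein_coeff_order10_new` as defined in `Optnode_naive.py` in scope. Note that `A_eq` above indexes `self.P[30]`, so `num` must be at least 31 (the default `num=30` would raise an `IndexError`); `maxiter` is reduced here to keep the sketch fast.

```python
import torch

node = OPTNode_waypoint(bernstein_coeff_order10_new=bernstein_coeff_order10_new,
                        num=31, maxiter=100)
batch = 4
fixed_params = torch.zeros(batch, 5, dtype=torch.double)      # x, y, v, psi, psidot at t=0
fixed_params[:, 2] = 1.0                                      # unit initial speed
variable_params = torch.randn(batch, 13, dtype=torch.double)  # goal pose + 5 mid waypoints
primal_sol, _ = node.solve(fixed_params, variable_params)
print(primal_sol.shape)   # (batch, 3 * nvar + num): c_x | c_y | c_psi | v
```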
--------------------------------------------------------------------------------
/data/stp3/utils/data.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import pytorch_lightning as pl
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import io
7 | import copy
8 | from io import BytesIO
9 | import matplotlib
10 | import matplotlib as mpl
11 | import PIL
12 | from PIL import Image
13 |
14 | from stp3.utils.geometry import cumulative_warp_features_reverse, cumulative_warp_features, extract_trajs, extract_obs_from_centerness, generate_instance_colours
15 |
16 | def prepare_future_labels(batch, model):
17 | labels = {}
18 |
19 | cfg = model.cfg
20 |
21 | segmentation_labels = batch['segmentation']
22 | hdmap_labels = batch['hdmap']
23 | future_egomotion = batch['future_egomotion']
24 | gt_trajectory = batch['gt_trajectory']
25 | labels['sample_trajectory'] = batch['sample_trajectory']
26 |
27 | # present frame hd map gt
28 | labels['hdmap'] = hdmap_labels[:, model.receptive_field - 1].long().contiguous()
29 |
30 | # first rec token
31 | if cfg.DATASET.VERSION == 'nuscenes':
32 | labels['rec_first'] = batch['rec_first']
33 | labels['gt_trajectory_prev'] = batch['gt_trajectory_prev']
34 |
35 | # gt trajectory
36 | labels['gt_trajectory'] = gt_trajectory
37 | spatial_extent = (cfg.LIFT.X_BOUND[1], cfg.LIFT.Y_BOUND[1])
38 |
39 | # Past frames gt depth
40 | if cfg.LIFT.GT_DEPTH:
41 | depths = batch['depths']
42 | depth_labels = depths[:, :model.receptive_field, :, ::model.encoder_downsample,
43 | ::model.encoder_downsample]
44 | depth_labels = torch.clamp(depth_labels, cfg.LIFT.D_BOUND[0], cfg.LIFT.D_BOUND[1] - 1) - \
45 | cfg.LIFT.D_BOUND[0]
46 | depth_labels = depth_labels.long().contiguous()
47 | labels['depths'] = depth_labels
48 |
49 | # Warp labels to present's reference frame
50 | segmentation_labels_past = cumulative_warp_features(
51 | segmentation_labels[:, :model.receptive_field].float(),
52 | future_egomotion[:, :model.receptive_field],
53 | mode='nearest', spatial_extent=spatial_extent,
54 | ).long().contiguous()[:, :-1]
55 | segmentation_labels = cumulative_warp_features_reverse(
56 | segmentation_labels[:, (model.receptive_field - 1):].float(),
57 | future_egomotion[:, (model.receptive_field - 1):],
58 | mode='nearest', spatial_extent=spatial_extent,
59 | ).long().contiguous()
60 | labels['segmentation'] = torch.cat([segmentation_labels_past, segmentation_labels], dim=1)
61 |
62 | if cfg.SEMANTIC_SEG.HDMAP.ENABLED:
63 | hdmap_labels = batch['hdmap'][:, :, 1:2]
64 | hdmap_labels_past = cumulative_warp_features(
65 | hdmap_labels[:, :model.receptive_field].float(),
66 | future_egomotion[:, :model.receptive_field],
67 | mode='nearest', spatial_extent=spatial_extent,
68 | ).long().contiguous()[:, :-1]
69 | hdmap_labels = cumulative_warp_features_reverse(
70 | hdmap_labels[:, (model.receptive_field - 1):].float(),
71 | future_egomotion[:, (model.receptive_field - 1):],
72 | mode='nearest', spatial_extent=spatial_extent,
73 | ).long().contiguous()
74 | labels['hdmap_warped_road'] = torch.cat([hdmap_labels_past, hdmap_labels], dim=1)
75 | hdmap_labels = batch['hdmap'][:, :, 0:1]
76 | hdmap_labels_past = cumulative_warp_features(
77 | hdmap_labels[:, :model.receptive_field].float(),
78 | future_egomotion[:, :model.receptive_field],
79 | mode='nearest', spatial_extent=spatial_extent,
80 | ).long().contiguous()[:, :-1]
81 | hdmap_labels = cumulative_warp_features_reverse(
82 | hdmap_labels[:, (model.receptive_field - 1):].float(),
83 | future_egomotion[:, (model.receptive_field - 1):],
84 | mode='nearest', spatial_extent=spatial_extent,
85 | ).long().contiguous()
86 | labels['hdmap_warped_lane'] = torch.cat([hdmap_labels_past, hdmap_labels], dim=1)
87 |
88 | return labels
89 |
--------------------------------------------------------------------------------
/data/stp3/utils/network.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torchvision
4 |
5 | def pack_sequence_dim(x):
6 | b, s = x.shape[:2]
7 | return x.view(b * s, *x.shape[2:])
8 |
9 |
10 | def unpack_sequence_dim(x, b, s):
11 | return x.view(b, s, *x.shape[1:])
12 |
13 |
14 | def preprocess_batch(batch, device, unsqueeze=False):
15 | for key, value in batch.items():
16 | if torch.is_tensor(value):
17 | batch[key] = value.to(device)
18 | if unsqueeze:
19 | batch[key] = batch[key].unsqueeze(0)
20 |
21 |
22 | def set_module_grad(module, requires_grad=False):
23 | for p in module.parameters():
24 | p.requires_grad = requires_grad
25 |
26 |
27 | def set_bn_momentum(model, momentum=0.1):
28 | for m in model.modules():
29 | if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
30 | m.momentum = momentum
31 |
32 |
33 | class NormalizeInverse(torchvision.transforms.Normalize):
34 | # https://discuss.pytorch.org/t/simple-way-to-inverse-transform-normalization/4821/8
35 | def __init__(self, mean, std):
36 | mean = torch.as_tensor(mean)
37 | std = torch.as_tensor(std)
38 | std_inv = 1 / (std + 1e-7)
39 | mean_inv = -mean * std_inv
40 | super().__init__(mean=mean_inv, std=std_inv)
41 |
42 | def __call__(self, tensor):
43 | return super().__call__(tensor.clone())
44 |
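A quick sanity sketch for the helpers in this module (definitions above in scope): the `pack_sequence_dim`/`unpack_sequence_dim` round trip, and `NormalizeInverse` undoing a standard ImageNet normalization up to the `1e-7` epsilon.

```python
import torch
import torchvision

x = torch.randn(2, 3, 64, 50, 50)                  # (batch, seq, C, H, W)
flat = pack_sequence_dim(x)                        # (6, 64, 50, 50)
assert torch.equal(unpack_sequence_dim(flat, 2, 3), x)

mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
img = torch.rand(3, 224, 224)
restored = NormalizeInverse(mean, std)(torchvision.transforms.Normalize(mean, std)(img))
assert torch.allclose(restored, img, atol=1e-4)
```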
--------------------------------------------------------------------------------
/data/stp3/utils/spline.py:
--------------------------------------------------------------------------------
1 | """
2 | Cubic spline planner
3 | Author: Atsushi Sakai(@Atsushi_twi)
4 | Source: https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathPlanning/CubicSpline/cubic_spline_planner.py
5 | """
6 | import math
7 | import numpy as np
8 | import bisect
9 |
10 |
11 | class Spline:
12 | """
13 | Cubic Spline class
14 | """
15 |
16 | def __init__(self, x, y):
17 | self.b, self.c, self.d, self.w = [], [], [], []
18 |
19 | self.x = np.array(x)
20 | self.y = np.array(y)
21 |
22 | self.eps = np.finfo(float).eps
23 |
24 | self.nx = len(x) # dimension of x
25 | h = np.diff(x)
26 |
27 |         # calc coefficient a
28 | self.a = np.array([iy for iy in y])
29 |
30 | # calc coefficient c
31 | A = self.__calc_A(h)
32 | B = self.__calc_B(h)
33 | self.c = np.linalg.solve(A, B)
35 |
36 | # calc spline coefficient b and d
37 | for i in range(self.nx - 1):
38 | self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i] + self.eps))
39 | tb = (self.a[i + 1] - self.a[i]) / (h[i] + self.eps) - h[i] * \
40 | (self.c[i + 1] + 2.0 * self.c[i]) / 3.0
41 | self.b.append(tb)
42 | self.b = np.array(self.b)
43 | self.d = np.array(self.d)
44 |
45 | def calc(self, t):
46 | """
47 | Calc position
48 |         if t is outside of the input x, return NaN
49 | """
50 | t = np.asarray(t)
51 |         mask = np.logical_or(t < self.x[0], t > self.x[-1])  # was logical_and, which can never be True
52 | t[mask] = self.x[0]
53 |
54 | i = self.__search_index(t)
55 | dx = t - self.x[i.astype(int)]
56 | result = self.a[i] + self.b[i] * dx + \
57 | self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
58 |
59 | result = np.asarray(result)
60 |         result[mask] = np.nan  # None cannot be stored in a float array
61 | return result
62 |
63 | def calcd(self, t):
64 | """
65 | Calc first derivative
66 |         if t is outside of the input x, return NaN
67 | """
68 | t = np.asarray(t)
69 |         mask = np.logical_or(t < self.x[0], t > self.x[-1])
70 | t[mask] = 0
71 |
72 | i = self.__search_index(t)
73 | dx = t - self.x[i]
74 | result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
75 |
76 | result = np.asarray(result)
77 |         result[mask] = np.nan
78 | return result
79 |
80 | def calcdd(self, t):
81 | """
82 | Calc second derivative
83 | """
84 | t = np.asarray(t)
85 |         mask = np.logical_or(t < self.x[0], t > self.x[-1])
86 | t[mask] = 0
87 |
88 | i = self.__search_index(t)
89 | dx = t - self.x[i]
90 | result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
91 |
92 | result = np.asarray(result)
93 |         result[mask] = np.nan
94 | return result
95 |
96 | def __search_index(self, x):
97 | """
98 | search data segment index
99 | """
100 | indices = np.asarray(np.searchsorted(self.x, x, "left") - 1)
101 | indices[indices <= 0] = 0
102 | return indices
103 |
104 | def __calc_A(self, h):
105 | """
106 | calc matrix A for spline coefficient c
107 | """
108 | A = np.zeros((self.nx, self.nx))
109 | A[0, 0] = 1.0
110 | for i in range(self.nx - 1):
111 | if i != (self.nx - 2):
112 | A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
113 | A[i + 1, i] = h[i]
114 | A[i, i + 1] = h[i]
115 |
116 | A[0, 1] = 0.0
117 | A[self.nx - 1, self.nx - 2] = 0.0
118 | A[self.nx - 1, self.nx - 1] = 1.0
120 | return A
121 |
122 | def __calc_B(self, h):
123 | """
124 | calc matrix B for spline coefficient c
125 | """
126 | B = np.zeros(self.nx)
127 | for i in range(self.nx - 2):
128 | B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / (h[i + 1] + self.eps) \
129 | - 3.0 * (self.a[i + 1] - self.a[i]) / (h[i] + self.eps)
130 | return B
131 |
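A minimal 1-D usage sketch for the `Spline` class above (toy knots): fit `y(x)` and evaluate position and first derivative inside the input range.

```python
import numpy as np

sp = Spline([0.0, 1.0, 2.0, 3.0], [0.0, 1.0, 0.0, -1.0])
t = np.array([0.5, 1.5, 2.5])
print(sp.calc(t))    # interpolated positions
print(sp.calcd(t))   # first derivatives
```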
132 |
133 | class Spline2D:
134 | """
135 | 2D Cubic Spline class
136 | """
137 |
138 | def __init__(self, x, y, resolution=0.1, extension=300, debug=False):
139 |         # sampling resolution of 0.1 m; optional 300 m linear extension on both sides (disabled below)
140 | # trail and append linear extensions
141 | # angle_start = np.arctan2(y[1] - y[0], x[1] - x[0])
142 | # angle_end = np.arctan2(y[-1] - y[-2], x[-1] - x[-2])
143 |
144 | # x_start = np.arange(x[0] - extension, x[0], 0.5)
145 | # x_end = np.arange(x[-1], x[-1] + extension, 0.5)
146 |
147 | # y_start = y[0] + np.tan(angle_start) * np.arange(- extension, 0, 0.5)
148 | # y_end = y[-1] + np.tan(angle_end) * np.arange(0, 0 + extension, 0.5)
149 |
150 | # x = np.concatenate((x_start, x.reshape(x.shape[0]), x_end))
151 | # y = np.concatenate((y_start, y.reshape(y.shape[0]), y_end))
152 |
153 | self.splinex = x
154 | self.spliney = y
155 |
156 | if debug:
157 | import matplotlib.pyplot as plt
158 | plt.plot(x,y)
159 | plt.title("Reference Spline")
160 | plt.show()
161 | plt.clf()
162 |
163 | self.s = self.__calc_s(x, y)
164 | self.sx = Spline(self.s, x)
165 | self.sy = Spline(self.s, y)
166 |
167 | self.s_fine = np.arange(0, self.s[-1], resolution)
168 | xy = np.array([self.calc_global_position_online(s_i) for s_i in self.s_fine])
169 |
170 | self.x_fine = xy[:, 0]
171 | self.y_fine = xy[:, 1]
172 |
173 | def __calc_s(self, x, y):
174 | dx = np.diff(x)
175 | dy = np.diff(y)
176 | self.ds = np.hypot(dx, dy)
177 | s = [0]
178 | s.extend(np.cumsum(self.ds))
179 | return s
180 |
181 | def calc_global_position_online(self, s):
182 | """
183 | calc global position of points on the line, s: float
184 | return: x: float; y: float; the global coordinate of given s on the spline
185 | """
186 | x = self.sx.calc(s)
187 | y = self.sy.calc(s)
188 |
189 | return x, y
190 |
191 | def calc_global_position_offline(self, s, d):
192 | """
193 | calc global position of points in the frenet coordinate w.r.t. the line.
194 | s: float, longitudinal; d: float, lateral;
195 | return: x, float; y, float;
196 | """
197 | s_x = self.sx.calc(s)
198 | s_y = self.sy.calc(s)
199 |
200 | theta = math.atan2(self.sy.calcd(s), self.sx.calcd(s))
201 | x = s_x - math.sin(theta) * d
202 | y = s_y + math.cos(theta) * d
203 | return x, y
204 |
205 | def calc_frenet_position(self, x, y):
206 | """
207 |         calc the frenet position of given global coordinate (x, y)
208 | return s: the longitudinal; d: the lateral
209 | """
210 |         # find nearest x, y
211 | diff = np.hypot(self.x_fine - x, self.y_fine - y)
212 | idx = np.argmin(diff)
213 | [x_s, y_s] = self.x_fine[idx], self.y_fine[idx]
214 | s = self.s_fine[idx]
215 |
216 | # compute theta
217 | theta = math.atan2(self.sy.calcd(s), self.sx.calcd(s))
218 | d_x, d_y = x - x_s, y - y_s
219 | cross_rd_nd = math.cos(theta) * d_y - math.sin(theta) * d_x
220 | d = math.copysign(np.hypot(d_x, d_y), cross_rd_nd)
221 | return s, d
222 |
223 | def calc_curvature(self, s):
224 | """
225 | calc curvature
226 | """
227 | dx = self.sx.calcd(s)
228 | ddx = self.sx.calcdd(s)
229 | dy = self.sy.calcd(s)
230 | ddy = self.sy.calcdd(s)
231 | k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))
232 | return k
233 |
234 | def calc_yaw(self, s):
235 | """
236 | calc yaw
237 | """
238 | dx = self.sx.calcd(s)
239 | dy = self.sy.calcd(s)
240 | yaw = np.arctan2(dy, dx)
241 | return yaw
242 |
243 |
244 | def calc_spline_course(x, y, ds=0.1):
245 | sp = Spline2D(x, y)
246 | s = list(np.arange(0, sp.s[-1], ds))
247 |
248 | rx, ry, ryaw, rk = [], [], [], []
249 | for i_s in s:
250 | ix, iy = sp.calc_global_position_online(i_s)
251 | rx.append(ix)
252 | ry.append(iy)
253 | ryaw.append(sp.calc_yaw(i_s))
254 | rk.append(sp.calc_curvature(i_s))
255 |
256 | return rx, ry, ryaw, rk, s
257 |
258 |
259 | def main(): # pragma: no cover
260 | print("Spline 2D test")
261 | import matplotlib.pyplot as plt
262 | # x = [-2.5, 0.0, 2.5, 5.0, 7.5, 3.0, -1.0]
263 | # y = [0.7, -6, 5, 6.5, 0.0, 5.0, -2.0]
264 | x = [float(i) for i in range(30)]
265 | y = [float(0.0) for i in range(30)]
266 | for i in range(len(y)):
267 | if i % 2 == 0.0:
268 | y[i] = float(i)
269 |
270 |     ds = 0.1  # [m] distance between interpolated points
271 |
272 | sp = Spline2D(x, y)
273 | s = np.arange(0, sp.s[-1], ds)
274 |
275 | rx, ry, ryaw, rk = [], [], [], []
276 | rx, ry = sp.calc_global_position_online(s)
277 | ryaw = sp.calc_yaw(s)
278 | rk = sp.calc_curvature(s)
279 |
280 | plt.subplots(1)
281 | plt.plot(x, y, "xb", label="input")
282 | plt.plot(rx, ry, "-r", label="spline")
283 | plt.grid(True)
284 | plt.axis("equal")
285 | plt.xlabel("x[m]")
286 | plt.ylabel("y[m]")
287 | plt.legend()
288 |
289 | plt.subplots(1)
290 | plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], "-r", label="yaw")
291 | plt.grid(True)
292 | plt.legend()
293 | plt.xlabel("line length[m]")
294 | plt.ylabel("yaw angle[deg]")
295 |
296 | plt.subplots(1)
297 | plt.plot(s, rk, "-r", label="curvature")
298 | plt.grid(True)
299 | plt.legend()
300 | plt.xlabel("line length[m]")
301 | plt.ylabel("curvature [1/m]")
302 |
303 | plt.show()
304 |
305 |
306 | if __name__ == '__main__':
307 | main()
308 |
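Beyond the `main()` demo above, a hedged round-trip sketch for the frenet utilities: project a global point onto the spline with `calc_frenet_position` and map it back with `calc_global_position_offline`; agreement is limited by the 0.1 m sampling resolution of the nearest-point search.

```python
import numpy as np

xs = np.arange(0.0, 20.0, 1.0)
ys = np.sin(0.3 * xs)
sp2 = Spline2D(xs, ys)

s, d = sp2.calc_frenet_position(5.3, 1.2)
gx, gy = sp2.calc_global_position_offline(s, d)
print((gx, gy))   # close to (5.3, 1.2)
```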
--------------------------------------------------------------------------------
/data/utils.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from PIL import Image
4 |
5 | from minigpt4.common.config import Config
6 | from minigpt4.common.registry import registry
7 | from minigpt4.conversation.conversation import Chat, CONV_VISION
8 |
9 | # imports modules for registration
10 | from minigpt4.datasets.builders import *
11 | from minigpt4.models import *
12 | from minigpt4.processors import *
13 | from minigpt4.runners import *
14 | from minigpt4.tasks import *
15 |
16 | from llava.conversation import conv_templates
17 |
18 | from lavis.models import load_model_and_preprocess
19 |
20 | from tqdm import tqdm
22 |
23 | cam_keys = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
24 |
25 | # ========================================
26 | # InstructBLIP-2 Model Initialization
27 | # ========================================
28 | def init_instructblip2(model_name = "blip2_vicuna_instruct", device="cuda:0"):
29 | model, vis_processors, _ = load_model_and_preprocess(
30 | name=model_name,
31 | model_type="vicuna13b",
32 | is_eval=True,
33 | device=device,
34 | )
35 | return model, vis_processors
36 | # ========================================
37 |
38 | # ========================================
39 | # BLIP-2 Model Initialization
40 | # ========================================
41 | def init_blip2(model_name = "blip2_vicuna_instruct", device="cuda:0", model_type="vicuna13b"):
42 | model, vis_processors, _ = load_model_and_preprocess(
43 | name=model_name,
44 | model_type=model_type,
45 | is_eval=True,
46 | device=device,
47 | )
48 | return model, vis_processors
49 | # ========================================
50 |
51 |
52 | # ========================================
53 | # MiniGPT4 Initialization
54 | # ========================================
55 | def init_minigp4():
56 | parser = argparse.ArgumentParser(description="Demo")
57 | parser.add_argument("--cfg-path", default="eval_configs/minigpt4_eval.yaml", help="path to configuration file.")
58 | parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
59 | parser.add_argument("--sam-checkpoint", type=str, default="sam_vit_h_4b8939.pth", help="path to sam weights.")
60 | parser.add_argument('--model_path', type=str, default="/raid/t1/scratch/vikrant.dewangan/LLaVA/ckpt-old/", help='save path for jsons')
61 | parser.add_argument('--save_path', type=str, default="/raid/t1/scratch/vikrant.dewangan/datas", help='save path for jsons')
62 | parser.add_argument('--gpu', type=str, default="cuda:0", help='save path for jsons')
63 | parser.add_argument('--json_name', type=str, default="answer_pred_both.json", help='save path for jsons')
64 | parser.add_argument('--start', type=int, default=0, help='start index')
65 | parser.add_argument('--end', type=int, default=100, help='end index')
66 |
67 | parser.add_argument(
68 | "--options",
69 | nargs="+",
70 | help="override some settings in the used config, the key-value pair "
71 | "in xxx=yyy format will be merged into config file (deprecate), "
72 | "change to --cfg-options instead.",
73 | )
74 | args = parser.parse_args()
75 | print('Initializing Chat')
76 | cfg = Config(args)
77 |
78 | model_config = cfg.model_cfg
79 | model_config.device_8bit = args.gpu_id
80 | model_cls = registry.get_model_class(model_config.arch)
81 | model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id))
82 |
83 | vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
84 | vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
85 | chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id))
86 | return chat
87 | # ========================================
88 |
89 | def reset_conv(model_name = "llava"):
90 | if 'llama-2' in model_name.lower():
91 | conv_mode = "llava_llama_2"
92 | elif "v1" in model_name.lower():
93 | conv_mode = "llava_v1"
94 | elif "mpt" in model_name.lower():
95 | conv_mode = "mpt"
96 | else:
97 | conv_mode = "llava_v0"
98 | print('reset conv')
99 |
100 | conv = conv_templates[conv_mode].copy()
101 | if "mpt" in model_name.lower():
102 | roles = ('user', 'assistant')
103 | else:
104 | roles = conv.roles
105 | return conv
106 |
107 | def minigpt4_inference(chat, img_cropped, user_message):
108 | img_list = []
109 | chat_state = CONV_VISION.copy() # Reset chat state to default template
110 | llm_message = chat.upload_img(Image.fromarray(img_cropped), chat_state, img_list)
111 |
112 | print('Upload done')
113 |
114 | chat.ask(user_message, chat_state)
115 | llm_message = chat.answer(
116 | conv=chat_state,
117 | img_list=img_list,
118 | # num_beams=num_beams,
119 | num_beams=1,
120 | # temperature=temperature,
121 | temperature=0.7,
122 | max_new_tokens=300,
123 | max_length=2000
124 | )[0]
125 | return llm_message
126 |
127 | def instructblip2_inference(model_instructblip, img_cropped, vis_processors, device="cuda:0", user_message="describe the central object in the scene."):
128 | image = vis_processors["eval"](Image.fromarray(img_cropped)).unsqueeze(0).to(device)
129 |
130 | samples = {
131 | "image": image,
132 | "prompt": user_message,
133 | }
134 |
135 | output_blip = model_instructblip.generate(
136 | samples,
137 | length_penalty=float(1),
138 | repetition_penalty=float(1),
139 | num_beams=5,
140 | max_length=256,
141 | min_length=1,
142 | top_p=0.2,
143 | use_nucleus_sampling=False,
144 | )
145 |
146 | return output_blip[0]
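A hedged end-to-end sketch of how these helpers are meant to be chained (assumes the LAVIS dependencies are installed, the vicuna13b weights are available, and a CUDA device exists; the crop here is a random stand-in image):

```python
import numpy as np

model, vis_processors = init_instructblip2(device="cuda:0")
crop = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)   # stand-in image crop
answer = instructblip2_inference(model, crop, vis_processors, device="cuda:0",
                                 user_message="describe the central object in the scene.")
print(answer)
```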
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Talk2BEV
2 |
3 | This is the repository that contains source code for [Talk2BEV](https://llmbev.github.io/talk2bev).
4 |
5 | If you find Talk2BEV useful for your work, please cite:
6 | ```
7 | @article{talk2bev,
8 |   title = {Talk2BEV: Language-enhanced Bird’s-eye View Maps for Autonomous Driving},
9 |   author = {Dewangan, Vikrant and Choudhary, Tushar and Chandhok, Shivam and Priyadarshan, Shubham and Jain, Anushka and Singh, Arun and Srivastava, Siddharth and Jatavallabhula, {Krishna Murthy} and Krishna, Madhava},
10 |   year = {2023},
11 |   journal = {arXiv},
12 | }
13 | ```
14 |
15 | # Website License
16 | 
This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
17 |
--------------------------------------------------------------------------------
/docs/assets/pdf/talk2bev.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/assets/pdf/talk2bev.pdf
--------------------------------------------------------------------------------
/docs/static/css/bulma-carousel.min.css:
--------------------------------------------------------------------------------
1 | @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.slider{position:relative;width:100%}.slider-container{display:flex;flex-wrap:nowrap;flex-direction:row;overflow:hidden;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);min-height:100%}.slider-container.is-vertical{flex-direction:column}.slider-container .slider-item{flex:none}.slider-container .slider-item .image.is-covered img{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.slider-container .slider-item .video-container{height:0;padding-bottom:0;padding-top:56.25%;margin:0;position:relative}.slider-container .slider-item .video-container.is-1by1,.slider-container .slider-item .video-container.is-square{padding-top:100%}.slider-container .slider-item .video-container.is-4by3{padding-top:75%}.slider-container .slider-item .video-container.is-21by9{padding-top:42.857143%}.slider-container .slider-item .video-container embed,.slider-container .slider-item .video-container iframe,.slider-container .slider-item .video-container object{position:absolute;top:0;left:0;width:100%!important;height:100%!important}.slider-navigation-next,.slider-navigation-previous{display:flex;justify-content:center;align-items:center;position:absolute;width:42px;height:42px;background:#fff center center no-repeat;background-size:20px 20px;border:1px solid #fff;border-radius:25091983px;box-shadow:0 2px 5px #3232321a;top:50%;margin-top:-20px;left:0;cursor:pointer;transition:opacity .3s,-webkit-transform .3s;transition:transform .3s,opacity .3s;transition:transform .3s,opacity .3s,-webkit-transform .3s}.slider-navigation-next:hover,.slider-navigation-previous:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.slider-navigation-next.is-hidden,.slider-navigation-previous.is-hidden{display:none;opacity:0}.slider-navigation-next svg,.slider-navigation-previous svg{width:25%}.slider-navigation-next{left:auto;right:0;background:#fff center center no-repeat;background-size:20px 20px}.slider-pagination{display:none;justify-content:center;align-items:center;position:absolute;bottom:0;left:0;right:0;padding:.5rem 1rem;text-align:center}.slider-pagination .slider-page{background:#fff;width:10px;height:10px;border-radius:25091983px;display:inline-block;margin:0 3px;box-shadow:0 2px 5px #3232321a;transition:-webkit-transform .3s;transition:transform .3s;transition:transform .3s,-webkit-transform .3s;cursor:pointer}.slider-pagination .slider-page.is-active,.slider-pagination .slider-page:hover{-webkit-transform:scale(1.4);transform:scale(1.4)}@media screen and (min-width:800px){.slider-pagination{display:flex}}.hero.has-carousel{position:relative}.hero.has-carousel+.hero-body,.hero.has-carousel+.hero-footer,.hero.has-carousel+.hero-head{z-index:10;overflow:hidden}.hero.has-carousel .hero-carousel{position:absolute;top:0;left:0;bottom:0;right:0;height:auto;border:none;margin:auto;padding:0;z-index:0}.hero.has-carousel .hero-carousel .slider{width:100%;max-width:100%;overflow:hidden;height:100%!important;max-height:100%;z-index:0}.hero.has-carousel .hero-carousel .slider .has-background{max-height:100%}.hero.has-carousel .hero-carousel .slider .has-background .is-background{-o-object-fit:cover;object-fit:cover;-o-object-position:center 
center;object-position:center center;height:100%;width:100%}.hero.has-carousel .hero-body{margin:0 3rem;z-index:10}
--------------------------------------------------------------------------------
/docs/static/css/index.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: 'Noto Sans', sans-serif;
3 | }
4 |
5 |
6 | .footer .icon-link {
7 | font-size: 25px;
8 | color: #000;
9 | }
10 |
11 | .link-block a {
12 | margin-top: 5px;
13 | margin-bottom: 5px;
14 | }
15 |
16 | .coolname {
17 | font-variant: small-caps;
18 | }
19 |
20 |
21 | .teaser .hero-body {
22 | padding-top: 0;
23 | padding-bottom: 3rem;
24 | }
25 |
26 | .teaser {
27 | font-family: 'Google Sans', sans-serif;
28 | }
29 |
30 |
31 | .publication-title {
32 | }
33 |
34 | .publication-banner {
35 | max-height: 100%;
36 |
37 | }
38 |
39 | .publication-banner video {
40 | position: relative;
41 | left: auto;
42 | top: auto;
43 | transform: none;
44 | object-fit: fill;
45 | }
46 |
47 | .publication-header .hero-body {
48 | }
49 |
50 | .publication-title {
51 | font-family: 'Google Sans', sans-serif;
52 | }
53 |
54 | .publication-authors {
55 | font-family: 'Google Sans', sans-serif;
56 | }
57 |
58 | .publication-venue {
59 | color: #555;
60 | width: fit-content;
61 | font-weight: bold;
62 | }
63 |
64 | .publication-awards {
65 | color: #ff3860;
66 | width: fit-content;
67 | font-weight: bolder;
68 | }
69 |
70 | .publication-authors {
71 | }
72 |
73 | .publication-authors a {
74 | color: #4A1FB4 !important;
75 | }
76 |
77 | .publication-authors a:hover {
78 | text-decoration: underline;
79 | }
80 |
81 | .text-black {
82 | color: black;
83 | }
84 |
85 |
86 | .text-black:hover {
87 | color: magenta;
88 | /* text-decoration: underline; */
89 | }
90 |
91 |
92 | .author-block {
93 | display: inline-block;
94 | }
95 |
96 | .publication-banner img {
97 | }
98 |
99 | .publication-authors {
100 | /*color: #4286f4;*/
101 | }
102 |
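/* Editor's note: the three *-video wrappers below share the same intrinsic-ratio
   embed pattern: height is zeroed and a percentage padding-bottom sizes the box
   from its own width (percentage padding resolves against width), while the
   iframe is absolutely positioned to fill it. 56.25% would yield 16:9; the
   33.25% and 85% values used here give a wide banner and a near-square panel
   respectively. */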
103 | .publication-video {
104 | position: relative;
105 | width: 100%;
106 | height: 0;
107 | padding-bottom: 33.25%;
108 |
109 | overflow: hidden;
110 | border-radius: 10px !important;
111 | }
112 |
113 | .publication-video iframe {
114 | position: absolute;
115 | top: 0;
116 | left: 0;
117 | width: 100%;
118 | height: 100%;
119 | }
120 |
121 | .spatial-video {
122 | position: relative;
123 | width: 100%;
124 | height: 0;
125 | padding-bottom: 85%;
126 |
127 | overflow: hidden;
128 | border-radius: 10px !important;
129 | }
130 |
131 | .spatial-video iframe {
132 | position: absolute;
133 | top: 0;
134 | left: 0;
135 | width: 100%;
136 | height: 100%;
137 | }
138 |
139 | .carousel-video {
140 | position: relative;
141 | width: 100%;
142 | height: 0;
143 | padding-bottom: 33.25%;
144 |
145 | overflow: hidden;
146 | border-radius: 10px !important;
147 | }
148 |
149 | .carousel-video iframe {
150 | position: absolute;
151 | top: 0;
152 | left: 0;
153 | width: 100%;
154 | height: 100%;
155 | }
156 |
157 |
158 | .publication-body img {
159 | }
160 |
161 | .results-carousel {
162 | overflow: hidden;
163 | }
164 |
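/* Editor's note: font-size: 0 on the item removes the inline-level line-box gap
   that would otherwise appear under the video, so the 10px rounded border sits
   flush against the media. */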
165 | .results-carousel .item {
166 | margin: 5px;
167 | overflow: hidden;
168 | border: 1px solid #bbb;
169 | border-radius: 10px;
170 | padding: 0;
171 | font-size: 0;
172 | }
173 |
174 | .results-carousel video {
175 | margin: 0;
176 | }
177 |
178 |
179 | .interpolation-panel {
180 | background: #f5f5f5;
181 | border-radius: 10px;
182 | }
183 |
184 | .interpolation-panel .interpolation-image {
185 | width: 100%;
186 | border-radius: 5px;
187 | }
188 |
189 | .interpolation-video-column {
190 | }
191 |
192 | .interpolation-panel .slider {
193 | margin: 0 !important;
194 | }
195 |
200 | #interpolation-image-wrapper {
201 | width: 100%;
202 | }
203 | #interpolation-image-wrapper img {
204 | border-radius: 5px;
205 | }
206 |
207 | .interpolation-image-wrapper-uncoco img {
208 | margin-top: 50px;
209 | margin-bottom: 80px;
210 | }
211 | .interpolation-image-wrapper-tabletop img {
212 | margin-top: 70px;
213 | margin-bottom: 100px;
214 | }
215 | .interpolation-image-wrapper-fine-grained img {
216 | /* margin-top: 50px; */
217 | margin-bottom: 20px;
218 | }
219 | .interpolation-image-wrapper-zero-shot img {
220 | margin-top: 25px;
221 | /* margin-bottom: 100px; */
222 | }
223 |
--------------------------------------------------------------------------------
/docs/static/images/favicon.svg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/favicon.svg
--------------------------------------------------------------------------------
/docs/static/images/icons8-chat-32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/icons8-chat-32.png
--------------------------------------------------------------------------------
/docs/static/images/icons8-chat-96.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/icons8-chat-96.png
--------------------------------------------------------------------------------
/docs/static/images/interpolate_end.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/interpolate_end.jpg
--------------------------------------------------------------------------------
/docs/static/images/interpolate_start.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/interpolate_start.jpg
--------------------------------------------------------------------------------
/docs/static/images/pipeline1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/pipeline1.png
--------------------------------------------------------------------------------
/docs/static/images/spatial1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/spatial1.png
--------------------------------------------------------------------------------
/docs/static/images/spatial2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/spatial2.png
--------------------------------------------------------------------------------
/docs/static/images/steve.webm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/steve.webm
--------------------------------------------------------------------------------
/docs/static/images/talk2bev_teaser-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/images/talk2bev_teaser-1.png
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/._Icon:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/._Icon
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000000.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000001.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000002.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000003.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000004.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000004.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000005.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000005.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000006.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000006.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000007.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000007.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000008.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000008.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000009.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000009.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000010.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000010.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000011.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000011.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000012.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000012.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000013.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000013.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000014.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000014.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000015.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000015.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000016.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000016.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000017.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000017.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000018.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000018.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000019.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000019.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000020.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000020.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000021.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000021.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000022.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000022.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000023.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000023.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000024.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000024.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000025.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000025.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000026.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000026.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000027.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000027.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000028.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000028.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000029.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000029.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000030.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000030.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000031.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000031.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000032.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000032.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000033.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000033.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000034.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000034.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000035.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000035.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000036.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000036.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000037.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000037.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000038.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000038.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000039.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000039.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000040.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000040.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000041.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000041.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000042.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000042.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000043.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000043.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000044.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000044.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000045.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000045.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000046.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000046.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000047.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000047.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000048.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000048.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000049.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000049.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000050.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000050.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000051.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000051.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000052.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000052.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000053.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000053.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000054.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000054.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000055.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000055.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000056.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000056.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000057.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000057.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000058.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000058.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000059.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000059.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000060.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000060.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000061.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000061.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000062.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000062.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000063.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000063.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000064.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000064.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000065.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000065.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000066.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000066.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000067.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000067.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000068.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000068.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000069.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000069.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000070.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000070.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000071.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000071.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000072.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000072.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000073.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000073.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000074.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000074.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000075.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000075.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000076.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000076.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000077.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000077.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000078.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000078.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000079.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000079.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000080.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000080.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000081.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000081.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000082.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000082.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000083.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000083.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000084.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000084.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000085.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000085.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000086.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000086.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000087.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000087.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000088.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000088.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000089.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000089.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000090.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000090.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000091.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000091.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000092.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000092.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000093.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000093.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000094.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000094.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000095.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000095.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000096.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000096.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000097.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000097.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000098.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000098.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000099.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000099.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000100.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000100.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000101.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000102.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000103.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000104.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000104.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000105.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000105.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000106.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000106.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000107.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000107.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000108.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000108.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000109.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000109.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000110.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000110.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000111.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000111.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000112.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000112.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000113.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000113.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000114.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000114.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000115.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000115.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000116.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000116.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000117.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000117.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000118.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000118.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000119.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000119.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000120.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000120.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000121.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000121.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000122.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000122.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000123.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000123.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000124.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000124.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000125.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000125.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000126.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000126.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000127.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000127.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000128.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000128.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000129.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000129.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000130.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000130.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000131.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000131.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000132.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000132.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000133.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000133.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000134.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000134.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000135.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000135.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000136.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000136.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000137.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000137.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000138.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000138.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000139.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000139.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000140.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000140.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000141.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000141.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000142.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000142.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000143.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000143.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000144.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000144.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000145.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000145.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000146.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000146.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000147.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000147.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000148.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000148.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000149.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000149.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000150.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000150.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000151.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000151.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000152.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000152.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000153.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000153.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000154.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000154.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000155.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000155.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000156.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000156.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000157.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000157.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000158.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000158.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000159.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000159.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000160.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000160.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000161.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000161.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000162.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000162.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000163.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000163.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000164.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000164.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000165.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000165.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000166.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000166.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000167.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000167.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000168.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000168.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000169.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000169.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000170.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000170.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000171.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000171.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000172.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000172.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000173.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000173.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000174.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000174.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000175.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000175.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000176.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000176.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000177.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000177.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000178.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000178.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000179.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000179.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000180.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000180.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000181.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000181.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000182.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000182.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000183.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000183.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000184.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000184.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000185.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000185.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000186.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000186.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000187.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000187.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000188.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000188.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000189.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000189.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000190.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000190.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000191.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000191.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000192.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000192.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000193.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000193.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000194.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000194.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000195.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000195.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000196.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000196.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000197.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000197.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000198.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000198.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000199.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000199.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000200.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000201.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000201.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000202.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000202.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000203.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000203.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000204.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000204.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000205.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000205.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000206.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000206.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000207.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000207.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000208.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000208.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000209.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000209.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000210.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000210.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000211.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000211.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000212.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000212.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000213.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000213.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000214.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000214.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000215.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000215.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000216.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000216.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000217.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000217.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000218.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000218.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000219.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000219.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000220.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000220.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000221.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000221.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000222.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000222.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000223.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000223.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000224.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000224.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000225.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000225.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000226.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000226.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000227.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000227.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000228.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000228.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000229.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000229.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000230.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000230.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000231.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000231.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000232.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000232.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000233.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000233.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000234.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000234.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000235.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000235.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000236.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000236.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000237.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000237.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000238.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000238.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/000239.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/000239.jpg
--------------------------------------------------------------------------------
/docs/static/interpolation/stacked/Icon:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/interpolation/stacked/Icon
--------------------------------------------------------------------------------
/docs/static/js/bulma-slider.min.js:
--------------------------------------------------------------------------------
1 | !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var e=1;e=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default});
--------------------------------------------------------------------------------
/docs/static/js/index.js:
--------------------------------------------------------------------------------
1 | window.HELP_IMPROVE_VIDEOJS = false;
2 |
3 | var INTERP_BASE = "./static/interpolation/stacked";
4 | var NUM_INTERP_FRAMES = 240;
5 |
6 | var interp_images = [];
7 | function preloadInterpolationImages() {
8 | for (var i = 0; i < NUM_INTERP_FRAMES; i++) {
9 | var path = INTERP_BASE + '/' + String(i).padStart(6, '0') + '.jpg';
10 | interp_images[i] = new Image();
11 | interp_images[i].src = path;
12 | }
13 | }
14 |
15 | function setInterpolationImage(i) {
16 | var image = interp_images[i];
17 | image.ondragstart = function() { return false; };
18 | image.oncontextmenu = function() { return false; };
19 | $('#interpolation-image-wrapper').empty().append(image);
20 | }
21 |
22 |
23 | $(document).ready(function() {
24 | // Check for click events on the navbar burger icon
25 | $(".navbar-burger").click(function() {
26 | // Toggle the "is-active" class on both the "navbar-burger" and the "navbar-menu"
27 | $(".navbar-burger").toggleClass("is-active");
28 | $(".navbar-menu").toggleClass("is-active");
29 |
30 | });
31 |
32 | var options = {
33 | slidesToScroll: 1,
34 | slidesToShow: 3,
35 | loop: true,
36 | infinite: true,
37 | autoplay: false,
38 | autoplaySpeed: 3000,
39 | }
40 |
41 | // Initialize all div with carousel class
42 | var carousels = bulmaCarousel.attach('.carousel', options);
43 |
44 | // Loop on each carousel initialized
45 | for(var i = 0; i < carousels.length; i++) {
46 | // Add listener to event
47 | carousels[i].on('before:show', state => {
48 | console.log(state);
49 | });
50 | }
51 |
52 | // Access to bulmaCarousel instance of an element
53 | var element = document.querySelector('#my-element');
54 | if (element && element.bulmaCarousel) {
55 | // bulmaCarousel instance is available as element.bulmaCarousel
56 | element.bulmaCarousel.on('before-show', function(state) {
57 | console.log(state);
58 | });
59 | }
60 |
61 | /*var player = document.getElementById('interpolation-video');
62 | player.addEventListener('loadedmetadata', function() {
63 | $('#interpolation-slider').on('input', function(event) {
64 | console.log(this.value, player.duration);
65 | player.currentTime = player.duration / 100 * this.value;
66 | })
67 | }, false);*/
68 | preloadInterpolationImages();
69 |
70 | $('#interpolation-slider').on('input', function(event) {
71 | setInterpolationImage(this.value);
72 | });
73 | setInterpolationImage(0);
74 | $('#interpolation-slider').prop('max', NUM_INTERP_FRAMES - 1);
75 |
76 | bulmaSlider.attach();
77 |
78 | })
79 |
--------------------------------------------------------------------------------
/docs/static/js/jquery.jslatex.js:
--------------------------------------------------------------------------------
1 | /*
2 | * jsLaTeX v1.2.2 - A jQuery plugin to directly embed LaTeX into your website or blog
3 | *
4 | * Copyright (c) 2009 Andreas Grech
5 | *
6 | * Licensed under the WTFPL license:
7 | * http://www.wtfpl.net/about/
8 | *
9 | * http://dreasgrech.com
10 | */
11 |
12 | (function ($) {
13 | var attachToImage = function () {
14 | return $("
").attr({
15 | src: this.src
16 | });
17 | },
18 | formats = {
19 | 'gif': attachToImage,
20 | 'png': attachToImage,
21 | 'swf': function () {
22 | return $("").attr({
23 | src: this.src,
24 | type: 'application/x-shockwave-flash'
25 | });
26 | }
27 | },
28 | sections = {
29 | '{f}': 'format',
30 | '{e}': 'equation'
31 | },
32 | escapes = {
33 | '+': '2B',
34 | '=': '3D'
35 | };
36 |
37 | $.fn.latex = function (opts) {
38 | opts = $.extend({},
39 | $.fn.latex.defaults, opts);
40 | opts.format = formats[opts.format] ? opts.format : 'gif';
41 | return this.each(function () {
42 | var $this = $(this),
43 | format, s, element, url = opts.url;
44 | opts.equation = $.trim($this.text());
45 | for (s in sections) {
46 | if (sections.hasOwnProperty(s) && (format = url.indexOf(s)) >= 0) {
47 | url = url.replace(s, opts[sections[s]]);
48 | }
49 | }
50 | for (s in escapes) {
51 | if (escapes.hasOwnProperty(s) && (format = url.indexOf(s)) >= 0) {
52 | url = url.replace(s, '%' + escapes[s]);
53 | }
54 | }
55 | opts.src = url;
56 | element = formats[opts.format].call(opts);
57 | $this.html('').append(element);
58 | if (opts.callback) {
59 | opts.callback.call(element);
60 | }
61 | });
62 | };
63 |
64 | $.fn.latex.defaults = {
65 | format: 'gif',
66 | url: 'http://latex.codecogs.com/{f}.latex?{e}'
67 | };
68 | }(jQuery));
69 |
70 |
--------------------------------------------------------------------------------
/docs/static/videos/._Icon:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/._Icon
--------------------------------------------------------------------------------
/docs/static/videos/methodology.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/methodology.mp4
--------------------------------------------------------------------------------
/docs/static/videos/overtaking.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/overtaking.mp4
--------------------------------------------------------------------------------
/docs/static/videos/parked.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/parked.mp4
--------------------------------------------------------------------------------
/docs/static/videos/spatial_1_cropped.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/spatial_1_cropped.mp4
--------------------------------------------------------------------------------
/docs/static/videos/spatial_2_cropped.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/spatial_2_cropped.mp4
--------------------------------------------------------------------------------
/docs/static/videos/teaser_presentation_compressed.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/docs/static/videos/teaser_presentation_compressed.mp4
--------------------------------------------------------------------------------
/evaluation/chatgptinterface.py:
--------------------------------------------------------------------------------
1 | import openai
2 | import os
3 |
4 | # chat gpt credentials
5 | ORGANIZATION = os.getenv("ORGANIZATION")
6 | API_KEY = os.getenv("API_KEY")
7 |
8 | # question types to ask
9 | QUESTION_TYPES = ["SPATIAL_REASONING", "INSTANCE_ATTRIBUTE", "INSTANCE_COUNTING", "VISUAL_REASONING"]
10 |
11 | # to generate per type
12 | NUM_PER_TYPE = 10
13 |
14 | JSONS_LIMIT = 1000
15 |
16 | # example conversation to prompt GPT
17 | EXAMPLE_MCQ = "Here is an example\
18 | What is our planet's name?\
19 | A. Earth\
20 | B. Sun\
21 | C. Mars\
22 | D. Jupiter\
23 | \n\n \
24 | Answer: A. Earth"
25 |
26 | # scene data format
27 | SCENE_FORMAT = {
28 | "bev_centroid": "2D coordinates of the center of the object",
29 | "bev_area": "2D area of the object",
30 | "brief_label": "A brief caption describing the object.",
31 | "bg_description": "A description of the background around the object.",
32 | "weather": "Weather description of the scene"
33 | }
34 |
35 | # question context
36 | QUESTION_CONTEXT = {
37 | "SPATIAL_REASONING": "The questions should be about spatial relations between two objects. The question should be mainly based on the coordinates of the two objects. To answer the question, one should find the two mentioned objects, and find their relative spatial relation to answer the question.",
38 | "INSTANCE_ATTRIBUTE": "The questions should be about the attribute of a certain object, such as its color, shape or fine-grained type. Do not use the object ID in the question.",
39 | "INSTANCE_COUNTING": "The questions should involve the number of appearance of a certain object. Start with 'How many ....'. The choices of the question should be numbers. To answer the question, one should find and count all of the mentioned objects in the image. Make sure the options are far apart.",
40 | "VISUAL_REASONING": "Create complex questions beyond describing the scene. The questions should aid the user who is driving the ego-vehicle. To answer such question, one should first understanding the visual content, then based on the background knowledge or reasoning, either explain why the things are happening that way, or provide guides and help to user's request. Make the question challenging by not including the visual content details in the question so that the user needs to reason about that first. Do not use the object ID in the question. Do not mention the coordinates."
41 | }
42 |
43 | # question generation format
44 | QUESTION_FORMAT = {
45 | "question_number": "Please give the question number of this question.",
46 | "question": "Please give the question.",
47 | "options": "this should be a list of options",
48 | "correct_option": "this should be the correct options",
49 | }
50 |
51 | # answer format - spatial operators
52 | ANSWER_FORMAT = {
53 | "inferred_query": "your interpretation of the user query in a succinct form",
54 | "relevant_objects": "python list of all relevant object ids for the user query",
55 | "query_achievable": "whether or not the user-specified query is achievable using the objects and descriptions provided in the scene.",
56 | "spatial_reasoning_functions": "If the query needs calling one or more spatial reasoning functions, this field contains a list of function calls that conform to the API above. Else, this field contains an empty list.",
57 | "explanation": "A brief explanation of what the relevant objects, and how it addresses the task."
58 | }
59 |
60 | # spatial operators
61 | SPATIAL_OPERATORS_LIST = [
62 | "filter_front(list_of_objects, object_id) : Within the the list of objects, it returns the list of objects to front of the object with id as object_id",
63 | "filter_left(list_of_objects, object_id) : Within the the list of objects, it returns the list of objects to left of the object with id as object_id",
64 | "filter_right(list_of_objects, object_id) : Within the the list of objects, it returns the list of objects to right of the object with id as object_id",
65 | "filter_rear(list_of_objects, object_id) : Within the the list of objects, it returns the list of objects to rear of the object with id as object_id",
66 | "find_distance(list_of_objects, object_id_1, object_id_2) : Within the the list of objects, it returns the distance within 2 objects object_id_1 and object_id_2",
67 | "find_objects_within_distance(list_of_objects, object_id, distance) : Within the the list of objects, it returns the list of objects within distance to the object with id object_id",
68 | "get_k_closest_objects(list_of_objects, object_id, k) : Within the the list of objects, it returns the list of k closest objects to the object with id object_id",
69 | "get_k_farthest_objects(list_of_objects, object_id, k) : Within the the list of objects, it gets the k farthest objects to the object with id object_id",
70 | "filter_objects_with_tag(list_of_objects, object_id, tagname, d) : Within the the list of objects, it finds the objects which have tag as tagname and are within d distance to the object with id object_id",
71 | "filter_color(list_of_objects, object_id, colorname, d): Within the the list of objects, itfinds the objects which have color as colorname and are within d distance to the object with id object_id",
72 | "filter_size(list_of_objects, object_id, distance, min_size, max_size): Within the the list of objects, itfinds the objects which have size between min_size and max_size and are within d distance to the object with id object_id"
73 | ]
74 |
75 | def setup_question_generation(question_type):
76 | message = f"You will be given, as input a 2D road scene in Bird's Eye View, as a list. The ego-vehicle is at (100, 100) facing along the positive Y-axis. Each entry in the list describes one object in the scene, with the following five fields: \
77 | \n{str(SCENE_FORMAT)}\n\
78 | Once you have parsed the JSON and are ready to generate question about the scene, Create {NUM_PER_TYPE} distinct multi-choice question about the scene, and provide the choices and answer. Each question should have 4 options, out of which only 1 should be correct. Do not use the object ID in the question. Do not mention the coordinates."# + "You have to return a list of JSONs each containing an MCQ question."
79 | message += QUESTION_CONTEXT[question_type]
80 | return {
81 | "role": "system",
82 | "content": message + "Please provide answer as well. \n" + str(EXAMPLE_MCQ) + "\n . NOTE: DO NOT ask simple questions like 'What is the central object in the scene?', or What is the color of the car?." + "\n\n. "
83 | # "content": message + "Please provide answer as well. \n" + "\n . NOTE: DO NOT ask simple questions like 'What is the central object in the scene?'." + "\n\n. For each question, your JSON should contain - " + str(QUESTION_FORMAT) + "\n. Your final output should be a list of JSONs, and each string should be in double quotes. Please ensure your list can be parsed in a python program. Your string should be in double quotes. Please ensure this."
84 | }
85 |
86 | def add_conversation_context(conversation_type):
87 | if conversation_type == "MCQ":
88 | return "The user will then begin to ask Multiple Choice Questions, and the task is to answer various user queries about the scene. For each question, answer with just one correct option. You should only output the correct answer. Do not give any explanation"
89 |
90 | if conversation_type == "SPATIAL":
91 | return f"The user will then begin to ask questions, and the task is to answer various user queries about the scene. These questions will involve spatial reasoning. To assist with such queries, we have the following available functions:.\
92 | {str(SPATIAL_OPERATORS_LIST)}\
93 | For each user question, respond with a JSON dictionary with the following fields:\
94 | {str(ANSWER_FORMAT)}\
95 | The Object ID of ego-vehicle is 0. Only output this JSON. Do not output any explanation."
96 |
97 | def setup_conversation(conversation_type="MCQ"):
98 | message = f"You will be given, as input a 2D road scene in Bird's Eye View, as a list. The ego-vehicle is at (0,0) facing along the positive Y-axis. Each entry in the list describes one object in the scene, with the following five fields: \
99 | \n\n {str(SCENE_FORMAT)} \n\n \
100 | Once you have parsed the JSON and are ready to answer questions about the scene, please wait for the user to input JSON."
101 | message += add_conversation_context(conversation_type)
102 | return {
103 | "role": "system",
104 | "content": message
105 | }
106 |
107 | class ChatGPTInteface:
108 | def __init__(self, API_KEY, organization, model_name="gpt-4") -> None:
109 | openai.api_key = API_KEY
110 | openai.organization = organization
111 | self.model_name = "gpt-4"
112 |
113 | def generate_question(self, data, question_type="DEFAULT"):
114 | system_message = setup_question_generation(question_type)
115 | user_message = {
116 | "role": "user",
117 | "content": str(data)
118 | }
119 | response = openai.ChatCompletion.create(
120 | model=self.model_name,
121 | messages=[
122 | system_message, # prompt template
123 | user_message # data
124 | ],
125 | temperature=0,
126 | max_tokens=1024
127 | )
128 | return response
129 |
130 | def generate_conversation(self, data, question, conversation_type):
131 | system_message = setup_conversation(conversation_type)
132 | user_message = {
133 | "role": "user",
134 | "content": str(data) + ". The question is as follows \n" + str(question)
135 | }
136 | response = openai.ChatCompletion.create(
137 | model=self.model_name,
138 | messages=[
139 | system_message,
140 | user_message
141 | ],
142 | temperature=0,
143 | max_tokens=1024
144 | )
145 | return response
146 |
--------------------------------------------------------------------------------
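Note: the spatial operators in SPATIAL_OPERATORS_LIST above are only described to GPT as prompt text; their implementations live in evaluation/spatial_operators.py (star-imported by eval_spops.py below) and are not reproduced in this dump. As a minimal sketch of what two of them might look like, assuming each object dict carries an "object_id" key and a "bev_centroid" [x, y] as described in SCENE_FORMAT (the "object_id" key and the _centroid helper are hypothetical, not from the repo):

```python
import math

def _centroid(list_of_objects, object_id):
    # Hypothetical helper: look up an object's BEV centroid by its id.
    for obj in list_of_objects:
        if obj["object_id"] == object_id:
            return obj["bev_centroid"]
    raise KeyError(f"object {object_id} not found")

def find_distance(list_of_objects, object_id_1, object_id_2):
    # Euclidean distance between the two objects' BEV centroids.
    x1, y1 = _centroid(list_of_objects, object_id_1)
    x2, y2 = _centroid(list_of_objects, object_id_2)
    return math.hypot(x2 - x1, y2 - y1)

def filter_front(list_of_objects, object_id):
    # Objects whose centroid lies ahead of the reference object along the
    # positive Y-axis, the ego heading stated in the prompts above.
    _, y_ref = _centroid(list_of_objects, object_id)
    return [obj for obj in list_of_objects
            if obj["object_id"] != object_id and obj["bev_centroid"][1] > y_ref]
```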
/evaluation/eval_mcq.py:
--------------------------------------------------------------------------------
1 | from chatgptinterface import ChatGPTInteface
2 | import json
3 | import numpy as np
4 | import pandas as pd
5 | import os
6 |
7 | def calculate_jaccard_similarity(str1, str2):
8 | set1 = set(str1.lower().split())
9 | set2 = set(str2.lower().split())
10 |
11 | intersection = len(set1.intersection(set2))
12 | union = len(set1) + len(set2) - intersection
13 |
14 | jaccard_similarity = intersection / union if union else 0.0  # guard against two empty strings
15 | return jaccard_similarity
16 |
17 | def are_strings_similar(str1, str2, threshold=0.75):
18 | similarity = calculate_jaccard_similarity(str1, str2)
19 | return similarity >= threshold
20 |
21 | ORGANIZATION = os.getenv("ORGANIZATION")
22 | API_KEY = os.getenv("API_KEY")
23 | save_path = "datas"
24 |
25 | # single interface instance; questions are generated and answered through it
26 | interface = ChatGPTInteface(API_KEY, ORGANIZATION, model_name="gpt-3.5-turbo")
27 | json_list = sorted(os.listdir(save_path))
28 |
29 | counts = []
30 | logs = []
31 | for json_item in json_list:
32 | gt_json = json.load(open(os.path.join(save_path, json_item, "scene", "answer_gt.json")))
33 | pred_json = json.load(open(os.path.join(save_path, json_item, "scene", "answer_pred_both.json")))
34 | for i in gt_json:
35 | del i["matched_coords"]
36 | del i["annotation"]
37 |
38 | for i in pred_json:
39 | del i["matched_coords"]
40 | del i["annotation"]
41 |
42 | question_types = ["SPATIAL_REASONING", "INSTANCE_ATTRIBUTE", "INSTANCE_COUNTING", "VISUAL_REASONING"]
43 | for qntype in question_types:
44 | try:
45 | response = interface.generate_question(gt_json, question_type=qntype)
46 | question_with_answer = response["choices"][0]["message"]["content"]
47 | print("response \n", question_with_answer)
48 | print("\n\n")
49 | answer_ind = response["choices"][0]["message"]["content"].lower().find("answer")
50 | generated_question = question_with_answer[:answer_ind]
51 | correct_answer = question_with_answer[answer_ind+8:]  # skip past "Answer: "
52 | print("separated question \n", generated_question)
53 | print("\n\n")
54 | print("separated answer \n", correct_answer)
55 | response = interface.generate_conversation(pred_json, generated_question, conversation_type="MCQ")
56 | chatgpt_answer = response["choices"][0]["message"]["content"]
57 | print("\n\n\n")
58 | print("selected answer \n", chatgpt_answer)
59 | count = are_strings_similar(chatgpt_answer, correct_answer)
60 | print(count)
61 | counts.append(count)
62 | logs.append([
63 | generated_question,
64 | correct_answer,
65 | chatgpt_answer
66 | ])
67 | except Exception as e:
68 | print(e)
69 | pass
70 |
71 | counts = np.array(counts)
72 | print("MCQ Accuracy: \t", (counts.sum())/len(counts))
73 | df = pd.DataFrame(data=logs, columns=["Question", "Correct Answer", "ChatGPT Answer"])
74 | df.to_csv("logs.csv")
75 |
--------------------------------------------------------------------------------
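As a quick standalone illustration (not part of the repo) of the Jaccard matching eval_mcq.py uses to grade answers: the bare "Answer:" prefix alone can push two otherwise identical answers below the 0.75 threshold, which is why the script slices the prefix off before comparing.

```python
def jaccard(a, b):
    # Same token-set Jaccard similarity as calculate_jaccard_similarity above.
    sa, sb = set(a.lower().split()), set(b.lower().split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("Answer: A. Earth", "a. earth"))  # 2/3 ~ 0.67 -> below the 0.75 threshold
print(jaccard("A. Earth", "a. earth"))          # 1.0 -> counted as a match
```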
/evaluation/eval_spops.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import re
3 | import json
4 | import decimal
5 | import os
6 |
7 | from spatial_operators import *
8 |
9 | s1 = 0
10 | s2 = 0
11 | dir1 = 'spatial_gt/' # path to GT jsons
12 | dir2 = 'spatial_pred/' # path to answer jsons
13 |
14 | files = os.listdir(dir1)
15 |
16 | for fll in files:
17 | key = fll[:-5]  # strip the '.json' extension
18 |
19 | json_gt = json.load(open(dir1+key+'.json'))
20 | json_pd = json.load(open(dir2+key+'.json'))
21 |
22 | # Question 1
23 | d1 = get_object_dist(json_gt, json_gt[0]['token'][0])
24 | d2 = get_object_dist(json_pd, json_gt[0]['token'][0])
25 | s1 += dist_score(d1, d2)
26 |
27 | # Question 2
28 | d1 = get_object_dist(json_gt, json_gt[1]['token'][0])
29 | d2 = get_object_dist(json_pd, json_gt[1]['token'][0])
30 | s1 += dist_score(d1, d2)
31 |
32 | # Question 3
33 | d1 = distance_between_objects(json_gt, json_gt[0]['token'][0], json_gt[1]['token'][0])
34 | d2 = distance_between_objects(json_pd, json_gt[0]['token'][0], json_gt[1]['token'][0])
35 | s1 += dist_score(d1, d2)
36 |
37 | # Question 4
38 | jgt = get_k_closest_jsons(json_gt, 2)
39 | jpd = get_k_closest_jsons(json_pd, 2)
40 | d1 = distance_between_objects(json_gt, jgt[0]['token'][0], jgt[1]['token'][0])
41 | d2 = distance_between_objects(json_pd, jpd[0]['token'][0], jpd[1]['token'][0])
42 | s1 += dist_score(d1, d2)
43 |
44 | dd = 20  # distance threshold used by the filtering questions below
45 |
46 | # Question 5
47 | jgt = distance_filtering(json_gt, dd)
48 | jpd = distance_filtering(json_pd, dd)
49 | s2 += iou(jgt, jpd)
50 |
51 | # Question 6
52 | jgt = get_k_closest_jsons(json_gt, 3)
53 | jpd = get_k_closest_jsons(json_pd, 3)
54 | s2 += iou(jgt, jpd)
55 |
56 | # Question 7
57 | jgt = filter_front(json_gt)
58 | jgt = distance_filtering(jgt, dd)
59 | jpd = filter_front(json_pd)
60 | jpd = distance_filtering(jpd, dd)
61 | s2 += iou(jgt, jpd)
62 |
63 | # Question 8
64 | jgt = filter_rear(json_gt)
65 | jgt = get_k_closest_jsons(jgt, 3)
66 | jpd = filter_rear(json_pd)
67 | jpd = get_k_closest_jsons(jpd, 3)
68 | s2 += iou(jgt, jpd)
69 |
70 | s1 = s1/4/7  # normalize over 4 distance questions per scene and (assumed) 7 scenes
71 | s1 = decimal.Decimal(s1)
72 | s1 = s1.quantize(decimal.Decimal('0.00'))
73 |
74 | s2 = s2/4/7  # normalize over 4 set questions per scene and (assumed) 7 scenes
75 | s2 = decimal.Decimal(s2)
76 | s2 = s2.quantize(decimal.Decimal('0.00'))
77 |
78 | print("IoU:", s2)
79 | print("Distance Score:", s1)
80 |
--------------------------------------------------------------------------------
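Note that eval_spops.py star-imports spatial_operators, which is not reproduced in this dump, so dist_score, iou, distance_filtering, and the filter_* helpers are all assumed. A plausible minimal sketch of the iou() helper, keyed on the nuScenes 'token' field the script itself uses to identify objects (the real implementation may differ):

```python
def iou(jsons_a, jsons_b):
    # Set IoU over object tokens: |A ∩ B| / |A ∪ B|.
    tokens_a = {obj['token'][0] for obj in jsons_a}
    tokens_b = {obj['token'][0] for obj in jsons_b}
    if not (tokens_a or tokens_b):
        return 1.0  # both lists empty: treat as perfect agreement
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
```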
/evaluation/talk2bev-bench/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/llmbev/talk2bev/b2b34c27fa73ee7ce0d91af5dc2683264ca0f92d/evaluation/talk2bev-bench/.gitkeep
--------------------------------------------------------------------------------