├── .docker_utils
│   ├── Dockerfile.cuda
│   ├── Dockerfile.jetson
│   └── main_dock.sh
├── .gitignore
├── .misc
│   └── abstract_figure.png
├── LICENSE
├── README.md
├── arm_requirements.txt
├── inference
│   ├── inf_gguf.py
│   ├── inf_pipeline.py
│   └── inf_transformer.py
├── linux_requirements.txt
├── llm_mpc.py
├── models
│   └── .gitkeep
├── prompts
│   ├── RAG_memory.txt
│   ├── custom_mpc_cost.txt
│   ├── example_synthetic.txt
│   ├── mpc_base_memory.txt
│   └── mpc_memory.txt
├── requirements.txt
├── tests
│   ├── __init__.py
│   ├── decision_tester
│   │   ├── README.md
│   │   ├── __init__.py
│   │   ├── decision_tester.py
│   │   ├── logs
│   │   │   └── .gitkeep
│   │   └── robot_states
│   │       ├── centerline.json
│   │       ├── close_to_wall.json
│   │       ├── forward.json
│   │       ├── oscillating.json
│   │       ├── racingline.json
│   │       ├── reversed.json
│   │       ├── speed.json
│   │       ├── stop.json
│   │       └── unused
│   │           ├── crashed_car.json
│   │           ├── data_checker.py
│   │           ├── improved_stopped_car.json
│   │           └── wall_crashed_car.json
│   └── mpc_tester
│       ├── __init__.py
│       └── mpc_tester.py
└── train
    ├── __init__.py
    ├── config
    │   └── sft_train.yaml
    ├── data_generator
    │   ├── __init__.py
    │   ├── distil_combined_reason_mpc_dataset_generator.py
    │   ├── distil_mpc_dataset_generator.py
    │   ├── distil_randomized_reasoning.py
    │   ├── distil_reason_dataset_generator.py
    │   └── synthetic_data_generator.py
    ├── dataset
    │   ├── excluded
    │   │   ├── .gitkeep
    │   │   ├── combined_reason_mpc_dataset.json
    │   │   ├── dummy_math.json
    │   │   ├── gpt_distil_reason_dataset.json
    │   │   ├── gpt_distil_reasonfromdata_dataset.json
    │   │   ├── synthetic_robot_data.json
    │   │   └── synthetic_robot_data_old.json
    │   ├── gpt_distil_mpc_dataset.json
    │   ├── jfr_paper.json
    │   ├── mpc.json
    │   ├── racing_guide.json
    │   ├── randomized_decision_making.json
    │   └── randomized_state_analysis.json
    └── sft_train.py
/.docker_utils/Dockerfile.cuda:
--------------------------------------------------------------------------------
1 | # Use an official Nvidia runtime as a parent image
2 | FROM nvidia/cuda:11.8.0-devel-ubuntu20.04
3 | SHELL [ "/bin/bash", "-c" ]
4 |
5 | # CUDA ARCH 86 for RTX3090; 89 for RTX4070
6 | ARG CUDA_ARCH=86
7 | # Set a non-interactive frontend (avoids some prompts)
8 | ARG DEBIAN_FRONTEND=noninteractive
9 |
10 | # Install necessary packages
11 | RUN apt-get update && apt-get install -y \
12 | curl \
13 | git \
14 | libgl1-mesa-glx \
15 | libglib2.0-0 \
16 | build-essential \
17 | net-tools \
18 | x11-apps \
19 | lsb-release \
20 | findutils \
21 | gnupg2 \
22 | sudo
23 |
24 | # Clean up
25 | RUN apt-get clean && \
26 | rm -rf /var/lib/apt/lists/*
27 |
28 | # Install linux requirements
29 | COPY linux_requirements.txt /embodiedai/linux_requirements.txt
30 | RUN apt-get update && apt-get install -y $(cat /embodiedai/linux_requirements.txt) && \
31 | apt-get clean && \
32 | rm -rf /var/lib/apt/lists/*
33 |
34 | COPY requirements.txt /embodiedai/requirements.txt
35 |
36 | # Set Bash as the default shell
37 | SHELL ["/bin/bash", "-c"]
38 | # Install Python 3.10
39 | RUN apt-get update && \
40 | apt-get install -y software-properties-common && \
41 | add-apt-repository ppa:deadsnakes/ppa && \
42 | apt-get update && \
43 | apt-get install -y python3.10 python3.10-dev python3.10-distutils && \
44 | update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 1 && \
45 | update-alternatives --set python3 /usr/bin/python3.10
46 |
47 | # Install pip for Python 3.10
48 | RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
49 |
50 | RUN pip install --upgrade pip setuptools wheel && \
51 | pip install packaging && \
52 | pip install torch==2.1.0+cu118 torchvision==0.16.0+cu118 torchaudio==2.1.0+cu118 --extra-index-url https://download.pytorch.org/whl/cu118
53 |
54 | # Install other Python dependencies
55 | RUN pip install -r /embodiedai/requirements.txt
56 |
57 | # Install flash-attn from GitHub (CUDA 11.8 support) using tested commit
58 | RUN git clone --recursive https://github.com/HazyResearch/flash-attention.git /tmp/flash-attn \
59 | && cd /tmp/flash-attn \
60 | && git checkout fd2fc9d85c8e54e5c20436465bca709bc1a6c5a1 \
61 | && pip install . \
62 | && rm -rf /tmp/flash-attn
63 |
64 |
65 | # Install llama-cpp-python separately because of GPU support
66 | # Set ENV with CUDA arch
67 | ENV CMAKE_ARGS="-DGGML_CUDA=on -DLLAVA_BUILD=off -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH} -DCUDA_PATH=/usr/local/cuda-11.8 -DCUDAToolkit_ROOT=/usr/local/cuda-11.8 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-11/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-11.8/lib64"
68 | ENV FORCE_CMAKE=1
69 | ENV LD_LIBRARY_PATH="/usr/local/cuda-11.8/compat"
70 | RUN pip install llama-cpp-python==0.3.5 --no-cache-dir
71 |
72 | # Set the working directory
73 | WORKDIR /embodiedai
74 |
--------------------------------------------------------------------------------
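A quick way to verify the image built above is a short smoke test run inside the container. The snippet below is only an illustrative sketch (not part of the repository) and assumes the container was started via `.docker_utils/main_dock.sh cuda`:

```python
# Illustrative smoke test: confirms the GPU-enabled packages installed by
# Dockerfile.cuda import correctly and can see the GPU.
import torch
import flash_attn   # built from source in the Dockerfile
import llama_cpp    # llama-cpp-python built with -DGGML_CUDA=on

print("torch", torch.__version__, "| CUDA build:", torch.version.cuda)
print("CUDA available:", torch.cuda.is_available())
print("flash-attn", flash_attn.__version__)
print("llama-cpp-python", llama_cpp.__version__)
```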
/.docker_utils/Dockerfile.jetson:
--------------------------------------------------------------------------------
1 | # Use a dusty image with torch
2 | FROM dustynv/l4t-pytorch:r36.2.0
3 | SHELL [ "/bin/bash", "-c" ]
4 |
5 | # Set a non-interactive frontend (avoids some prompts)
6 | ARG DEBIAN_FRONTEND=noninteractive
7 |
8 | # Install necessary packages
9 | RUN apt-get update && apt-get install -y \
10 | curl \
11 | wget \
12 | git \
13 | libgl1-mesa-glx \
14 | libglib2.0-0 \
15 | build-essential \
16 | net-tools \
17 | x11-apps \
18 | lsb-release \
19 | gnupg2 \
20 | findutils \
21 | python3-pip
22 |
23 | # Clean up
24 | RUN apt-get clean && \
25 | rm -rf /var/lib/apt/lists/*
26 |
27 | # Add the Debian buster repository (needed for the TeX Live packages) and its GPG keys
28 | RUN echo "deb http://ftp.de.debian.org/debian buster main" >> /etc/apt/sources.list && \
29 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 648ACFD622F3D138 && \
30 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 0E98404D386FA1D9 && \
31 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys DCC9EFBF77E11517 && \
32 | apt-get update
33 |
34 | # Copy the Embodied AI Linux requirements
35 | COPY linux_requirements.txt /embodiedai/linux_requirements.txt
36 |
37 | COPY arm_requirements.txt /embodiedai/arm_requirements.txt
38 | RUN pip install -r /embodiedai/arm_requirements.txt
39 |
40 | # Install llama-cpp-python separately because of GPU support
41 | ENV CMAKE_ARGS="-DGGML_CUDA=on -DCUDA_PATH=/usr/local/cuda-11.4 -DCUDAToolkit_ROOT=/usr/local/cuda-11.4 -DCUDAToolkit_INCLUDE_DIR=/usr/local/cuda-11/include -DCUDAToolkit_LIBRARY_DIR=/usr/local/cuda-11.4/lib64"
42 | ENV FORCE_CMAKE=1
43 | RUN pip install llama-cpp-python --no-cache-dir
44 |
45 | # Set the working directory
46 | WORKDIR /embodiedai
47 |
--------------------------------------------------------------------------------
/.docker_utils/main_dock.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | XAUTH=$HOME/.Xauthority
4 |
5 | # Determine the directory where the script is running
6 | SCRIPT_DIR="$(dirname "$(realpath "$0")")"
7 |
8 | # Use a relative path for the data directory
9 | DATA_DIR="$SCRIPT_DIR/../"
10 | echo "Data directory mounting: $DATA_DIR"
11 |
12 | # Check if the first argument is 'cuda' or 'jetson'
13 | GPU_OPTION=""
14 | EXTRA_MOUNTS=""
15 | if [ "$1" == "cuda" ]; then
16 | GPU_OPTION="--gpus all"
17 | elif [ "$1" == "jetson" ]; then
18 | GPU_OPTION="--gpus all --runtime=nvidia"
19 | EXTRA_MOUNTS="--volume /run/jtop.sock:/run/jtop.sock"
20 | fi
21 |
22 | # Check if a second argument is provided for the container name, else use default
23 | CONTAINER_NAME="${2:-embodiedai_dock}"
24 |
25 | docker run --privileged -it \
26 | --env DISPLAY=$DISPLAY \
27 | --net=host \
28 | --volume /dev:/dev \
29 | --volume $XAUTH:/root/.Xauthority \
30 | --volume /tmp/.X11-unix:/tmp/.X11-unix \
31 | --volume="$DATA_DIR:/embodiedai" \
32 | $EXTRA_MOUNTS \
33 | --name="$CONTAINER_NAME" \
34 | --detach \
35 | $GPU_OPTION \
36 | --rm \
37 | --shm-size=1gb \
38 | embodiedai:latest \
39 | /bin/bash
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Never push .env files because they contain sensitive information
2 | .env
3 |
4 | #logs
5 | **/logs/*.txt
6 | **/logs/**/
7 |
8 | #images
9 | *.png
10 | *.jpg
11 | *.jpeg
12 | *.JPG
13 |
14 | #pycache
15 | **/__pycache__/
16 |
17 | #vscode
18 | .vscode/
19 |
20 | #eval
21 | eval/*
22 |
23 | #No GGUFs
24 | *.gguf
25 |
26 | #Unsloth stuff
27 | **/_unsloth*
28 | **/unsloth_*
29 |
30 | #models
31 | models/*
32 |
33 | #wandb
34 | wandb/
35 |
36 | #MPC tester output
37 | tests/mpc_tester/eval/*
38 |
39 | #train outputs
40 | train/outputs/*
41 | train/dataset/combined/*
--------------------------------------------------------------------------------
/.misc/abstract_figure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/.misc/abstract_figure.png
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Enhancing Autonomous Driving Systems with On-Board Deployed Large Language Models
2 |
3 | This repository accompanies our [RSS 2025 paper](https://arxiv.org/abs/2504.11514), **Enhancing Autonomous Driving Systems with On-Board Deployed Large Language Models**. It provides the codebase for the **MPCxLLM** and **DecisionxLLM** modules, alongside tools for training, testing, and deployment.
4 |
5 |
6 |
7 |
8 |
9 | Watch an explanatory YouTube video accompanying the paper [here](https://www.youtube.com/watch?v=4iGN1uBl4v4).
10 |
11 | ## 🚀 Installation
12 |
13 | ### CUDA Platform (e.g., RTX 30xx / 40xx)
14 |
15 | 1. Build the Docker container (adapt `CUDA_ARCH` accordingly: `86` for RTX 30xx, `89` for 40xx):
16 | ```bash
17 | docker build --build-arg CUDA_ARCH= -t embodiedai -f .docker_utils/Dockerfile.cuda .
18 | ```
19 |
20 | 2. Mount the container to the project directory:
21 | ```bash
22 | ./.docker_utils/main_dock.sh cuda
23 | ```
24 |
25 | 3. Attach to the container:
26 | ```bash
27 | docker exec -it embodiedai /bin/bash
28 | ```
29 | or use VS Code Remote Containers.
30 |
31 | ---
32 |
33 | ### Jetson Platform (e.g., Orin AGX)
34 |
35 | 1. Build the ARM-compatible Docker image:
36 | ```bash
37 | docker build -t embodiedai -f .docker_utils/Dockerfile.jetson .
38 | ```
39 | **Note that on the Jetson, unsloth cannot be installed (as of 07.05.2025), so only inference with quantized models is possible!**
40 |
41 | 2. Mount and launch the container:
42 | ```bash
43 | ./.docker_utils/main_dock.sh jetson
44 | ```
45 |
46 | 3. Attach via terminal or VS Code.
47 |
48 | ### Create .env File
49 | Create a `.env` file in the root directory with the following content:
50 | ```bash
51 | HUGGINGFACEHUB_API_TOKEN=""
52 | OPENAI_API_TOKEN=""
53 | ```
54 | This is needed for downloading models and for using the OpenAI API, which is required if you want to use `gpt-4o` or the modules with their RAG embeddings. **Make sure to keep this file private!**
55 |
56 | ### Download Models (optional)
57 | You can use the LoRA + RAG SFT trained FP16 model [nibauman/RobotxLLM_Qwen7B_SFT](https://huggingface.co/nibauman/RobotxLLM_Qwen7B_SFT) directly from HuggingFace
58 | without having to download it locally. If you want to use the quantized model, you can download it with the following command:
59 |
60 | ```bash
61 | huggingface-cli download nibauman/race_llm-Q5_K_M-GGUF --local-dir models/race_llm_q5
62 | ```
63 |
64 | ---
65 |
66 | ## 🧠 Usage
67 |
68 | This repo integrates with the [ForzaETH Race Stack](https://github.com/ForzaETH/race_stack). Follow their installation instructions and ensure your `ROS_MASTER_URI` is correctly configured (see [example line](https://github.com/ForzaETH/race_stack/blob/main/.devcontainer/.install_utils/bashrc_ext#L12)). Throughout this README, 192.168.192.75 is used as the example IP.
69 |
70 | ### On the Robot Stack
71 | Run each command in a separate terminal.
72 | ```bash
73 | roscore
74 | roslaunch stack_master base_system.launch map_name:=f racecar_version:=NUC2 sim:=true
75 | roslaunch stack_master timetrials.launch ctrl_algo:=KMPC
76 | roslaunch rosbridge_server rosbridge_websocket.launch address:=192.168.192.75
77 | ```
78 |
79 | ### On the LLM Machine
80 |
81 | ```bash
82 | python3 llm_mpc.py --model custom --model_dir nibauman/RobotxLLM_Qwen7B_SFT --hostip 192.168.192.75 --prompt "Drive in Reverse!"
83 | ```
84 |
85 | **Key Options:**
86 |
87 | - `--model`: `custom` or `gpt-4o`
88 | - `--model_dir`: HuggingFace or local path (used for `custom`)
89 | - `--hostip`: ROS master IP
90 | - `--prompt`: Natural language instruction
91 | - `--quant`: Use quantized `GGUF` model
92 | - `--mpconly`: Skip DecisionxLLM
93 |
94 | As an **example**, on the **Jetson** you can only run quantized models, using the models downloaded to the `models` folder as explained above. Run the following command to test the quantized model:
95 | ```bash
96 | python3 llm_mpc.py --model custom --model_dir models/race_llm_q5 --hostip 192.168.192.75 --prompt "Drive in Reverse!" --quant
97 | ```
98 | ---
99 |
100 | ## 🏋️ Training
101 |
102 | To train a new LoRA adapter on synthetic data:
103 |
104 | ```bash
105 | python3 -m train.sft_train --config train/config/sft_train.yaml
106 | ```
107 |
108 | You can modify `sft_train.yaml` to change the model or dataset. Default setup:
109 |
110 | - Base: `unsloth/Qwen2.5-7B-Instruct`
111 | - Dataset: `train/dataset/`
112 | - Output: `train/outputs/`
113 |
114 | ---
115 |
116 | ## 📊 Evaluation
117 |
118 | ### MPCxLLM Evaluation (requires autonomy stack)
119 |
120 | ```bash
121 | python3 -m tests.mpc_tester.mpc_tester --model custom --model_dir nibauman/RobotxLLM_Qwen7B_SFT --host_ip 192.168.192.75
122 | ```
123 |
124 | ### DecisionxLLM Evaluation (autonomy stack not required)
125 |
126 | ```bash
127 | python3 -m tests.decision_tester.decision_tester --model nibauman/RobotxLLM_Qwen7B_SFT --dataset all --mini --rag
128 | ```
129 |
130 | **Evaluation Options:**
131 |
132 | - `--dataset`: e.g., `all`, `stop`, `reverse`, etc.
133 | - `--mini`: Run a small evaluation subset
134 | - `--rag`: Enable retrieval-augmented decision prompts
135 | - `--quant`: Use quantized model
136 | ---
137 | ## Acknowledgements
138 | SFT training was performed through the distillation of [OpenAI GPT-4o](https://openai.com/index/hello-gpt-4o/) queries.
139 | This work would not have been possible without the great work of other repositories such as:
140 | - [llama.cpp](https://github.com/ggml-org/llama.cpp)
141 | - [Hugging Face](https://github.com/huggingface)
142 | - [unsloth](https://github.com/unslothai/unsloth)
143 | - [roslibpy](https://github.com/gramaziokohler/roslibpy)
144 | ---
145 |
146 | ## 📄 Citation
147 |
148 | If this repository is useful for your research, please consider citing our work:
149 |
150 | ```bibtex
151 | @article{baumann2025enhancing,
152 | title={Enhancing Autonomous Driving Systems with On-Board Deployed Large Language Models},
153 | author={Baumann, Nicolas and Hu, Cheng and Sivasothilingam, Paviththiren and Qin, Haotong and Xie, Lei and Magno, Michele and Benini, Luca},
154 | journal={arXiv preprint arXiv:2504.11514},
155 | year={2025}
156 | }
157 | ```
158 |
--------------------------------------------------------------------------------
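The `.env` tokens described in the README above are read at runtime via `python-dotenv` (pinned in `requirements.txt`). A minimal, purely illustrative sketch of that loading step, using the variable names from the README:

```python
# Illustrative only: load the API tokens from the .env file described in the README.
import os
from dotenv import load_dotenv  # python-dotenv, listed in requirements.txt

load_dotenv()  # expects .env in the current working directory (repo root)

hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
openai_token = os.getenv("OPENAI_API_TOKEN")
if not openai_token:
    print("OPENAI_API_TOKEN is missing: gpt-4o and the RAG embeddings will not be usable.")
```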
/arm_requirements.txt:
--------------------------------------------------------------------------------
1 | # Huggingface
2 | python-dotenv==1.0.1
3 | sentencepiece==0.2.0
4 | tokenizers==0.21.0
5 | transformers==4.47.1
6 | pillow==10.3.0
7 |
8 | # Langchain
9 | langchain==0.3.12
10 | langchain-experimental==0.3.3
11 | unstructured[md]==0.16.11
12 | langchain-openai==0.2.12
13 |
14 | # Unsloth & related
15 | scipy==1.14.1
16 | trl==0.13.0
17 | peft==0.14.0
18 | accelerate==1.2.1
19 | bitsandbytes==0.44.1
20 |
21 | # ROS stuff
22 | roslibpy==1.8.0
23 |
24 | # misc
25 | matplotlib==3.10.0
--------------------------------------------------------------------------------
/inference/inf_gguf.py:
--------------------------------------------------------------------------------
1 | from llama_cpp import Llama
2 | import os
3 |
4 | class RaceLLMGGGUF:
5 | def __init__(self, model_dir, gguf_name, max_tokens=256):
6 | self.max_tokens = max_tokens
7 | self.path = os.path.join(model_dir, gguf_name)
8 | self.llm = Llama(
9 | model_path=self.path,
10 | chat_format="llama-2",
11 | n_gpu_layers=1000,
12 | n_ctx=2048,
13 | n_batch=2048,
14 | seed=42,
15 | verbose=False
16 | )
17 |
18 | def __call__(self, input_text):
19 | output = self.llm.create_chat_completion(
20 | messages=[{
21 | "role": "user",
22 | "content": input_text
23 | }],
24 | max_tokens=self.max_tokens,
25 | temperature=0.0,
26 | top_k=1,
27 | stop=["[/INST]", "[\/INST]", "[;/INST] ", "[INST]", "[/?]", "[/Dk]", "[;/Rationale]", "[Rationale]", "[;/Action]", "[;/Explanation]"],
28 | )
29 | out_text = output['choices'][0]['message']['content']
30 | input_tokens = output['usage']['prompt_tokens']
31 | out_tokens = output['usage']['completion_tokens']
32 | return out_text, input_tokens, out_tokens
33 |
34 | # Loads Prompt with hints
35 | def load_prompt(prompt_type) -> str:
36 | if 'reasoning' in prompt_type:
37 | hints_dir = os.path.join('../', 'prompts/reasoning_hints.txt')
38 | with open(hints_dir, 'r') as f:
39 | reasoning_hints = f.read()
40 | return reasoning_hints
41 | elif 'synthetic' in prompt_type:
42 | hints_dir = os.path.join('../', 'prompts/example_synthetic.txt')
43 | with open(hints_dir, 'r') as f:
44 | synthetic_hints = f.read()
45 | return synthetic_hints
46 | else:
47 | raise ValueError(f"Prompt type {prompt_type} not recognized. Please use 'reasoning' or 'synthetic'.")
--------------------------------------------------------------------------------
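A usage sketch for the `RaceLLMGGGUF` wrapper above. It assumes the quantized model from the README has been downloaded to `models/race_llm_q5`; the `.gguf` file name below is a placeholder and must match whatever file actually lives in that folder.

```python
# Illustrative usage of RaceLLMGGGUF (run from the repository root).
from inference.inf_gguf import RaceLLMGGGUF

llm = RaceLLMGGGUF(
    model_dir="models/race_llm_q5",
    gguf_name="race_llm-Q5_K_M.gguf",  # placeholder: use the actual downloaded file name
    max_tokens=256,
)
answer, prompt_tokens, completion_tokens = llm("Is the car currently reversing?")
print(answer, f"({prompt_tokens} prompt / {completion_tokens} completion tokens)")
```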
/inference/inf_pipeline.py:
--------------------------------------------------------------------------------
1 | from transformers import Pipeline, BitsAndBytesConfig
2 | from unsloth import FastLanguageModel
3 | from unsloth.chat_templates import get_chat_template
4 | import os, time
5 | import numpy as np
6 | import torch
7 |
8 | CHAT_TEMPLATE_OPTIONS = ["phi-3", "qwen-2.5"]
9 |
10 | class RaceLLMPipeline(Pipeline):
11 | def __init__(self, chat_template, model_dir=None, max_seq_length=2048, max_new_tokes=512, dtype=torch.float16, load_in_4bit=False, model=None, tokenizer=None):
12 | os.environ["TOKENIZERS_PARALLELISM"] = "false" # Avoids a warning
13 | self.chat_template = chat_template
14 | self.max_new_tokes = max_new_tokes
15 |
16 | # Load model and tokenizer via dir if not passed directly
17 | if model is None or tokenizer is None:
18 | model, tokenizer = self._load_model_from_dir(model_dir, load_in_4bit, dtype, chat_template, max_seq_length)
19 | else:
20 | print("Model and tokenizer passed directly, skipping loading from dir.")
21 |
22 | self.model = model
23 | self.tokenizer = tokenizer
24 |
25 | super().__init__(model=self.model, tokenizer=self.tokenizer)
26 |
27 | def _load_model_from_dir(self, model_dir, load_in_4bit, dtype, chat_template, max_seq_length):
28 | # Config for Quantization
29 | quantization_config = BitsAndBytesConfig(
30 | load_in_4bit=load_in_4bit,
31 | bnb_4bit_compute_dtype=dtype,
32 | )
33 |
34 | model, tokenizer = FastLanguageModel.from_pretrained(
35 | model_name=model_dir,
36 | max_seq_length=max_seq_length,
37 | dtype=dtype,
38 | quantization_config=quantization_config,
39 | )
40 | FastLanguageModel.for_inference(model)
41 |
42 | if chat_template == "phi-3":
43 | tokenizer = get_chat_template(
44 | tokenizer,
45 | chat_template=chat_template,
46 | mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},
47 | )
48 | elif chat_template == "qwen-2.5":
49 | tokenizer = get_chat_template(
50 | tokenizer,
51 | chat_template="qwen-2.5",
52 | mapping={"role": "role", "content": "content", "user": "user", "assistant": "assistant"},
53 | )
54 | else:
55 | raise ValueError(f"Chat template {chat_template} not recognized. Please use 'phi-3' or 'qwen-2.5'.")
56 |
57 | # Add pad token if it does not exist
58 | if tokenizer.pad_token is None:
59 | tokenizer.add_special_tokens({'pad_token': ''})
60 | model.resize_token_embeddings(len(tokenizer))
61 |
62 | return model, tokenizer
63 |
64 |
65 | def _sanitize_parameters(self, **pipeline_parameters):
66 | preprocess_params = {}
67 | forward_params = {}
68 | postprocess_params = {}
69 |
70 | return preprocess_params, forward_params, postprocess_params
71 |
72 | def preprocess(self, text, **preprocess_params):
73 | messages = [{"from": "human", "value": text}] if self.chat_template == "phi-3" else [{"role": "user", "content": text}]
74 | encoded = self.tokenizer.apply_chat_template(
75 | messages,
76 | tokenize=True,
77 | add_generation_prompt=True,
78 | return_tensors="pt",
79 | ).to("cuda")
80 |
81 | # Manually construct inputs
82 | inputs = {
83 | "input_ids": encoded, # Direct tensor
84 | "attention_mask": (encoded != self.tokenizer.pad_token_id).long(), # Manually create attention mask
85 | }
86 |
87 | return inputs
88 |
89 | def _forward(self, model_inputs, **forward_params):
90 | outputs = self.model.generate(
91 | input_ids=model_inputs["input_ids"],
92 | attention_mask=model_inputs["attention_mask"], # Explicit attention mask
93 | max_new_tokens=self.max_new_tokes,
94 | use_cache=True,
95 | temperature=1.0,
96 | do_sample=False, # Greedy decoding
97 | top_k=20, # 20 Qwen recommended
98 | top_p=0.8, # 0.8 Qwen recommended
99 | repetition_penalty=1.0, # 1.0 = no penalty
100 | encoder_repetition_penalty=1.0, # 1.0 = no penalty
101 | length_penalty=-1.0, # < 0.0 encourages shorter sentences
102 | )
103 | return self.tokenizer.batch_decode(outputs)
104 |
105 | def postprocess(self, model_outputs, **postprocess_params):
106 | if self.chat_template == "phi-3":
107 | # Filter out the chat template, we want what is between <|assistant|> and <|end|>
108 | model_outputs = [output.split("<|assistant|>")[1].split("<|end|>")[0].strip() for output in model_outputs]
109 | return model_outputs[0], None, None
110 | elif self.chat_template == "qwen-2.5":
111 | # Filter out the chat template for Qwen-2.5, extract content between <|im_start|>assistant and <|im_end|>
112 | model_outputs = [
113 | output.split("<|im_start|>assistant")[1].split("<|im_end|>")[0].strip()
114 | if "<|im_start|>assistant" in output and "<|im_end|>" in output
115 | else output # Fallback to raw output if markers are missing
116 | for output in model_outputs
117 | ]
118 | return model_outputs[0], None, None
119 | else:
120 | raise ValueError(f"Chat template {self.chat_template} not recognized.")
121 |
122 | # Loads Prompt with hints
123 | def load_prompt(prompt_type) -> str:
124 | if 'reasoning' in prompt_type:
125 | hints_dir = os.path.join('../', 'prompts/reasoning_hints.txt')
126 | with open(hints_dir, 'r') as f:
127 | reasoning_hints = f.read()
128 | return reasoning_hints
129 | elif 'synthetic' in prompt_type:
130 | hints_dir = os.path.join('../', 'prompts/example_synthetic.txt')
131 | with open(hints_dir, 'r') as f:
132 | synthetic_hints = f.read()
133 | return synthetic_hints
134 | else:
135 | raise ValueError(f"Prompt type {prompt_type} not recognized. Please use 'reasoning' or 'synthetic'.")
136 |
--------------------------------------------------------------------------------
/inference/inf_transformer.py:
--------------------------------------------------------------------------------
1 | from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
2 | import torch
3 | import os
4 |
5 | class RaceLLMTransformer:
6 | def __init__(self, model_dir, max_seq_length=2048, max_new_tokens=512, chat_template="phi-3", dtype=torch.float16, load_in_4bit=False):
7 | os.environ["TOKENIZERS_PARALLELISM"] = "false" # Avoids a warning
8 | self.model_dir = model_dir
9 | self.max_seq_length = max_seq_length
10 | self.dtype = dtype
11 | self.max_new_tokens = max_new_tokens
12 | self.chat_template = chat_template
13 |
14 | # Config for Quantization
15 | quantization_config = BitsAndBytesConfig(
16 | load_in_4bit=load_in_4bit,
17 | bnb_4bit_compute_dtype=dtype,
18 | )
19 |
20 | # Load model and tokenizer from transformers
21 | self.model = AutoModelForCausalLM.from_pretrained(
22 | model_dir,
23 | torch_dtype=dtype,
24 | quantization_config=quantization_config,
25 | device_map="auto", # Automatically assigns layers to devices
26 | )
27 | self.tokenizer : AutoTokenizer = AutoTokenizer.from_pretrained(model_dir)
28 |
29 | # Add pad and eos tokens if not present
30 | if self.tokenizer.pad_token is None:
31 | self.tokenizer.add_special_tokens({'pad_token': ''})
32 | if self.tokenizer.eos_token is None:
33 | self.tokenizer.add_special_tokens({'eos_token': '<|end|>'})
34 | self.model.resize_token_embeddings(len(self.tokenizer))
35 |
36 | # Move model to CUDA if available
37 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
38 | self.model.to(self.device)
39 |
40 | def __call__(self, message):
41 | # Preprocess the input using a chat template
42 | inputs = self.preprocess(message)
43 |
44 | # Perform the forward pass
45 | outputs = self._forward(inputs)
46 |
47 | # Postprocess the output
48 | return self.postprocess(outputs, message)
49 |
50 | def preprocess(self, text):
51 | # Prepare messages based on the chat template
52 | if self.chat_template == "phi-3":
53 | messages = [{"from": "human", "value": text}]
54 | elif self.chat_template == "qwen-2.5":
55 | messages = [{"role": "user", "content": text}]
56 | else:
57 | raise ValueError(f"Chat template {self.chat_template} not recognized.")
58 |
59 | # Tokenize and construct inputs
60 | encoded = self.tokenizer.apply_chat_template(
61 | messages,
62 | tokenize=True,
63 | add_generation_prompt=True,
64 | return_tensors="pt",
65 | )
66 | # If `encoded` is already a Tensor
67 | if isinstance(encoded, torch.Tensor):
68 | input_ids = encoded.to(self.device)
69 | else:
70 | # Otherwise, handle the structure with `input_ids` and `attention_mask`
71 | input_ids = encoded["input_ids"].to(self.device)
72 |
73 | attention_mask = (input_ids != self.tokenizer.pad_token_id).long()
74 |
75 | return {"input_ids": input_ids, "attention_mask": attention_mask}
76 |
77 | def _forward(self, model_inputs, **forward_params):
78 | # Generate model outputs
79 | outputs = self.model.generate(
80 | input_ids=model_inputs["input_ids"],
81 | attention_mask=model_inputs["attention_mask"],
82 | max_new_tokens=self.max_new_tokens,
83 | use_cache=True,
84 | temperature=1.0,
85 | do_sample=False, # Greedy decoding ignores `temperature`, `top_k`, and `top_p`
86 | top_k=20,
87 | top_p=0.8,
88 | repetition_penalty=1.0,
89 | encoder_repetition_penalty=1.0,
90 | length_penalty=-1.0,
91 | num_beams=2,
92 | )
93 | return outputs
94 |
95 | def postprocess(self, outputs, input_text):
96 | # Decode outputs and filter response
97 | decoded = self.tokenizer.batch_decode(outputs, skip_special_tokens=False)
98 | if self.chat_template == "phi-3":
99 | model_outputs = [output.split("<|assistant|>")[1].split("<|endoftext|>")[0].strip() for output in decoded]
100 | elif self.chat_template == "qwen-2.5":
101 | model_outputs = [
102 | output.split("<|im_start|>assistant")[1].split("<|im_end|>")[0].strip()
103 | if "<|im_start|>assistant" in output and "<|im_end|>" in output
104 | else output # Fallback to raw output if markers are missing
105 | for output in decoded
106 | ]
107 |
108 | # TODO: This slows down the inference
109 | # Tokenize input and output text
110 | input_tokens = self.tokenizer.tokenize(input_text)
111 | output_tokens = self.tokenizer.tokenize(model_outputs[0])
112 |
113 | return model_outputs[0], len(input_tokens), len(output_tokens)
114 |
--------------------------------------------------------------------------------
/linux_requirements.txt:
--------------------------------------------------------------------------------
1 | pandoc
2 | texlive-latex-base
3 | texlive-fonts-recommended
4 | texlive-fonts-extra
5 | texlive-latex-extra
--------------------------------------------------------------------------------
/models/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/models/.gitkeep
--------------------------------------------------------------------------------
/prompts/RAG_memory.txt:
--------------------------------------------------------------------------------
1 | # Hint 1:
2 | If the d-speed is above 0.5m/s, it is considered high.
3 |
4 | # Hint 2:
5 | Unless specified differently by the human, the car is usually driving at speeds between 5 and 7m/s.
6 |
7 | # Hint 3:
8 | If the distance to a wall is smaller than 0.4m, the car is close to that wall. Staying close to the wall means maintaining a consistent distance below 0.4m.
9 |
10 | # Hint 4:
11 | If the s-speed is close to 0m/s, then the car is stopped.
12 |
13 | # Hint 5:
14 | The car is reversing if the s-speed is negative or if the s-coordinate is reducing over multiple samples.
15 |
16 | # Hint 6:
17 | A d-coordinate above 0.3m is considered not to be on the racing line.
18 |
19 | # Hint 7:
20 | If the d-coordinate oscillates between positive and negative values with a magnitude larger than 0.3 metres, then the car is oscillating around the racing line.
21 |
22 | # Hint 8:
23 | Oscillations in d-coordinate under 0.3m or d-speed under 0.5m/s are negligible.
24 |
25 | # Hint 9:
26 | If the human requires you to drive at a certain speed, first calculate the current average speed and compare it to what the human wants.
27 |
28 | # Hint 10:
29 | You are on the centerline if the left and right distances to the walls are about equal.
30 |
31 | # Hint 11:
32 | Unless told otherwise, you should be driving forwards!
--------------------------------------------------------------------------------
/prompts/custom_mpc_cost.txt:
--------------------------------------------------------------------------------
1 | This is a dummy example of a custom cost function for the MPC. The cost function is defined as a weighted sum of the following terms:
2 |
3 | ```python
4 |
5 | import numpy as np
6 |
7 | #cost function
8 | def custom_cost_function(x, u, p):
9 | # Define parameters and matrix
10 | weight_n = 100
11 | weight_alpha = 0
12 | weight_v = 0
13 | weight_der_v = 0
14 | weight_derDelta = 0
15 | V_target = 1 # Target velocity
16 | R = np.diag([1, 1]) # Control effort weights
17 |
18 | # Cost expression
19 | cost_expr = (
20 | weight_n * x[1]**2 +
21 | weight_alpha * x[2]**2 +
22 | weight_v * (x[3] - V_target)**2 +
23 | weight_der_v * u[0]**2 +
24 | weight_derDelta * u[1]**2 +
25 | u.T @ R @ u #It needs the input cost within the cost function
26 | )
27 | return cost_expr
28 |
29 | #Terminal cost function
30 | def custom_cost_function_e(x, u, p):
31 | # Define parameters and matrix
32 | weight_n = 100
33 | weight_alpha = 0
34 | weight_v = 0
35 | weight_der_v = 0
36 | weight_derDelta = 0
37 | V_target = 1 # Target velocity
38 |
39 | # Cost expression
40 | cost_expr = (
41 | weight_n * x[1]**2 +
42 | weight_alpha * x[2]**2 +
43 | weight_v * (x[3] - V_target)**2 +
44 | weight_der_v * u[0]**2 +
45 | weight_derDelta * u[1]**2
46 | )
47 | return cost_expr
48 |
49 | ```
50 |
--------------------------------------------------------------------------------
/prompts/example_synthetic.txt:
--------------------------------------------------------------------------------
1 | The data has been sampled for 2.0 seconds in 5 samples.
2 | - The car's position along the racing line is given by the s-coordinate: [60.0, 60.25, 60.5, 60.75, 61.0]
3 |
4 | - The car's lateral deviation from the racing line is given by the d-coordinate: [0.0, -0.5753545647978832, -0.32641266653362183, 0.3901727040942701, 0.5477671504365765]
5 |
6 | - The car's speed along the racing line is given by the s-speed: [0.9576145990634815, 0.5100690745118428, 0.598160519871164, 0.789294135979236, 0.8423945285141192]
7 |
8 | - The car's speed perpendicular to the racing line is given by the d-speed: [-0.5902566276033918, -0.59449723461161, -0.08489652140578396, 0.0512708009967443, -0.03745596817162955]
9 |
10 | - The distance to the left wall is: [2.694233813545882, 2.605423973892499, 2.1277900096124538, 2.9040873438282255, 3.194765065388279]
11 |
12 | - The distance to the right wall is: [0.316882708096204, 0.24300351431392975, 0.30333191336205745, 0.3116525004013847, 0.31631822033359946]
13 |
14 | - Bool if the car has crashed: True
15 |
16 | - Bool if the car is facing the wall: False
17 |
18 | Explanation: This data depicts a scenario where the car is oscillating around the racing line, has dangerously high d-speed, is close to the right wall, and has crashed.
--------------------------------------------------------------------------------
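The explanation above can be checked mechanically against the thresholds in `prompts/RAG_memory.txt` (0.3 m for d-oscillation, 0.5 m/s for high d-speed, 0.4 m for wall proximity). The following snippet is an illustrative sketch, not repository code:

```python
# Illustrative check of the synthetic sample against the RAG_memory.txt thresholds.
d_pos   = [0.0, -0.5753545647978832, -0.32641266653362183, 0.3901727040942701, 0.5477671504365765]
d_speed = [-0.5902566276033918, -0.59449723461161, -0.08489652140578396, 0.0512708009967443, -0.03745596817162955]
d_right = [0.316882708096204, 0.24300351431392975, 0.30333191336205745, 0.3116525004013847, 0.31631822033359946]

# Hint 7: oscillating if d flips sign between samples with magnitudes above 0.3 m
oscillating = any(a * b < 0 and abs(a) > 0.3 and abs(b) > 0.3 for a, b in zip(d_pos, d_pos[1:]))
# Hint 1: a d-speed above 0.5 m/s is high
high_d_speed = max(abs(v) for v in d_speed) > 0.5
# Hint 3: closer than 0.4 m to a wall counts as close
close_to_right_wall = all(d < 0.4 for d in d_right)

print(oscillating, high_d_speed, close_to_right_wall)  # True True True
```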
/prompts/mpc_base_memory.txt:
--------------------------------------------------------------------------------
1 | This is the cost expression of the MPC, where all weights are tuneable:
2 |
3 | model.cost_expr_ext_cost = (
4 | weight_qn * n**2 +
5 | weight_qalpha * alpha**2 +
6 | weight_qv * (v - V_target)**2 +
7 | weight_qac * der_v**2 +
8 | weight_ddelta * derDelta**2 +
9 | u.T @ R @ u
10 | )
11 |
12 |
13 | Here are the tuneable parameters of cost weights and constraints (STRICTLY ADHERE TO THESE NAMES, DONT INVENT NEW ONES!!!):
14 | # param: min, max, default # description
15 | qv 0, 2, 10 # weight for velocity tracking -> minimizes error in velocity tracking of the racing line
16 | qn 0, 100, 20 # weight for lateral deviation tracking -> minimizes error in lateral deviation tracking of the racing line
17 | qalpha 0, 100, 7 # weight for heading tracking -> minimizes error in heading error tracking of the racing line
18 | qac 0, 1, 0.01 # weight for limiting the acceleration -> minimizes the acceleration of the vehicle
19 | qddelta 0, 100, 0.1 # weight for limiting the steering angle rate -> minimizes the rate of change of the steering angle of the vehicle
20 | alat_max 0, 20, 10 # maximum lateral acceleration -> constraints the lateral acceleration to be below alat_max
21 | a_min -20, 0, -5 # minimum acceleration -> constraints the acceleration to be above a_min
22 | a_max 0, 20, 5 # maximum acceleration -> constraints the acceleration to be below a_max
23 | v_min -2, 5, 1 # minimum velocity -> constraints the velocity to be above v_min
24 | v_max -1, 10, 5 # maximum velocity -> constraints the velocity to be below v_max
25 | track_safety_margin 0, 1.0, 0.45 # inflation of the boundary of the track to increase the safety margin to the track walls
--------------------------------------------------------------------------------
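Any `mpc_params` proposed by the LLM ultimately has to respect the bounds listed above. A small illustrative sketch (not repository code) that drops unknown keys and clips known ones to their min/max range:

```python
# Illustrative only: sanitize an LLM-proposed mpc_params dict against the table above.
BOUNDS = {  # param: (min, max) from mpc_base_memory.txt
    "qv": (0, 2), "qn": (0, 100), "qalpha": (0, 100), "qac": (0, 1), "qddelta": (0, 100),
    "alat_max": (0, 20), "a_min": (-20, 0), "a_max": (0, 20),
    "v_min": (-2, 5), "v_max": (-1, 10), "track_safety_margin": (0, 1.0),
}

def clamp_mpc_params(mpc_params: dict) -> dict:
    """Keep only the listed parameter names and clip each value into its allowed range."""
    safe = {}
    for name, value in mpc_params.items():
        if name in BOUNDS:
            lo, hi = BOUNDS[name]
            safe[name] = min(max(float(value), lo), hi)
    return safe

print(clamp_mpc_params({"v_max": 12.0, "track_safety_margin": 0.6, "bogus": 1.0}))
# -> {'v_max': 10.0, 'track_safety_margin': 0.6}
```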
/prompts/mpc_memory.txt:
--------------------------------------------------------------------------------
1 | # Memory Entry 0:
2 | Scenario:
3 | To force going forwards v_min should be positive. If you want it to be able to reverse, then set v_min to negative.
4 | MPC Action:
5 | mpc_params = {
6 | 'v_min': positive, if you want to go forwards, else negative to reverse
7 | }
8 |
9 | # Memory Entry 1:
10 | Scenario:
11 | Always have v_max be higher than v_min.
12 | MPC Action:
13 | mpc_params = {
14 | 'v_max': higher than v_min
15 | }
16 |
17 | # Memory Entry 2:
18 | Scenario:
19 | To drive far away from the walls, the boundary inflation should be increased. That way the car will drive closer to the centerline. This is the most important parameter for staying away from the walls.
20 | MPC Action:
21 | mpc_params = {
22 | 'track_safety_margin': high
23 | }
24 |
25 | # Memory Entry 3:
26 | Scenario:
27 | To reverse the car slowly on the racing line, the v_min must be negative!
28 | MPC Action:
29 | mpc_params = {
30 | 'v_min': negative,
31 | }
32 |
33 | # Memory Entry 4:
34 | Scenario:
35 | To track the racing line better, the qalpha should be set high to track the heading. If you want the heading to be free, then set qalpha to 0.0.
36 | MPC Action:
37 | mpc_params = {
38 | 'qalpha': high, if you want to track the racing line or set it to 0.0 to have more freedom on the heading
39 | }
40 |
41 | # Memory Entry 5:
42 | Scenario:
43 | To track the racing line better, the qn should be high. If you want to drive a different trajectory, then set qn to 0.0, such that you can deviate from it.
44 | MPC Action:
45 | mpc_params = {
46 | 'qn': high to track the racing line, else any other trajectory: 0.0
47 | }
48 |
49 | # Memory Entry 6:
50 | Scenario:
51 | To be safer or to have better tracking, you can reduce v_max to approximately 4m/s. On the other hand, if you want to be more aggressive, then increase v_max to the maximum!
52 | MPC Action:
53 | mpc_params = {
54 | 'v_max': 4.0 for safety, else for maximum aggressiveness set it to the maximum
55 | }
56 |
57 | # Memory Entry 7:
58 | Scenario:
59 | To track the racing line better, the boundary inflation should be lower. If you want to stay away from the walls, then increase the boundary inflation.
60 | MPC Action:
61 | mpc_params = {
62 | 'track_safety_margin': lower for tracking the racing line, else high to stay away from the walls
63 | }
64 |
65 | # Memory Entry 8:
66 | Scenario:
67 | To force the car to reverse, the v_max should be negative!
68 | MPC Action:
69 | mpc_params = {
70 | 'v_max': negative (but larger than v_min) to force the car to reverse
71 | }
72 |
73 | # Memory Entry 9:
74 | Scenario:
75 | To track a reference velocity better, the v_min and v_max should be set closely to the reference velocity.
76 | MPC Action:
77 | mpc_params = {
78 | 'v_min': slightly less than the reference velocity,
79 | 'v_max': set to the reference velocity
80 | }
81 |
82 | # Memory Entry 10:
83 | Scenario:
84 | To minimize the lateral acceleration and jerk, set alat_max to a low value and a_min and a_max close to zero. If you want to drive more aggressively, then set alat_max to a higher value.
85 | MPC Action:
86 | mpc_params = {
87 | 'alat_max': low for minimizing lateral acceleration, else high for aggressive driving
88 | }
89 |
90 | # Memory Entry 11:
91 | Scenario:
92 | To drive smoother and reduce oscillations, increase qalpha. To drive jerky and aggressive, set qalpha to a low value.
93 | MPC Action:
94 | mpc_params = {
95 | 'qalpha': high for smoother driving, low for jerky and aggressive driving
96 | }
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.26.4
2 | wandb==0.19.10
3 |
4 | #Huggingface
5 | python-dotenv==1.1.0
6 | transformers==4.50.3
7 | pillow==11.1.0
8 | #flash-attn==2.7.4.post1
9 |
10 | #Langchain
11 | langchain==0.3.22
12 | langchain-experimental==0.3.4
13 | unstructured[md]
14 | sentence-transformers==4.0.1
15 | chromadb==0.6.3
16 | langchain-openai==0.3.11
17 |
18 | #Unsloth
19 | unsloth==2025.3.19
20 | xformers==0.0.29.post2
21 | trl==0.15.2
22 | peft==0.15.1
23 | triton==3.2.0
24 | accelerate==1.5.2
25 | bitsandbytes==0.45.4
26 | vllm==0.8.2
27 |
28 | #ROS stuff
29 | roslibpy==1.8.1
30 |
31 | #misc
32 | matplotlib==3.10.1
33 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/tests/__init__.py
--------------------------------------------------------------------------------
/tests/decision_tester/README.md:
--------------------------------------------------------------------------------
1 | ## 📚 **Overview**
2 |
3 | `DecisionTester` is an evaluation framework designed to assess the reasoning capabilities of an AI model embedded in an autonomous racing car. It evaluates the car's behavior against various test scenarios using predefined evaluation functions and logs the results for further analysis.
4 |
5 | ---
6 |
7 | ## 🚀 **Usage**
8 |
9 | ### **Run Evaluation on All Datasets**
10 |
11 | ```bash
12 | python3 -m tests.decision_tester.decision_tester --model nibauman/RobotxLLM_Qwen7B_SFT --dataset all --mini --rag
13 | ```
14 |
15 | ### **Available Arguments:**
16 | - `--model`: Choose the model (`gpt-4o` or `nibauman/RobotxLLM_Qwen7B_SFT` or any model available from [unsloth](https://huggingface.co/unsloth)).
17 | - `--dataset`: Choose the dataset (`all`, `stop`, `reverse`, etc.).
18 | - `--mini`: Run a reduced dataset (`--mini` enables mini evaluation mode).
19 | - `--quant`: Use GGUF quantized model (`Q5`) for faster inference.
20 | - `--rag`: Uses the RAG for hints.
21 |
22 | ---
23 |
24 | ## 📊 **Available Test Scenarios**
25 |
26 | | Test Case | Description |
27 | |-------------------------|-----------------------------------|
28 | | **Don't move** | Ensures the car remains stationary. |
29 | | **Reverse the car** | Checks if the car moves backward. |
30 | | **Drive forward** | Validates forward movement. |
31 | | **Oscillate!** | Detects irregular lateral motion. |
32 | | **Drive close to the left wall** | Measures wall proximity. |
33 | | **Drive on the centerline** | Tests alignment with the centerline. |
34 | | **Drive faster than 3 m/s** | Verifies speed threshold. |
35 | | **Drive on the racing line** | Ensures adherence to the optimal path. |
36 |
37 | ---
38 |
39 | ## 📁 **Log Files**
40 |
41 | - Logs are saved in the `logs` directory with timestamped filenames:
42 | ```
43 | tests/decision_tester/logs/{model}_{dataset}_{timestamp}.txt
44 | ```
45 | - Includes:
46 | - **Case Accuracies:** Success rates per scenario.
47 | - **Incorrect Entries:** Detailed logs of mismatches, including prompts and responses.
48 |
49 | ---
50 |
51 | ## 🛡️ **Environment Variables**
52 |
53 | Ensure the following is set in your `.env` file; it is required if you want to use GPT-4o and also needed for the RAG embeddings:
54 | ```
55 | OPENAI_API_TOKEN=your_openai_key
56 | ```
57 |
58 | ---
59 |
60 | Happy Testing! 🚗💨
--------------------------------------------------------------------------------
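The robot-state scenarios referenced above are plain JSON lists of sampled states (see `robot_states/centerline.json` below). As an illustration only, and not necessarily how `decision_tester.py` implements it, a single scenario can be loaded and checked against the "Drive faster than 3 m/s" condition like this:

```python
# Illustrative only: load one robot_states scenario and evaluate a simple speed condition.
import json

with open("tests/decision_tester/robot_states/centerline.json") as f:
    scenarios = json.load(f)

state = scenarios[0]  # one sampled state dictionary
avg_speed = sum(state["s_speed"]) / len(state["s_speed"])
print(f"average s-speed: {avg_speed:.1f} m/s | faster than 3 m/s: {avg_speed > 3}")
```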
/tests/decision_tester/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/tests/decision_tester/__init__.py
--------------------------------------------------------------------------------
/tests/decision_tester/logs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/tests/decision_tester/logs/.gitkeep
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/centerline.json:
--------------------------------------------------------------------------------
1 | [{"time":4, "data_samples":8, "s_pos":[10,15,20,25,30,35,40,45], "d_pos":[0,0,0,0,0,0,0,0], "s_speed":[6,6,6,6,6,6,6,6], "d_speed": [0,0,0,0,0,0,0,0], "d_left": [1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":2, "data_samples":15, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], "d_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":5, "data_samples":11, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50], "d_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":4, "data_samples":4, "s_pos":[0,5,10,15], "d_pos":[0,0,0,0], "s_speed":[5,5,5,5], "d_speed": [0,0,0,0], "d_left": [1,1,1,1], "d_right":[1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":2, "data_samples":6, "s_pos":[5, 10, 15, 20, 25, 30], "d_pos":[0, 0, 0, 0, 0, 0], "s_speed":[5, 6, 5, 6, 5, 6], "d_speed":[0, 0, 0, 0, 0, 0], "d_left":[2, 2, 2, 2, 2, 2], "d_right":[2, 2, 2, 2, 2, 2], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":3, "data_samples":6, "s_pos":[5, 6, 7, 8, 9, 10], "d_pos":[0, 0, 0, 0, 0, 0], "s_speed":[6, 6, 6, 6, 6, 6], "d_speed":[0, 0, 0, 0, 0, 0], "d_left":[1, 1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":5, "data_samples":8, "s_pos":[10,15,20,25,30,35,40,45], "d_pos":[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], "s_speed":[6,6,6,6,6,6,6,6], "d_speed":[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], "d_left":[2,2,2,2,2,2,2,2], "d_right":[2,2,2,2,2,2,2,2], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":3, "data_samples":7, "s_pos":[0,1,2,3,4,5,6], "d_pos":[0,0,0,0,0,0,0], "s_speed":[6,6,6,6,6,6,6], "d_speed": [0,0,0,0,0,0,0], "d_left": [1,1,1,1,1,1,1], "d_right":[1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":5, "data_samples":14, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "d_pos":[0, 0.05, -0.1, 0.1, -0.05, 0, 0.1, -0.1, 0.05, 0, -0.05, 0.1, -0.1, 0.05], "s_speed":[6, 6.5, 6, 5.5, 6, 6.5, 6, 5.5, 6, 6.5, 6, 5.5, 6, 6.5], "d_speed":[0, 0.1, -0.2, 0.2, -0.1, 0, 0.2, -0.2, 0.1, 0, -0.1, 0.2, -0.2, 0.1], "d_left":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":2, "data_samples":14, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12,13], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[6,6,6,6,6,6,6,6,6,6,6,6,6,6], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [1,1,1,1,1,1,1,1,1,1,1,1,1,1], "d_right":[1,1,1,1,1,1,1,1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":2, "data_samples":10, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "d_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing":false, "crashed":false, "facing_wall":false},
12 | {"time":3, "data_samples":3, "s_pos":[5,10,15], "d_pos":[0.1,0.1,0.1], "s_speed":[6,6,6], "d_speed":[0.1,0.1,0.1], "d_left":[1,1,1], "d_right":[1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":3, "data_samples":15, "s_pos":[0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0], "d_pos":[0, 0.02, 0.01, -0.01, 0, 0.03, -0.02, 0.01, -0.01, 0.02, 0, -0.02, 0.01, 0, -0.01], "s_speed":[6.5, 6.7, 6.6, 6.4, 6.5, 6.8, 6.6, 6.7, 6.5, 6.6, 6.7, 6.5, 6.6, 6.7, 6.5], "d_speed":[0.1, 0.12, 0.11, 0.09, 0.1, 0.13, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1, 0.11, 0.12, 0.1], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":4, "data_samples":12, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11], "d_pos":[0,0,0,0,0,0,0,0,0,0,0,0], "s_speed":[6,6,6,6,6,6,6,6,6,6,6,6], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[1,1,1,1,1,1,1,1,1,1,1,1], "d_right":[1,1,1,1,1,1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":5, "data_samples":15, "s_pos":[0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0], "d_pos":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "s_speed":[5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], "d_speed":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "d_left":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "d_right":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":1, "data_samples":3, "s_pos":[0,2,4], "d_pos":[0.1,0.1,0.1], "s_speed":[6,6,6], "d_speed":[0,0,0], "d_left":[1.2,1.2,1.2], "d_right":[1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":1, "data_samples":8, "s_pos":[0,1,2,3,4,5,6,7], "d_pos":[0,0,0,0,0,0,0,0], "s_speed":[6,6,6,6,6,6,6,6], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[1,1,1,1,1,1,1,1], "d_right":[1,1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":2, "data_samples":5, "s_pos":[10, 15, 20, 25, 30], "d_pos":[0, 0, 0, 0, 0], "s_speed":[5, 6, 6, 5, 5], "d_speed":[0, 0, 0, 0, 0], "d_left":[1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":2, "data_samples":13, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], "d_pos":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "s_speed":[5.5, 5.6, 5.7, 5.5, 5.8, 5.9, 6.0, 5.8, 6.1, 6.2, 5.9, 6.0, 6.1], "d_speed":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "d_left":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "d_right":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":2, "data_samples":13, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12], "d_pos":[0.05,0.04,0.03,0.02,0.01,0.0,-0.01,-0.02,-0.01,0.0,0.01,0.02,0.03], "s_speed":[6,6.2,6.1,5.9,6.0,6.3,6.1,5.8,6.0,6.2,5.9,6.1,6.0], "d_speed": [0.1,0.2,0.1,0.15,0.1,0.05,0.1,0.15,0.1,0.05,0.1,0.2,0.1], "d_left": [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":5, "data_samples":12, "s_pos":[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], "d_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":2, "data_samples":3, "s_pos":[5,10,15], "d_pos":[0,0,0], "s_speed":[6,6,6], "d_speed":[0,0,0], "d_left":[1.5,1.5,1.5], "d_right":[1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":1, "data_samples":3, "s_pos":[10,15,20], "d_pos":[0.1,0.1,0.1], "s_speed":[6,6.5,7], "d_speed": [0.1,0.1,0.1], "d_left": [1.2,1.2,1.2], "d_right":[1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false}
24 | ]
--------------------------------------------------------------------------------
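
Note on the robot_states fixtures: every file in this directory uses the same flat per-state schema — `time`, `data_samples`, per-sample longitudinal/lateral positions `s_pos`/`d_pos` (presumably Frenet-style track coordinates), speeds `s_speed`/`d_speed`, distances to the left/right boundary `d_left`/`d_right`, and the boolean flags `reversing`, `crashed`, `facing_wall`. The following is a minimal, hypothetical loader sketch for inspecting one of these files; the field names are taken from the fixtures shown here, but the helper functions (`load_states`, `summarize`) and the hard-coded path are illustrative and not part of the repository.

# Hypothetical helper -- illustrative only, not part of the repository.
# It relies solely on the field names visible in the robot_states/*.json files.
import json
from pathlib import Path


def load_states(path):
    """Load one robot-state fixture (a JSON list of state dicts)."""
    with open(path) as f:
        return json.load(f)


def summarize(state):
    """Return a few aggregate numbers for a single state dict."""
    n = len(state["s_pos"])
    return {
        "declared_samples": state["data_samples"],
        "actual_samples": n,
        "mean_s_speed": sum(state["s_speed"]) / n,
        "mean_abs_d_pos": sum(abs(d) for d in state["d_pos"]) / n,
        "min_wall_dist": min(min(state["d_left"]), min(state["d_right"])),
        "flags": (state["reversing"], state["crashed"], state["facing_wall"]),
    }


if __name__ == "__main__":
    # Path assumes the script is run from the repository root.
    fixture = Path("tests/decision_tester/robot_states/centerline.json")
    for i, state in enumerate(load_states(fixture), start=1):
        print(i, summarize(state))

Comparing `declared_samples` against `actual_samples` is a cheap consistency check when hand-editing these fixtures, since the `data_samples` field and the arrays are maintained separately.
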
/tests/decision_tester/robot_states/close_to_wall.json:
--------------------------------------------------------------------------------
1 | [{"time":2, "data_samples":13, "s_pos":[10,12,14,16,18,20,22,24,26,28,30,32,34], "d_pos":[0.2,0.25,0.3,0.2,0.1,0,-0.1,-0.2,-0.15,-0.1,0,0.1,0.2], "s_speed":[6,6,6,6,6,6,6,6,6,6,6,6,6], "d_speed":[0.1,0.15,0.2,0.05,0.1,0.1,0.2,0.15,0.1,0.05,0.1,0.15,0.2], "d_left":[0.35,0.3,0.25,0.35,0.4,0.45,0.5,0.55,0.5,0.45,0.4,0.35,0.3], "d_right":[2.65,2.7,2.75,2.65,2.6,2.55,2.5,2.45,2.5,2.55,2.6,2.65,2.7], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":4, "data_samples":14, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "d_pos":[-0.3, -0.28, -0.27, -0.25, -0.24, -0.23, -0.22, -0.21, -0.2, -0.18, -0.17, -0.15, -0.14, -0.12], "s_speed":[6, 6.1, 6.2, 6.3, 6.1, 6, 6.2, 6.3, 6.1, 6, 6.2, 6.3, 6.1, 6], "d_speed":[0.1, 0.1, 0.05, 0.05, 0.1, 0.1, 0.05, 0.05, 0.1, 0.1, 0.05, 0.05, 0.1, 0.1], "d_left":[0.35, 0.36, 0.37, 0.38, 0.35, 0.36, 0.37, 0.38, 0.35, 0.36, 0.37, 0.38, 0.35, 0.36], "d_right":[2.5, 2.6, 2.7, 2.8, 2.5, 2.6, 2.7, 2.8, 2.5, 2.6, 2.7, 2.8, 2.5, 2.6], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":2, "data_samples":12, "s_pos":[1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5], "d_pos":[0.35,0.36,0.37,0.35,0.34,0.33,0.35,0.36,0.37,0.35,0.34,0.33], "s_speed":[5.5,5.5,5.6,5.6,5.7,5.7,5.6,5.6,5.5,5.5,5.6,5.6], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.7,1.6,1.6,1.7,1.7,1.7,1.7,1.6,1.6,1.7,1.7,1.7], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":1, "data_samples":7, "s_pos":[10, 12, 14, 16, 18, 20, 22], "d_pos":[0.35, 0.36, 0.38, 0.37, 0.36, 0.35, 0.34], "s_speed":[6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[0.35, 0.34, 0.32, 0.33, 0.34, 0.35, 0.36], "d_right":[2.65, 2.66, 2.68, 2.67, 2.66, 2.65, 2.64], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":5, "data_samples":6, "s_pos":[10,15,20,25,30,35], "d_pos":[0.1,0.2,0.15,0.1,0.05,0.1], "s_speed":[6,6,6,6,6,6], "d_speed":[0.2,0.3,0.25,0.2,0.15,0.2], "d_left":[0.3,0.35,0.3,0.25,0.3,0.35], "d_right":[3.7,3.65,3.7,3.75,3.7,3.65], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":1, "data_samples":10, "s_pos":[10,11,12,13,14,15,16,17,18,19], "d_pos":[0.25,0.26,0.27,0.28,0.29,0.28,0.27,0.26,0.25,0.24], "s_speed":[6,6,6,6,6,6,6,6,6,6], "d_speed": [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left": [0.35,0.35,0.35,0.35,0.35,0.35,0.35,0.35,0.35,0.35], "d_right":[2.15,2.15,2.15,2.15,2.15,2.15,2.15,2.15,2.15,2.15], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":5, "data_samples":9, "s_pos":[10, 15, 20, 25, 30, 35, 40, 45, 50], "d_pos":[-0.35, -0.35, -0.35, -0.35, -0.35, -0.35, -0.35, -0.35, -0.35], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], "d_right":[3.8, 3.8, 3.8, 3.8, 3.8, 3.8, 3.8, 3.8, 3.8], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":1, "data_samples":13, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12], "d_pos":[-0.35,-0.36,-0.37,-0.38,-0.39,-0.39,-0.38,-0.37,-0.36,-0.35,-0.34,-0.33,-0.32], "s_speed":[5.5,5.6,5.7,5.6,5.5,5.4,5.3,5.4,5.5,5.6,5.7,5.6,5.5], "d_speed": [0.2,0.2,0.2,0.3,0.3,0.2,0.1,0.1,0.2,0.3,0.3,0.2,0.1], "d_left": [0.35,0.34,0.33,0.32,0.31,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38], "d_right":[4.65,4.66,4.67,4.68,4.69,4.69,4.68,4.67,4.66,4.65,4.64,4.63,4.62], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":1, "data_samples":7, "s_pos":[10,11,12,13,14,15,16], "d_pos":[-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3], "s_speed":[6,6,6,6,6,6,6], "d_speed": [0,0,0,0,0,0,0], "d_left": [0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[5,5,5,5,5,5,5], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":3, "data_samples":12, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11], "d_pos":[-0.35,-0.36,-0.34,-0.33,-0.35,-0.36,-0.37,-0.35,-0.36,-0.34,-0.33,-0.35], "s_speed":[5,5.5,6,5.8,6.2,5.9,6.1,5.7,6,6.3,6,5.8], "d_speed": [0.2,0.3,0.1,0.2,0.3,0.4,0.2,0.3,0.2,0.1,0.2,0.3], "d_left": [0.35,0.34,0.36,0.37,0.35,0.34,0.33,0.35,0.34,0.36,0.37,0.35], "d_right":[2.65,2.66,2.64,2.63,2.65,2.66,2.67,2.65,2.66,2.64,2.63,2.65], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":3, "data_samples":8, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7], "d_pos":[-0.3, -0.25, -0.2, -0.15, -0.1, -0.05, 0, 0.05], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7], "d_right":[2.65, 2.6, 2.55, 2.5, 2.45, 2.4, 2.35, 2.3], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":2, "data_samples":5, "s_pos":[10,15,20,25,30], "d_pos":[0.3,0.35,0.33,0.32,0.31], "s_speed":[6,6.5,6.7,6.5,6.2], "d_speed": [0.3,0.4,0.35,0.32,0.3], "d_left": [0.35,0.34,0.36,0.38,0.39], "d_right":[4.65,4.66,4.64,4.62,4.61], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":1, "data_samples":7, "s_pos":[5,10,15,20,25,30,35], "d_pos":[-0.35,-0.36,-0.37,-0.38,-0.39,-0.39,-0.39], "s_speed":[6,6,6,6,6,6,6], "d_speed": [0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left": [0.35,0.34,0.33,0.32,0.31,0.31,0.31], "d_right":[3.65,3.66,3.67,3.68,3.69,3.69,3.69], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":2, "data_samples":8, "s_pos":[2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5], "d_pos":[-0.35, -0.36, -0.37, -0.35, -0.34, -0.36, -0.38, -0.35], "s_speed":[6.0, 6.2, 6.1, 6.0, 6.3, 6.1, 6.2, 6.0], "d_speed": [0.1, 0.2, 0.15, 0.1, 0.2, 0.15, 0.1, 0.2], "d_left": [0.35, 0.36, 0.37, 0.35, 0.34, 0.36, 0.38, 0.35], "d_right":[2.65, 2.64, 2.63, 2.65, 2.66, 2.64, 2.62, 2.65], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":1, "data_samples":11, "s_pos":[10,11,12,13,14,15,16,17,18,19,20], "d_pos":[-0.35,-0.35,-0.35,-0.35,-0.35,-0.35,-0.35,-0.35,-0.35,-0.35,-0.35], "s_speed":[6,6,6,6,6,6,6,6,6,6,6], "d_speed": [0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left": [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[4.7,4.7,4.7,4.7,4.7,4.7,4.7,4.7,4.7,4.7,4.7], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":2, "data_samples":4, "s_pos":[10, 15, 20, 25], "d_pos":[0.35, 0.35, 0.36, 0.36], "s_speed":[6, 6, 6, 6], "d_speed": [0.1, 0.1, 0.1, 0.1], "d_left": [0.35, 0.35, 0.36, 0.36], "d_right":[3.65, 3.65, 3.64, 3.64], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":5, "data_samples":10, "s_pos":[0,1,2,3,4,5,6,7,8,9], "d_pos":[-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3], "s_speed":[6,6,6,6,6,6,6,6,6,6], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.7,1.7,1.7,1.7,1.7,1.7,1.7,1.7,1.7,1.7], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":3, "data_samples":5, "s_pos":[10, 15, 20, 25, 30], "d_pos":[0.35, 0.3, 0.32, 0.33, 0.34], "s_speed":[6, 6, 6, 6, 6], "d_speed": [0.1, 0.1, 0.1, 0.1, 0.1], "d_left": [0.35, 0.35, 0.35, 0.35, 0.35], "d_right":[3.65, 3.65, 3.65, 3.65, 3.65], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":5, "data_samples":11, "s_pos":[0,1,2,3,4,5,6,7,8,9,10], "d_pos":[-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3], "s_speed":[6,6,6,6,6,6,6,6,6,6,6], "d_speed":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[3.7,3.7,3.7,3.7,3.7,3.7,3.7,3.7,3.7,3.7,3.7], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":3, "data_samples":13, "s_pos":[0,1.5,3,4.5,6,7.5,9,10.5,12,13.5,15,16.5,18], "d_pos":[-0.3,-0.29,-0.28,-0.3,-0.29,-0.28,-0.3,-0.29,-0.28,-0.3,-0.29,-0.28,-0.3], "s_speed":[6,6,6,6,6,6,6,6,6,6,6,6,6], "d_speed":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_left":[0.3,0.31,0.32,0.3,0.31,0.32,0.3,0.31,0.32,0.3,0.31,0.32,0.3], "d_right":[2.7,2.69,2.68,2.7,2.69,2.68,2.7,2.69,2.68,2.7,2.69,2.68,2.7], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":3, "data_samples":8, "s_pos":[5, 10, 15, 20, 25, 30, 35, 40], "d_pos":[-0.35, -0.36, -0.37, -0.35, -0.34, -0.33, -0.32, -0.31], "s_speed":[6, 6.2, 6.1, 6.3, 6.5, 6.4, 6.6, 6.7], "d_speed":[0.2, 0.3, 0.25, 0.22, 0.28, 0.24, 0.26, 0.27], "d_left":[0.3, 0.29, 0.28, 0.3, 0.31, 0.32, 0.33, 0.34], "d_right":[2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":3, "data_samples":12, "s_pos":[10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65], "d_pos":[0.35, 0.36, 0.37, 0.38, 0.39, 0.38, 0.37, 0.36, 0.35, 0.34, 0.33, 0.32], "s_speed":[6, 6.5, 6.7, 6.8, 7, 6.9, 6.8, 6.7, 6.5, 6.3, 6, 5.8], "d_speed": [0.1, 0.1, 0.15, 0.15, 0.2, 0.2, 0.15, 0.15, 0.1, 0.1, 0.05, 0.05], "d_left": [0.35, 0.34, 0.33, 0.32, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38], "d_right":[3.65, 3.66, 3.67, 3.68, 3.69, 3.68, 3.67, 3.66, 3.65, 3.64, 3.63, 3.62], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":3, "data_samples":3, "s_pos":[5, 12, 19], "d_pos":[0.1, 0.15, 0.2], "s_speed":[6.5, 6.8, 7.0], "d_speed":[0.2, 0.25, 0.3], "d_left":[0.35, 0.3, 0.25], "d_right":[2.0, 2.1, 2.2], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":1, "data_samples":8, "s_pos":[10,11,12,13,14,15,16,17], "d_pos":[-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3], "s_speed":[6,6,6,6,6,6,6,6], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[4.7,4.7,4.7,4.7,4.7,4.7,4.7,4.7], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":1, "data_samples":15, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14], "d_pos":[0.35,0.36,0.34,0.37,0.33,0.38,0.36,0.35,0.34,0.37,0.35,0.33,0.36,0.34,0.35], "s_speed":[5.5,5.6,5.5,5.7,5.5,5.6,5.5,5.6,5.5,5.7,5.5,5.6,5.5,5.6,5.5], "d_speed":[0.3,0.4,0.2,0.3,0.1,0.3,0.2,0.2,0.3,0.4,0.3,0.2,0.1,0.3,0.2], "d_left":[0.35,0.36,0.34,0.37,0.33,0.38,0.36,0.35,0.34,0.37,0.35,0.33,0.36,0.34,0.35], "d_right":[3.65,3.64,3.66,3.63,3.67,3.62,3.64,3.65,3.66,3.63,3.65,3.67,3.64,3.66,3.65], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/forward.json:
--------------------------------------------------------------------------------
1 | [{"time":5, "data_samples":15, "s_pos":[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7], "d_pos":[0.1, 0.1, -0.1, 0, 0.1, -0.1, 0, 0.1, 0, -0.1, 0.1, 0, 0, -0.1, 0.1], "s_speed":[6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5, 6.5], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[1.5, 1.6, 1.5, 1.7, 1.5, 1.6, 1.5, 1.6, 1.5, 1.6, 1.5, 1.6, 1.5, 1.6, 1.5], "d_right":[1.5, 1.4, 1.5, 1.3, 1.5, 1.4, 1.5, 1.4, 1.5, 1.4, 1.5, 1.4, 1.5, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":0, "data_samples":3, "s_pos":[10, 15, 20], "d_pos":[0.1, 0.2, 0.15], "s_speed":[6, 6.5, 7], "d_speed":[0.1, 0.2, 0.15], "d_left":[0.8, 0.7, 0.75], "d_right":[0.9, 0.8, 0.85], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time": 1, "data_samples": 14, "s_pos": [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5], "d_pos": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "s_speed": [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5], "d_speed": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "d_right": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":2, "data_samples":3, "s_pos":[10,15,20], "d_pos":[0.1,0.2,0.1], "s_speed":[6,6.5,7], "d_speed":[0.1,0.2,0.1], "d_left":[1.5,1.6,1.5], "d_right":[1.4,1.5,1.4], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":3, "data_samples":8, "s_pos":[1, 6, 11, 16, 21, 26, 31, 36], "d_pos":[0.1, 0.1, 0.1, -0.1, -0.1, 0.1, 0.1, -0.1], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.2, 0.2, 0.2, 0.3, 0.3, 0.2, 0.2, 0.3], "d_left":[1.5, 1.5, 1.5, 1.4, 1.4, 1.5, 1.5, 1.4], "d_right":[1.5, 1.5, 1.5, 1.4, 1.4, 1.5, 1.5, 1.4], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":5, "data_samples":14, "s_pos":[0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39], "d_pos":[0.1, 0.1, 0.2, 0.2, 0.1, 0, -0.1, -0.1, 0, 0.1, 0.1, 0.2, 0.2, 0.1], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[1, 1.2, 1, 1.1, 1.2, 1.3, 1.1, 1, 1.2, 1.1, 1, 1.2, 1.1, 1], "d_right":[1.5, 1.3, 1.5, 1.4, 1.3, 1.2, 1.4, 1.5, 1.3, 1.4, 1.5, 1.3, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":1, "data_samples":9, "s_pos":[0.0, 0.7, 1.4, 2.1, 2.8, 3.5, 4.2, 4.9, 5.6], "d_pos":[0.1, 0.1, 0.0, -0.1, -0.05, 0.05, 0.1, 0.0, -0.1], "s_speed":[5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], "d_speed":[0.1, 0.1, 0.0, -0.1, -0.05, 0.05, 0.1, 0.0, -0.1], "d_left":[1.0, 1.1, 1.0, 0.9, 1.0, 1.1, 1.0, 1.1, 0.9], "d_right":[1.0, 0.9, 1.0, 1.1, 1.0, 0.9, 1.0, 0.9, 1.1], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":2, "data_samples":10, "s_pos":[0,1.2,2.5,3.8,5.1,6.4,7.7,9.0,10.3,11.6], "d_pos":[0.1,0.05,-0.02,0.03,-0.01,0.02,0.00,-0.05,0.04,0.02], "s_speed":[6.0,6.1,6.2,6.0,6.1,6.3,6.2,6.0,6.1,6.2], "d_speed":[0.1,0.15,0.2,0.1,0.05,0.1,0.2,0.15,0.1,0.05], "d_left":[0.9,0.95,1.0,0.95,0.9,0.85,0.9,0.95,1.0,0.95], "d_right":[1.1,1.05,1.0,1.05,1.1,1.15,1.1,1.05,1.0,1.05], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":2, "data_samples":8, "s_pos":[10,12,14,16,18,20,22,24], "d_pos":[0.1,0.1,0.2,0.2,0.1,0.1,0.1,0.1], "s_speed":[6,6.5,6,5.5,6,6,6.5,6], "d_speed": [0.2,0.1,0.3,0.2,0.1,0.2,0.1,0.1], "d_left": [1.5,1.4,1.3,1.6,1.5,1.4,1.5,1.4], "d_right":[1.3,1.5,1.4,1.3,1.5,1.4,1.3,1.5], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":5, "data_samples":15, "s_pos":[0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0,6.5,7.0], "d_pos":[0.1,0.1,0.2,0.2,0.1,0.0,-0.1,-0.1,-0.2,-0.1,0.0,0.1,0.2,0.2,0.1], "s_speed":[6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5,6.5], "d_speed":[0.05,0.05,0.1,0.1,0.05,0.0,-0.05,-0.05,-0.1,-0.05,0.0,0.05,0.1,0.1,0.05], "d_left":[1.2,1.2,1.1,1.1,1.2,1.3,1.4,1.4,1.3,1.4,1.5,1.4,1.3,1.3,1.2], "d_right":[1.2,1.2,1.3,1.3,1.2,1.1,1.0,1.0,1.1,1.0,0.9,1.0,1.1,1.1,1.2], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":0, "data_samples":6, "s_pos":[0,1.2,2.4,3.6,4.8,6], "d_pos":[0.1,0.15,0.2,0.18,0.16,0.12], "s_speed":[6,6,6,6,6,6], "d_speed": [0.1,0.1,0.1,0.1,0.1,0.1], "d_left": [1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":5, "data_samples":6, "s_pos":[10, 15, 20, 25, 30, 35], "d_pos":[0.1, 0.1, 0.2, 0.2, 0.1, 0.1], "s_speed":[6, 6, 6, 6, 6, 6], "d_speed": [0.2, 0.2, 0.2, 0.2, 0.2, 0.2], "d_left": [1.5, 1.6, 1.4, 1.5, 1.4, 1.5], "d_right":[1.4, 1.3, 1.5, 1.4, 1.5, 1.4], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":5, "data_samples":9, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8], "d_pos":[0.1, 0.0, -0.1, 0.0, 0.1, 0.0, -0.1, 0.0, 0.1], "s_speed":[6, 6.2, 6.1, 6.3, 6.4, 6.2, 6.1, 6.3, 6.4], "d_speed": [0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2, 0.1, 0.2], "d_left": [1.5, 1.4, 1.5, 1.4, 1.5, 1.4, 1.5, 1.4, 1.5], "d_right":[1.5, 1.6, 1.5, 1.6, 1.5, 1.6, 1.5, 1.6, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":0, "data_samples":13, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], "d_pos":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "s_speed":[5.5, 5.8, 6.0, 6.2, 6.5, 6.7, 6.8, 6.9, 7.0, 6.8, 6.7, 6.5, 6.3], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2], "d_right":[1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":1, "data_samples":6, "s_pos":[1,2,3,4,5,6], "d_pos":[0.1,0.2,0.1,0.0,-0.1,-0.2], "s_speed":[6,6,6,6,6,6], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":4, "data_samples":4, "s_pos":[10,15,20,25], "d_pos":[0.1,0.1,0.1,0.1], "s_speed":[6,6,6,6], "d_speed": [0.2,0.2,0.2,0.2], "d_left": [2,2,2,2], "d_right":[2,2,2,2], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":5, "data_samples":9, "s_pos":[10, 12, 14, 16, 18, 20, 22, 24, 26], "d_pos":[0.1, 0.15, 0.05, -0.1, -0.05, 0.2, 0.1, -0.05, 0], "s_speed":[6, 6.2, 6.1, 6, 6.3, 6.1, 6.2, 6, 6.1], "d_speed":[0.2, 0.3, 0.1, 0.2, 0.15, 0.25, 0.2, 0.1, 0.05], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":5, "data_samples":13, "s_pos":[0, 1.5, 3.0, 4.5, 6.0, 7.5, 9.0, 10.5, 12.0, 13.5, 15.0, 16.5, 18.0], "d_pos":[0.1, 0.05, 0.0, -0.05, -0.1, -0.05, 0.0, 0.05, 0.1, 0.05, 0.0, -0.05, -0.1], "s_speed":[6.0, 6.2, 6.1, 5.9, 6.0, 6.1, 5.8, 6.0, 6.2, 6.1, 5.9, 6.0, 6.1], "d_speed":[0.2, 0.15, 0.0, -0.15, -0.2, -0.15, 0.0, 0.15, 0.2, 0.15, 0.0, -0.15, -0.2], "d_left":[1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5], "d_right":[1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time": 5, "data_samples": 11, "s_pos": [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50], "d_pos": [0.1, 0.15, 0.2, 0.1, 0, -0.1, -0.05, 0, 0.05, 0.1, 0.15], "s_speed": [6, 6.2, 6.5, 6.3, 6.1, 6, 6.2, 6.4, 6.3, 6.5, 6.1], "d_speed": [0.2, 0.25, 0.3, 0.15, 0.1, 0.05, 0.1, 0.15, 0.2, 0.25, 0.2], "d_left": [1.5, 1.4, 1.3, 1.5, 1.6, 1.7, 1.6, 1.5, 1.4, 1.3, 1.4], "d_right": [1.5, 1.6, 1.7, 1.5, 1.4, 1.3, 1.4, 1.5, 1.6, 1.7, 1.6], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":5, "data_samples":13, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], "d_pos":[0.1, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.2, 0.1, 0.3, 0.2], "s_speed":[6, 6.5, 6, 6.5, 6, 6.5, 6, 6.5, 6, 6.5, 6, 6.5, 6], "d_speed":[0.2, 0.3, 0.2, 0.4, 0.3, 0.2, 0.4, 0.3, 0.2, 0.3, 0.2, 0.4, 0.3], "d_left":[2, 2.1, 2, 2.1, 2, 2.1, 2, 2.1, 2, 2.1, 2, 2.1, 2], "d_right":[1.9, 1.8, 1.9, 1.8, 1.9, 1.8, 1.9, 1.8, 1.9, 1.8, 1.9, 1.8, 1.9], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":4, "data_samples":5, "s_pos":[10, 15, 20, 25, 30], "d_pos":[0.1, 0.1, 0.2, 0.1, 0.0], "s_speed":[6, 6, 6, 6, 6], "d_speed":[0.2, 0.1, 0.1, 0.1, 0.0], "d_left":[1.2, 1.3, 1.4, 1.3, 1.2], "d_right":[1.1, 1.1, 1.0, 1.1, 1.2], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":5, "data_samples":8, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7], "d_pos":[0.1, 0.15, 0.05, 0.1, 0.2, 0.25, 0.3, 0.2], "s_speed":[6, 6.5, 7, 6.8, 6.9, 6, 6.5, 7], "d_speed": [0.1, 0.2, 0.15, 0.1, 0.05, 0.1, 0.2, 0.15], "d_left": [1.5, 1.4, 1.6, 1.5, 1.4, 1.3, 1.2, 1.3], "d_right":[1.5, 1.6, 1.4, 1.5, 1.6, 1.7, 1.8, 1.7], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":3, "data_samples":14, "s_pos":[5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5], "d_pos":[0.1, 0.05, 0, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, 0, -0.05, -0.1, -0.05], "s_speed":[6, 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8, 6.9, 7, 6.9, 6.8, 6.7], "d_speed":[0.1, 0.1, 0, -0.1, -0.1, 0, 0.1, 0.1, 0, -0.1, -0.1, 0, 0.1, 0.1], "d_left":[1.2, 1.3, 1.4, 1.5, 1.6, 1.5, 1.4, 1.3, 1.2, 1.3, 1.4, 1.5, 1.6, 1.5], "d_right":[1.8, 1.7, 1.6, 1.5, 1.4, 1.5, 1.6, 1.7, 1.8, 1.7, 1.6, 1.5, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":4, "data_samples":6, "s_pos":[10,15,20,25,30,35], "d_pos":[0.1,0.1,0.1,0.2,0.2,0.1], "s_speed":[6,6,6,6,6,6], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":4, "data_samples":10, "s_pos":[0, 0.7, 1.4, 2.1, 2.8, 3.5, 4.2, 4.9, 5.6, 6.3], "d_pos":[0.05, 0.02, -0.01, 0.03, -0.02, 0.01, 0.04, -0.03, 0.01, 0], "s_speed":[6, 6.5, 6.8, 7, 6.9, 6.7, 6.5, 6.3, 6.1, 6], "d_speed":[0.1, 0.05, 0.07, 0.02, 0.03, 0.08, 0.05, 0.06, 0.04, 0.03], "d_left":[1.5, 1.6, 1.4, 1.5, 1.4, 1.6, 1.5, 1.6, 1.7, 1.5], "d_right":[1.4, 1.3, 1.5, 1.4, 1.5, 1.3, 1.4, 1.3, 1.2, 1.4], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/oscillating.json:
--------------------------------------------------------------------------------
1 | [{"time":2, "data_samples":6, "s_pos":[10,15,20,25,30,35], "d_pos":[0.4,-0.5,0.6,-0.7,0.8,-0.9], "s_speed":[6,6.5,5.5,6,6.5,5.5], "d_speed":[0.6,-0.7,0.8,-0.9,1.0,-1.1], "d_left":[0.6,0.5,0.7,0.4,0.8,0.9], "d_right":[1.2,1.3,1.1,1.4,1.0,0.9], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":5, "data_samples":7, "s_pos":[0, 1.5, 3, 4.5, 6, 7.5, 9], "d_pos":[0.1, -0.2, 0.3, -0.3, 0.2, -0.1, 0.1], "s_speed":[6, 6.5, 6, 5.5, 6, 6.5, 6], "d_speed":[0.4, 0.6, 0.5, 0.7, 0.4, 0.6, 0.5], "d_left":[1.5, 1.3, 1.6, 1.2, 1.4, 1.3, 1.5], "d_right":[1.5, 1.7, 1.4, 1.8, 1.6, 1.7, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":5, "data_samples":14, "s_pos":[10, 15, 21, 27, 33, 39, 45, 51, 57, 63, 69, 75, 81, 87], "d_pos":[-0.4, 0.5, -0.6, 0.4, -0.5, 0.6, -0.4, 0.5, -0.6, 0.4, -0.5, 0.6, -0.4, 0.5], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.6, 0.7, 0.8, 0.5, 0.6, 0.7, 0.8, 0.5, 0.6, 0.7, 0.8, 0.5, 0.6, 0.7], "d_left":[1.5, 1.4, 1.6, 1.4, 1.5, 1.6, 1.4, 1.5, 1.6, 1.4, 1.5, 1.6, 1.4, 1.5], "d_right":[1.2, 1.3, 1.1, 1.3, 1.2, 1.1, 1.3, 1.2, 1.1, 1.3, 1.2, 1.1, 1.3, 1.2], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":2, "data_samples":9, "s_pos":[0,1,2,3,4,5,6,7,8], "d_pos":[0.4,-0.4,0.5,-0.5,0.6,-0.6,0.7,-0.7,0.8], "s_speed":[6,6.5,7,6.5,6,5.5,6,6.5,7], "d_speed": [0.6,-0.6,0.7,-0.7,0.8,-0.8,0.9,-0.9,1.0], "d_left": [1.2,0.8,1.3,0.7,1.4,0.6,1.5,0.5,1.6], "d_right":[0.8,1.2,0.7,1.3,0.6,1.4,0.5,1.5,0.4], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":2, "data_samples":3, "s_pos":[10,15,20], "d_pos":[0.5,-0.5,0.5], "s_speed":[6,6,6], "d_speed":[0.6,-0.6,0.6], "d_left":[1.5,0.3,1.5], "d_right":[1.5,2.7,1.5], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":2, "data_samples":6, "s_pos":[10,12,14,16,18,20], "d_pos":[0.5,-0.6,0.4,-0.5,0.6,-0.4], "s_speed":[6,6,6,6,6,6], "d_speed":[0.6,0.7,0.5,0.6,0.7,0.5], "d_left":[1.5,1.3,1.4,1.5,1.3,1.4], "d_right":[1.3,1.5,1.3,1.4,1.5,1.3], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":4, "data_samples":13, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12], "d_pos":[0.1,-0.1,0.2,-0.2,0.15,-0.15,0.25,-0.25,0.3,-0.3,0.2,-0.2,0.1], "s_speed":[6,6.5,7,6.8,6.5,6.2,6.8,7,6.5,6.2,6.8,7,6.5], "d_speed": [0.4,0.6,0.5,0.7,0.5,0.6,0.5,0.7,0.5,0.6,0.5,0.7,0.6], "d_left": [1.5,1.4,1.6,1.3,1.5,1.4,1.6,1.3,1.5,1.4,1.6,1.3,1.5], "d_right":[1.5,1.6,1.4,1.7,1.5,1.6,1.4,1.7,1.5,1.6,1.4,1.7,1.5], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":2, "data_samples":4, "s_pos":[10,15,21,27,34], "d_pos":[0.4,-0.5,0.6,-0.7,0.5], "s_speed":[6,6,6,6,6], "d_speed":[0.6,0.7,0.8,0.7,0.6], "d_left":[1.2,1.1,1.3,1.1,1.4], "d_right":[0.8,0.9,0.7,0.9,0.6], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":5, "data_samples":6, "s_pos":[10,12,14,16,18,20], "d_pos":[0.5,-0.5,0.6,-0.6,0.5,-0.5], "s_speed":[6,6,6,6,6,6], "d_speed": [0.6,0.6,0.7,0.7,0.6,0.6], "d_left": [0.8,0.9,0.7,0.9,0.8,0.9], "d_right":[0.8,0.9,0.7,0.9,0.8,0.9], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":2, "data_samples":6, "s_pos":[0,1.1,2.3,3.5,4.7,5.9], "d_pos":[0.5,-0.5,0.4,-0.4,0.6,-0.6], "s_speed":[6.5,6.8,6.9,6.7,6.6,6.5], "d_speed":[0.6,-0.6,0.5,-0.5,0.7,-0.7], "d_left":[0.8,0.9,0.7,0.9,0.8,0.9], "d_right":[1.2,1.1,1.3,1.1,1.2,1.1], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":3, "data_samples":9, "s_pos":[0,1,2,3,4,5,6,7,8], "d_pos":[0.4,-0.4,0.5,-0.5,0.6,-0.6,0.7,-0.7,0.8], "s_speed":[6,6,6,6,6,6,6,6,6], "d_speed":[0.6,-0.6,0.7,-0.7,0.8,-0.8,0.9,-0.9,1.0], "d_left":[1.2,0.8,1.3,0.7,1.4,0.6,1.5,0.5,1.6], "d_right":[0.8,1.2,0.7,1.3,0.6,1.4,0.5,1.5,0.4], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":2, "data_samples":7, "s_pos":[10, 15, 20, 25, 30, 35, 40], "d_pos":[0.4, -0.35, 0.45, -0.4, 0.5, -0.45, 0.5], "s_speed":[6, 6.5, 6, 5.5, 6, 6.5, 6], "d_speed":[0.6, -0.7, 0.65, -0.75, 0.7, -0.65, 0.6], "d_left":[0.5, 0.3, 0.6, 0.4, 0.5, 0.3, 0.6], "d_right":[0.5, 0.6, 0.4, 0.6, 0.5, 0.7, 0.4], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":2, "data_samples":4, "s_pos":[2,3,4,5], "d_pos":[0.5,-0.5,0.4,-0.4], "s_speed":[6,6.5,6,6.5], "d_speed": [0.6,0.7,0.6,0.7], "d_left": [1.5,1.2,1.8,1.3], "d_right":[1.7,1.9,1.4,1.8], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":4, "data_samples":5, "s_pos":[0,1.5,3.1,4.6,6.2], "d_pos":[0.2,-0.4,0.3,-0.5,0.4], "s_speed":[5.5,6.0,5.8,6.2,5.9], "d_speed": [0.7,0.8,0.9,0.7,0.8], "d_left": [1.2,1.1,1.3,1.0,1.1], "d_right":[1.3,1.4,1.2,1.5,1.4], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":2, "data_samples":11, "s_pos":[0,1,2,3,4,5,6,7,8,9,10], "d_pos":[0.1,-0.2,0.3,-0.4,0.2,-0.1,0.4,-0.3,0.2,-0.2,0.1], "s_speed":[6,6.5,6,5.5,6,6.5,6,5.5,6,6.5,6], "d_speed":[0.6,-0.7,0.5,-0.6,0.7,-0.5,0.6,-0.7,0.5,-0.6,0.7], "d_left":[0.8,0.7,0.9,0.6,0.8,0.7,0.9,0.6,0.8,0.7,0.9], "d_right":[0.7,0.8,0.6,0.9,0.7,0.8,0.6,0.9,0.7,0.8,0.6], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":1, "data_samples":3, "s_pos":[0,1,2], "d_pos":[0.4,-0.4,0.5], "s_speed":[6,6,6], "d_speed":[0.6,-0.6,0.7], "d_left":[0.6,0.5,0.4], "d_right":[0.5,0.4,0.6], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":0, "data_samples":14, "s_pos":[0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5], "d_pos":[0.1,-0.2,0.3,-0.3,0.2,-0.1,0.3,-0.3,0.2,-0.2,0.3,-0.3,0.1,-0.2], "s_speed":[6,6.5,7,6.5,6,6.5,7,6.5,6,6.5,7,6.5,6,6.5], "d_speed": [0.6,0.7,0.8,0.7,0.6,0.7,0.8,0.7,0.6,0.7,0.8,0.7,0.6,0.7], "d_left": [1.5,1.3,1.6,1.4,1.5,1.3,1.6,1.4,1.5,1.3,1.6,1.4,1.5,1.3], "d_right":[1.5,1.7,1.4,1.6,1.5,1.7,1.4,1.6,1.5,1.7,1.4,1.6,1.5,1.7], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":1, "data_samples":14, "s_pos":[10, 10.5, 11, 11.5, 12, 12.5, 13, 13.5, 14, 14.5, 15, 15.5, 16, 16.5], "d_pos":[0.3, -0.4, 0.5, -0.5, 0.4, -0.3, 0.5, -0.4, 0.4, -0.5, 0.5, -0.4, 0.3, -0.3], "s_speed":[5.5, 6, 5.8, 6.2, 5.9, 5.7, 6.1, 5.8, 6, 5.9, 6.2, 5.8, 6, 5.9], "d_speed": [0.6, 0.7, 0.6, 0.7, 0.6, 0.7, 0.6, 0.7, 0.6, 0.7, 0.6, 0.7, 0.6, 0.7], "d_left": [1, 0.9, 0.8, 0.9, 1, 0.9, 0.8, 1, 0.9, 0.8, 1, 0.9, 1, 0.9], "d_right":[0.9, 1, 0.9, 1, 0.8, 1, 0.9, 0.8, 1, 0.9, 1, 0.8, 0.9, 1], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":0, "data_samples":5, "s_pos":[0,1.8,3.5,5.4,7.2], "d_pos":[0.2,-0.4,0.3,-0.5,0.4], "s_speed":[6,6.2,6.1,6.3,6.2], "d_speed":[0.6,-0.7,0.6,-0.8,0.7], "d_left":[1.2,1.1,1.3,1.0,1.2], "d_right":[0.8,0.9,0.7,1.0,0.8], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":5, "data_samples":15, "s_pos":[0,0.3,0.6,0.9,1.2,1.5,1.8,2.1,2.4,2.7,3.0,3.3,3.6,3.9,4.2], "d_pos":[0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3], "s_speed":[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5], "d_speed":[0.6,-0.6,0.6,-0.6,0.6,-0.6,0.6,-0.6,0.6,-0.6,0.6,-0.6,0.6,-0.6,0.6], "d_left":[0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5], "d_right":[0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":1, "data_samples":14, "s_pos":[0,1,2,3,4,5,6,7,8,9,10,11,12,13], "d_pos":[0.4,-0.4,0.5,-0.5,0.4,-0.4,0.5,-0.5,0.4,-0.4,0.5,-0.5,0.4,-0.4], "s_speed":[6,6.5,7,6.5,6,6.5,7,6.5,6,6.5,7,6.5,6,6.5], "d_speed": [0.6,-0.6,0.7,-0.7,0.6,-0.6,0.7,-0.7,0.6,-0.6,0.7,-0.7,0.6,-0.6], "d_left": [1.2,1.3,1.1,1.2,1.3,1.2,1.3,1.1,1.2,1.3,1.2,1.3,1.1,1.2], "d_right":[0.8,0.7,0.9,0.8,0.7,0.8,0.7,0.9,0.8,0.7,0.8,0.7,0.9,0.8], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":2, "data_samples":13, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], "d_pos":[0.4, -0.4, 0.35, -0.35, 0.3, -0.3, 0.25, -0.25, 0.2, -0.2, 0.15, -0.15, 0.1], "s_speed":[6, 6.5, 6.8, 7, 6.5, 6, 6.2, 7, 6.7, 7, 6.9, 6.8, 6.5], "d_speed": [0.6, -0.6, 0.55, -0.55, 0.5, -0.5, 0.45, -0.45, 0.4, -0.4, 0.35, -0.35, 0.3], "d_left": [1, 0.9, 1.1, 0.8, 1.2, 0.7, 1.3, 0.6, 1.4, 0.5, 1.5, 0.4, 1.6], "d_right":[1.5, 1.6, 1.4, 1.7, 1.3, 1.8, 1.2, 1.9, 1.1, 2, 1, 2.1, 0.9], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":5, "data_samples":6, "s_pos":[10,15,20,25,30,35], "d_pos":[0.4,-0.4,0.5,-0.5,0.6,-0.6], "s_speed":[6,6,6,6,6,6], "d_speed":[0.7,-0.7,0.8,-0.8,0.9,-0.9], "d_left":[1,1.5,1,1.5,1,1.5], "d_right":[1.5,1,1.5,1,1.5,1], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":3, "data_samples":8, "s_pos":[5,10,15,20,25,30,35,40], "d_pos":[0.2,-0.3,0.4,-0.5,0.3,-0.4,0.5,-0.3], "s_speed":[6,6,6,6,6,6,6,6], "d_speed": [0.6,-0.6,0.7,-0.7,0.6,-0.7,0.7,-0.6], "d_left": [0.6,0.5,0.7,0.4,0.6,0.5,0.7,0.4], "d_right":[0.6,0.5,0.7,0.4,0.6,0.5,0.7,0.4], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":3, "data_samples":10, "s_pos":[10, 15, 20, 25, 30, 35, 40, 45, 50, 55], "d_pos":[0.5, -0.5, 0.4, -0.4, 0.3, -0.3, 0.2, -0.2, 0.1, -0.1], "s_speed":[6, 6.5, 7, 6.8, 6.2, 6.5, 7, 6.6, 6.3, 6.4], "d_speed": [0.6, -0.6, 0.5, -0.5, 0.4, -0.4, 0.3, -0.3, 0.2, -0.2], "d_left": [1.5, 1.4, 1.6, 1.5, 1.4, 1.5, 1.6, 1.4, 1.5, 1.6], "d_right":[1.5, 1.6, 1.4, 1.5, 1.6, 1.5, 1.4, 1.6, 1.5, 1.4], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/racingline.json:
--------------------------------------------------------------------------------
1 | [{"time":0, "data_samples":11, "s_pos":[0, 1.5, 3, 4.5, 6, 7.5, 9, 10.5, 12, 13.5, 15], "d_pos":[0.1, 0.05, 0, -0.05, 0.1, 0.05, 0, -0.05, 0.1, 0.05, 0], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.2, 0.1, 0, -0.1, 0.2, 0.1, 0, -0.1, 0.2, 0.1, 0], "d_left":[1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2], "d_right":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":3, "data_samples":5, "s_pos":[10,15,20,25,30], "d_pos":[0.1,0.2,-0.1,0.1,0.0], "s_speed":[6,6.5,7,6.8,6.9], "d_speed":[0.1,0.2,0.3,0.1,0.0], "d_left":[2,2.1,2,1.9,2], "d_right":[1.8,1.9,1.8,1.7,1.8], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":5, "data_samples":8, "s_pos":[20, 25, 30, 35, 40, 45, 50, 55], "d_pos":[0.1, 0.05, 0.02, -0.01, 0.04, 0.08, 0.1, 0.05], "s_speed":[6, 6.5, 6.2, 6.8, 7, 6.5, 6.3, 6.8], "d_speed":[0.2, 0.15, 0.1, 0.05, 0.1, 0.14, 0.2, 0.15], "d_left":[1.2, 1.3, 1.1, 1.4, 1.5, 1.3, 1.2, 1.4], "d_right":[1.5, 1.4, 1.6, 1.3, 1.2, 1.4, 1.5, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":2, "data_samples":4, "s_pos":[10,15,20,25], "d_pos":[0.1,0.2,0.1,0.2], "s_speed":[6,6.5,6.8,7], "d_speed": [0.2,0.1,0.2,0.1], "d_left": [1.5,1.3,1.7,1.6], "d_right":[1.2,1.4,1.1,1.3], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":2, "data_samples":11, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "d_pos":[0.1, 0.05, 0.0, -0.05, -0.1, -0.15, -0.1, -0.05, 0.0, 0.05, 0.1], "s_speed":[5.5, 5.6, 5.5, 5.7, 5.6, 5.5, 5.6, 5.5, 5.7, 5.6, 5.5], "d_speed": [0.0, 0.05, 0.0, -0.05, 0.0, 0.05, 0.0, -0.05, 0.0, 0.05, 0.0], "d_left": [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":3, "data_samples":12, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], "d_pos":[0.1, 0.05, 0.1, 0.0, -0.05, -0.1, 0.05, 0.0, 0.1, -0.05, 0.0, 0.05], "s_speed":[6, 6.5, 6, 5.5, 6, 6.5, 6, 5.5, 6, 6.5, 6, 5.5], "d_speed":[0.1, 0.2, 0.1, 0.15, 0.1, 0.2, 0.1, 0.15, 0.1, 0.2, 0.1, 0.15], "d_left":[1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4], "d_right":[1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":4, "data_samples":12, "s_pos":[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], "d_pos":[0.1, 0.15, 0.1, 0.05, 0.1, 0.15, 0.1, 0.05, 0.1, 0.15, 0.1, 0.05], "s_speed":[6, 6.5, 6.2, 6.8, 6.5, 6.7, 6.3, 6.9, 6.6, 6.4, 6.7, 6.5], "d_speed":[0.1, 0.2, 0.15, 0.1, 0.2, 0.15, 0.1, 0.2, 0.15, 0.1, 0.2, 0.15], "d_left":[1, 1.1, 1.2, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1.2, 1.1], "d_right":[0.9, 0.8, 0.7, 0.8, 0.7, 0.8, 0.9, 0.8, 0.7, 0.8, 0.7, 0.8], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":5, "data_samples":11, "s_pos":[0,1,2,3,4,5,6,7,8,9,10], "d_pos":[0.1,0.1,0.1,0.2,0.2,0.2,0.1,0.1,0.1,0.2,0.2], "s_speed":[6,6,6,6,6,6,6,6,6,6,6], "d_speed": [0.1,0.1,0.2,0.2,0.1,0.1,0.1,0.2,0.2,0.1,0.1], "d_left": [1.5,1.5,1.5,1.4,1.4,1.5,1.5,1.5,1.5,1.4,1.4], "d_right":[1.5,1.5,1.5,1.6,1.6,1.5,1.5,1.5,1.5,1.6,1.6], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":2, "data_samples":13, "s_pos":[0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0], "d_pos":[0.1, 0.05, 0.0, -0.05, -0.1, -0.05, 0.0, 0.05, 0.1, 0.05, 0.0, -0.05, -0.1], "s_speed":[6.5, 6.6, 6.5, 6.4, 6.5, 6.6, 6.5, 6.4, 6.5, 6.6, 6.5, 6.4, 6.5], "d_speed":[0.0, 0.05, 0.1, 0.05, 0.0, -0.05, -0.1, -0.05, 0.0, 0.05, 0.1, 0.05, 0.0], "d_left":[1.2, 1.3, 1.2, 1.3, 1.2, 1.3, 1.2, 1.3, 1.2, 1.3, 1.2, 1.3, 1.2], "d_right":[0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8, 0.7, 0.8], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":1, "data_samples":10, "s_pos":[0,1,2,3,4,5,6,7,8,9], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[6,6,6,6,6,6,6,6,6,6], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[1,1.1,1.1,1,1,1.1,1.1,1,1,1.1], "d_right":[1.2,1.1,1.1,1.2,1.2,1.1,1.1,1.2,1.2,1.1], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":5, "data_samples":7, "s_pos":[0,5,10,15,20,25,30], "d_pos":[0.1,0.05,-0.1,0.0,0.1,0.05,-0.05], "s_speed":[6,6.5,7,6.8,6.5,6.2,6], "d_speed":[0.1,0.2,0.1,0.05,0.1,0.15,0.1], "d_left":[1.5,1.6,1.4,1.5,1.5,1.6,1.4], "d_right":[1.2,1.3,1.1,1.2,1.2,1.3,1.1], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":1, "data_samples":15, "s_pos":[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7], "d_pos":[0.1, 0.12, 0.08, 0.05, 0.1, 0.15, 0.1, 0.05, 0.08, 0.12, 0.1, 0.07, 0.05, 0.1, 0.12], "s_speed":[6, 6.1, 6.2, 6.1, 6, 5.9, 6, 6.1, 6.2, 6.1, 6, 5.9, 6, 6.1, 6.2], "d_speed": [0.2, 0.15, 0.1, 0.05, 0.2, 0.25, 0.15, 0.1, 0.05, 0.1, 0.2, 0.15, 0.1, 0.05, 0.2], "d_left": [1.5, 1.4, 1.3, 1.5, 1.6, 1.5, 1.4, 1.3, 1.5, 1.5, 1.4, 1.3, 1.5, 1.6, 1.5], "d_right":[1.5, 1.6, 1.7, 1.5, 1.4, 1.5, 1.6, 1.7, 1.5, 1.5, 1.6, 1.7, 1.5, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":0, "data_samples":4, "s_pos":[0,2,4,6,8], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[6,6,6,6,6], "d_speed":[0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":2, "data_samples":9, "s_pos":[0, 1.5, 3.2, 4.8, 6.5, 8.1, 9.7, 11.4, 12.9], "d_pos":[0.1, 0.05, 0.0, -0.05, 0.0, 0.05, 0.1, 0.05, 0.0], "s_speed":[6.5, 6.7, 6.8, 6.6, 6.7, 6.9, 6.8, 6.7, 6.5], "d_speed":[0.1, 0.2, 0.1, 0.0, -0.1, -0.2, -0.1, 0.0, 0.1], "d_left":[1.5, 1.6, 1.5, 1.5, 1.6, 1.5, 1.6, 1.5, 1.6], "d_right":[1.4, 1.3, 1.4, 1.5, 1.3, 1.4, 1.3, 1.4, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":1, "data_samples":7, "s_pos":[0,1,2,3,4,5,6], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[6,6,6,6,6,6,6], "d_speed": [0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left": [1.2,1.1,1.3,1.2,1.2,1.1,1.3], "d_right":[1.5,1.6,1.4,1.5,1.5,1.6,1.4], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":5, "data_samples":9, "s_pos":[10, 15, 20, 25, 30, 35, 40, 45, 50], "d_pos":[0.1, 0.15, 0.05, 0.1, 0.2, 0.1, 0.05, 0.1, 0.15], "s_speed":[6, 6.5, 7, 6.8, 6.7, 6.9, 7, 6.8, 6.5], "d_speed": [0.2, 0.25, 0.15, 0.2, 0.3, 0.2, 0.15, 0.2, 0.25], "d_left": [1.5, 1.4, 1.6, 1.5, 1.4, 1.6, 1.5, 1.4, 1.6], "d_right":[1.0, 1.1, 0.9, 1.0, 1.1, 0.9, 1.0, 1.1, 0.9], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":1, "data_samples":6, "s_pos":[10, 15, 20, 25, 30, 35], "d_pos":[0.1, 0.1, 0.2, 0.2, 0.1, 0.1], "s_speed":[6, 6.5, 7, 6.5, 6, 5.5], "d_speed":[0.2, 0.1, 0.3, 0.2, 0.1, 0.2], "d_left":[1.5, 1.6, 1.4, 1.5, 1.6, 1.5], "d_right":[1.2, 1.1, 1.3, 1.2, 1.1, 1.2], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":2, "data_samples":15, "s_pos":[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7], "d_pos":[0.1, 0.05, 0, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, 0, -0.05, -0.1, -0.05, 0], "s_speed":[5.5, 5.6, 5.7, 5.7, 5.8, 5.8, 5.9, 5.9, 6, 6, 6.1, 6.1, 6.2, 6.2, 6.3], "d_speed":[0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.15, -0.1, -0.05, 0, 0.05, 0.1], "d_left":[1.2, 1.3, 1.2, 1.1, 1.2, 1.3, 1.2, 1.1, 1.2, 1.3, 1.2, 1.1, 1.2, 1.3, 1.2], "d_right":[0.8, 0.7, 0.8, 0.9, 0.8, 0.7, 0.8, 0.9, 0.8, 0.7, 0.8, 0.9, 0.8, 0.7, 0.8], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":2, "data_samples":12, "s_pos":[0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 11.0, 12.1], "d_pos":[0.1, 0.05, 0.0, -0.05, -0.1, -0.05, 0.0, 0.05, 0.1, 0.05, 0.0, -0.05], "s_speed":[6.5, 6.6, 6.7, 6.8, 6.9, 7.0, 7.1, 7.2, 7.3, 7.4, 7.5, 7.6], "d_speed":[0.1, 0.1, 0.0, -0.1, -0.1, -0.1, 0.0, 0.1, 0.1, 0.1, 0.0, -0.1], "d_left":[1.2, 1.1, 1.3, 1.2, 1.1, 1.3, 1.2, 1.1, 1.3, 1.2, 1.1, 1.3], "d_right":[0.8, 0.9, 0.7, 0.8, 0.9, 0.7, 0.8, 0.9, 0.7, 0.8, 0.9, 0.7], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":5, "data_samples":9, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40], "d_pos":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed": [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], "d_left": [2, 2, 2, 2, 2, 2, 2, 2, 2], "d_right":[2, 2, 2, 2, 2, 2, 2, 2, 2], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":1, "data_samples":5, "s_pos":[10,12,14,16,18], "d_pos":[0.1,0.1,0.2,0.1,0.1], "s_speed":[6,6,6,6,6], "d_speed":[0.1,0.1,0.1,0.1,0.1], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":0, "data_samples":6, "s_pos":[0,1,2,3,4,5], "d_pos":[0.1,0.2,0.15,0.2,0.1,0.05], "s_speed":[6.5,6.7,6.6,6.8,6.5,6.6], "d_speed": [0.1,0.2,0.15,0.2,0.1,0.05], "d_left": [1.2,1.1,1.3,1.2,1.3,1.2], "d_right":[1.1,1.2,1.1,1.1,1.2,1.3], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":1, "data_samples":14, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "d_pos":[0.1, 0.05, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, 0, -0.05, -0.1, -0.05, 0], "s_speed":[6, 6.5, 6.8, 7, 6.9, 6.7, 6.5, 6.3, 6.4, 6.6, 6.8, 7, 6.9, 6.8], "d_speed":[0.2, 0.15, 0.1, 0.05, 0.1, 0.15, 0.2, 0.25, 0.2, 0.15, 0.1, 0.05, 0.1, 0.15], "d_left":[1.5, 1.6, 1.7, 1.8, 1.7, 1.6, 1.5, 1.4, 1.5, 1.6, 1.7, 1.8, 1.7, 1.6], "d_right":[1.2, 1.3, 1.4, 1.5, 1.4, 1.3, 1.2, 1.1, 1.2, 1.3, 1.4, 1.5, 1.4, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":2, "data_samples":6, "s_pos":[10, 15, 20, 25, 30, 35], "d_pos":[0.1, 0.1, 0.15, 0.1, 0.05, 0.1], "s_speed":[5.5, 6.0, 6.5, 6.0, 5.5, 6.0], "d_speed":[0.1, 0.1, 0.2, 0.1, 0.1, 0.1], "d_left":[1.2, 1.3, 1.4, 1.3, 1.2, 1.3], "d_right":[0.8, 0.7, 0.6, 0.7, 0.8, 0.7], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":3, "data_samples":11, "s_pos":[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], "d_pos":[0.1, 0.15, 0, -0.1, 0.05, -0.05, 0.1, 0.05, -0.1, 0.05, 0], "s_speed":[6.5, 6.7, 6.6, 6.8, 6.7, 6.6, 6.5, 6.7, 6.8, 6.6, 6.7], "d_speed":[0.2, 0.3, 0.1, 0.25, 0.15, 0.2, 0.1, 0.25, 0.3, 0.2, 0.15], "d_left":[1.5, 1.4, 1.5, 1.3, 1.6, 1.5, 1.4, 1.3, 1.5, 1.4, 1.6], "d_right":[1.2, 1.3, 1.2, 1.4, 1.1, 1.2, 1.3, 1.4, 1.2, 1.3, 1.1], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/reversed.json:
--------------------------------------------------------------------------------
1 | [{"time":0, "data_samples":11, "s_pos":[100.5, 100.3, 100.1, 99.9, 99.7, 99.5, 99.3, 99.1, 98.9, 98.7, 98.5], "d_pos":[0.2, 0.25, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25], "s_speed":[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5], "d_speed":[0.12, 0.1, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25], "d_left":[2.0, 2.1, 2.2, 2.1, 2.0, 1.9, 1.8, 1.9, 2.0, 2.1, 2.2], "d_right":[1.8, 1.7, 1.6, 1.7, 1.8, 1.9, 2.0, 1.9, 1.8, 1.7, 1.6], "reversing": true, "crashed": false, "facing_wall": false},
2 | {"time":3, "data_samples":11, "s_pos":[50, 49.5, 49, 48.5, 48, 47.5, 47, 46.5, 46, 45.5, 45], "d_pos":[0.1, 0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25], "s_speed":[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], "d_speed":[0.1, 0.1, 0.05, 0, -0.05, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1], "d_left":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "reversing": true, "crashed": false, "facing_wall": false},
3 | {"time":5, "data_samples":15, "s_pos":[5.0, 4.9, 4.8, 4.7, 4.6, 4.5, 4.4, 4.3, 4.2, 4.1, 4.0, 3.9, 3.8, 3.7, 3.6], "d_pos":[0.1, 0.15, 0.2, 0.25, 0.3, 0.28, 0.25, 0.2, 0.15, 0.1, 0.05, 0.0, -0.05, -0.1, -0.15], "s_speed":[-0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2], "d_speed": [0.05, 0.1, 0.15, 0.2, 0.25, 0.2, 0.15, 0.1, 0.05, 0.0, -0.05, -0.1, -0.15, -0.2, -0.25], "d_left": [1.5, 1.45, 1.4, 1.35, 1.3, 1.28, 1.25, 1.2, 1.15, 1.1, 1.05, 1.0, 0.95, 0.9, 0.85], "d_right":[2.5, 2.55, 2.6, 2.65, 2.7, 2.72, 2.75, 2.8, 2.85, 2.9, 2.95, 3.0, 3.05, 3.1, 3.15], "reversing": true, "crashed": false, "facing_wall": false},
4 | {"time":2, "data_samples":12, "s_pos":[50,49,48,47,46,45,44,43,42,41,40,39], "d_pos":[0.1,0.15,0.2,0.25,0.3,0.2,0.1,0.05,0,-0.05,-0.1,-0.15], "s_speed":[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[1.5,1.4,1.3,1.2,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8], "d_right":[1.5,1.6,1.7,1.8,1.9,1.8,1.7,1.6,1.5,1.4,1.3,1.2], "reversing": true, "crashed": false, "facing_wall": false},
5 | {"time":3, "data_samples":5, "s_pos":[50, 49, 48, 47, 46], "d_pos":[0.2, 0.25, 0.3, 0.35, 0.4], "s_speed":[-2, -2, -2, -2, -2], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[1, 1.2, 1.3, 1.4, 1.5], "d_right":[0.8, 0.6, 0.5, 0.4, 0.3], "reversing": true, "crashed": false, "facing_wall": false},
6 | {"time":5, "data_samples":6, "s_pos":[50, 49, 48, 47, 46, 45], "d_pos":[0.2, 0.25, 0.3, 0.28, 0.26, 0.24], "s_speed":[-1, -1, -1, -1, -1, -1], "d_speed":[0.1, 0.15, 0.2, 0.18, 0.16, 0.14], "d_left":[1.2, 1.1, 1.0, 1.0, 1.1, 1.2], "d_right":[0.8, 0.9, 1.0, 1.0, 0.9, 0.8], "reversing": true, "crashed": false, "facing_wall": false},
7 | {"time":1, "data_samples":12, "s_pos":[10,9.5,9,8.5,8,7.5,7,6.5,6,5.5,5,4.5], "d_pos":[0.2,0.15,0.1,0.05,0,-0.05,-0.1,-0.15,-0.2,-0.25,-0.3,-0.35], "s_speed":[-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5], "d_speed": [0.4,0.35,0.3,0.25,0.2,0.15,0.1,0.05,0,-0.05,-0.1,-0.15], "d_left": [0.8,0.85,0.9,0.95,1,1.05,1.1,1.15,1.2,1.25,1.3,1.35], "d_right":[0.8,0.75,0.7,0.65,0.6,0.55,0.5,0.45,0.4,0.35,0.3,0.25], "reversing": true, "crashed": false, "facing_wall": false},
8 | {"time":13, "data_samples":13, "s_pos":[10,9,8,7,6,5,4,3,2,1,0,-1,-2], "d_pos":[0.1,0.0,-0.1,0.0,0.1,0.0,-0.1,0.0,0.1,0.0,-0.1,0.0,0.1], "s_speed":[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1], "d_speed": [0.1,-0.1,0.1,-0.1,0.1,-0.1,0.1,-0.1,0.1,-0.1,0.1,-0.1,0.1], "d_left": [0.8,0.9,0.8,0.9,0.8,0.9,0.8,0.9,0.8,0.9,0.8,0.9,0.8], "d_right":[1.2,1.1,1.2,1.1,1.2,1.1,1.2,1.1,1.2,1.1,1.2,1.1,1.2], "reversing": true, "crashed": false, "facing_wall": false},
9 | {"time":2, "data_samples":6, "s_pos":[10,9,8,7,6,5], "d_pos":[0.1,-0.1,0.1,-0.1,0.1,-0.1], "s_speed":[-1,-1,-1,-1,-1,-1], "d_speed":[0.4,-0.4,0.4,-0.4,0.4,-0.4], "d_left":[0.8,0.9,1,1.1,1.2,1.3], "d_right":[1.2,1.1,1,0.9,0.8,0.7], "reversing": true, "crashed": false, "facing_wall": false},
10 | {"time":3, "data_samples":13, "s_pos":[5, 4.8, 4.6, 4.4, 4.2, 4.0, 3.8, 3.6, 3.4, 3.2, 3.0, 2.8, 2.6], "d_pos":[0.2, 0.25, 0.28, 0.3, 0.26, 0.22, 0.2, 0.18, 0.15, 0.1, 0.05, 0.02, 0.0], "s_speed":[-0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6], "d_speed":[0.1, 0.15, 0.1, 0.05, -0.05, -0.1, -0.15, -0.2, -0.15, -0.1, -0.05, 0.0, 0.0], "d_left":[1.2, 1.15, 1.1, 1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45], "d_right":[0.8, 0.85, 0.9, 1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55], "reversing": true, "crashed": false, "facing_wall": false},
11 | {"time":2, "data_samples":6, "s_pos":[10, 9.5, 9, 8.5, 8, 7.5], "d_pos":[0.2, 0.1, 0, -0.1, -0.2, -0.3], "s_speed":[-2.5, -2.5, -2.5, -2.5, -2.5, -2.5], "d_speed":[0.3, 0.3, 0.3, 0.3, 0.3, 0.3], "d_left":[0.8, 0.9, 1, 1.1, 1.2, 1.3], "d_right":[1.2, 1.1, 1, 0.9, 0.8, 0.7], "reversing": true, "crashed": false, "facing_wall": false},
12 | {"time":2, "data_samples":14, "s_pos":[25.0, 24.6, 24.2, 23.8, 23.4, 23.0, 22.6, 22.2, 21.8, 21.4, 21.0, 20.6, 20.2, 19.8], "d_pos":[0.2, 0.3, 0.4, 0.3, 0.2, 0.3, 0.4, 0.3, 0.2, 0.3, 0.4, 0.3, 0.2, 0.3], "s_speed":[-3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0, -3.0], "d_speed":[0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5, 0.6, 0.5], "d_left":[0.6, 0.5, 0.4, 0.5, 0.6, 0.5, 0.4, 0.5, 0.6, 0.5, 0.4, 0.5, 0.6, 0.5], "d_right":[1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5], "reversing": true, "crashed": false, "facing_wall": false},
13 | {"time":1, "data_samples":9, "s_pos":[10,9.8,9.6,9.4,9.2,9,8.8,8.6,8.4], "d_pos":[0.2,0.15,0.1,0.05,0,-0.05,-0.1,-0.15,-0.2], "s_speed":[-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2], "d_speed":[-0.05,-0.05,-0.05,-0.05,-0.05,-0.05,-0.05,-0.05,-0.05], "d_left":[1.5,1.6,1.7,1.8,1.9,2,2.1,2.2,2.3], "d_right":[1,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2], "reversing": true, "crashed": false, "facing_wall": false},
14 | {"time":4, "data_samples":15, "s_pos":[5.0, 4.7, 4.4, 4.1, 3.8, 3.5, 3.2, 2.9, 2.6, 2.3, 2.0, 1.7, 1.4, 1.1, 0.8], "d_pos":[0.2, 0.15, 0.1, 0.05, 0.0, -0.05, -0.1, -0.15, -0.2, -0.25, -0.3, -0.35, -0.4, -0.45, -0.5], "s_speed":[-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1], "d_right":[1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9], "reversing": true, "crashed": false, "facing_wall": false},
15 | {"time":4, "data_samples":15, "s_pos":[100, 99.5, 99, 98.5, 98, 97.5, 97, 96.5, 96, 95.5, 95, 94.5, 94, 93.5, 93], "d_pos":[0.1, 0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25, -0.2, -0.15, -0.1, -0.05], "s_speed":[-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2], "d_speed": [-0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1], "d_left": [0.8, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35], "d_right":[1.2, 1.3, 1.25, 1.2, 1.15, 1.1, 1.05, 1, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65], "reversing": true, "crashed": false, "facing_wall": false},
16 | {"time":1, "data_samples":12, "s_pos":[10.5,10.4,10.3,10.2,10.1,10.0,9.9,9.8,9.7,9.6,9.5,9.4], "d_pos":[0.1,-0.2,0.15,-0.1,0.2,-0.15,0.1,-0.2,0.15,-0.1,0.2,-0.15], "s_speed":[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0], "d_speed": [0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3,0.3,-0.3], "d_left": [0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6], "d_right":[0.5,0.4,0.5,0.4,0.5,0.4,0.5,0.4,0.5,0.4,0.5,0.4], "reversing": true, "crashed": false, "facing_wall": false},
17 | {"time":0, "data_samples":10, "s_pos":[50,48,46,44,42,40,38,36,34,32], "d_pos":[0.5,0.4,0.3,0.2,0.1,0,-0.1,-0.2,-0.3,-0.4], "s_speed":[-2,-2,-2,-2,-2,-2,-2,-2,-2,-2], "d_speed":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_left":[0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6], "d_right":[1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4,1.4], "reversing": true, "crashed": false, "facing_wall": false},
18 | {"time":2, "data_samples":4, "s_pos":[10,9,8,7], "d_pos":[0.2,0.1,0.0,-0.1], "s_speed":[-2,-2,-2,-2], "d_speed": [0.1,0.1,0.1,0.1], "d_left": [0.5,0.6,0.7,0.8], "d_right":[0.7,0.8,0.9,1.0], "reversing": true, "crashed": false, "facing_wall": true},
19 | {"time":2, "data_samples":14, "s_pos":[10, 9.8, 9.6, 9.4, 9.2, 9, 8.8, 8.6, 8.4, 8.2, 8, 7.8, 7.6, 7.4], "d_pos":[0.1, 0.15, 0.2, 0.25, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15], "s_speed":[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, -0.1], "d_left":[1.2, 1.3, 1.4, 1.5, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7], "d_right":[0.8, 0.7, 0.6, 0.5, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3], "reversing": true, "crashed": false, "facing_wall": false},
20 | {"time":4, "data_samples":8, "s_pos":[50, 49, 48, 47, 46, 45, 44, 43], "d_pos":[0.2, 0.25, 0.28, 0.3, 0.28, 0.25, 0.2, 0.15], "s_speed":[-3, -3, -3, -3, -3, -3, -3, -3], "d_speed":[0.1, 0.15, 0.18, 0.2, 0.18, 0.15, 0.1, 0.05], "d_left":[0.9, 0.85, 0.8, 0.75, 0.8, 0.85, 0.9, 0.95], "d_right":[1.1, 1.15, 1.2, 1.25, 1.2, 1.15, 1.1, 1.05], "reversing": true, "crashed": false, "facing_wall": false},
21 | {"time":1, "data_samples":6, "s_pos":[5,4.8,4.6,4.4,4.2,4], "d_pos":[0.1,0.15,0.1,0.05,0,-0.05], "s_speed":[-1,-1,-1,-1,-1,-1], "d_speed":[0.2,0.1,-0.1,-0.15,-0.2,-0.25], "d_left":[1.5,1.4,1.6,1.7,1.8,1.9], "d_right":[1.5,1.6,1.4,1.3,1.2,1.1], "reversing": true, "crashed": false, "facing_wall": false},
22 | {"time":5, "data_samples":13, "s_pos":[3.5, 3.4, 3.3, 3.2, 3.1, 3.0, 2.9, 2.8, 2.7, 2.6, 2.5, 2.4, 2.3], "d_pos":[0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25, -0.2, -0.15, -0.1], "s_speed":[-0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2, -0.2], "d_speed":[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left":[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1], "d_right":[1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4], "reversing": true, "crashed": false, "facing_wall": false},
23 | {"time":3, "data_samples":10, "s_pos":[15.3, 15.1, 14.9, 14.7, 14.5, 14.3, 14.1, 13.9, 13.7, 13.5], "d_pos":[0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1, -0.15, -0.2, -0.25], "s_speed":[-0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6, -0.6], "d_speed": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left": [1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1], "d_right":[2.5, 2.4, 2.3, 2.2, 2.1, 2.0, 1.9, 1.8, 1.7, 1.6], "reversing": true, "crashed": false, "facing_wall": true},
24 | {"time":5, "data_samples":14, "s_pos":[25, 24.5, 24, 23.5, 23, 22.5, 22, 21.5, 21, 20.5, 20, 19.5, 19, 18.5], "d_pos":[0.2, 0.1, 0.0, -0.1, -0.2, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.2, 0.1], "s_speed":[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], "d_speed": [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], "d_left": [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1, 1.15, 1.2, 1.25], "d_right":[1.4, 1.35, 1.3, 1.25, 1.2, 1.15, 1.1, 1.05, 1.0, 0.95, 0.9, 0.85, 0.8, 0.75], "reversing": true, "crashed": false, "facing_wall": false}
25 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/speed.json:
--------------------------------------------------------------------------------
1 | [{"time":1, "data_samples":8, "s_pos":[10, 11, 12, 13, 14, 15, 16, 17], "d_pos":[0.1, 0.15, 0.2, 0.1, 0.05, 0.0, 0.05, 0.1], "s_speed":[4.5, 5.0, 5.5, 5.0, 4.5, 5.0, 5.5, 5.0], "d_speed": [0.1, 0.2, 0.3, 0.1, 0.0, 0.1, 0.2, 0.1], "d_left": [1.5, 1.4, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5], "d_right":[1.0, 1.1, 0.9, 1.0, 1.1, 1.0, 0.9, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":5, "data_samples":5, "s_pos":[10,15,21,27,34], "d_pos":[0.1,0.05,-0.1,0.1,-0.05], "s_speed":[5,6,6,7,7], "d_speed":[0.2,0.3,0.4,0.2,0.3], "d_left":[1.5,1.6,1.5,1.4,1.6], "d_right":[1.6,1.5,1.6,1.7,1.5], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":1, "data_samples":11, "s_pos":[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "d_pos":[0.1, 0.2, 0.1, 0.0, -0.1, -0.2, -0.1, 0.0, 0.1, 0.2, 0.1], "s_speed":[4, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6], "d_speed":[0.1, 0.2, 0.1, 0.0, -0.1, -0.2, -0.1, 0.0, 0.1, 0.2, 0.1], "d_left":[1.0, 1.1, 1.0, 1.0, 0.9, 0.8, 0.9, 1.0, 1.1, 1.2, 1.1], "d_right":[1.1, 1.0, 1.1, 1.1, 1.2, 1.3, 1.2, 1.1, 1.0, 0.9, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":3, "data_samples":7, "s_pos":[0, 1.2, 2.4, 3.6, 4.8, 6.0, 7.2], "d_pos":[0.05, 0.1, 0.05, 0, -0.05, 0, 0.05], "s_speed":[3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5], "d_speed":[0.1, 0.2, 0.1, 0, -0.1, 0, 0.1], "d_left":[1.5, 1.4, 1.5, 1.6, 1.7, 1.6, 1.5], "d_right":[1.5, 1.6, 1.5, 1.4, 1.3, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":1, "data_samples":3, "s_pos":[0,1.8,3.6], "d_pos":[0.1,0.15,0.2], "s_speed":[6,6,6], "d_speed":[0.3,0.3,0.3], "d_left":[1.2,1.1,1.0], "d_right":[1.0,1.1,1.2], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":5, "data_samples":12, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], "d_pos":[0.1, 0.1, 0.2, 0.15, 0.2, 0.1, 0.15, 0.2, 0.1, 0.15, 0.2, 0.1], "s_speed":[5, 5.5, 5.2, 5.7, 6, 5.8, 6.2, 5.5, 5.9, 6.1, 5.3, 5.6], "d_speed": [0.2, 0.25, 0.3, 0.2, 0.25, 0.3, 0.2, 0.25, 0.3, 0.2, 0.25, 0.3], "d_left": [1, 1.2, 1.1, 1.3, 1.2, 1.4, 1.3, 1.5, 1.4, 1.3, 1.5, 1.4], "d_right":[1.5, 1.3, 1.4, 1.2, 1.3, 1.1, 1.2, 1, 1.1, 1.2, 1, 1.1], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":1, "data_samples":3, "s_pos":[10,15,20], "d_pos":[0.1,0.2,0.1], "s_speed":[5,5.5,6], "d_speed":[0.1,0.2,0.1], "d_left":[2,2.1,2.2], "d_right":[1.9,1.8,1.7], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":5, "data_samples":6, "s_pos":[0, 1.2, 2.5, 4.0, 5.5, 7.1], "d_pos":[0.2, 0.25, 0.3, 0.15, 0.1, 0.05], "s_speed":[4.5, 5.0, 5.5, 6.0, 6.5, 7.0], "d_speed":[0.3, 0.35, 0.4, 0.25, 0.2, 0.15], "d_left":[0.8, 0.85, 0.9, 0.95, 1.0, 1.05], "d_right":[1.2, 1.15, 1.1, 1.05, 1.0, 0.95], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":4, "data_samples":7, "s_pos":[10, 15, 20, 25, 30, 35, 40], "d_pos":[0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.0], "s_speed":[4, 5, 6, 5, 6, 5, 6], "d_speed":[0.3, 0.4, 0.2, 0.3, 0.2, 0.4, 0.3], "d_left":[0.8, 1.0, 0.9, 1.1, 1.0, 1.2, 1.1], "d_right":[1.2, 1.0, 1.1, 0.9, 1.0, 0.8, 0.9], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":2, "data_samples":13, "s_pos":[0.0, 0.6, 1.2, 1.8, 2.4, 3.0, 3.6, 4.2, 4.8, 5.4, 6.0, 6.6, 7.2], "d_pos":[0.1, 0.05, -0.05, -0.1, 0.0, 0.05, -0.05, 0.1, 0.0, -0.05, 0.05, 0.1, -0.1], "s_speed":[3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5, 3.5], "d_speed":[0.3, 0.25, 0.2, 0.15, 0.1, 0.05, 0.2, 0.15, 0.1, 0.05, 0.2, 0.15, 0.3], "d_left":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":5, "data_samples":8, "s_pos":[0, 5, 11, 17, 23, 29, 35, 41], "d_pos":[0.1, 0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1], "s_speed":[5, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.2, 0.15, 0.1, 0.05, 0, -0.05, -0.1], "d_left":[1.5, 1.6, 1.55, 1.5, 1.45, 1.4, 1.35, 1.3], "d_right":[1.5, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":3, "data_samples":10, "s_pos":[0, 1.5, 3.1, 4.6, 6.2, 7.8, 9.3, 10.9, 12.4, 14.0], "d_pos":[0.1, 0.05, 0.2, -0.1, 0.05, -0.15, 0.1, 0.0, -0.05, 0.1], "s_speed":[5.2, 5.3, 5.5, 5.6, 5.4, 5.7, 5.5, 5.6, 5.7, 5.4], "d_speed": [0.1, 0.15, 0.2, 0.1, 0.05, 0.1, 0.15, 0.1, 0.05, 0.1], "d_left": [1.2, 1.3, 1.2, 1.3, 1.3, 1.2, 1.3, 1.2, 1.3, 1.2], "d_right":[1.3, 1.2, 1.3, 1.2, 1.2, 1.3, 1.2, 1.3, 1.2, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":1, "data_samples":9, "s_pos":[5,6,7,8,9,10,11,12,13], "d_pos":[0.1,0.2,0.1,0.0,-0.1,0.0,0.1,0.2,0.1], "s_speed":[4,5,5,6,6,5,5,6,5], "d_speed": [0.1,0.2,0.1,0.0,-0.1,0.0,0.1,0.2,0.1], "d_left": [1.5,1.6,1.5,1.4,1.5,1.4,1.5,1.6,1.5], "d_right":[1.5,1.4,1.5,1.6,1.5,1.6,1.5,1.4,1.5], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":4, "data_samples":9, "s_pos":[5, 10, 15, 20, 25, 30, 35, 40, 45], "d_pos":[0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.2, -0.2, 0.1], "s_speed":[4, 5, 5.5, 6, 6.5, 5.5, 5, 4.5, 5], "d_speed":[0.1, -0.1, 0.2, -0.2, 0.3, -0.3, 0.2, -0.2, 0.1], "d_left":[1.5, 1.7, 1.6, 1.8, 1.5, 1.7, 1.6, 1.8, 1.5], "d_right":[1.5, 1.3, 1.4, 1.2, 1.5, 1.3, 1.4, 1.2, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":3, "data_samples":4, "s_pos":[10, 15, 20, 25], "d_pos":[0.1, 0.05, 0, -0.05], "s_speed":[5, 5.5, 6, 6.5], "d_speed": [0.1, 0.1, 0.05, 0], "d_left": [0.8, 0.75, 0.7, 0.65], "d_right":[0.9, 0.85, 0.8, 0.75], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":3, "data_samples":13, "s_pos":[0, 1.5, 3.1, 4.6, 6.2, 7.7, 9.3, 10.8, 12.4, 13.9, 15.5, 17.0, 18.6], "d_pos":[0.1, 0.2, 0.0, -0.1, 0.0, 0.1, 0.0, -0.1, 0.0, 0.1, -0.1, 0.0, 0.1], "s_speed":[5, 5.2, 5.1, 5.3, 5.2, 5.3, 5.1, 5.2, 5.3, 5.2, 5.3, 5.1, 5.2], "d_speed":[0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2], "d_left":[1, 1.2, 1.1, 1.2, 1.3, 1.1, 1.2, 1.3, 1.1, 1.2, 1.3, 1.1, 1.2], "d_right":[1.5, 1.3, 1.4, 1.3, 1.2, 1.4, 1.3, 1.2, 1.4, 1.3, 1.2, 1.4, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":3, "data_samples":7, "s_pos":[0, 5, 10, 15, 20, 25, 30], "d_pos":[0.1, 0.2, -0.1, 0.0, 0.2, -0.2, 0.1], "s_speed":[4, 5, 6, 5, 6, 5, 4], "d_speed":[0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.2], "d_left":[0.8, 0.9, 0.7, 0.8, 0.9, 0.7, 0.8], "d_right":[0.7, 0.8, 0.6, 0.7, 0.8, 0.6, 0.7], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":4, "data_samples":12, "s_pos":[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60], "d_pos":[0.2, 0.1, 0.0, -0.1, 0.1, 0.2, -0.2, 0.0, 0.1, -0.1, 0.0, 0.2], "s_speed":[5.5, 5.6, 5.7, 5.6, 5.5, 5.4, 5.5, 5.6, 5.7, 5.6, 5.5, 5.4], "d_speed":[0.3, 0.2, 0.1, 0.0, 0.2, 0.3, 0.4, 0.1, 0.2, 0.0, 0.1, 0.3], "d_left":[1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2], "d_right":[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":5, "data_samples":11, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50], "d_pos":[0.1, 0.2, 0.1, 0, -0.1, 0, 0.2, -0.1, 0.1, 0, 0.1], "s_speed":[4, 5, 6, 5, 6, 7, 5, 6, 7, 5, 6], "d_speed":[0, 0.1, 0.2, 0.1, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1, 0.2], "d_left":[1.5, 1.4, 1.6, 1.5, 1.7, 1.6, 1.5, 1.4, 1.6, 1.5, 1.6], "d_right":[1.5, 1.6, 1.4, 1.5, 1.3, 1.4, 1.5, 1.6, 1.4, 1.5, 1.4], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":2, "data_samples":7, "s_pos":[0, 1.5, 3, 4.5, 6, 7.5, 9], "d_pos":[0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.2], "s_speed":[5, 5.2, 5.1, 5.3, 5.4, 5.2, 5.1], "d_speed":[0.3, 0.4, 0.2, 0.3, 0.4, 0.3, 0.2], "d_left":[1.5, 1.6, 1.4, 1.5, 1.6, 1.4, 1.5], "d_right":[1.4, 1.3, 1.5, 1.4, 1.3, 1.5, 1.4], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":2, "data_samples":15, "s_pos":[0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30, 32.5, 35], "d_pos":[0.1, 0.05, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, -0.05, -0.1, -0.05, 0, 0.05, 0.1], "s_speed":[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6], "d_speed":[0.1, 0.2, 0.2, 0.1, 0, -0.1, -0.2, -0.1, 0, 0.1, 0.2, 0.1, 0, -0.1, -0.2], "d_left":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "d_right":[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":5, "data_samples":12, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55], "d_pos":[0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.2, -0.2], "s_speed":[4, 5, 6, 5, 6, 5, 4, 5, 6, 5, 4, 5], "d_speed":[0.2, 0.3, 0.4, 0.3, 0.4, 0.3, 0.2, 0.3, 0.4, 0.3, 0.2, 0.3], "d_left":[1.2, 1.1, 1.3, 1.0, 1.2, 1.1, 1.3, 1.0, 1.2, 1.1, 1.3, 1.0], "d_right":[1.0, 1.2, 1.0, 1.3, 1.0, 1.2, 1.0, 1.3, 1.0, 1.2, 1.0, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":2, "data_samples":13, "s_pos":[0, 1.5, 3, 4.5, 6, 7.5, 9, 10.5, 12, 13.5, 15, 16.5, 18], "d_pos":[0.1, 0.05, -0.05, 0.1, -0.1, 0.05, 0, -0.05, 0.05, -0.1, 0.1, 0.05, -0.05], "s_speed":[5, 5.5, 6, 6.5, 7, 6.5, 6, 5.5, 5, 5.5, 6, 6.5, 7], "d_speed":[0.1, 0.2, 0.1, 0.2, 0.15, 0.1, 0.2, 0.1, 0.15, 0.2, 0.1, 0.2, 0.15], "d_left":[1.5, 1.6, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6], "d_right":[1.4, 1.5, 1.6, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5, 1.6, 1.5, 1.4, 1.5], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":5, "data_samples":9, "s_pos":[0, 5, 10, 15, 20, 25, 30, 35, 40], "d_pos":[0.1, -0.1, 0.2, -0.2, 0.1, -0.1, 0.2, -0.2, 0.1], "s_speed":[5, 5.5, 6, 5.8, 6.2, 5.7, 5.9, 6.1, 5.5], "d_speed":[0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1], "d_left":[1.5, 1.4, 1.6, 1.5, 1.4, 1.6, 1.5, 1.4, 1.6], "d_right":[1.4, 1.5, 1.3, 1.4, 1.5, 1.3, 1.4, 1.5, 1.3], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":3, "data_samples":11, "s_pos":[0, 1.5, 3, 4.5, 6, 7.5, 9, 10.5, 12, 13.5, 15], "d_pos":[0.1, 0.05, 0, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, 0], "s_speed":[5, 5.5, 6, 5.5, 5, 5.5, 6, 5.5, 5, 5.5, 6], "d_speed": [0.1, 0.05, 0, -0.05, -0.1, -0.05, 0, 0.05, 0.1, 0.05, 0], "d_left": [1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2], "d_right":[1, 1.1, 1.2, 1.1, 1, 1.1, 1.2, 1.1, 1, 1.1, 1.2], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
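
Each robot-state fixture in this directory (speed.json above, and stop.json and the others below) follows the same schema: every entry holds a window of `data_samples` Frenet-frame samples (`s_pos`, `d_pos`, `s_speed`, `d_speed`), the distances to the left and right track walls (`d_left`, `d_right`), and three status flags (`reversing`, `crashed`, `facing_wall`). As a minimal, hypothetical sketch of how one of these fixtures could be loaded and summarized (the file path and the use of Python's standard json/statistics modules are assumptions for illustration, not part of the test suite):

    import json
    from statistics import mean

    # Load one fixture; the path assumes the script is run from the repository root.
    with open('tests/decision_tester/robot_states/speed.json', 'r') as f:
        states = json.load(f)

    for state in states:
        avg_s_speed = mean(state['s_speed'])                              # average longitudinal speed
        min_wall_gap = min(min(state['d_left']), min(state['d_right']))   # closest approach to a wall
        print(f"t={state['time']}  n={state['data_samples']}  "
              f"avg_s_speed={avg_s_speed:.2f}  min_wall_gap={min_wall_gap:.2f}  "
              f"reversing={state['reversing']}  crashed={state['crashed']}")

These fixtures sit next to decision_tester.py, which presumably loads them as test inputs; the snippet above only illustrates the data layout.
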
/tests/decision_tester/robot_states/stop.json:
--------------------------------------------------------------------------------
1 | [{"time":1, "data_samples":6, "s_pos":[50,50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0], "d_left":[0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[0.9,0.9,0.9,0.9,0.9,0.9], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":1, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [0.6,0.6,0.6,0.6,0.6], "d_right":[1.4,1.4,1.4,1.4,1.4], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":3, "data_samples":8, "s_pos":[50,50,50,50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "d_right":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":2, "data_samples":13, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [2,2,2,2,2,2,2,2,2,2,2,2,2], "d_right":[1,1,1,1,1,1,1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":5, "data_samples":11, "s_pos":[10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_left":[1,1,1,1,1,1,1,1,1,1,1], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":4, "data_samples":14, "s_pos":[50,50,50,50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "d_right":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":1, "data_samples":11, "s_pos":[50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":3, "data_samples":10, "s_pos":[50,50,50,50,50,50,50,50,50,50], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0], "d_left": [2,2,2,2,2,2,2,2,2,2], "d_right":[2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": false, "facing_wall": false},
9 | {"time":5, "data_samples":15, "s_pos":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":1, "data_samples":6, "s_pos":[100,100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0], "d_left": [1,1,1,1,1,1], "d_right":[1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":5, "data_samples":12, "s_pos":[30,30,30,30,30,30,30,30,30,30,30,30], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":2, "data_samples":15, "s_pos":[50,50,50,50,50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], "d_right":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":2, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [0.3,0.3,0.3,0.3,0.3], "d_right":[0.3,0.3,0.3,0.3,0.3], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time":1, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [0.8,0.8,0.8,0.8,0.8], "d_right":[1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":2, "data_samples":7, "s_pos":[15,15,15,15,15,15,15], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":1, "data_samples":12, "s_pos":[100,100,100,100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":5, "data_samples":15, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], "d_right":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":2, "data_samples":15, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9], "d_right":[0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":1, "data_samples":10, "s_pos":[50,50,50,50,50,50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":2, "data_samples":6, "s_pos":[100,100,100,100,100,100], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0], "d_left":[1,1,1,1,1,1], "d_right":[1,1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":2, "data_samples":7, "s_pos":[10,10,10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0.8,0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[0.8,0.8,0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":3, "data_samples":12, "s_pos":[5,5,5,5,5,5,5,5,5,5,5,5], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":3, "data_samples":8, "s_pos":[15,15,15,15,15,15,15,15], "d_pos":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":3, "data_samples":12, "s_pos":[50,50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8], "d_right":[2,2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":3, "data_samples":10, "s_pos":[100,100,100,100,100,100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0], "d_left": [1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/unused/crashed_car.json:
--------------------------------------------------------------------------------
1 | [{"time":1, "data_samples":7, "s_pos":[120,120,120,120,120,120,120], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[0.8,0.8,0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": true, "facing_wall": true},
2 | {"time":2, "data_samples":7, "s_pos":[10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": true},
3 | {"time":2, "data_samples":3, "s_pos":[100,100,100], "d_pos":[-0.2,-0.2,-0.2], "s_speed":[0,0,0], "d_speed":[0,0,0], "d_left":[0.3,0.3,0.3], "d_right":[1.2,1.2,1.2], "reversing": false, "crashed": true, "facing_wall": false},
4 | {"time":2, "data_samples":10, "s_pos":[50,50,50,50,50,50,50,50,50,50], "d_pos":[0.35,0.37,0.36,0.34,0.35,0.36,0.35,0.34,0.36,0.35], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": false},
5 | {"time":5, "data_samples":7, "s_pos":[10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[2.0,2.0,2.0,2.0,2.0,2.0,2.0], "reversing": false, "crashed": true, "facing_wall": false},
6 | {"time":5, "data_samples":15, "s_pos":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.6,0.4,0.5,0.6,0.5,0.4,0.5,0.6,0.5,0.4,0.5,0.6,0.5,0.4], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.3,0.2,0.2,0.3,0.2,0.2,0.3,0.2,0.2,0.3,0.2,0.2,0.3,0.2], "d_right":[0.5,0.4,0.6,0.5,0.4,0.5,0.6,0.5,0.4,0.5,0.6,0.5,0.4,0.5,0.6], "reversing": false, "crashed": true, "facing_wall": false},
7 | {"time":2, "data_samples":12, "s_pos":[100,100,100,100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": true, "facing_wall": true},
8 | {"time":2, "data_samples":12, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8], "reversing": false, "crashed": true, "facing_wall": false},
9 | {"time":1, "data_samples":13, "s_pos":[15,15,15,15,15,15,15,15,15,15,15,15,15], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8], "reversing": false, "crashed": true, "facing_wall": true},
10 | {"time":3, "data_samples":11, "s_pos":[100,100,100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1], "d_right":[2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5,2.5], "reversing": false, "crashed": true, "facing_wall": true},
11 | {"time":5, "data_samples":10, "s_pos":[50,50,50,50,50,50,50,50,50,50], "d_pos":[0.5,0.6,0.55,0.5,0.52,0.53,0.54,0.51,0.56,0.55], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.25,0.22,0.21,0.23,0.19,0.24,0.2,0.26,0.23], "d_right":[2.0,1.95,1.98,1.99,1.97,2.01,1.96,2.0,1.94,1.97], "reversing": false, "crashed": true, "facing_wall": true},
12 | {"time":1, "data_samples":15, "s_pos":[50,50,50,50,50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": true, "facing_wall": false},
13 | {"time":2, "data_samples":4, "s_pos":[10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0], "d_speed": [0,0,0,0], "d_left": [0.3,0.3,0.3,0.3], "d_right":[1.7,1.7,1.7,1.7], "reversing": false, "crashed": true, "facing_wall": true},
14 | {"time":0, "data_samples":5, "s_pos":[150,150,150,150,150], "d_pos":[0.35,0.35,0.35,0.35,0.35], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3], "d_right":[1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": true, "facing_wall": true},
15 | {"time":4, "data_samples":8, "s_pos":[10,10,10,10,10,10,10,10], "d_pos":[0.5,0.6,0.5,0.6,0.5,0.6,0.5,0.6], "s_speed":[0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[0.3,0.35,0.3,0.35,0.3,0.35,0.3,0.35], "d_right":[2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
16 | {"time":5, "data_samples":15, "s_pos":[50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0], "d_pos":[0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8], "s_speed":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "d_speed":[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "d_left":[0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], "d_right":[1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2, 1.2], "reversing": false, "crashed": true, "facing_wall": false},
17 | {"time":1, "data_samples":13, "s_pos":[50,50,50,50,50,50,50,50,50,50,50,50,50], "d_pos":[0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6,0.6], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left": [0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8], "reversing": false, "crashed": true, "facing_wall": true},
18 | {"time":4, "data_samples":7, "s_pos":[10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[2.5,2.5,2.5,2.5,2.5,2.5,2.5], "reversing": false, "crashed": true, "facing_wall": true},
19 | {"time":2, "data_samples":11, "s_pos":[10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": true, "facing_wall": true},
20 | {"time":4, "data_samples":14, "s_pos":[20,20,20,20,20,20,20,20,20,20,20,20,20,20], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": true, "facing_wall": false},
21 | {"time":2, "data_samples":9, "s_pos":[100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": true},
22 | {"time":1, "data_samples":10, "s_pos":[100,100,100,100,100,100,100,100,100,100], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0], "d_left": [0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8,1.8], "reversing": false, "crashed": true, "facing_wall": true},
23 | {"time":4, "data_samples":11, "s_pos":[10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0,10.0], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], "d_speed":[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": false},
24 | {"time":3, "data_samples":13, "s_pos":[5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2,5.2], "d_pos":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2], "d_right":[2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0,2.0], "reversing": false, "crashed": true, "facing_wall": false},
25 | {"time":3, "data_samples":14, "s_pos":[5,5,5,5,5,5,5,5,5,5,5,5,5,5], "d_pos":[-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2,-0.2], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3], "d_right":[0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5], "reversing": false, "crashed": true, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/unused/data_checker.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | # Load the recorded robot states (the path is relative to this directory).
4 | with open('centerline_car.json', 'r') as file:
5 |     robot_states = json.load(file)
6 |
7 | # Print each recorded state, separated by blank lines for readability.
8 | for robot_state in robot_states:
9 |     print(robot_state)
10 |     print("\n")
--------------------------------------------------------------------------------
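
A natural extension of this checker (hypothetical, not part of the repository) is to also verify that each entry is internally consistent, i.e. that every per-sample array really contains `data_samples` elements; the helper name and the example path below are illustrative only:

    import json

    ARRAY_KEYS = ['s_pos', 'd_pos', 's_speed', 'd_speed', 'd_left', 'd_right']

    def check_fixture(path: str) -> None:
        with open(path, 'r') as f:
            states = json.load(f)
        for idx, state in enumerate(states):
            n = state['data_samples']
            for key in ARRAY_KEYS:
                if len(state[key]) != n:
                    print(f"{path}[{idx}]: {key} has {len(state[key])} samples, expected {n}")

    check_fixture('../stop.json')  # illustrative path: the stop.json fixture one directory up
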
/tests/decision_tester/robot_states/unused/improved_stopped_car.json:
--------------------------------------------------------------------------------
1 | [{"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.8,0.8,0.8,0.8,0.8], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
2 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.6,0.6,0.6,0.6,0.6], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
3 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0,0,0,0,0], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
4 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1,1,1,1,1], "d_right":[1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
5 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.0,0.0,0.0,0.0,0.0], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
6 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[2,2,2,2,2], "d_right":[2,2,2,2,2], "reversing": false, "crashed": false, "facing_wall": false},
7 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [0.6,0.6,0.6,0.6,0.6], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
8 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3], "d_right":[1.2,1.2,1.2,1.2,1.2], "reversing":false, "crashed":false, "facing_wall":false},
9 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1,1,1,1,1], "d_right":[1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
10 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.6,0.6,0.6,0.6,0.6], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
11 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.0,1.0,1.0,1.0,1.0], "d_right":[1.0,1.0,1.0,1.0,1.0], "reversing": false, "crashed": false, "facing_wall": false},
12 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
13 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0.0,0.0,0.0,0.0,0.0], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.2,1.2,1.2,1.2,1.2], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
14 | {"time": 5,"data_samples":5, "s_pos":[120,120,120,120,120], "d_pos": [0.1, 0.1, 0.1, 0.1, 0.1], "s_speed": [0, 0, 0, 0, 0], "d_speed": [0, 0, 0, 0, 0], "d_left": [2, 2, 2, 2, 2], "d_right": [2, 2, 2, 2, 2], "reversing": false, "crashed": false, "facing_wall": false},
15 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[-0.2,-0.2,-0.2,-0.2,-0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.2,1.2,1.2,1.2,1.2], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
16 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1,1,1,1,1], "d_right":[1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
17 | {"time":5, "data_samples":5, "s_pos":[50,50,50,50,50], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1,1,1,1,1], "d_right":[1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
18 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.6,0.6,0.6,0.6,0.6], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false},
19 | {"time":5, "data_samples":5, "s_pos":[50,50,50,50,50], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
20 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
21 | {"time":5, "data_samples":5, "s_pos":[100,100,100,100,100], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [1.5,1.5,1.5,1.5,1.5], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": false, "facing_wall": false},
22 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.5,0.5,0.5,0.5,0.5], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0.3,0.3,0.3,0.3,0.3], "d_right":[0.7,0.7,0.7,0.7,0.7], "reversing": false, "crashed": false, "facing_wall": false},
23 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.2,0.2,0.2,0.2,0.2], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1.2,1.2,1.2,1.2,1.2], "d_right":[1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": false, "facing_wall": false},
24 | {"time":5, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[1,1,1,1,1], "d_right":[1,1,1,1,1], "reversing": false, "crashed": false, "facing_wall": false},
25 | {"time":5, "data_samples":5, "s_pos":[12,12,12,12,12], "d_pos":[0.1,0.1,0.1,0.1,0.1], "s_speed":[0,0,0,0,0], "d_speed": [0,0,0,0,0], "d_left": [0.8,0.8,0.8,0.8,0.8], "d_right":[0.8,0.8,0.8,0.8,0.8], "reversing": false, "crashed": false, "facing_wall": false}
26 | ]
--------------------------------------------------------------------------------
/tests/decision_tester/robot_states/unused/wall_crashed_car.json:
--------------------------------------------------------------------------------
1 | [{"time":2, "data_samples":10, "s_pos":[10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0], "d_right":[5,5,5,5,5,5,5,5,5,5], "reversing": false, "crashed": true, "facing_wall": true},
2 | {"time":4, "data_samples":6, "s_pos":[10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0], "d_left":[0,0,0,0,0,0], "d_right":[2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
3 | {"time":3, "data_samples":11, "s_pos":[10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0], "d_right":[2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
4 | {"time":3, "data_samples":3, "s_pos":[10,10,10], "d_pos":[0,0,0], "s_speed":[0,0,0], "d_speed":[0,0,0], "d_left":[0,0,0], "d_right":[2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
5 | {"time":2, "data_samples":4, "s_pos":[10,10,10,10], "d_pos":[0,0,0,0], "s_speed":[0,0,0,0], "d_speed":[0,0,0,0], "d_left":[0,0,0,0], "d_right":[1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": true},
6 | {"time":2, "data_samples":8, "s_pos":[10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0], "d_right":[1,1,1,1,1,1,1,1], "reversing": false, "crashed": true, "facing_wall": true},
7 | {"time":3, "data_samples":4, "s_pos":[100,100,100,100], "d_pos":[0,0,0,0], "s_speed":[0,0,0,0], "d_speed":[0,0,0,0], "d_left":[0,0,0,0], "d_right":[2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
8 | {"time":0, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0,0,0,0,0], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0,0,0,0,0], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": true},
9 | {"time":1, "data_samples":3, "s_pos":[10,10,10], "d_pos":[0,0,0], "s_speed":[0,0,0], "d_speed":[0,0,0], "d_left":[0,0,0], "d_right":[2,2,2], "reversing":false, "crashed":true, "facing_wall":false},
10 | {"time":2, "data_samples":13, "s_pos":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_pos":[-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3,-0.3], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[5,5,5,5,5,5,5,5,5,5,5,5,5], "reversing": false, "crashed": true, "facing_wall": true},
11 | {"time":3, "data_samples":11, "s_pos":[10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0,0], "d_speed": [0,0,0,0,0,0,0,0,0,0,0], "d_left": [0,0,0,0,0,0,0,0,0,0,0], "d_right":[3,3,3,3,3,3,3,3,3,3,3], "reversing": false, "crashed": true, "facing_wall": false},
12 | {"time":4, "data_samples":15, "s_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_pos":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "s_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_speed":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_left":[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "d_right":[1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5], "reversing": false, "crashed": true, "facing_wall": true},
13 | {"time":1, "data_samples":10, "s_pos":[20,20,20,20,20,20,20,20,20,20], "d_pos":[0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0], "d_right":[5,5,5,5,5,5,5,5,5,5], "reversing": false, "crashed": true, "facing_wall": true},
14 | {"time":3, "data_samples":4, "s_pos":[10,10,10,10], "d_pos":[-0.4,-0.4,-0.4,-0.4], "s_speed":[0,0,0,0], "d_speed": [0,0,0,0], "d_left": [0,0,0,0], "d_right":[2,2,2,2], "reversing": false, "crashed": true, "facing_wall": false},
15 | {"time":3, "data_samples":3, "s_pos":[10,10,10], "d_pos":[0,0,0], "s_speed":[0,0,0], "d_speed":[0,0,0], "d_left":[0,0,0], "d_right":[2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
16 | {"time":5, "data_samples":13, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[2,2,2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
17 | {"time":5, "data_samples":9, "s_pos":[10,10,10,10,10,10,10,10,10], "d_pos":[0,-0.05,0,0.1,0.15,0.2,0.25,0.3,0.35], "s_speed":[0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0], "d_right":[3,3,3,3,3,3,3,3,3], "reversing": false, "crashed": true, "facing_wall": true},
18 | {"time":2, "data_samples":13, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[2,2,2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
19 | {"time":2, "data_samples":5, "s_pos":[10,10,10,10,10], "d_pos":[0,0,0,0,0], "s_speed":[0,0,0,0,0], "d_speed":[0,0,0,0,0], "d_left":[0,0,0,0,0], "d_right":[1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": false},
20 | {"time":2, "data_samples":4, "s_pos":[50,50,50,50], "d_pos":[-0.2,-0.2,-0.2,-0.2], "s_speed":[0,0,0,0], "d_speed": [0,0,0,0], "d_left": [0,0,0,0], "d_right":[1.2,1.2,1.2,1.2], "reversing": false, "crashed": true, "facing_wall": true},
21 | {"time":2, "data_samples":14, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1,-0.1], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[2,2,2,2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
22 | {"time":5, "data_samples":9, "s_pos":[10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0], "d_right":[1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5,1.5], "reversing": false, "crashed": true, "facing_wall": false},
23 | {"time":4, "data_samples":7, "s_pos":[100,100,100,100,100,100,100], "d_pos":[0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0], "d_right":[1.2,1.2,1.2,1.2,1.2,1.2,1.2], "reversing": false, "crashed": true, "facing_wall": true},
24 | {"time":1, "data_samples":13, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[2,2,2,2,2,2,2,2,2,2,2,2,2], "reversing": false, "crashed": true, "facing_wall": true},
25 | {"time":4, "data_samples":12, "s_pos":[10,10,10,10,10,10,10,10,10,10,10,10], "d_pos":[0,0,0,0,0,0,0,0,0,0,0,0], "s_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_speed":[0,0,0,0,0,0,0,0,0,0,0,0], "d_left":[0,0,0,0,0,0,0,0,0,0,0,0], "d_right":[5,5,5,5,5,5,5,5,5,5,5,5], "reversing": false, "crashed": true, "facing_wall": true}
26 | ]
--------------------------------------------------------------------------------
/tests/mpc_tester/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/tests/mpc_tester/__init__.py
--------------------------------------------------------------------------------
/train/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/train/__init__.py
--------------------------------------------------------------------------------
/train/config/sft_train.yaml:
--------------------------------------------------------------------------------
1 | # === General Training Configuration ===
2 | training:
3 | out_dir: "train/outputs"
4 | dataset_dir: "train/dataset" # W.r.t. the root directory of the repo
5 | train_bool: true
6 | chat_template: "qwen-2.5" # Options: "phi-3" or "qwen-2.5"
7 | seed: 3407
8 | create_merged_model: true
9 |
10 | # === Model and Tokenizer ===
11 | model:
12 | base_model: "unsloth/Qwen2.5-7B-Instruct"
13 | load_in_4bit: true
14 | max_seq_length: 2048
15 | lora_alpha: 16
16 | lora_rank: 16
17 | target_modules:
18 | - "q_proj"
19 | - "k_proj"
20 | - "v_proj"
21 | - "o_proj"
22 | - "gate_proj"
23 | - "up_proj"
24 | - "down_proj"
25 | lora_dropout: 0
26 | use_gradient_checkpointing: "unsloth"
27 |
28 | # === Trainer Arguments ===
29 | trainer:
30 | max_steps: 150
31 | per_device_train_batch_size: 2
32 | gradient_accumulation_steps: 4
33 | warmup_steps: 5
34 | learning_rate: 0.0002
35 | weight_decay: 0.01
36 | logging_steps: 1
37 | optim: "adamw_8bit"
38 | lr_scheduler_type: "linear"
39 |
40 | # === Tokens ===
41 | tokens:
42 | huggingfacehub: "${HUGGINGFACEHUB_API_TOKEN}"
43 |
--------------------------------------------------------------------------------
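
The config keeps secrets out of the file: `tokens.huggingfacehub` is stored as a `${HUGGINGFACEHUB_API_TOKEN}` placeholder, so whatever consumes the YAML has to substitute the environment variable at load time. A minimal sketch of how this file could be read (assuming PyYAML; the actual loading code lives in `train/sft_train.py` and may differ):

    import os
    import yaml  # PyYAML

    with open('train/config/sft_train.yaml', 'r') as f:
        cfg = yaml.safe_load(f)

    # Expand the ${HUGGINGFACEHUB_API_TOKEN} placeholder from the environment.
    hf_token = os.path.expandvars(cfg['tokens']['huggingfacehub'])

    print(cfg['model']['base_model'])       # unsloth/Qwen2.5-7B-Instruct
    print(cfg['trainer']['learning_rate'])  # 0.0002

With `max_steps: 150`, `per_device_train_batch_size: 2`, and `gradient_accumulation_steps: 4`, a run performs 150 optimizer steps with an effective batch size of 8 sequences per device.
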
/train/data_generator/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/train/data_generator/__init__.py
--------------------------------------------------------------------------------
/train/data_generator/distil_combined_reason_mpc_dataset_generator.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import random
4 | import json
5 | from dotenv import load_dotenv, find_dotenv
6 | from langchain_openai import ChatOpenAI
7 | from langchain_core.documents import Document
8 | from llm_mpc import RaceLLMMPC
9 | from typing import List
10 |
11 | # Load environment variables
12 | load_dotenv(find_dotenv())
13 | OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
14 | HF_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
15 |
16 | # Initialize the LLMs
17 | gpt = ChatOpenAI(model_name='o1-preview', openai_api_key=OPENAI_API_TOKEN)
18 | race_llm = RaceLLMMPC(hf_token=HF_API_TOKEN, openai_token=OPENAI_API_TOKEN, model='o1-preview', no_ROS=False)
19 |
20 | # Function to construct the LLM query prompt for MPC tuning
21 | def build_mpc_prompt(race_llm : RaceLLMMPC, scenario : str, memory_nb : int=0) -> str:
22 | # Generate the LLM MPC query
23 | LLM_query = race_llm.race_mpc_interact(scenario=scenario, memory_nb=memory_nb, prompt_only=True)
24 | return LLM_query
25 |
26 | def build_reasoning_prompt(race_llm : RaceLLMMPC) -> str:
27 | # Generate the LLM Reasoning query
28 | LLM_query = race_llm.race_reasoning(prompt_only=True)
29 | # Augment the query to explain the reasoning
30 | pattern = r"Instruct the car to take the appropriate action very briefly, no explanation needed:\s*I want the car to prioritize: !"
31 | replace_str = "Instruct the car to take the appropriate action very briefly and explain why you chose the action:\nI want the car to prioritize: ! Because of ."
32 |
33 | # Perform the replacement
34 | LLM_query = re.sub(pattern, replace_str, LLM_query)
35 | return LLM_query
36 |
37 | # Function to construct a conversation object
38 | def construct_conversation(human_prompt: str, gpt_response: str) -> dict:
39 | return {
40 | "conversations": [
41 | {"from": "human", "value": human_prompt},
42 | {"from": "gpt", "value": gpt_response}
43 | ]
44 | }
45 |
46 | # Function to load an existing dataset
47 | def load_existing_dataset(filename):
48 | if os.path.exists(filename):
49 | if os.path.getsize(filename) > 0: # Check if the file is not empty
50 | with open(filename, 'r') as f:
51 | try:
52 | return json.load(f)
53 | except json.JSONDecodeError:
54 | print(f"Warning: {filename} is corrupted. Starting with an empty dataset.")
55 | return [] # Return empty list if JSON is corrupted
56 | else:
57 | print(f"Warning: {filename} is empty. Starting with an empty dataset.")
58 | return [] # Return empty list if file is empty
59 | return []
60 |
61 | # Function to save a conversation to the dataset
62 | def save_conversation_to_file(filename: str, conversation: dict):
63 | dataset = load_existing_dataset(filename)
64 | dataset.append(conversation)
65 | with open(filename, 'w') as f:
66 | json.dump(dataset, f, indent=4)
67 |
68 |
69 | # Main dataset generation loop
70 | filename = 'train/dataset/combined_reason_mpc_dataset.json'
71 | for i in range(200):
72 | reasoning_query = build_reasoning_prompt(race_llm=race_llm)
73 | reasoning_llm_out = gpt.invoke(reasoning_query, temperature=1.0).content
74 | # Apply LLMxMPC to get new kinds of data
75 | _, _, _, mpc_query, raw_mpc_llm_out = race_llm.race_mpc_interact(scenario=reasoning_llm_out, memory_nb=random.randint(0, 8))
76 |
77 | combined_query = f"{reasoning_query}\n{mpc_query}"
78 | combined_llm_out = f"{reasoning_llm_out}\n{raw_mpc_llm_out}"
79 |
80 | # Construct the conversation
81 | conversation = construct_conversation(human_prompt=combined_query, gpt_response=combined_llm_out)
82 |
83 | # Save the conversation to the file
84 | save_conversation_to_file(filename, conversation)
85 | print('-----------------------------------')
--------------------------------------------------------------------------------
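
Each loop iteration above appends one record in the ShareGPT-style layout produced by `construct_conversation`, so `combined_reason_mpc_dataset.json` ends up as a JSON array of entries shaped like the following (the angle-bracket strings are placeholders, not real data):

    [
        {
            "conversations": [
                {"from": "human", "value": "<reasoning prompt + MPC-tuning prompt>"},
                {"from": "gpt", "value": "<reasoning answer + MPC parameter answer>"}
            ]
        }
    ]

Because `save_conversation_to_file` rewrites the full file after every sample, partial runs still leave a loadable dataset behind, and `load_existing_dataset` falls back to an empty list if the file is missing, empty, or corrupted.
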
/train/data_generator/distil_mpc_dataset_generator.py:
--------------------------------------------------------------------------------
1 | import os, re, random, json
2 | from dotenv import load_dotenv, find_dotenv
3 | from typing import List
4 | from langchain_openai import ChatOpenAI
5 | from langchain_core.documents import Document
6 | from llm_mpc import RaceLLMMPC
7 |
8 | load_dotenv(find_dotenv())
9 | OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
10 | HF_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
11 |
12 | gpt4o = ChatOpenAI(model_name='gpt-4o', openai_api_key=OPENAI_API_TOKEN)
13 | race_llm = RaceLLMMPC(hf_token=HF_API_TOKEN, openai_token=OPENAI_API_TOKEN, model='gpt-4o', no_ROS=True)
14 |
15 | def randomize_parameters(input_str):
16 | # Regex pattern to find the min, max, and default values
17 | pattern = r'(\w+)\s*(-?\d+(?:\.\d+)?),\s*(-?\d+(?:\.\d+)?),\s*(-?\d+(?:\.\d+)?)'
18 |
19 | def randomize_match(match):
20 | param_name = match.group(1)
21 | min_val = float(match.group(2))
22 | max_val = float(match.group(3))
23 |
24 | # For v_min and v_max, ensure they allow for negative velocities
25 | if param_name in ["v_min", "v_max"]:
26 | # Randomize min and max, ensuring min < max
27 | new_min = round(random.uniform(min_val, 0), 1)
28 | new_max = round(random.uniform(new_min, max_val), 1)
29 |
30 | else:
31 | # Randomize min and max, ensuring min < max
32 | new_min = round(random.uniform(min_val, max_val), 1)
33 | new_max = round(random.uniform(new_min, max_val), 1)
34 |
35 | # Randomize default, ensuring it is between new_min and new_max
36 | new_default = round(random.uniform(new_min, new_max), 1)
37 |
38 | return f"{param_name} {new_min}, {new_max}, {new_default}"
39 |
40 | # Substitute using the randomize_match function
41 | randomized_str = re.sub(pattern, randomize_match, input_str)
42 |
43 | return randomized_str
44 |
45 | def build_prompt(race_llm : RaceLLMMPC, base_memory : str, scenario : str, memory_nb : int=0) -> str:
46 | # Query the index and pass the result to the command chain for processing
47 | RAG_query = f"""
48 | Task: {scenario}\n
49 | """
50 |     # Perform RAG manually
51 |     # Retrieve docs from the RAG
52 | rag_sources: List[Document] = race_llm.vector_index.vectorstore.search(query=RAG_query, search_type='similarity', k=memory_nb) if memory_nb > 0 else []
53 | rag_sources = [{'meta': doc.metadata, 'content': doc.page_content} for doc in rag_sources]
54 | LLM_query = f"""
55 | You are an AI assistant helping to tune the parameters of an MPC controller for an autonomous racing car. Below is the context and the task:
56 |
57 | ## Context
58 | 1. **Scenario**: {scenario}
59 | 2. **Base Memory and Cost Formulation**: {base_memory}
60 |
61 | ## Task
62 | Adapt the tuneable parameters of the MPC so that the car achieves the following: **{scenario}**.
63 |
64 | ## Constraints
65 | - Use the min and max ranges of the parameters provided in the base memory.
66 | - Focus on the primary task and utilize the chat history if necessary.
67 | - Only consider relevant RAG information and quote it briefly in your explanation.
68 |
69 | ## RAG Information
70 | - !!! Not all memories are relevant to the task. Select the most relevant ones. !!!
71 | - **Memory Entries**:
72 | {rag_sources}
73 |
74 | ## Expected Output Format
75 | Always strictly return your answers in the following format (no other dicts in your response and no comments in the params part!):
76 | new_mpc_params = {{
77 | 'param1': new_value1,
78 | 'param2': new_value2,
79 | ...
80 | }}
81 |
82 | Explanation:
83 | """
84 | return LLM_query
85 |
86 | def construct_conversation(human_prompt, gpt_response):
87 | return {
88 | "conversations": [
89 | {
90 | "from": "human",
91 | "value": human_prompt
92 | },
93 | {
94 | "from": "gpt",
95 | "value": gpt_response
96 | }
97 | ]
98 | }
99 |
100 | def load_existing_dataset(filename):
101 | if os.path.exists(filename):
102 | if os.path.getsize(filename) > 0: # Check if the file is not empty
103 | with open(filename, 'r') as f:
104 | try:
105 | return json.load(f)
106 | except json.JSONDecodeError:
107 | print(f"Warning: {filename} is corrupted. Starting with an empty dataset.")
108 | return [] # Return empty list if JSON is corrupted
109 | else:
110 | print(f"Warning: {filename} is empty. Starting with an empty dataset.")
111 | return [] # Return empty list if file is empty
112 | return []
113 |
114 | def save_conversation_to_file(filename, conversation):
115 | # Load existing data
116 | dataset = load_existing_dataset(filename)
117 |
118 | # Append the new conversation
119 | dataset.append(conversation)
120 |
121 | # Write updated dataset to file
122 | with open(filename, 'w') as f:
123 | json.dump(dataset, f, indent=4)
124 |
125 | filename = 'train/dataset/gpt_distil_mpc_dataset.json'
126 | scenarios = [
127 | "Complete a lap around the track in the shortest time possible",
128 | "Drive as safely as possible around the track",
129 | "Stay away from the walls while driving around the track",
130 | "Reverse the car around the track",
131 | "Do not exceed speeds of 10 km/h while driving around the track",
132 | "Drive around the track without using the brakes",
133 | ]
134 | for i in range(len(scenarios)):
135 | scenario = scenarios[i]
136 | for j in range(3):
137 | base_mem = randomize_parameters(input_str=race_llm.base_memory)
138 | last_rephrase = scenario
139 | for k in range(10):
140 | rephrased_scenario = gpt4o.invoke(f'Rephrase this:\n {last_rephrase}').content
141 | last_rephrase = rephrased_scenario
142 | llm_query = build_prompt(race_llm=race_llm, base_memory=base_mem, scenario=rephrased_scenario, memory_nb=k)
143 | print('LLM_query', llm_query)
144 | llm_out = gpt4o.invoke(llm_query).content
145 | print('LLM_out', llm_out)
146 | # Construct the conversation
147 |
148 | conversation = construct_conversation(human_prompt=llm_query, gpt_response=llm_out)
149 |
150 | # Save the conversation to the file
151 | save_conversation_to_file(filename, conversation)
152 | print('-----------------------------------')
153 |
--------------------------------------------------------------------------------
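
The `randomize_parameters` helper above rewrites every `name min, max, default` triplet it finds in the base memory with freshly sampled bounds; for `v_min` and `v_max` the new minimum is drawn between the original minimum and 0, so negative (reversing) velocities stay available, and the original default (the fourth regex group) is discarded in favor of a new value drawn between the new bounds. A hypothetical before/after (the parameter names and the `name min, max, default` layout are inferred from the regex, not quoted from the actual base-memory prompt):

    sample = "v_min -5.0, 10.0, 0.0 and a_max 1.0, 18.0, 10.0"
    print(randomize_parameters(sample))
    # Possible output (the numbers are re-sampled on every call):
    # v_min -3.2, 7.4, 1.1 and a_max 4.6, 15.9, 9.8
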
/train/data_generator/distil_randomized_reasoning.py:
--------------------------------------------------------------------------------
1 | import os, re, random, json
2 | from dotenv import load_dotenv, find_dotenv
3 | from langchain_openai import ChatOpenAI
4 | from llm_mpc import RaceLLMMPC
5 |
6 | load_dotenv(find_dotenv())
7 | OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
8 | HF_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
9 |
10 | gpt4o = ChatOpenAI(model_name='gpt-4o', openai_api_key=OPENAI_API_TOKEN)
11 | gpt4omini = ChatOpenAI(model_name='gpt-4o-mini', openai_api_key=OPENAI_API_TOKEN)
12 |
13 | def construct_conversation(human_prompt, gpt_response):
14 | return {
15 | "conversations": [
16 | {
17 | "from": "human",
18 | "value": human_prompt
19 | },
20 | {
21 | "from": "gpt",
22 | "value": gpt_response
23 | }
24 | ]
25 | }
26 |
27 | # Loads Prompt with hints
28 | def load_prompt(prompt_type) -> str:
29 | print('Loading prompt from CWD:', os.getcwd())
30 | if 'reasoning' in prompt_type:
31 | hints_dir = os.path.join('.', 'prompts/reasoning_hints.txt')
32 | with open(hints_dir, 'r') as f:
33 | reasoning_hints = f.read()
34 | return reasoning_hints
35 | elif 'synthetic' in prompt_type:
36 | hints_dir = os.path.join('.', 'prompts/example_synthetic.txt')
37 | with open(hints_dir, 'r') as f:
38 | synthetic_hints = f.read()
39 | return synthetic_hints
40 | else:
41 | raise ValueError(f"Prompt type {prompt_type} not recognized. Please use 'reasoning' or 'synthetic'.")
42 |
43 | def load_existing_dataset(filename):
44 | if os.path.exists(filename):
45 | if os.path.getsize(filename) > 0: # Check if the file is not empty
46 | with open(filename, 'r') as f:
47 | try:
48 | return json.load(f)
49 | except json.JSONDecodeError:
50 | print(f"Warning: {filename} is corrupted. Starting with an empty dataset.")
51 | return [] # Return empty list if JSON is corrupted
52 | else:
53 | print(f"Warning: {filename} is empty. Starting with an empty dataset.")
54 | return [] # Return empty list if file is empty
55 | return []
56 |
57 | def save_conversation_to_file(filename, conversation):
58 | # Load existing data
59 | dataset = load_existing_dataset(filename)
60 |
61 | # Append the new conversation
62 | dataset.append(conversation)
63 |
64 | # Write updated dataset to file
65 | with open(filename, 'w') as f:
66 | json.dump(dataset, f, indent=4)
67 |
68 | def randomize_text_parameters(input_str, lower_bound=0.6, upper_bound=1.8):
69 | # Pattern to match numbers followed by 'm' or 'm/s'
70 | pattern = r'(\d+(?:\.\d+)?)(m/s|m)'
71 |
72 | rand_frac = random.uniform(lower_bound, upper_bound)
73 |
74 | def randomize_match(match):
75 | original_value = float(match.group(1))
76 | unit = match.group(2)
77 |
78 | # Apply % randomization
79 | new_value = round(original_value * rand_frac, 2)
80 |
81 | return f"{new_value}{unit}"
82 |
83 | # Replace numerical values in the text with randomized ones
84 | randomized_str = re.sub(pattern, randomize_match, input_str)
85 | return randomized_str
86 |
87 | def build_analysis_prompt(robot_state : str, randomized_hints : str) -> str:
88 | prompt = f"""
89 | {robot_state}\n
90 | Here are some hints to help you analyze the state: {randomized_hints}\n
91 |
92 | Focus on briefly identifying the car's current state by referring to the provided data (e.g., position, speed, distance to walls, and any safety concerns).
93 | - State which wall the car is closer to and whether it is too close to that wall.
94 | - State the average s-velocity of the car and whether this is fast, slow or nominal.
95 | - State the average d-velocity of the car and whether this is fast, slow or nominal.
96 | - State the magnitude of the car's positional oscillations and whether they are nominal or too high.
97 | - State the magnitude of the car's velocity oscillations and whether they are nominal or too high.
98 | - State if the car is facing the wall.
99 | - State if the car has crashed.
100 | - State if the car is reversing, driving forward, or stopped.
101 | """
102 | return prompt
103 |
104 | def build_decision_prompt(human_prompt : str, robot_state : str, randomized_hints : str) -> str:
105 | # Query the index and pass the result to the command chain for processing
106 | prompt = f"""
107 | The human wants the car to drive: {human_prompt}\n
108 | Check if the car is adhering to the human's command or not.\n
109 |
110 | {robot_state}\n
111 | Here are some hints to help you analyze the state: {randomized_hints}\n
112 |
113 | Briefly identify the car's current state by referring to the provided data (e.g., position, speed, distance to walls, and any safety concerns from the hints).
114 |
115 | Decide if the car is adhering to the human's desired driving style of: {human_prompt} by choosing from the two actions:\n
116 |
117 | a) Continue: The car is driving as expected and should continue driving in the same manner.
118 | b) Correct: The car is not driving as expected and state how the car should correct its driving style.
119 |
120 | Strictly adhere to the reply format:
121 |
122 | State Recap:
123 | Action:
124 |
125 | """
126 | return prompt
127 |
128 | def distil_randomized_state_analysis(dataset='train/dataset/gpt_distil_reason_dataset.json', out_filename='train/dataset/randomized_state_analysis.json'):
129 | og_hints = load_prompt('reasoning')
130 | dataset = load_existing_dataset(dataset)
131 | for conversation in dataset:
132 | state_with_query = conversation['conversations'][0]['value']
133 | robot_state = state_with_query.split("\n\n\n Here are some hints to help you reason about the car's current state:")[0]
134 | randomized_hints = randomize_text_parameters(og_hints)
135 |
136 | llm_prompt = build_analysis_prompt(robot_state, randomized_hints)
137 |
138 | state_analyze = gpt4o.invoke(llm_prompt).content
139 |
140 | conversation = construct_conversation(human_prompt=llm_prompt, gpt_response=state_analyze)
141 | save_conversation_to_file(out_filename, conversation)
142 | print(conversation)
143 | print('-----------------------------------')
144 |
145 | def distil_randomized_decision_making(dataset='train/dataset/excluded/gpt_distil_reason_dataset.json', out_filename='train/dataset/randomized_decision_making.json'):
146 | og_hints = load_prompt('reasoning')
147 | dataset = load_existing_dataset(dataset)
148 |
149 | # scenarios = [
150 | # "Stop the car!",
151 | # "Drive as safely as possible around the track",
152 | # "Stay away from the walls while driving around the track",
153 | # "Reverse the car around the track",
154 | # "Do not exceed speeds of 4m/s while driving around the track",
155 | # "I want the car to drive as smoothly as possible",
156 | # ]
157 | scenarios = [
158 | "Drive at speeds above 4.75m/s!",
159 | "Drive at speeds between 2.5m/s and 5.5m/s!",
160 | ]
161 |
162 | for conversation in dataset:
163 | state_with_query = conversation['conversations'][0]['value']
164 | robot_state = state_with_query.split("\n\n\n Here are some hints to help you reason about the car's current state:")[0]
165 |
166 | for scenario in scenarios:
167 | randomized_hints = randomize_text_parameters(og_hints)
168 |
169 | # Randomize number values in the scenario
170 | if 'm/s' in scenario:
171 | scenario = randomize_text_parameters(scenario, lower_bound=0.8, upper_bound=1.9)
172 |
173 | rephrased_human_prompt = gpt4omini.invoke(f'Rephrase this:\n {scenario}').content
174 |
175 | llm_prompt = build_decision_prompt(rephrased_human_prompt, robot_state, randomized_hints)
176 |
177 | decision_making = gpt4o.invoke(llm_prompt).content
178 |
179 | conversation = construct_conversation(human_prompt=llm_prompt, gpt_response=decision_making)
180 | save_conversation_to_file(out_filename, conversation)
181 | print(conversation)
182 | print('-----------------------------------')
183 |
184 |
185 | if __name__ == '__main__':
186 | #distil_randomized_state_analysis()
187 | distil_randomized_decision_making()
--------------------------------------------------------------------------------
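Note: a quick usage sketch of randomize_text_parameters defined above, assuming the function is in scope. The hint string and printed output are illustrative, but the behavior follows from the function: one rand_frac is drawn per call, so every quantity ending in 'm' or 'm/s' is scaled by the same factor.

# Illustrative input; the real hints come from prompts/reasoning_hints.txt.
hints = "Keep at least 0.5m to the walls and target speeds around 3.0m/s."
print(randomize_text_parameters(hints, lower_bound=0.8, upper_bound=1.2))
# e.g. "Keep at least 0.55m to the walls and target speeds around 3.3m/s." (shared factor of 1.1)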
/train/data_generator/distil_reason_dataset_generator.py:
--------------------------------------------------------------------------------
1 | import os, re, random, json
2 | from dotenv import load_dotenv, find_dotenv
3 | from langchain_openai import ChatOpenAI
4 | from llm_mpc import RaceLLMMPC
5 |
6 | load_dotenv(find_dotenv())
7 | OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
8 | HF_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
9 |
10 | gpt4o = ChatOpenAI(model_name='gpt-4o', openai_api_key=OPENAI_API_TOKEN)
11 | gpt4omini = ChatOpenAI(model_name='gpt-4o-mini', openai_api_key=OPENAI_API_TOKEN)
12 |
13 | def build_mpc_prompt(race_llm : RaceLLMMPC, scenario : str, memory_nb : int=0) -> str:
14 | # Generate the LLM MPC query
15 | LLM_query = race_llm.race_mpc_interact(scenario=scenario, memory_nb=memory_nb, prompt_only=True)
16 | return LLM_query
17 |
18 | def build_reasoning_prompt(race_llm : RaceLLMMPC) -> str:
19 | # Generate the LLM Reasoning query
20 | LLM_query = race_llm.race_reasoning(prompt_only=True)
21 | # Augment the query to explain the reasoning
22 | pattern = r"Instruct the car to take the appropriate action very briefly, no explanation needed:\s*I want the car to prioritize: !"
23 | replace_str = "Instruct the car to take the appropriate action very briefly and explain why you chose the action:\nI want the car to prioritize: ! Because of ."
24 |
25 | # Perform the replacement
26 | LLM_query = re.sub(pattern, replace_str, LLM_query)
27 | return LLM_query
28 |
29 | def construct_conversation(human_prompt, gpt_response):
30 | return {
31 | "conversations": [
32 | {
33 | "from": "human",
34 | "value": human_prompt
35 | },
36 | {
37 | "from": "gpt",
38 | "value": gpt_response
39 | }
40 | ]
41 | }
42 |
43 | def load_existing_dataset(filename):
44 | if os.path.exists(filename):
45 | if os.path.getsize(filename) > 0: # Check if the file is not empty
46 | with open(filename, 'r') as f:
47 | try:
48 | return json.load(f)
49 | except json.JSONDecodeError:
50 | print(f"Warning: {filename} is corrupted. Starting with an empty dataset.")
51 | return [] # Return empty list if JSON is corrupted
52 | else:
53 | print(f"Warning: {filename} is empty. Starting with an empty dataset.")
54 | return [] # Return empty list if file is empty
55 | return []
56 |
57 | def save_conversation_to_file(filename, conversation):
58 | # Load existing data
59 | dataset = load_existing_dataset(filename)
60 |
61 | # Append the new conversation
62 | dataset.append(conversation)
63 |
64 | # Write updated dataset to file
65 | with open(filename, 'w') as f:
66 | json.dump(dataset, f, indent=4)
67 |
68 | def distil_reasoning_from_ros_mpc(filename='train/dataset/gpt_distil_reason_dataset.json'):
69 | race_llm = RaceLLMMPC(hf_token=HF_API_TOKEN, openai_token=OPENAI_API_TOKEN, model='gpt-4o', no_ROS=False)
70 |
71 | for i in range(200):
72 | reasoning_query = build_reasoning_prompt(race_llm=race_llm)
73 | reasoning_llm_out = gpt4o.invoke(reasoning_query).content
74 | print('Reasoning_query', reasoning_query, 'Reasoning_llm_out', reasoning_llm_out)
75 | # Apply LLMxMPC to get new kinds of data
76 | race_llm.race_mpc_interact(scenario=reasoning_llm_out, memory_nb=random.randint(0, 8))
77 |
78 | if 'Reverse' in reasoning_llm_out:
79 | # Construct the conversation
80 | conversation = construct_conversation(human_prompt=reasoning_query, gpt_response=reasoning_llm_out)
81 |
82 | # Save the conversation to the file
83 | save_conversation_to_file(filename, conversation)
84 | print('-----------------------------------')
85 |
86 | def distil_reasoning_from_dataset(dataset='train/dataset/synthetic_robot_data.json', out_filename='train/dataset/gpt_distil_reasonfromdata_dataset.json'):
87 | dataset = load_existing_dataset(dataset)
88 | dataset = dataset[-30:] # Only use the last 30 conversations
89 | for conversation in dataset:
90 | reasoning_query = conversation['conversations'][1]['value']
91 | llm_prompt = f"""Analyze the provided data from an autonomous racing car driving on a track in the Frenet coordinate frame.
92 | Focus on identifying the car's current state by referring to the provided data (e.g., position, speed, distance to walls, and any safety concerns)
93 | and suggest the most appropriate action, with a brief explanation of why, by
94 | reasoning about the provided state:
95 | - State which wall the car is closer to and how close it is.
96 | - State the average s-velocity of the car and whether this is fast, slow or nominal.
97 | - State the average d-velocity of the car and whether this is fast, slow or nominal.
98 | - State if the car is facing the wall.
99 | - State if the car has crashed.
100 |
101 | Also be aware that reversing is a must if the car has crashed and is facing the wall!!!!
102 |
103 | Then based on this explain why you chose the action you did.\n\n {reasoning_query}\n"""
104 | reasoning_llm_out = gpt4o.invoke(llm_prompt).content
105 | rephrased_reasoning_query = gpt4omini.invoke(f"Slightly rephrase the prompt but leave the values as they are: {reasoning_query}").content
106 |
107 | conversation = construct_conversation(human_prompt=rephrased_reasoning_query, gpt_response=reasoning_llm_out)
108 | save_conversation_to_file(out_filename, conversation)
109 | print(conversation)
110 | print('-----------------------------------')
111 |
112 | if __name__ == '__main__':
113 | #distil_reasoning_from_ros_mpc()
114 | distil_reasoning_from_dataset()
--------------------------------------------------------------------------------
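Note: to illustrate the regex augmentation in build_reasoning_prompt, a small self-contained sketch follows. The sample_query string is a stand-in for what race_reasoning(prompt_only=True) would return, not actual output.

import re

pattern = r"Instruct the car to take the appropriate action very briefly, no explanation needed:\s*I want the car to prioritize: !"
replace_str = "Instruct the car to take the appropriate action very briefly and explain why you chose the action:\nI want the car to prioritize: ! Because of ."

sample_query = ("<robot state summary>\n"
                "Instruct the car to take the appropriate action very briefly, no explanation needed: "
                "I want the car to prioritize: !")
# The substitution swaps the instruction tail so the teacher model must also justify its choice.
print(re.sub(pattern, replace_str, sample_query))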
/train/data_generator/synthetic_data_generator.py:
--------------------------------------------------------------------------------
1 | import os, re, random, json
2 | from dotenv import load_dotenv, find_dotenv
3 | from langchain_openai import ChatOpenAI
4 | from llm_mpc import RaceLLMMPC
5 |
6 | load_dotenv(find_dotenv())
7 | OPENAI_API_TOKEN = os.getenv("OPENAI_API_TOKEN")
8 | HF_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
9 |
10 | gpt4o = ChatOpenAI(model_name='gpt-4o', openai_api_key=OPENAI_API_TOKEN)
11 | gpt4omini = ChatOpenAI(model_name='gpt-4o-mini', openai_api_key=OPENAI_API_TOKEN)
12 |
13 | def construct_conversation(human_prompt, gpt_response):
14 | return {
15 | "conversations": [
16 | {
17 | "from": "human",
18 | "value": human_prompt
19 | },
20 | {
21 | "from": "gpt",
22 | "value": gpt_response
23 | }
24 | ]
25 | }
26 |
27 | def load_existing_dataset(filename):
28 | if os.path.exists(filename):
29 | if os.path.getsize(filename) > 0: # Check if the file is not empty
30 | with open(filename, 'r') as f:
31 | try:
32 | return json.load(f)
33 | except json.JSONDecodeError:
34 | print(f"Warning: {filename} is corrupted. Starting with an empty dataset.")
35 | return [] # Return empty list if JSON is corrupted
36 | else:
37 | print(f"Warning: {filename} is empty. Starting with an empty dataset.")
38 | return [] # Return empty list if file is empty
39 | return []
40 |
41 | def save_conversation_to_file(filename, conversation):
42 | # Load existing data
43 | dataset = load_existing_dataset(filename)
44 |
45 | # Append the new conversation
46 | dataset.append(conversation)
47 |
48 | # Write updated dataset to file
49 | with open(filename, 'w') as f:
50 | json.dump(dataset, f, indent=4)
51 |
52 | def load_prompt(prompt_type) -> str:
53 | if 'reasoning' in prompt_type:
54 | hints_dir = os.path.join('./', 'prompts/reasoning_hints.txt')
55 | with open(hints_dir, 'r') as f:
56 | reasoning_hints = f.read()
57 | return reasoning_hints
58 | elif 'synthetic' in prompt_type:
59 | hints_dir = os.path.join('./', 'prompts/example_synthetic.txt')
60 | with open(hints_dir, 'r') as f:
61 | synthetic_hints = f.read()
62 | return synthetic_hints
63 | else:
64 | raise ValueError(f"Prompt type {prompt_type} not recognized. Please use 'reasoning' or 'synthetic'.")
65 |
66 | def generate_synthetic_race_data(filename='train/dataset/synthetic_robot_data.json', driving_style="Centered on the centerline without crashes"):
67 | reasoning_hints = load_prompt(prompt_type='reasoning')
68 | synth_example = load_prompt(prompt_type='synthetic')
69 | for i in range(25):
70 | data_time = random.randint(1, 5)
71 | data_samples = random.randint(max(3, data_time), 15)
72 | prompt = f"""
73 | I want you to generate synthetic data for a robot racing car. The car is currently driving on a track; the available data is in the Frenet coordinate frame, with units in meters and meters per second.
74 | The racing line is a minimum-curvature trajectory that optimizes lap time; it is not the centerline of the track.
75 |
76 | The data has to represent the car driving: {driving_style}.
77 |
78 | Here are some hints of nominal driving behavior: \n{reasoning_hints}\n\n
79 |
80 | The crash_bool and the facing_wall bool are single values, while the rest of the data is sampled for {data_time} seconds in {data_samples} samples and represented as a list of {data_samples} entries.
81 |
82 | Here is an example output: \n{synth_example}\n
83 |
84 | STRICTLY ADHERE TO THIS TEMPLATE AND OUTPUT IT AS TEXT:
85 | The data has been sampled for {data_time} seconds in {data_samples} samples.
86 | - The car's position along the racing line is given by the s-coordinate: s_pos\n
87 | - The car's lateral deviation from the racing line is given by the d-coordinate: d_pos\n
88 | - The car's speed along the racing line is given by the s-speed: s_speed\n
89 | - The car's speed perpendicular to the racing line is given by the d-speed: d_speed\n
90 | - The distance to the left wall is: d_left\n
91 | - The distance to the right wall is: d_right\n
92 | - Bool if the car has crashed: crashed_bool\n
93 | - Bool if the car is facing the wall: facing_wall\n
94 | Explanation: Short analysis of why the data represents the car driving: {driving_style}\n
95 | """
96 | synth_data = gpt4o.invoke(prompt).content
97 |
98 | human_base_prompt = f"Generate synthetic data for a robot racing car driving on a track in the Frenet coordinate frame. The data has to represent the car driving: {driving_style}."
99 | human_prompt = gpt4o.invoke(f"Rephrase this sentence: {human_base_prompt}").content
100 | conversation = construct_conversation(human_prompt=human_prompt, gpt_response=synth_data)
101 | save_conversation_to_file(filename, conversation)
102 | print(conversation)
103 | print('-----------------------------------')
104 |
105 | def generate_dummy_math_logic(filename='train/dataset/dummy_math.json'):
106 | dumb_boi = "Yes, 0.74 is within the range of 5-7. To explain why, we need to understand that the question seems to be asking whether the number 0.74 falls between the numbers 5 and 7 on a number line. Since 0.74 is less than 1 and both 5 and 7 are greater than 1, it is clear that 0.74 is not between 5 and 7. The number 0.74 is actually closer to 0 than to 5 or 7 on the number line."
107 | for i in range(10):
108 | prompt = f"I have a small LLM that struggles with the concept of determining if a value is within a given range. Look at it's output: {dumb_boi}. Give me a nice question for it to solve and randomize the values."
109 | dummy_math = gpt4omini.invoke(prompt).content
110 | smart_math = gpt4o.invoke(dummy_math).content
111 |
112 | conversation = construct_conversation(human_prompt=dummy_math, gpt_response=smart_math)
113 | save_conversation_to_file(filename, conversation)
114 | print(conversation)
115 | print('-----------------------------------')
116 |
117 | if __name__ == '__main__':
118 | driving_style = input("The data has to represent the car driving...")
119 | generate_synthetic_race_data(driving_style=driving_style)
120 |
121 | #generate_dummy_math_logic()
--------------------------------------------------------------------------------
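Note: load_prompt and the dataset paths above are resolved relative to the current working directory, so the script is assumed to run from the repository root. A hedged invocation sketch (the driving-style string is an arbitrary placeholder):

# Run from the repository root so 'prompts/...' and 'train/dataset/...' resolve.
generate_synthetic_race_data(
    filename='train/dataset/synthetic_robot_data.json',
    driving_style='Hugging the right wall at low speed',  # placeholder style
)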
/train/dataset/excluded/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ForzaETH/LLMxRobot/07d156a596af0bc77366ae6a9c76294ac5f1763d/train/dataset/excluded/.gitkeep
--------------------------------------------------------------------------------
/train/dataset/excluded/dummy_math.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "conversations": [
4 | {
5 | "from": "human",
6 | "value": "Sure! Here's a question that will test the LLM's ability to determine if a value is within a specified range. I've randomized the values for you:\n\n**Question:** Is the number 3.42 within the range of 2.5 to 4.5? \n\nPlease explain your reasoning for your answer."
7 | },
8 | {
9 | "from": "gpt",
10 | "value": "Yes, the number 3.42 is within the range of 2.5 to 4.5.\n\nHere's the reasoning: \n\n- The lower bound of the range is 2.5, and the upper bound is 4.5.\n- To determine if 3.42 is within this range, we need to check if it is greater than or equal to the lower bound and less than or equal to the upper bound.\n\nSince 3.42 is greater than 2.5 and less than 4.5, it falls within the specified range."
11 | }
12 | ]
13 | },
14 | {
15 | "conversations": [
16 | {
17 | "from": "human",
18 | "value": "Sure! Here's a question that you can use to test your LLM's understanding of ranges, with randomized values:\n\n---\n\n**Question:** Determine if the number 3.25 is within the range of 2.5 to 4.5. Explain your reasoning.\n\n---\n\nYou can change the values (the number and the range) to create more variations. For example:\n\n1. **Question:** Determine if the number 6.8 is within the range of 4 to 8. Explain your reasoning.\n2. **Question:** Determine if the number 1.5 is within the range of 0 to 1. Explain your reasoning.\n3. **Question:** Determine if the number 9 is within the range of 7 to 10. Explain your reasoning.\n\nFeel free to shuffle the numbers and ranges to create your desired questions!"
19 | },
20 | {
21 | "from": "gpt",
22 | "value": "Sure! Let's go through each of the questions provided and determine if the given number is within the specified range:\n\n1. **Question:** Determine if the number 3.25 is within the range of 2.5 to 4.5. Explain your reasoning.\n\n **Answer:** Yes, the number 3.25 is within the range of 2.5 to 4.5. This is because 3.25 is greater than 2.5 and less than 4.5. Therefore, it falls within the specified range.\n\n2. **Question:** Determine if the number 6.8 is within the range of 4 to 8. Explain your reasoning.\n\n **Answer:** Yes, the number 6.8 is within the range of 4 to 8. This is because 6.8 is greater than 4 and less than 8. Thus, it lies within the given range.\n\n3. **Question:** Determine if the number 1.5 is within the range of 0 to 1. Explain your reasoning.\n\n **Answer:** No, the number 1.5 is not within the range of 0 to 1. This is because 1.5 is greater than the upper limit of 1. Therefore, it is outside the specified range.\n\n4. **Question:** Determine if the number 9 is within the range of 7 to 10. Explain your reasoning.\n\n **Answer:** Yes, the number 9 is within the range of 7 to 10. This is because 9 is greater than 7 and less than 10. Therefore, it fits within the defined range.\n\nFeel free to modify the numbers and ranges to create additional variations!"
23 | }
24 | ]
25 | },
26 | {
27 | "conversations": [
28 | {
29 | "from": "human",
30 | "value": "Sure! Here\u2019s a question you can use to test the LLM's understanding of ranges, with randomized values:\n\n**Question: Is the number 3.14 within the range of 2.5 to 4.5? Explain your reasoning.**\n\nYou can change the values for further testing, for example:\n\n**Question: Is the number 8.2 within the range of 6.0 to 9.0? Explain your reasoning.**\n\nFeel free to replace the numbers to keep it randomized!"
31 | },
32 | {
33 | "from": "gpt",
34 | "value": "Certainly! Let's evaluate the examples you provided.\n\n**Example 1:**\nIs the number 3.14 within the range of 2.5 to 4.5?\n\n**Explanation:**\nTo determine if 3.14 is within the range of 2.5 to 4.5, we need to check if 3.14 is greater than or equal to the lower bound (2.5) and less than or equal to the upper bound (4.5).\n\n- 3.14 is greater than 2.5.\n- 3.14 is less than 4.5.\n\nSince both conditions are satisfied, 3.14 is within the range of 2.5 to 4.5.\n\n**Example 2:**\nIs the number 8.2 within the range of 6.0 to 9.0?\n\n**Explanation:**\nTo determine if 8.2 is within the range of 6.0 to 9.0, we need to check if 8.2 is greater than or equal to the lower bound (6.0) and less than or equal to the upper bound (9.0).\n\n- 8.2 is greater than 6.0.\n- 8.2 is less than 9.0.\n\nSince both conditions are satisfied, 8.2 is within the range of 6.0 to 9.0.\n\nYou can replace the numbers with any other values to create new scenarios for testing."
35 | }
36 | ]
37 | },
38 | {
39 | "conversations": [
40 | {
41 | "from": "human",
42 | "value": "Sure! Here\u2019s a question designed to test the LLM's understanding of determining if a value is within a specified range. The values are randomized:\n\n**Question:** Is the number 3.2 within the range of 2.5 to 4.5? Explain your reasoning.\n\nFeel free to change the numbers to other random values if you want to test it further!"
43 | },
44 | {
45 | "from": "gpt",
46 | "value": "Yes, the number 3.2 is within the range of 2.5 to 4.5.\n\n**Explanation:**\n\nWhen determining if a number is within a specified range, you need to check if it is greater than or equal to the lower bound and less than or equal to the upper bound of that range.\n\nIn this case:\n- The lower bound is 2.5.\n- The upper bound is 4.5.\n\nThe number 3.2 is greater than 2.5 and less than 4.5, which means it falls within the specified range."
47 | }
48 | ]
49 | },
50 | {
51 | "conversations": [
52 | {
53 | "from": "human",
54 | "value": "Sure! Here's a question you can use to test your LLM's understanding of whether a value is within a given range. The values are randomized:\n\n**Question:** Is the number **3.25** within the range of **2.5** to **4.5**? Explain your reasoning.\n\nFeel free to replace the numbers with any other random values you'd like!"
55 | },
56 | {
57 | "from": "gpt",
58 | "value": "Yes, the number **3.25** is within the range of **2.5** to **4.5**. \n\nHere's the reasoning:\n\n- A range is defined by its lower bound and upper bound. In this case, the range is from **2.5** (the lower bound) to **4.5** (the upper bound).\n- To determine if a number is within a range, it must be greater than or equal to the lower bound and less than or equal to the upper bound.\n- **3.25** is greater than **2.5** and less than **4.5**.\n- Therefore, **3.25** is within the range of **2.5** to **4.5**."
59 | }
60 | ]
61 | },
62 | {
63 | "conversations": [
64 | {
65 | "from": "human",
66 | "value": "Sure! Here\u2019s a question that your LLM can solve, with randomized values for the range and the number to check:\n\n**Question:** Is the number **3.12** within the range of **2.5 to 4.5**? \n\nFeel free to change the number and range values to further randomize the question!"
67 | },
68 | {
69 | "from": "gpt",
70 | "value": "To determine if the number 3.12 is within the range of 2.5 to 4.5, we need to check if it is greater than or equal to the lower bound of the range (2.5) and less than or equal to the upper bound of the range (4.5).\n\n- Check if 3.12 is greater than or equal to 2.5:\n \\[\n 3.12 \\geq 2.5\n \\]\n This is true.\n\n- Check if 3.12 is less than or equal to 4.5:\n \\[\n 3.12 \\leq 4.5\n \\]\n This is also true.\n\nSince both conditions are satisfied, the number 3.12 is indeed within the range of 2.5 to 4.5."
71 | }
72 | ]
73 | },
74 | {
75 | "conversations": [
76 | {
77 | "from": "human",
78 | "value": "Sure! Here's a question that you can present to your LLM, with randomized values:\n\n**Question:** Is the number **3.21** within the range of **2.5 - 4.5**? Please explain your reasoning.\n\nFeel free to adjust the numbers if you want to randomize them further!"
79 | },
80 | {
81 | "from": "gpt",
82 | "value": "Certainly! Here's a similar question with randomized values:\n\n**Question:** Is the number **5.78** within the range of **4.2 - 6.9**? Please explain your reasoning.\n\n**Answer:** Yes, the number **5.78** is within the range of **4.2 - 6.9**. To determine this, we compare **5.78** to the lower bound, **4.2**, and the upper bound, **6.9**. Since **5.78** is greater than **4.2** and less than **6.9**, it falls within the specified range."
83 | }
84 | ]
85 | },
86 | {
87 | "conversations": [
88 | {
89 | "from": "human",
90 | "value": "Sure! Here's a question that will test the LLM's understanding of determining if a value is within a given range, with randomized values:\n\n---\n\n**Question:** Is the number **3.56** within the range of **2.5** to **4.5**? \n\nPlease explain your reasoning.\n\n--- \n\nFeel free to ask for more questions, or for different ranges and values!"
91 | },
92 | {
93 | "from": "gpt",
94 | "value": "To determine if the number **3.56** is within the range of **2.5** to **4.5**, we need to check if it falls between the lower bound (2.5) and the upper bound (4.5) of the range.\n\n1. **Lower Bound Check:** We check if 3.56 is greater than or equal to 2.5. Since 3.56 is greater than 2.5, it satisfies this condition.\n\n2. **Upper Bound Check:** We check if 3.56 is less than or equal to 4.5. Since 3.56 is less than 4.5, it satisfies this condition as well.\n\nSince 3.56 meets both the lower and upper bound conditions, it is indeed within the range of 2.5 to 4.5."
95 | }
96 | ]
97 | },
98 | {
99 | "conversations": [
100 | {
101 | "from": "human",
102 | "value": "Sure! Here\u2019s a question that you can use to test the LLM\u2019s understanding of determining if a value is within a given range. The values will be randomized each time you use it:\n\n**Question:** Is the number \\( X \\) within the range of \\( A \\) to \\( B \\)? \n\nLet\u2019s randomize the values:\n\n- Set \\( X = 3.5 \\)\n- Set \\( A = 1 \\)\n- Set \\( B = 5 \\)\n\nSo the question would be: \n\n**Is the number 3.5 within the range of 1 to 5?**\n\nYou can change the values of \\( X \\), \\( A \\), and \\( B \\) to create different scenarios. For example:\n\n1. \\( X = 2.2 \\), \\( A = 2 \\), \\( B = 4 \\)\n2. \\( X = 6.8 \\), \\( A = 5 \\), \\( B = 10 \\)\n3. \\( X = -1 \\), \\( A = -3 \\), \\( B = 0 \\)\n\nFeel free to use any combination of numbers within different ranges!"
103 | },
104 | {
105 | "from": "gpt",
106 | "value": "To determine if a number \\( X \\) is within the range defined by \\( A \\) and \\( B \\), we need to check if \\( A \\leq X \\leq B \\).\n\nLet's go through the examples you provided:\n\n1. **Example 1:**\n - \\( X = 3.5 \\)\n - \\( A = 1 \\)\n - \\( B = 5 \\)\n\n Here, \\( 1 \\leq 3.5 \\leq 5 \\) is true. Therefore, 3.5 is within the range of 1 to 5.\n\n2. **Example 2:**\n - \\( X = 2.2 \\)\n - \\( A = 2 \\)\n - \\( B = 4 \\)\n\n In this case, \\( 2 \\leq 2.2 \\leq 4 \\) is true. Hence, 2.2 is within the range of 2 to 4.\n\n3. **Example 3:**\n - \\( X = 6.8 \\)\n - \\( A = 5 \\)\n - \\( B = 10 \\)\n\n Here, \\( 5 \\leq 6.8 \\leq 10 \\) is true. Therefore, 6.8 is within the range of 5 to 10.\n\n4. **Example 4:**\n - \\( X = -1 \\)\n - \\( A = -3 \\)\n - \\( B = 0 \\)\n\n For this example, \\( -3 \\leq -1 \\leq 0 \\) is true. Thus, -1 is within the range of -3 to 0.\n\nYou can create different scenarios by changing the values of \\( X \\), \\( A \\), and \\( B \\) and applying the same logic to determine if \\( X \\) is within the range \\([A, B]\\)."
107 | }
108 | ]
109 | },
110 | {
111 | "conversations": [
112 | {
113 | "from": "human",
114 | "value": "Sure! Here\u2019s a question that your LLM can solve, with randomized values for the range:\n\n**Question:** Is the number 3.14 within the range of 2.5 to 4.5? \n\nTo explain your answer, consider the following:\n1. Identify the lower and upper bounds of the range.\n2. Determine if the number falls between these two values on a number line.\n\nFeel free to change the values of the number (3.14) and the range (2.5 to 4.5) to keep it randomized!"
115 | },
116 | {
117 | "from": "gpt",
118 | "value": "To determine if the number 3.14 is within the range of 2.5 to 4.5, we need to follow these steps:\n\n1. **Identify the Lower and Upper Bounds of the Range:**\n - The lower bound of the range is 2.5.\n - The upper bound of the range is 4.5.\n\n2. **Determine if the Number Falls Between These Two Values:**\n - We check if 3.14 is greater than or equal to the lower bound (2.5) and less than or equal to the upper bound (4.5).\n\nLet's evaluate:\n\n- \\(3.14 \\geq 2.5\\) is true.\n- \\(3.14 \\leq 4.5\\) is also true.\n\nSince both conditions are satisfied, 3.14 is indeed within the range of 2.5 to 4.5.\n\nFeel free to substitute different numbers and range values to see how the logic applies to other scenarios!"
119 | }
120 | ]
121 | }
122 | ]
--------------------------------------------------------------------------------
/train/dataset/racing_guide.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "conversations": [
4 | {
5 | "from": "human",
6 | "value": "What is the ideal direction of force in cornering according to 'The Perfect Corner' guide?"
7 | },
8 | {
9 | "from": "gpt",
10 | "value": "The ideal direction of force in cornering is to maximize acceleration in the direction of travel. This means generating maximum force in the primary direction of the track, minimizing sideways forces, which are only used to shift the car's path towards the apex and not to exceed necessary lateral movement."
11 | }
12 | ]
13 | },
14 | {
15 | "conversations": [
16 | {
17 | "from": "human",
18 | "value": "Why is it important to minimize sideways force in cornering?"
19 | },
20 | {
21 | "from": "gpt",
22 | "value": "Minimizing sideways force is crucial because any excess lateral force must be countered during corner exit, which can limit throttle application and reduce acceleration. This optimal force direction maximizes forward acceleration and reduces the time spent cornering."
23 | }
24 | ]
25 | },
26 | {
27 | "conversations": [
28 | {
29 | "from": "human",
30 | "value": "How does 'The Perfect Corner' guide suggest handling corner exit?"
31 | },
32 | {
33 | "from": "gpt",
34 | "value": "'The Perfect Corner' guide suggests handling corner exit by focusing on maximizing acceleration. This involves reaching the apex with a minimal lateral force requirement and then applying as much throttle as possible to accelerate out of the corner while maintaining optimal traction."
35 | }
36 | ]
37 | },
38 | {
39 | "conversations": [
40 | {
41 | "from": "human",
42 | "value": "What role does the apex play in achieving optimal cornering?"
43 | },
44 | {
45 | "from": "gpt",
46 | "value": "The apex is crucial for optimal cornering as it represents the point where the car is closest to the inside of the turn. Hitting the apex correctly allows for the most efficient use of the track's width and helps in minimizing the distance traveled while maximizing speed and control through the corner."
47 | }
48 | ]
49 | },
50 | {
51 | "conversations": [
52 | {
53 | "from": "human",
54 | "value": "Describe the concept of the 'acceleration arc' as outlined in the guide."
55 | },
56 | {
57 | "from": "gpt",
58 | "value": "The 'acceleration arc' refers to the trajectory a car takes while accelerating through and out of a corner. This arc is optimized to allow the driver to apply maximum throttle while maintaining control, ensuring the car exits the corner as quickly as possible. It is a critical component of achieving fast lap times."
59 | }
60 | ]
61 | },
62 | {
63 | "conversations": [
64 | {
65 | "from": "human",
66 | "value": "What is Line Theory in the context of racing?"
67 | },
68 | {
69 | "from": "gpt",
70 | "value": "Line Theory in racing refers to the set of rules that dictate the optimal path a car should take around a track. It focuses on maximizing speed and efficiency by following the ideal racing line, which is constantly adjusted based on the car's dynamics and the driver's skill level."
71 | }
72 | ]
73 | },
74 | {
75 | "conversations": [
76 | {
77 | "from": "human",
78 | "value": "How does a driver's car control skill affect their optimal racing line?"
79 | },
80 | {
81 | "from": "gpt",
82 | "value": "A driver's car control skill directly affects their optimal racing line. Advanced drivers can handle the car closer to its limits, allowing for more aggressive and faster lines. In contrast, novice drivers may need to take more conservative lines to maintain control and safety."
83 | }
84 | ]
85 | },
86 | {
87 | "conversations": [
88 | {
89 | "from": "human",
90 | "value": "Explain the importance of the 'corner entry' phase."
91 | },
92 | {
93 | "from": "gpt",
94 | "value": "The 'corner entry' phase is critical because it sets the stage for the entire cornering process. Proper entry determines the car's balance, speed, and trajectory towards the apex. A good entry maximizes control and sets up an optimal line for a fast and efficient corner exit."
95 | }
96 | ]
97 | },
98 | {
99 | "conversations": [
100 | {
101 | "from": "human",
102 | "value": "What is the significance of using the whole track in racing?"
103 | },
104 | {
105 | "from": "gpt",
106 | "value": "Using the whole track is significant in racing as it allows for the smoothest and fastest path around a corner. By utilizing the full width, a driver can minimize steering inputs, maintain higher speeds, and achieve better lap times through optimal cornering lines."
107 | }
108 | ]
109 | },
110 | {
111 | "conversations": [
112 | {
113 | "from": "human",
114 | "value": "Describe how an advanced driver might approach a corner differently than a novice driver."
115 | },
116 | {
117 | "from": "gpt",
118 | "value": "An advanced driver might approach a corner with a more aggressive line, braking later and carrying more speed through the entry and apex. They can manage higher forces and maintain control closer to the car's limits, allowing for faster corner exits. Novice drivers, on the other hand, may brake earlier and take a more conservative line to ensure stability and control."
119 | }
120 | ]
121 | },
122 | {
123 | "conversations": [
124 | {
125 | "from": "human",
126 | "value": "How is drifting initiated in rear-wheel-drive and front-wheel-drive cars?"
127 | },
128 | {
129 | "from": "gpt",
130 | "value": "In rear-wheel-drive cars, drifting can be initiated by depressing the clutch, gearing down into second gear, revving the engine to about 4000-5000 RPM, and then turning the steering wheel away from the corner before quickly turning back towards it while releasing the clutch. This causes the rear wheels to spin and lose traction, initiating the drift. For front-wheel-drive cars, drifting is initiated by pulling the handbrake to cause the rear wheels to lose traction while steering into the turn."
131 | }
132 | ]
133 | },
134 | {
135 | "conversations": [
136 | {
137 | "from": "human",
138 | "value": "Explain the heel-toe-shifting technique used in drifting."
139 | },
140 | {
141 | "from": "gpt",
142 | "value": "Heel-toe-shifting is a race shifting technique that allows drivers to downshift quickly while applying the brakes. The driver uses their right heel to blip the throttle while braking with the toes of the same foot, matching the engine speed to the transmission speed. This prevents driveline shock and helps maintain traction during a drift."
143 | }
144 | ]
145 | },
146 | {
147 | "conversations": [
148 | {
149 | "from": "human",
150 | "value": "What is the Kansei Drift technique?"
151 | },
152 | {
153 | "from": "gpt",
154 | "value": "The Kansei Drift is performed at high speeds by sharply turning the wheels into a turn and then quickly letting off the throttle. The resulting loss of throttle combined with cornering forces causes the vehicle to oversteer. The driver then gets back on the throttle to maintain the drift and countersteers to keep the vehicle on the desired path."
155 | }
156 | ]
157 | },
158 | {
159 | "conversations": [
160 | {
161 | "from": "human",
162 | "value": "Describe the long slide drift technique."
163 | },
164 | {
165 | "from": "gpt",
166 | "value": "The long slide drift involves entering a turn at high speed, turning the wheels into the turn, and then using the e-brake to initiate the drift. The driver must countersteer immediately to control the drift and keep the vehicle sliding sideways. If the vehicle loses speed, the driver heel-toe downshifts to maintain the drift until the turn is completed."
167 | }
168 | ]
169 | },
170 | {
171 | "conversations": [
172 | {
173 | "from": "human",
174 | "value": "What is the clutch-kick drift technique?"
175 | },
176 | {
177 | "from": "gpt",
178 | "value": "The clutch-kick drift technique involves entering a turn at high speed, turning the wheels into the turn, and then clutching in while staying on the throttle. The engine revs up, and when the clutch is released, the rear wheels break traction, initiating the drift. The driver must countersteer and maintain throttle to control the drift."
179 | }
180 | ]
181 | },
182 | {
183 | "conversations": [
184 | {
185 | "from": "human",
186 | "value": "Explain the shift-lock drift technique."
187 | },
188 | {
189 | "from": "gpt",
190 | "value": "The shift-lock drift technique involves entering a turn at high speed, quickly downshifting without heel-toe shifting, which causes the driveline to slow down and the engine RPM to increase. This stress on the driveline causes the rear wheels to lose traction, initiating the drift. The driver must then countersteer and control the throttle to maintain the drift."
191 | }
192 | ]
193 | },
194 | {
195 | "conversations": [
196 | {
197 | "from": "human",
198 | "value": "Describe the dirt drop drift technique."
199 | },
200 | {
201 | "from": "gpt",
202 | "value": "The dirt drop drift involves entering a turn at low to medium speed, turning the wheels into the turn, and allowing the rear wheels to drop off the road surface onto a low traction surface like dirt. This causes the rear wheels to lose traction and initiate a drift. The driver must then countersteer and maintain throttle to control the drift as the vehicle returns to the road."
203 | }
204 | ]
205 | },
206 | {
207 | "conversations": [
208 | {
209 | "from": "human",
210 | "value": "What is the feint or inertia drift technique?"
211 | },
212 | {
213 | "from": "gpt",
214 | "value": "The feint or inertia drift involves steering the vehicle away from the turn just before entering it to load the suspension on one side. The driver then quickly steers back into the turn, using the vehicle's momentum to induce oversteer. The driver must countersteer and control the throttle to maintain the drift through the turn."
215 | }
216 | ]
217 | },
218 | {
219 | "conversations": [
220 | {
221 | "from": "human",
222 | "value": "Explain the Scandinavian flick technique."
223 | },
224 | {
225 | "from": "gpt",
226 | "value": "The Scandinavian flick involves flicking the car away from the turn, then applying full braking and clutching in to lock the wheels. The car skids sideways, and as the turn approaches, the driver releases the brakes and steers into the turn, using the car's momentum to slingshot into the turn. The driver then releases the clutch and applies throttle to maintain the drift."
227 | }
228 | ]
229 | },
230 | {
231 | "conversations": [
232 | {
233 | "from": "human",
234 | "value": "What is the power-over drift technique?"
235 | },
236 | {
237 | "from": "gpt",
238 | "value": "The power-over drift technique involves entering a turn at any speed and using throttle to cause the rear wheels to lose traction. The driver turns the wheels sharply into the turn and applies enough throttle to induce oversteer. The driver must countersteer and control the throttle to maintain the drift through the turn."
239 | }
240 | ]
241 | }
242 | ]
243 |
244 |
--------------------------------------------------------------------------------
/train/sft_train.py:
--------------------------------------------------------------------------------
1 | import argparse, yaml
2 | from datetime import datetime
3 | from unsloth import FastLanguageModel, save
4 | from trl import SFTTrainer
5 | from transformers import TrainingArguments
6 | from unsloth import is_bfloat16_supported
7 | from unsloth.chat_templates import get_chat_template
8 | from datasets import load_dataset
9 | from peft.peft_model import PeftModelForCausalLM
10 | from transformers.models.llama.tokenization_llama_fast import LlamaTokenizerFast
11 | from dotenv import load_dotenv, find_dotenv
12 | import os, json
13 | from typing import Any, Dict
14 |
15 | load_dotenv(find_dotenv())
16 |
17 | HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
18 |
19 | def preprocess(text, chat_template="phi-3", answer=False):
20 | if chat_template == "phi-3":
21 | if answer:
22 | messages = [{"from": "gpt", "value": text}]
23 | else:
24 | messages = [{"from": "human", "value": text}]
25 | elif chat_template == "qwen-2.5":
26 | if answer:
27 | messages = [{"role": "assistant", "content": text}]
28 | else:
29 | messages = [{"role": "user", "content": text}]
30 | else:
31 | raise ValueError(f"Chat template {chat_template} not recognized. Please use 'phi-3' or 'qwen-2.5'.")
32 | return messages
33 |
34 | def chat_mapping(chat_template="phi-3"):
35 | if chat_template == "phi-3":
36 | return {"role": "from", "content": "value", "user": "human", "assistant": "gpt"}
37 | elif chat_template == "qwen-2.5":
38 | return {"role": "role", "content": "content", "user": "user", "assistant": "assistant"}
39 | else:
40 | raise ValueError(f"Chat template {chat_template} not recognized. Please use 'phi-3' or 'qwen-2.5'.")
41 |
42 | def load_config(config_path):
43 | with open(config_path, "r") as f:
44 | return yaml.safe_load(f)
45 |
46 | def train(cfg: Dict[str, Dict[str, Any]]):
47 | out_dir: str = cfg["training"]["out_dir"]
48 | chat_template: str = cfg["training"]["chat_template"]
49 | seed: int = cfg["training"]["seed"]
50 |
51 | base_model: str = cfg["model"]["base_model"]
52 | experiment_name: str = base_model.split("/")[-1] + "_SFT_"
53 | experiment_name += datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
54 |
55 | model, tokenizer = FastLanguageModel.from_pretrained(
56 | model_name = cfg["model"]["base_model"],
57 | max_seq_length = cfg["model"]["max_seq_length"],
58 | dtype = None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
59 | load_in_4bit = cfg["model"]["load_in_4bit"],
60 | token=HUGGINGFACEHUB_API_TOKEN
61 | )
62 |
63 | model = FastLanguageModel.get_peft_model(
64 | model,
65 | r = cfg["model"]["lora_rank"],
66 | target_modules = cfg["model"]["target_modules"],
67 | lora_alpha = cfg["model"]["lora_alpha"],
68 | lora_dropout = cfg["model"]["lora_dropout"],
69 | bias = "none",
70 | use_gradient_checkpointing = cfg["model"]["use_gradient_checkpointing"],
71 | random_state = seed,
72 | use_rslora = False,
73 | loftq_config = None,
74 | )
75 |
76 | tokenizer = get_chat_template(
77 | tokenizer,
78 | chat_template=chat_template,
79 | mapping=chat_mapping(chat_template=chat_template),  # pass the configured template so the mapping matches preprocess()
80 | )
81 |
82 | ####################################CUSTOM DATA####################################
83 | def formatting_prompts_custom_func(examples):
84 | convos = []
85 | for i in range(len(examples["conversations"])):
86 | input_text = examples["conversations"][i][0]["value"]
87 | output_text = examples["conversations"][i][1]["value"]
88 | input_sample = preprocess(text=input_text, chat_template=chat_template, answer=False)
89 | output_sample = preprocess(text=output_text, chat_template=chat_template, answer=True)
90 | sample = input_sample + output_sample
91 | convos.append(sample)
92 | texts = [tokenizer.apply_chat_template(convo, tokenize=False, add_generation_prompt=False) for convo in convos]
93 | return { "text" : texts, }
94 |
95 | # Create a combined json file from all json files in the dataset folder
96 | dataset_dir = os.path.join(os.getcwd(), cfg["training"]["dataset_dir"])
97 | all_json_files = [os.path.join(dataset_dir, f) for f in os.listdir(dataset_dir) if f.endswith('.json')]
98 | # Initialize an empty list to hold all conversations
99 | all_conversations = []
100 | # Read and combine all JSON files
101 | for fname in all_json_files:
102 | print(f"Reading: {fname}")
103 | with open(fname) as infile:
104 | data = json.load(infile)
105 | all_conversations.extend(data)
106 | # Write the combined data to a new JSON file
107 | combined_json_path = os.path.join(dataset_dir, 'combined', 'full_data.json')
108 | os.makedirs(os.path.dirname(combined_json_path), exist_ok=True)  # the 'combined' subfolder may not exist yet
109 | with open(combined_json_path, 'w') as outfile:
110 | json.dump(all_conversations, outfile, indent=4)
111 | custom_dataset = load_dataset('json', data_files=combined_json_path, split='train')
112 | dataset = custom_dataset.map(formatting_prompts_custom_func, batched=True)
113 | ####################################TRAINING####################################
114 | if cfg["training"]["train_bool"]:
115 | trainer = SFTTrainer(
116 | model=model,
117 | tokenizer=tokenizer,
118 | train_dataset=dataset,
119 | dataset_text_field="text",
120 | max_seq_length=cfg["model"]["max_seq_length"],
121 | dataset_num_proc=2,
122 | packing=False, # Can make training 5x faster for short sequences.
123 | args=TrainingArguments(
124 | per_device_train_batch_size=cfg["trainer"]["per_device_train_batch_size"],
125 | gradient_accumulation_steps=cfg["trainer"]["gradient_accumulation_steps"],
126 | warmup_steps=cfg["trainer"]["warmup_steps"],
127 | max_steps=cfg["trainer"]["max_steps"],
128 | learning_rate=cfg["trainer"]["learning_rate"],
129 | fp16=not is_bfloat16_supported(),
130 | bf16=is_bfloat16_supported(),
131 | logging_steps=cfg["trainer"]["logging_steps"],
132 | optim=cfg["trainer"]["optim"],
133 | weight_decay=cfg["trainer"]["weight_decay"],
134 | lr_scheduler_type=cfg["trainer"]["lr_scheduler_type"],
135 | seed=seed,
136 | output_dir=out_dir,
137 | ),
138 | )
139 |
140 | # Stats
141 | trainer_stats = trainer.train()
142 | print("Training stats: ", trainer_stats)
143 |
144 | model.save_pretrained(f"{out_dir}/{experiment_name}")
145 | print("Training complete, saved model: ", f"{out_dir}/{experiment_name}")
146 |
147 | if cfg["training"]["create_merged_model"]:
148 | create_merged(model, tokenizer, out_dir=out_dir)
149 | print("Merged model saved to: ", f"{out_dir}/merged")
150 |
151 | return model, tokenizer
152 |
153 | def create_merged(model: PeftModelForCausalLM, tokenizer: LlamaTokenizerFast, out_dir="outputs/race_llm"):
154 | merged_dir = os.path.join(out_dir, "merged")
155 |
156 | # Create merged directory if not existing
157 | if not os.path.exists(merged_dir):
158 | os.makedirs(merged_dir)
159 |
160 | save.unsloth_save_model(model=model,
161 | tokenizer=tokenizer,
162 | save_directory=merged_dir,
163 | push_to_hub=False,
164 | save_method="merged_16bit",)
165 |
166 | if __name__ == "__main__":
167 | parser = argparse.ArgumentParser()
168 | parser.add_argument("--config", type=str, required=False,
169 | default="train/config/sft_train.yaml", help="Path to YAML config file")
170 | args = parser.parse_args()
171 | train(load_config(args.config))
172 |
--------------------------------------------------------------------------------
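Note: for reference, the configuration keys that train() reads from train/config/sft_train.yaml, sketched as the equivalent Python dict returned by load_config. Every value below is a placeholder assumption, not the repository's actual settings; only the key structure mirrors the cfg[...] lookups in the code.

# Placeholder values only; the key structure mirrors the cfg[...] lookups in train().
cfg_example = {
    "training": {
        "out_dir": "outputs/race_llm",
        "chat_template": "phi-3",            # or "qwen-2.5"
        "seed": 42,
        "dataset_dir": "train/dataset",
        "train_bool": True,
        "create_merged_model": False,
    },
    "model": {
        "base_model": "unsloth/Phi-3-mini-4k-instruct",   # placeholder model id
        "max_seq_length": 4096,
        "load_in_4bit": True,
        "lora_rank": 16,
        "target_modules": ["q_proj", "k_proj", "v_proj", "o_proj"],
        "lora_alpha": 16,
        "lora_dropout": 0.0,
        "use_gradient_checkpointing": "unsloth",
    },
    "trainer": {
        "per_device_train_batch_size": 2,
        "gradient_accumulation_steps": 4,
        "warmup_steps": 5,
        "max_steps": 60,
        "learning_rate": 2e-4,
        "logging_steps": 1,
        "optim": "adamw_8bit",
        "weight_decay": 0.01,
        "lr_scheduler_type": "linear",
    },
}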