├── .gitignore
├── CMakeLists.txt
├── README.md
├── dependencies.yaml
├── package.xml
├── params.yaml
├── params_esim_davis.yaml
├── params_esim_davis346.yaml
├── params_esim_xplorer.yaml
├── params_mars_yard.yaml
├── params_rpg_davis.yaml
├── params_rpg_davis_rotation.yaml
├── params_rpg_fpv.yaml
├── params_wells.yaml
├── params_wells_gt.yaml
├── requirements.txt
├── scripts
├── analyze_rt_factor_with_event_rate.py
├── analyze_tracks_angles.py
├── compare.py
├── compare_odometry.py
├── compare_update_strategy.py
├── compare_uslam.py
├── convert_tracks_csv_to_txt.py
├── create_speedup_table.py
├── customize_rpe_error_arrays.py
├── estimate_q0_from_imu_a.py
├── evaluate_uslam.py
├── extract_initial_state_from_bag.py
├── parameter_exploration.sh
├── plot_event_rate_from_bag.py
├── plot_gt_dynamic_comparison_from_bags.py
├── plot_performance_pie_charts.py
├── plot_summaries.py
├── qualitative_evaluation.py
├── rename.py
├── search_best_tables.py
├── search_evaluations.py
├── showcase_mars_environment.py
├── visualize_esim_imu.py
├── visualize_esim_spline_derivatives.py
├── visualize_feature_tracks.py
├── visualize_feature_tracks_3d.py
├── visualize_frame_output.py
├── visualize_trajectories.py
└── visualize_trajectory_animation.py
├── setup.py
├── src
└── x_evaluate
│ ├── __init__.py
│ ├── comparisons.py
│ ├── evaluation_data.py
│ ├── math_utils.py
│ ├── performance_evaluation.py
│ ├── plots.py
│ ├── rpg_tracking_analysis
│ ├── __init__.py
│ ├── bag2dataframe.py
│ ├── compare_tracks.py
│ ├── dataset.py
│ ├── evaluate_tracks.py
│ ├── plot_track.py
│ ├── plot_tracks.py
│ ├── tracker.py
│ ├── tracker_init.py
│ ├── tracker_utils.py
│ ├── tracks.py
│ └── visualizer.py
│ ├── rpg_trajectory_evaluation.py
│ ├── scriptlets.py
│ ├── tracking_evaluation.py
│ ├── trajectory_evaluation.py
│ ├── utils.py
│ └── visualizer
│ ├── __init__.py
│ ├── dataset_player.py
│ ├── renderer.py
│ └── utils.py
└── test
├── evaluate.cpp
├── evaluate.py
├── evaluate.yaml
├── evaluate_mars_yard.yaml
├── evaluate_poster_6dof_speedup.yaml
├── evaluate_rpg_davis_rotation.yaml
├── evaluate_rpg_fpv.yaml
├── evaluate_sim.yaml
├── evaluate_sim_ral.yaml
├── evaluate_wells.yaml
├── evaluate_wells_dop.yaml
└── evaluate_wells_gt.yaml
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
140 | .idea
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | cmake_minimum_required(VERSION 2.8.3)
3 | cmake_policy(SET CMP0048 NEW) # avoids CMAKE_PROJECT_VERSION warning
4 | project(x_evaluate)
5 |
6 | set (CMAKE_BUILD_TYPE Release)
7 |
8 | if (CMAKE_BUILD_TYPE MATCHES Release)
9 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
10 | endif()
11 |
12 | # build with catkin build x_evaluate --cmake-args -DMY_DEBUG=1
13 | if (MY_DEBUG)
14 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -Og")
15 | endif()
16 |
17 | # for debugging memory leaks
18 | #set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -Og -fno-omit-frame-pointer -fsanitize=address") # REMOVE ME LATER
19 | #set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} -g -Og -fno-omit-frame-pointer -fsanitize=address") # REMOVE ME LATER
20 |
21 |
22 | find_package(catkin REQUIRED COMPONENTS
23 | x
24 | cv_bridge
25 | rosbag
26 | dvs_msgs
27 | sensor_msgs
28 | std_msgs
29 | gflags_catkin
30 | glog_catkin
31 | easy_profiler_catkin
32 | )
33 | find_package(yaml-cpp REQUIRED) # used in evaluation executable
34 | catkin_python_setup()
35 |
36 | catkin_package()
37 |
38 | # Set build flags, depending on the architecture
39 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall")
40 |
41 |
42 | include_directories (
43 | ${OpenCV_INCLUDE_DIRS}
44 | ${catkin_INCLUDE_DIRS}
45 | ${easy_profiler_catkin_INCLUDE_DIRS}
46 | ${glog_catkin_INCLUDE_DIRS}
47 | )
48 |
49 |
50 | set (FS_LIBRARY "")
51 | if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.1)
52 | set(FS_LIBRARY stdc++fs)
53 | endif()
54 |
55 | # add generic evaluation program as executable to be able to call it from python
56 | add_executable(evaluate ${SOURCE} test/evaluate.cpp)
57 | target_link_libraries(evaluate
58 | ${OpenCV_LIBRARIES}
59 | ${catkin_LIBRARIES}
60 | ${rostest_LIBRARIES}
61 | ${YAML_CPP_LIBRARIES}
62 | ${easy_profiler_catkin_LIBRARIES}
63 | ${FS_LIBRARY}
64 | )
65 |
66 | target_compile_definitions(evaluate PUBLIC -DUSING_EASY_PROFILER)
67 |
68 | catkin_install_python(PROGRAMS test/evaluate.py DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION})
69 |
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Exploring Event Camera-based Odometry for Planetary Robots
2 |
3 |
4 |
5 |
6 | This repository contains code that implements the event-based VIO algorithm described in Mahlknecht et al. RAL'22. The paper can be found [here](http://rpg.ifi.uzh.ch/docs/RAL22_Mahlknecht.pdf), and datasets can be found [here](https://uzh-rpg.github.io/eklt-vio/).
7 | If you use this code in an academic context, please cite the following work:
8 |
9 | [Florian Mahlknecht](https://florian.world), [Daniel Gehrig](https://danielgehrig18.github.io/), Jeremy Nash,
10 | Friedrich M.
11 | Rockenbauer, Benjamin Morrell, Jeff Delaune, [Davide Scaramuzza](http://rpg.ifi.uzh.ch/people_scaramuzza.html), "Exploring Event Camera-based Odometry for Planetary Robots", IEEE Robotics and Automation Letters (RA-L), 2022
12 | ```bibtex
13 | @article{Mahlknecht22RAL,
14 | title={Exploring Event Camera-based Odometry for Planetary Robots},
15 | author={Mahlknecht, Florian and Gehrig, Daniel and Nash, Jeremy and Rockenbauer, Friedrich M. and Morrell, Benjamin and Delaune, Jeff and Scaramuzza, Davide},
16 | journal={IEEE Robotics and Automation Letters (RA-L)},
17 | year={2022}
18 | }
19 | ```
20 |
21 | ## x_evaluate
22 |
23 | Performance evaluation for (range-) visual-inertial odometry xVIO framework.
24 |
25 | Provides an extensive toolset to evaluate frontend and backend accuracy (i.e. pose and feature tracking) as well as
26 | computational efficiency, such as realtime factor, CPU and memory usage.
27 |
28 | ### Dependencies
29 |
30 | The library has been tested only on Ubuntu 20.04 with ROS Noetic. Although no roscore is needed, feature tracking
31 | evaluation requires the Python 3 rosbag reader, therefore lifting the requirements to ROS Noetic.
32 |
33 | Beyond ROS, the major dependencies are the X library and gflags_catkin (see `dependencies.yaml`). Typical installation
34 | commands might be:
35 |
36 | ```bash
37 | cd YOUR_ROS_WORKSPACE/
38 | source devel/setup.zsh # or .bash if you use BASH
39 | cd src
40 | git clone ... x_evaluate
41 | vcs-import < x_evaluate/dependencies.yaml
42 | catkin build x_evaluate # DO NOT BUILD all packages, as the rpg_ros_driver might fail, but it's not needed
43 | ```
44 |
45 | For the python package it is recommended to create a virtual environment and install the requirements with pip:
46 |
47 | ```bash
48 | conda create -n x python=3.8
49 | conda activate x
50 | pip install -r requirements.txt
51 | pip install . # Optionally install the x_evaluate in the python dist-packages
52 | ```
53 |
54 | ### Basic architecture
55 |
56 | The library consists of one C++ file which directly calls X library callbacks such as `processIMU()` from the data
57 | read from a rosbag. In this way a runtime-independent evaluation can be performed. Results are then dumped to CSV files
58 | and analyzed in Python, with the main module `x_evaluate`.
59 |
60 | The python evaluation runs additionally dump all the main results to a pickle file, such that different runs can be
61 | easily compared and reproduced.
62 |
63 | ### Usage
64 |
65 | An evaluation run is performed by `evaluate.py`. A configuration file e.g. `evaluate.yaml` defines which datasets are
66 | processed and based on which base parameter files, e.g. `params_rpg_davis.yaml`. Parameters can be overwritten for a
67 | single sequence or for all sequences on the respective entry in `evaluate.yaml`. Additionally, by passing
68 | `--overrides some_param=17 some_other_param=False` overrules all other settings for the purpose of parameter-tuning runs.
69 |
70 | A simple example run:
71 |
72 | ```bash
73 | python test/evaluate.py --configuration test/evaluate.yaml --output_folder /path/to/_out/ \
74 | --dataset_dir /path/to_datasets --frontend XVIO --name "XVIO"
75 | ```
76 |
77 | And a comparison run:
78 |
79 | ```bash
80 | python scripts/compare.py --input_folder /path/to --sub_folders _out:_out_2 --output_folder /path/to/results
81 | ```
82 |
83 |
84 |
--------------------------------------------------------------------------------
/dependencies.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | rpg_dvs_ros:
3 | type: git
4 | url: git@github.com:uzh-rpg/rpg_dvs_ros.git
5 | version: master
6 | gflags_catkin:
7 | type: git
8 | url: git@github.com:ethz-asl/gflags_catkin.git
9 | version: master
10 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 | x_evaluate
3 | 1.0.0
4 | Performance evaluation toolbox for Visual-Inertial Odometry with the X framework (xVIO).
5 | Florian Mahlknecht
6 |
7 | California Institute of Technology
8 |
9 | catkin
10 |
11 | x
12 | cv_bridge
13 | dvs_msgs
14 | sensor_msgs
15 | geometry_msgs
16 | std_msgs
17 | gflags_catkin
18 | easy_profiler_catkin
19 |
20 | glog_catkin
21 |
22 |
--------------------------------------------------------------------------------
/params.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | ## Default initial state: static, level.
6 | #p: [0, 0, 0.5] # [x,y,z]
7 | #v: [0.0, 0.0, 0.0] # [x,y,z]
8 | #q: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
9 |
10 | ## Initial state for myBags/20160426/circle_high_vel.bag at t = 0 s
11 | p: [-0.168942875105, -0.573647949818, 1.09445961816] # [x,y,z]
12 | v: [0.0, 0.0, 0.0] # [x,y,z]
13 | q: [-0.329112656567, -0.0699724433216, 0.0214667765852, 0.941449889249] # [w,x,y,z]
14 |
15 | # Initial IMU bias estimates
16 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
17 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
18 |
19 | # Initial standard deviation estimates [x,y,z]
20 | sigma_dp: [0.0, 0.0, 0.0] # [m]
21 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
22 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
23 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
24 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
25 |
26 | ###############
27 | # Calibration #
28 | ###############
29 |
30 | # Camera calibration for myBags/20160426 bags
31 | cam1_fx: 0.46513
32 | cam1_fy: 0.726246
33 | cam1_cx: 0.499822
34 | cam1_cy: 0.458086
35 | cam1_dist_model: FOV
36 | cam1_dist_params: [0.950711]
37 | cam1_img_width: 752
38 | cam1_img_height: 480
39 | cam1_q_ic: [-0.004059133072894, 0.928890463133302, 0.370065735566358, -0.014049352983967] # [w,x,y,z]
40 | cam1_p_ic: [-0.05914814, 0.04309647, -0.03085167] # [x,y,z]
41 | cam1_time_offset: -0.00397400522578
42 |
43 | cam2_fx: 0.46513
44 | cam2_fy: 0.726246
45 | cam2_cx: 0.499822
46 | cam2_cy: 0.458086
47 | cam2_dist_model: FOV
48 | cam2_dist_params: [0.950711]
49 | cam2_img_width: 752
50 | cam2_img_height: 480
51 | cam2_q_ic: [-0.004059133072894, 0.928890463133302, 0.370065735566358, -0.014049352983967] # [w,x,y,z]
52 | cam2_p_ic: [-0.05914814, 0.04309647, -0.03085167] # [x,y,z]
53 | cam2_time_offset: -0.00397400522578
54 |
55 | # Feature noise (normalized coordinate standard deviation)
56 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
57 |
58 | #######
59 | # IMU #
60 | #######
61 |
62 | # MXR9500G/M accels (Astec)
63 | n_a: 0.0083 # Accel noise spectral density [m/s^2/sqrt(Hz)]
64 | n_ba: 0.00083 # Accel bias random walk [m/s^3/sqrt(Hz)]
65 |
66 | # ADXRS610 gyros (Astec)
67 | n_w: 0.0013 # Gyro noise spectral density [rad/s/sqrt(Hz)]
68 | n_bw: 0.00013 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
69 |
70 | # Max specific force norm threshold, after which accelerometer readings are detected as spikes. [m/s^2]
71 | a_m_max: 50.0
72 |
73 | #######
74 | # LRF #
75 | #######
76 |
77 | # Noise (standard deviation in m)
78 | sigma_range: 0.05
79 |
80 | ##############
81 | # Sun Sensor #
82 | ##############
83 |
84 | # Currently unused
85 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
86 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
87 |
88 | ###########
89 | # Tracker #
90 | ###########
91 |
92 | fast_detection_delta: 20
93 | non_max_supp: True
94 | block_half_length: 20
95 | margin: 20
96 | n_feat_min: 400
97 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
98 | outlier_method: 8
99 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
100 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
101 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
102 | # image resolution, and the image noise.
103 | outlier_param1: 0.3
104 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
105 | # (probability) that the estimated matrix is correct.
106 | outlier_param2: 0.99
107 | # 1 <=> no tiling
108 | n_tiles_h: 1
109 | n_tiles_w: 1
110 | max_feat_per_tile: 10000
111 |
112 | ########
113 | # EKLT #
114 | ########
115 |
116 | eklt_batch_size: 300
117 | eklt_update_every_n_events: 20
118 | eklt_displacement_px: 0.6
119 | eklt_patch_size: 25
120 | eklt_max_num_iterations: 10
121 | eklt_tracking_quality: 0.6
122 | eklt_bootstrap: "klt"
123 | eklt_lk_window_size: 20
124 | eklt_num_pyramidal_layers: 3
125 | eklt_scale: 4
126 | eklt_display_features: true
127 | eklt_display_feature_id: false
128 | eklt_display_feature_patches: false
129 | eklt_arrow_length: 5
130 |
131 | eklt_max_corners: 100
132 | eklt_min_corners: 0
133 | eklt_log_eps: 0.01
134 | eklt_use_linlog_scale: false
135 |
136 | eklt_first_image_t: -1
137 | eklt_tracks_file_txt: ""
138 |
139 | eklt_patch_timestamp_assignment: latest-event
140 |
141 | eklt_enable_outlier_removal: false
142 | eklt_outlier_method: 8
143 | eklt_outlier_param1: 0.6
144 | eklt_outlier_param2: 0.99
145 |
146 | eklt_ekf_feature_interpolation: linear-no-limit
147 | eklt_ekf_feature_extrapolation_limit: -1.0
148 | eklt_ekf_update_every_n: -1
149 | eklt_ekf_update_strategy: every-ros-event-message
150 | eklt_ekf_update_timestamp: patches-maximum
151 | eklt_harris_block_size: 3
152 | eklt_detection_min_distance: 10
153 | eklt_harris_k: 0.04
154 | eklt_harris_quality_level: 0.01
155 |
156 | #########
157 | # HASTE #
158 | #########
159 |
160 | haste_tracker_type: haste-correlation-star
161 | haste_patch_size: 31
162 | haste_max_corners: 60
163 | haste_min_corners: 35
164 | haste_bootstrap_from_frames: false
165 | haste_bootstrap_with_unit_length_of: true
166 |
167 | haste_enable_outlier_removal: true
168 | haste_outlier_method: 8
169 | haste_outlier_param1: 0.6
170 | haste_outlier_param2: 0.99
171 |
172 | haste_ekf_feature_interpolation: nearest-neighbor
173 | haste_ekf_feature_extrapolation_limit: -1.0
174 | haste_ekf_update_every_n: 3200
175 | haste_ekf_update_strategy: every-n-events
176 | haste_ekf_update_timestamp: patches-maximum
177 | haste_harris_block_size: 3
178 | haste_detection_min_distance: 10
179 | haste_harris_k: 0
180 | haste_harris_quality_level: 0.3
181 |
182 | ##############
183 | # SLAM-MSCKF #
184 | ##############
185 |
186 | # Number of poses in the sliding window
187 | n_poses_max: 10
188 |
189 | # Number of SLAM features
190 | n_slam_features_max: 15
191 |
192 | # Initial inverse depth of SLAM features [1/m]
193 | # (default: 1 / (2 * d_min) [Montiel, 2006])
194 | rho_0: 2.5
195 |
196 | # Initial standard deviation of SLAM inverse depth [1/m]
197 | # (default: 1 / (4 * d_min) [Montiel, 2006])
198 | sigma_rho_0: 1.25
199 |
200 | # Number of IEKF iterations (1 <=> EKF)
201 | iekf_iter: 1
202 |
203 | # Minimum baseline to trigger MSCKF measurement (pixels)
204 | msckf_baseline: 30.0
205 |
206 | # Minimum track length for a visual feature to be processed
207 | min_track_length: 10
208 |
209 | #######
210 | # EKF #
211 | #######
212 |
213 | # State buffer size (default: 250 states)
214 | state_buffer_size: 250
215 |
216 | # Expected difference between successive IMU sequence IDs.
217 | delta_seq_imu: 1
218 |
219 | # Tolerance for how far in the future/past the closest state request can be without returning an invalid state. [s]
220 | state_buffer_time_margin: 0.02
221 |
222 | # Gravity vector in world frame [m/s^2]
223 | g: [0.0, 0.0, -9.81] # [x,y,z]
224 |
225 | ########
226 | # Misc #
227 | ########
228 |
229 | # Timeout before 3D trajectory disappears in GUI (s)
230 | traj_timeout_gui: 3
231 |
232 | # flags to play rosbags
233 | /use_sim_time: True
234 |
235 | # Initialize at startup
236 | init_at_startup: False
237 |
238 | ##########
239 | # Events #
240 | ##########
241 |
242 | # Event camera calibration for DVxplorer without distortion
243 | event_cam1_fx: 0.6437500
244 | event_cam1_fy: 0.8583333
245 | event_cam1_cx: 0.5000000
246 | event_cam1_cy: 0.5000000
247 | event_cam1_dist_model: FOV
248 | event_cam1_dist_params: [0.0]
249 | event_cam1_img_width: 640
250 | event_cam1_img_height: 480
251 | event_cam1_q_ic: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
252 | event_cam1_p_ic: [0.0, 0.0, 0.0] # [x,y,z]
253 | event_cam1_time_offset: 0.0
254 |
255 | event_accumulation_methode: 0
256 | event_accumulation_period: 0.1
257 | n_events_min: 15000
258 | n_events_max: 40000
259 | n_events_noise_detection_min: 40
260 | noise_event_rate: 20000
261 | event_norm_factor: 4.0
262 | correct_event_motion: False
263 |
--------------------------------------------------------------------------------
/params_esim_davis.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | p: [0.0, 0.0, 20.0] # [x,y,z]
6 | v: [1.0, 1.0, 0.0] # [x,y,z]
7 | q: [0.0, 1.0, 0.0, 0.0] # [w,x,y,z]
8 |
9 | # Initial IMU bias estimates
10 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
11 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
12 |
13 | # Initial standard deviation estimates [x,y,z]
14 | sigma_dp: [0.0, 0.0, 0.0] # [m]
15 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
16 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
17 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
18 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
19 |
20 | ###############
21 | # Calibration #
22 | ###############
23 |
24 | # Camera calibration for myBags/20160426 bags
25 | cam1_fx: 0.8333333333333333333
26 | cam1_fy: 1.1111111111111111111
27 | cam1_cx: 0.5
28 | cam1_cy: 0.5
29 | cam1_dist_model: FOV
30 | cam1_dist_params: [0.0]
31 | cam1_img_width: 240
32 | cam1_img_height: 180
33 | cam1_q_ic: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
34 | cam1_p_ic: [0.0, 0.0, 0.0] # [x,y,z]
35 | cam1_time_offset: 0.0
36 |
37 | #cam2_fx: 0.46513
38 | #cam2_fy: 0.726246
39 | #cam2_cx: 0.499822
40 | #cam2_cy: 0.458086
41 | #cam2_s: 0.950711
42 | #cam2_dist_model: FOV
43 | #cam2_dist_params: [ 0.950711 ]
44 | #cam2_img_width: 752
45 | #cam2_img_height: 480
46 | #cam2_q_ic: [-0.004059133072894, 0.928890463133302, 0.370065735566358, -0.014049352983967] # [w,x,y,z]
47 | #cam2_p_ic: [-0.05914814, 0.04309647, -0.03085167] # [x,y,z]
48 | #cam2_time_offset: -0.00397400522578
49 |
50 | # Feature noise (normalized coordinate standard deviation)
51 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
52 |
53 | #######
54 | # IMU #
55 | #######
56 |
57 | # MXR9500G/M accels (Astec)
58 | n_a: 0.0083 # Accel noise spectral density [m/s^2/sqrt(Hz)]
59 | n_ba: 0.00083 # Accel bias random walk [m/s^3/sqrt(Hz)]
60 | # ADXRS610 gyros (Astec)
61 | n_w: 0.0013 # Gyro noise spectral density [rad/s/sqrt(Hz)]
62 | n_bw: 0.00013 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
63 |
64 | # Max specific force norm threshold, after which accelerometer readings are detected as spikes. [m/s^2]
65 | a_m_max: 50.0
66 |
67 | #######
68 | # LRF #
69 | #######
70 |
71 | # Noise (standard deviation in m)
72 | sigma_range: 0.05
73 |
74 | ##############
75 | # Sun Sensor #
76 | ##############
77 |
78 | # Currently unused
79 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
80 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
81 |
82 | ###########
83 | # Tracker #
84 | ###########
85 |
86 | fast_detection_delta: 20
87 | non_max_supp: True
88 | block_half_length: 6
89 | margin: 6
90 | n_feat_min: 400
91 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
92 | outlier_method: 8
93 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
94 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
95 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
96 | # image resolution, and the image noise.
97 | outlier_param1: 0.3
98 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
99 | # (probability) that the estimated matrix is correct.
100 | outlier_param2: 0.99
101 | # 1 <=> no tiling
102 | n_tiles_h: 1
103 | n_tiles_w: 1
104 | max_feat_per_tile: 10000
105 |
106 | ########
107 | # EKLT #
108 | ########
109 |
110 | eklt_batch_size: 500
111 | eklt_update_every_n_events: 20
112 | eklt_displacement_px: 0.6
113 | eklt_patch_size: 35
114 | eklt_max_num_iterations: 25
115 | eklt_tracking_quality: 0.7
116 | eklt_bootstrap: "klt"
117 | eklt_lk_window_size: 20
118 | eklt_num_pyramidal_layers: 3
119 | eklt_scale: 4
120 | eklt_display_features: true
121 | eklt_display_feature_id: true
122 | eklt_display_feature_patches: true
123 | eklt_arrow_length: 5
124 |
125 | eklt_max_corners: 60
126 | eklt_min_corners: 35
127 | eklt_log_eps: 0.01
128 | eklt_use_linlog_scale: false
129 |
130 | eklt_first_image_t: -1
131 | eklt_tracks_file_txt: ""
132 |
133 | eklt_patch_timestamp_assignment: latest-event
134 |
135 | eklt_enable_outlier_removal: false
136 | eklt_outlier_method: 8
137 | eklt_outlier_param1: 0.6
138 | eklt_outlier_param2: 0.99
139 |
140 | eklt_ekf_feature_interpolation: linear-no-limit
141 | eklt_ekf_feature_extrapolation_limit: -1.0
142 | eklt_ekf_update_every_n: -1
143 | eklt_ekf_update_strategy: every-ros-event-message
144 | eklt_ekf_update_timestamp: patches-average
145 | eklt_harris_block_size: 3
146 | eklt_detection_min_distance: 25
147 | eklt_harris_k: 0
148 | eklt_harris_quality_level: 0.3
149 |
150 | #########
151 | # HASTE #
152 | #########
153 |
154 | haste_tracker_type: haste-correlation-star
155 | haste_patch_size: 31
156 | haste_max_corners: 60
157 | haste_min_corners: 35
158 | haste_bootstrap_from_frames: false
159 | haste_bootstrap_with_unit_length_of: true
160 |
161 | haste_enable_outlier_removal: true
162 | haste_outlier_method: 8
163 | haste_outlier_param1: 0.6
164 | haste_outlier_param2: 0.99
165 |
166 | haste_ekf_feature_interpolation: nearest-neighbor
167 | haste_ekf_feature_extrapolation_limit: -1.0
168 | haste_ekf_update_every_n: 3200
169 | haste_ekf_update_strategy: every-n-events
170 | haste_ekf_update_timestamp: patches-maximum
171 | haste_harris_block_size: 3
172 | haste_detection_min_distance: 15
173 | haste_harris_k: 0
174 | haste_harris_quality_level: 0.3
175 |
176 | ##############
177 | # SLAM-MSCKF #
178 | ##############
179 |
180 | # Number of poses in the sliding window
181 | n_poses_max: 10
182 |
183 | # Number of SLAM features
184 | n_slam_features_max: 25
185 |
186 | # Initial inverse depth of SLAM features [1/m]
187 | # (default: 1 / (2 * d_min) [Montiel, 2006])
188 | rho_0: 0.05
189 |
190 | # Initial standard deviation of SLAM inverse depth [1/m]
191 | # (default: 1 / (4 * d_min) [Montiel, 2006])
192 | sigma_rho_0: 0.025
193 |
194 | # Number of IEKF iterations (1 <=> EKF)
195 | iekf_iter: 1
196 |
197 | # Minimum baseline to trigger MSCKF measurement (pixels)
198 | msckf_baseline: 15.0
199 |
200 | # Minimum track length for a visual feature to be processed
201 | min_track_length: 10
202 |
203 | #######
204 | # EKF #
205 | #######
206 |
207 | # State buffer size (default: 250 states)
208 | state_buffer_size: 250
209 |
210 | # Expected difference between successive IMU sequence IDs.
211 | delta_seq_imu: 1
212 |
213 | # Tolerance for how far in the future/past the closest state request can be without returning an invalid state. [s]
214 | state_buffer_time_margin: 0.02
215 |
216 | # Gravity vector in world frame [m/s^2]
217 | g: [0.0, 0.0, -9.81] # [x,y,z]
218 |
219 | ########
220 | # Misc #
221 | ########
222 |
223 | # Timeout before 3D trajectory disappears in GUI (s)
224 | traj_timeout_gui: 3
225 |
226 | # flags to play rosbags
227 | /use_sim_time: True
228 |
229 | # Initialize at startup
230 | init_at_startup: False
231 |
232 | ##########
233 | # Events #
234 | ##########
235 |
236 | event_cam1_fx: 0.8333333333333333333
237 | event_cam1_fy: 1.1111111111111111111
238 | event_cam1_cx: 0.5
239 | event_cam1_cy: 0.5
240 | event_cam1_dist_model: FOV
241 | event_cam1_dist_params: [0.0]
242 | event_cam1_img_width: 240
243 | event_cam1_img_height: 180
244 | event_cam1_q_ic: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
245 | event_cam1_p_ic: [0.0, 0.0, 0.0] # [x,y,z]
246 | event_cam1_time_offset: 0.0
247 |
248 | event_accumulation_methode: 0
249 | event_accumulation_period: 0.1
250 | n_events_min: 15000
251 | n_events_max: 40000
252 | n_events_noise_detection_min: 40
253 | noise_event_rate: 20000
254 | event_norm_factor: 4.0
255 | correct_event_motion: False
256 |
--------------------------------------------------------------------------------
/params_esim_xplorer.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | p: [0.0, 0.0, 20.0] # [x,y,z]
6 | v: [1.0, 1.0, 0.0] # [x,y,z]
7 | q: [0.0, 1.0, 0.0, 0.0] # [w,x,y,z]
8 |
9 | # Initial IMU bias estimates
10 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
11 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
12 |
13 | # Initial standard deviation estimates [x,y,z]
14 | sigma_dp: [0.0, 0.0, 0.0] # [m]
15 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
16 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
17 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
18 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
19 |
20 | ###############
21 | # Calibration #
22 | ###############
23 |
24 | # Camera info published for ESIM rosbags:
25 |
26 | #height: 480
27 | #width: 640
28 | #distortion_model: "plumb_bob"
29 | #D: []
30 | #K: [320.0, 0.0, 320.0,
31 | # 0.0, 320.0, 240.0,
32 | # 0.0, 0.0, 1.0]
33 | #R: [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
34 | #P: [320.0, 0.0, 320.0, 0.0,
35 | # 0.0, 320.0, 240.0, 0.0,
36 | # 0.0, 0.0, 1.0, 0.0]
37 | #binning_x: 0
38 | #binning_y: 0
39 | #roi:
40 | # x_offset: 0
41 | # y_offset: 0
42 | # height: 0
43 | # width: 0
44 | # do_rectify: False
45 |
46 | cam1_fx: 0.6437500
47 | cam1_fy: 0.8583333
48 | cam1_cx: 0.5000000
49 | cam1_cy: 0.5000000
50 | cam1_dist_model: FOV
51 | cam1_dist_params: [0.0]
52 | cam1_img_width: 640
53 | cam1_img_height: 480
54 | cam1_q_ic: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
55 | cam1_p_ic: [0.0, 0.0, 0.0] # [x,y,z]
56 | cam1_time_offset: 0.0
57 |
58 | cam2_fx: 0.5
59 | cam2_fy: 0.66
60 | cam2_cx: 0.5
61 | cam2_cy: 0.5
62 | cam2_dist_model: FOV
63 | cam2_dist_params: [0.0]
64 | cam2_img_width: 640
65 | cam2_img_height: 480
66 | cam2_q_ic: [1, 0, 0, 0] # [w,x,y,z]
67 | #cam2_q_ic: [0.0000, -0.0000, -0.7071, 0.7071] # [w,x,y,z]
68 | cam2_p_ic: [0, 0, 0] # [x,y,z]
69 | cam2_time_offset: 0.0
70 |
71 | # Feature noise (normalized coordinate standard deviation)
72 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
73 |
74 | #######
75 | # IMU #
76 | #######
77 |
78 | # MXR9500G/M accels (Astec)
79 | n_a: 0.0083 # Accel noise spectral density [m/s^2/sqrt(Hz)]
80 | n_ba: 0.00083 # Accel bias random walk [m/s^3/sqrt(Hz)]
81 | # ADXRS610 gyros (Astec)
82 | n_w: 0.0013 # Gyro noise spectral density [rad/s/sqrt(Hz)]
83 | n_bw: 0.00013 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
84 |
85 | # Max specific force norm threshold, after which accelerometer readings are detected as spikes. [m/s^2]
86 | a_m_max: 50.0
87 |
88 | #######
89 | # LRF #
90 | #######
91 |
92 | # Noise (standard deviation in m)
93 | sigma_range: 0.05
94 |
95 | ##############
96 | # Sun Sensor #
97 | ##############
98 |
99 | # Currently unused
100 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
101 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
102 |
103 | ###########
104 | # Tracker #
105 | ###########
106 |
107 | fast_detection_delta: 20
108 | non_max_supp: True
109 | block_half_length: 20
110 | margin: 20
111 | n_feat_min: 400
112 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
113 | outlier_method: 8
114 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
115 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
116 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
117 | # image resolution, and the image noise.
118 | outlier_param1: 0.3
119 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
120 | # (probability) that the estimated matrix is correct.
121 | outlier_param2: 0.99
122 | # 1 <=> no tiling
123 | n_tiles_h: 1
124 | n_tiles_w: 1
125 | max_feat_per_tile: 10000
126 |
127 | ########
128 | # EKLT #
129 | ########
130 |
131 | eklt_batch_size: 500
132 | eklt_update_every_n_events: 20
133 | eklt_displacement_px: 0.6
134 | eklt_patch_size: 21
135 | eklt_max_num_iterations: 25
136 | eklt_tracking_quality: 0.7
137 | eklt_bootstrap: "klt"
138 | eklt_lk_window_size: 20
139 | eklt_num_pyramidal_layers: 3
140 | eklt_scale: 4
141 | eklt_display_features: true
142 | eklt_display_feature_id: true
143 | eklt_display_feature_patches: true
144 | eklt_arrow_length: 5
145 |
146 | eklt_max_corners: 60
147 | eklt_min_corners: 35
148 | eklt_log_eps: 0.01
149 | eklt_use_linlog_scale: false
150 |
151 | eklt_first_image_t: -1
152 | eklt_tracks_file_txt: ""
153 |
154 | eklt_patch_timestamp_assignment: latest-event
155 |
156 | eklt_enable_outlier_removal: false
157 | eklt_outlier_method: 8
158 | eklt_outlier_param1: 0.6
159 | eklt_outlier_param2: 0.99
160 |
161 | eklt_ekf_feature_interpolation: linear-no-limit
162 | eklt_ekf_feature_extrapolation_limit: 5.0
163 | eklt_ekf_update_every_n: -1
164 | eklt_ekf_update_strategy: every-ros-event-message
165 | eklt_ekf_update_timestamp: patches-average
166 | eklt_harris_block_size: 3
167 | eklt_detection_min_distance: 25
168 | eklt_harris_k: 0
169 | eklt_harris_quality_level: 0.3
170 |
171 | #########
172 | # HASTE #
173 | #########
174 |
175 | haste_tracker_type: haste-correlation-star
176 | haste_patch_size: 31
177 | haste_max_corners: 60
178 | haste_min_corners: 35
179 | haste_bootstrap_from_frames: false
180 | haste_bootstrap_with_unit_length_of: true
181 |
182 | haste_enable_outlier_removal: true
183 | haste_outlier_method: 8
184 | haste_outlier_param1: 0.6
185 | haste_outlier_param2: 0.99
186 |
187 | haste_ekf_feature_interpolation: nearest-neighbor
188 | haste_ekf_feature_extrapolation_limit: -1.0
189 | haste_ekf_update_every_n: 3200
190 | haste_ekf_update_strategy: every-n-events
191 | haste_ekf_update_timestamp: patches-maximum
192 | haste_harris_block_size: 3
193 | haste_detection_min_distance: 15
194 | haste_harris_k: 0
195 | haste_harris_quality_level: 0.3
196 |
197 | ##############
198 | # SLAM-MSCKF #
199 | ##############
200 |
201 | # Number of poses in the sliding window
202 | n_poses_max: 10
203 |
204 | # Number of SLAM features
205 | n_slam_features_max: 25
206 |
207 | # Initial inverse depth of SLAM features [1/m]
208 | # (default: 1 / (2 * d_min) [Montiel, 2006])
209 | rho_0: 0.05
210 |
211 | # Initial standard deviation of SLAM inverse depth [1/m]
212 | # (default: 1 / (4 * d_min) [Montiel, 2006])
213 | sigma_rho_0: 0.025
214 |
215 | # Number of IEKF iterations (1 <=> EKF)
216 | iekf_iter: 1
217 |
218 | # Minimum baseline to trigger MSCKF measurement (pixels)
219 | msckf_baseline: 15.0
220 |
221 | # Minimum track length for a visual feature to be processed
222 | min_track_length: 10
223 |
224 | #######
225 | # EKF #
226 | #######
227 |
228 | # State buffer size (default: 250 states)
229 | state_buffer_size: 250
230 |
231 | # Expected difference between successive IMU sequence IDs.
232 | delta_seq_imu: 1
233 |
234 | # Maximum time difference allowed between a state request and the closest buffered state before an invalid state is returned. [s]
235 | state_buffer_time_margin: 0.02
236 |
237 | # Gravity vector in world frame [m/s^2]
238 | g: [0.0, 0.0, -9.81] # [x,y,z]
239 |
240 | ########
241 | # Misc #
242 | ########
243 |
244 | # Timeout before 3D trajectory disappears in GUI (s)
245 | traj_timeout_gui: 3
246 |
247 | # Flags used when playing back rosbags
248 | /use_sim_time: True
249 |
250 | # Initialize at startup
251 | init_at_startup: False
252 |
253 | ##########
254 | # Events #
255 | ##########
256 |
257 | # Event camera calibration for DVxplorer without distortion
258 | event_cam1_fx: 0.6437500
259 | event_cam1_fy: 0.8583333
260 | event_cam1_cx: 0.5000000
261 | event_cam1_cy: 0.5000000
262 | event_cam1_dist_model: FOV
263 | event_cam1_dist_params: [0.0]
264 | event_cam1_img_width: 640
265 | event_cam1_img_height: 480
266 | event_cam1_q_ic: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
267 | event_cam1_p_ic: [0.0, 0.0, 0.0] # [x,y,z]
268 | event_cam1_time_offset: 0.0
269 |
270 | event_accumulation_methode: 0 # NOTE(review): key is spelled "methode" — presumably matches the consumer's expected key; do not rename without updating the code that reads it
271 | event_accumulation_period: 0.1
272 | n_events_min: 15000
273 | n_events_max: 40000
274 | n_events_noise_detection_min: 40
275 | noise_event_rate: 20000
276 | event_norm_factor: 4.0
277 | correct_event_motion: False
278 |
--------------------------------------------------------------------------------
/params_mars_yard.yaml:
--------------------------------------------------------------------------------
1 | /use_sim_time: true
2 | a_m_max: 88.29
3 | b_a:
4 | - 0.0
5 | - 0.0
6 | - 0.0
7 | b_w:
8 | - 0.0
9 | - 0.0
10 | - 0.0
11 | block_half_length: 6
12 |
13 |
14 | # Camera calibration for mv_extrinsics_2016-02-11-08-42-49
15 | cam1_fx: 0.466736650521
16 | cam1_fy: 0.730390741945
17 | cam1_cx: 0.488159288758
18 | cam1_cy: 0.469376440052
19 | cam1_dist_model: fov # NOTE(review): other configs use "FOV" (uppercase) — confirm the distortion-model parser is case-insensitive before relying on this
20 | cam1_dist_params: [0.9528150174424326]
21 | cam1_img_width: 752
22 | cam1_img_height: 480
23 | cam1_q_ic: [-0.008063997339346242, 0.999622102981051, -0.02524918431200863, -0.007287103006700131] #[w,x,y,z]
24 | cam1_p_ic: [-0.7408400097995479, -0.023650805694636692, -0.86111483612561] #[x,y,z]
25 | cam1_time_offset: -0.0220438178876
26 |
27 | #cam1_cx: 0.533839538103
28 | #cam1_cy: 0.452191783609
29 | #cam1_dist_model: radial-tangential
30 | #cam1_dist_params:
31 | #- -0.37964381989293516
32 | #- 0.16588122497853042
33 | #- 0.000719295985613572
34 | #- -0.0012078763628782995
35 | #cam1_fx: 0.925339173705
36 | #cam1_fy: 1.22248310329
37 | #cam1_img_height: 480
38 | #cam1_img_width: 640
39 | #cam1_p_ic:
40 | #- -0.21518750448932794
41 | #- 0.0007401427423516556
42 | #- -0.09215016543882257
43 | #cam1_q_ic:
44 | #- -0.024082500605671017
45 | #- 0.9997023667993531
46 | #- 0.00041708735658335357
47 | #- -0.0038777594673196705
48 | #cam1_time_offset: 0.0141923822342
49 | cam2_cx: 0.533839538103
50 | cam2_cy: 0.452191783609
51 | cam2_dist_model: radial-tangential
52 | cam2_dist_params:
53 | - -0.37964381989293516
54 | - 0.16588122497853042
55 | - 0.000719295985613572
56 | - -0.0012078763628782995
57 | cam2_fx: 0.925339173705
58 | cam2_fy: 1.22248310329
59 | cam2_img_height: 480
60 | cam2_img_width: 640
61 | cam2_p_ic:
62 | - -0.21518750448932794
63 | - 0.0007401427423516556
64 | - -0.09215016543882257
65 | cam2_q_ic:
66 | - -0.024082500605671017
67 | - 0.9997023667993531
68 | - 0.00041708735658335357
69 | - -0.0038777594673196705
70 | cam2_time_offset: 0.0141923822342
71 | correct_event_motion: false
72 | delta_seq_imu: 1
73 | eklt_arrow_length: 5
74 | eklt_batch_size: 500
75 | eklt_bootstrap: klt
76 | eklt_detection_min_distance: 40
77 | eklt_displacement_px: 0.6
78 | eklt_display_feature_id: false
79 | eklt_display_feature_patches: false
80 | eklt_display_features: false
81 | eklt_ekf_feature_extrapolation_limit: -1.0
82 | eklt_ekf_feature_interpolation: linear-no-limit
83 | eklt_ekf_update_every_n: 20000
84 | eklt_ekf_update_strategy: every-n-events
85 | eklt_ekf_update_timestamp: patches-maximum
86 | eklt_enable_outlier_removal: true
87 | eklt_first_image_t: -1
88 | eklt_harris_block_size: 3
89 | eklt_harris_k: 0.01
90 | eklt_harris_quality_level: 0.01
91 | eklt_lk_window_size: 20
92 | eklt_log_eps: 0.01
93 | eklt_max_corners: 70
94 | eklt_max_num_iterations: 10
95 | eklt_min_corners: 45
96 | eklt_num_pyramidal_layers: 3
97 | eklt_outlier_method: 8
98 | eklt_outlier_param1: 0.6
99 | eklt_outlier_param2: 0.99
100 | eklt_patch_size: 33
101 | eklt_patch_timestamp_assignment: accumulated-events-center
102 | eklt_scale: 4
103 | eklt_tracking_quality: 0.3
104 | eklt_tracks_file_txt: ''
105 | eklt_update_every_n_events: 20
106 | eklt_use_linlog_scale: false
107 | event_accumulation_methode: 0 # NOTE(review): key is spelled "methode" — presumably matches the consumer's expected key; do not rename without updating the code that reads it
108 | event_accumulation_period: 0.1
109 | event_cam1_cx: 0.550800297
110 | event_cam1_cy: 0.615070333
111 | event_cam1_dist_model: radial-tangential
112 | event_cam1_dist_params:
113 | - -0.3684363117977873
114 | - 0.1509472435566583
115 | - -0.0002961305343848646
116 | - -0.000759431726241032
117 | - 0
118 | event_cam1_fx: 0.829551527
119 | event_cam1_fy: 1.104604558
120 | event_cam1_img_height: 180
121 | event_cam1_img_width: 240
122 | event_cam1_p_ic:
123 | - 0.00674696422488
124 | - 0.0007279224709
125 | - 0.0342573613538
126 | event_cam1_q_ic:
127 | - 0.999965419952174
128 | - 0.00466020631918
129 | - -0.003190841088148
130 | - -0.006104089619662
131 | event_cam1_time_offset: 0.0
132 | event_motion_correction_delay: 10
133 | event_norm_factor: 4.0
134 | fast_detection_delta: 20
135 | g:
136 | - 0.0
137 | - 0.0
138 | - -9.81
139 | haste_bootstrap_from_frames: false
140 | haste_bootstrap_with_unit_length_of: true
141 | haste_detection_min_distance: 8
142 | haste_ekf_feature_extrapolation_limit: -1.0
143 | haste_ekf_feature_interpolation: nearest-neighbor
144 | haste_ekf_update_every_n: 3200
145 | haste_ekf_update_strategy: every-n-events
146 | haste_ekf_update_timestamp: patches-maximum
147 | haste_enable_outlier_removal: true
148 | haste_harris_block_size: 3
149 | haste_harris_k: 0
150 | haste_harris_quality_level: 0.3
151 | haste_max_corners: 60
152 | haste_min_corners: 45
153 | haste_outlier_method: 8
154 | haste_outlier_param1: 1.3
155 | haste_outlier_param2: 0.95
156 | haste_patch_size: 31
157 | haste_tracker_type: haste-correlation-star
158 | iekf_iter: 1
159 | init_at_startup: false
160 | margin: 6
161 | max_feat_per_tile: 10000
162 | min_track_length: 10
163 | msckf_baseline: 30.0
164 | n_a: 0.004316
165 | n_ba: 0.0004316
166 | n_bw: 2.0e-05
167 | n_events_max: 40000
168 | n_events_min: 15000
169 | n_events_noise_detection_min: 40
170 | n_feat_min: 400
171 | n_poses_max: 10
172 | n_slam_features_max: 25
173 | n_tiles_h: 1
174 | n_tiles_w: 1
175 | n_w: 0.0002
176 | noise_event_rate: 20000
177 | non_max_supp: true
178 | outlier_method: 8
179 | outlier_param1: 1.6
180 | outlier_param2: 0.99
181 | p:
182 | - 0.0
183 | - 0.0
184 | - 0.0
185 | q:
186 | - 0.991193058235731
187 | - -0.1257946472590912
188 | - 0.04137666040486585
189 | - 0.0
190 | q_sc:
191 | - 1.0
192 | - 0.0
193 | - 0.0
194 | - 0.0
195 | rho_0: 0.5
196 | sigma_dba:
197 | - 0.3
198 | - 0.3
199 | - 0.3
200 | sigma_dbw:
201 | - 6.0
202 | - 6.0
203 | - 6.0
204 | sigma_dp:
205 | - 0.0
206 | - 0.0
207 | - 0.0
208 | sigma_dtheta:
209 | - 3.0
210 | - 3.0
211 | - 3.0
212 | sigma_dv:
213 | - 0.05
214 | - 0.05
215 | - 0.05
216 | sigma_img: 0.02541924711215672
217 | sigma_range: 0.05
218 | sigma_rho_0: 0.125
219 | state_buffer_size: 500
220 | state_buffer_time_margin: 0.02
221 | traj_timeout_gui: 3
222 | v:
223 | - 0.0
224 | - 0.0
225 | - 0.0
226 | w_s:
227 | - 0.0
228 | - 0.0
229 | - 1.0
230 |
--------------------------------------------------------------------------------
/params_rpg_davis.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | # boxes 6DOF - (WIP)
6 | p: [0.3973783621825099, 1.4634966189993928, 1.277009988555527]
7 | v: [0.0, 0.0, 0.0] # [x,y,z]
8 | q: [0.0073897794554689862, -0.06762916364373475, 0.8856791184114311, -0.4592866061561111]
9 |
10 | # Initial IMU bias estimates
11 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
12 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
13 |
14 | # Initial standard deviation estimates [x,y,z]
15 | sigma_dp: [0.0, 0.0, 0.0] # [m]
16 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
17 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
18 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
19 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
20 |
21 | ###############
22 | # Calibration #
23 | ###############
24 |
25 | cam1_fx: 0.829551527
26 | cam1_fy: 1.104604558
27 | cam1_cx: 0.550800297
28 | cam1_cy: 0.615070333
29 | cam1_dist_model: radial-tangential
30 | cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
31 | cam1_img_width: 240
32 | cam1_img_height: 180
33 | cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
34 | cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
35 | cam1_time_offset: 0.0027626144716691903 # from Kalibr run on checkerboard sequence
36 |
37 | cam2_fx: 0.8307143111366176
38 | cam2_fy: 1.1094881524187095
39 | cam2_cx: 0.5573099196496173
40 | cam2_cy: 0.6332898330570906
41 | cam2_dist_model: FOV
42 | cam2_dist_params: [0.950711]
43 | cam2_img_width: 240
44 | cam2_img_height: 180
45 | cam2_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
46 | cam2_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
47 | cam2_time_offset: 0.0
48 |
49 | # Feature noise (normalized coordinate standard deviation)
50 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
51 |
52 | #######
53 | # IMU #
54 | #######
55 |
56 | ## Optimized for RPG Davis Data
57 | n_a: 0.004316 # Accel noise spectral density [m/s^2/sqrt(Hz)]
58 | n_ba: 0.0004316 # Accel bias random walk [m/s^3/sqrt(Hz)]
59 | n_w: 0.0002 # Gyro noise spectral density [rad/s/sqrt(Hz)]
60 | n_bw: 0.00002 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
61 |
62 | #n_a: 0.004316
63 | #n_ba: 0.0004316
64 | #n_w: 0.000225
65 | #n_bw: 0.0000225
66 |
67 | # Maximum specific force norm threshold; accelerometer readings above this value are treated as spikes. [m/s^2]
68 | a_m_max: 88.29 # 9g should be enough https://www.youtube.com/watch?v=_0nbRYIBVDQ
69 |
70 | #######
71 | # LRF #
72 | #######
73 |
74 | # Noise (standard deviation in m)
75 | sigma_range: 0.05
76 |
77 | ##############
78 | # Sun Sensor #
79 | ##############
80 |
81 | # Currently unused
82 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
83 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
84 |
85 | ###########
86 | # Tracker #
87 | ###########
88 |
89 | fast_detection_delta: 20
90 | non_max_supp: True
91 | block_half_length: 6
92 | margin: 6
93 | n_feat_min: 400
94 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
95 | outlier_method: 8
96 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
97 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
98 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
99 | # image resolution, and the image noise.
100 | outlier_param1: 0.6
101 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
102 | # (probability) that the estimated matrix is correct.
103 | outlier_param2: 0.99
104 | # 1 <=> no tiling
105 | n_tiles_h: 1
106 | n_tiles_w: 1
107 | max_feat_per_tile: 10000
108 |
109 | ########
110 | # EKLT #
111 | ########
112 |
113 | eklt_batch_size: 500
114 | eklt_update_every_n_events: 20
115 | eklt_displacement_px: 0.6
116 | eklt_patch_size: 19
117 | eklt_max_num_iterations: 25
118 | eklt_tracking_quality: 0.6
119 | eklt_bootstrap: "klt"
120 | eklt_lk_window_size: 20
121 | eklt_num_pyramidal_layers: 3
122 | eklt_scale: 4
123 | eklt_display_features: false
124 | eklt_display_feature_id: false
125 | eklt_display_feature_patches: false
126 | eklt_arrow_length: 5
127 |
128 | eklt_max_corners: 60
129 | eklt_min_corners: 45
130 | eklt_log_eps: 0.01
131 | eklt_use_linlog_scale: false
132 |
133 | eklt_first_image_t: -1
134 | eklt_tracks_file_txt: ""
135 |
136 | eklt_patch_timestamp_assignment: accumulated-events-center
137 |
138 | eklt_enable_outlier_removal: false
139 | eklt_outlier_method: 8
140 | eklt_outlier_param1: 0.6
141 | eklt_outlier_param2: 0.99
142 |
143 | eklt_ekf_feature_interpolation: linear-no-limit
144 | eklt_ekf_feature_extrapolation_limit: -1.0
145 | eklt_ekf_update_every_n: 2200
146 | eklt_ekf_update_strategy: every-n-events
147 | eklt_ekf_update_timestamp: patches-maximum
148 | eklt_harris_block_size: 3
149 | eklt_detection_min_distance: 9 # 10
150 | eklt_harris_k: 0.04 # 0.04
151 | eklt_harris_quality_level: 0.3 # 0.01
152 |
153 | #########
154 | # HASTE #
155 | #########
156 |
157 | haste_tracker_type: haste-correlation-star
158 | haste_patch_size: 31
159 | haste_max_corners: 60
160 | haste_min_corners: 45
161 | haste_bootstrap_from_frames: false
162 | haste_bootstrap_with_unit_length_of: true
163 |
164 | haste_enable_outlier_removal: true
165 | haste_outlier_method: 8
166 | haste_outlier_param1: 1.3
167 | haste_outlier_param2: 0.95
168 |
169 | haste_ekf_feature_interpolation: nearest-neighbor
170 | haste_ekf_feature_extrapolation_limit: -1.0
171 | haste_ekf_update_every_n: 3200
172 | haste_ekf_update_strategy: every-n-events
173 | haste_ekf_update_timestamp: patches-maximum
174 | haste_harris_block_size: 3
175 | haste_detection_min_distance: 8
176 | haste_harris_k: 0
177 | haste_harris_quality_level: 0.3
178 |
179 | ##############
180 | # SLAM-MSCKF #
181 | ##############
182 |
183 | # Number of poses in the sliding window
184 | n_poses_max: 10
185 |
186 | # Number of SLAM features
187 | n_slam_features_max: 15
188 |
189 | # Initial inverse depth of SLAM features [1/m]
190 | # (default: 1 / (2 * d_min) [Montiel, 2006])
191 | rho_0: 1.3
192 |
193 | # Initial standard deviation of SLAM inverse depth [1/m]
194 | # (default: 1 / (4 * d_min) [Montiel, 2006])
195 | sigma_rho_0: 0.65
196 |
197 | # Number of IEKF iterations (1 <=> EKF)
198 | iekf_iter: 1
199 |
200 | # Minimum baseline to trigger MSCKF measurement (pixels)
201 | msckf_baseline: 30.0
202 |
203 | # Minimum track length for a visual feature to be processed
204 | min_track_length: 10
205 |
206 | #######
207 | # EKF #
208 | #######
209 |
210 | # State buffer size (default: 250 states)
211 | state_buffer_size: 500
212 |
213 | # Expected difference between successive IMU sequence IDs.
214 | delta_seq_imu: 1
215 |
216 | # Maximum time difference allowed between a state request and the closest buffered state before an invalid state is returned. [s]
217 | state_buffer_time_margin: 0.02
218 |
219 | # Gravity vector in world frame [m/s^2]
220 | g: [0.0, 0.0, -9.81] # [x,y,z]
221 |
222 | ########
223 | # Misc #
224 | ########
225 |
226 | # Timeout before 3D trajectory disappears in GUI (s)
227 | traj_timeout_gui: 3
228 |
229 | # Flags used when playing back rosbags
230 | /use_sim_time: True
231 |
232 | # Initialize at startup
233 | init_at_startup: False
234 |
235 | ##########
236 | # Events #
237 | ##########
238 |
239 | event_cam1_fx: 0.829551527
240 | event_cam1_fy: 1.104604558
241 | event_cam1_cx: 0.550800297
242 | event_cam1_cy: 0.615070333
243 | event_cam1_dist_model: radial-tangential
244 | event_cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
245 | event_cam1_img_width: 240
246 | event_cam1_img_height: 180
247 | event_cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
248 | event_cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
249 | event_cam1_time_offset: 0.0
250 |
251 |
252 | event_accumulation_methode: 0 # NOTE(review): key is spelled "methode" — presumably matches the consumer's expected key; do not rename without updating the code that reads it
253 | event_accumulation_period: 0.1
254 | n_events_min: 15000
255 | n_events_max: 40000
256 | n_events_noise_detection_min: 40
257 | noise_event_rate: 20000
258 | event_norm_factor: 4.0
259 | correct_event_motion: False
260 |
--------------------------------------------------------------------------------
/params_rpg_davis_rotation.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | # boxes 6DOF - (WIP)
6 | p: [0.3973783621825099, 1.4634966189993928, 1.277009988555527]
7 | v: [0.0, 0.0, 0.0] # [x,y,z]
8 | q: [0.0073897794554689862, -0.06762916364373475, 0.8856791184114311, -0.4592866061561111]
9 |
10 | # Initial IMU bias estimates
11 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
12 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
13 |
14 | # Initial standard deviation estimates [x,y,z]
15 | sigma_dp: [0.0, 0.0, 0.0] # [m]
16 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
17 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
18 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
19 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
20 |
21 | ###############
22 | # Calibration #
23 | ###############
24 |
25 | cam1_fx: 0.829551527
26 | cam1_fy: 1.104604558
27 | cam1_cx: 0.550800297
28 | cam1_cy: 0.615070333
29 | cam1_dist_model: radial-tangential
30 | cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
31 | cam1_img_width: 240
32 | cam1_img_height: 180
33 | cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
34 | cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
35 | cam1_time_offset: 0.0027626144716691903 # from Kalibr run on checkerboard sequence
36 |
37 | cam2_fx: 0.8307143111366176
38 | cam2_fy: 1.1094881524187095
39 | cam2_cx: 0.5573099196496173
40 | cam2_cy: 0.6332898330570906
41 | cam2_dist_model: FOV
42 | cam2_dist_params: [0.950711]
43 | cam2_img_width: 240
44 | cam2_img_height: 180
45 | cam2_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
46 | cam2_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
47 | cam2_time_offset: 0.0
48 |
49 | # Feature noise (normalized coordinate standard deviation)
50 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
51 |
52 | #######
53 | # IMU #
54 | #######
55 |
56 | ## Optimized for RPG Davis Data
57 | #n_a: 0.004316 # Accel noise spectral density [m/s^2/sqrt(Hz)]
58 | #n_ba: 0.0004316 # Accel bias random walk [m/s^3/sqrt(Hz)]
59 | #n_w: 0.0002 # Gyro noise spectral density [rad/s/sqrt(Hz)]
60 | #n_bw: 0.00002 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
61 |
62 | n_a: 0.004316
63 | n_ba: 0.0004316
64 | n_w: 0.000225
65 | n_bw: 0.0000225
66 |
67 | # Maximum specific force norm threshold; accelerometer readings above this value are treated as spikes. [m/s^2]
68 | a_m_max: 88.29 # 9g should be enough https://www.youtube.com/watch?v=_0nbRYIBVDQ
69 |
70 | #######
71 | # LRF #
72 | #######
73 |
74 | # Noise (standard deviation in m)
75 | sigma_range: 0.05
76 |
77 | ##############
78 | # Sun Sensor #
79 | ##############
80 |
81 | # Currently unused
82 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
83 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
84 |
85 | ###########
86 | # Tracker #
87 | ###########
88 |
89 | fast_detection_delta: 20
90 | non_max_supp: True
91 | block_half_length: 6
92 | margin: 6
93 | n_feat_min: 400
94 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
95 | outlier_method: 8
96 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
97 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
98 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
99 | # image resolution, and the image noise.
100 | outlier_param1: 0.6
101 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
102 | # (probability) that the estimated matrix is correct.
103 | outlier_param2: 0.99
104 | # 1 <=> no tiling
105 | n_tiles_h: 1
106 | n_tiles_w: 1
107 | max_feat_per_tile: 10000
108 |
109 | ########
110 | # EKLT #
111 | ########
112 |
113 | eklt_batch_size: 500
114 | eklt_update_every_n_events: 20
115 | eklt_displacement_px: 0.6
116 | eklt_patch_size: 17
117 | eklt_max_num_iterations: 25
118 | eklt_tracking_quality: 0.6
119 | eklt_bootstrap: "klt"
120 | eklt_lk_window_size: 20
121 | eklt_num_pyramidal_layers: 3
122 | eklt_scale: 4
123 | eklt_display_features: false
124 | eklt_display_feature_id: false
125 | eklt_display_feature_patches: false
126 | eklt_arrow_length: 5
127 |
128 | eklt_max_corners: 100
129 | eklt_min_corners: 60
130 | eklt_log_eps: 0.01
131 | eklt_use_linlog_scale: false
132 |
133 | eklt_first_image_t: -1
134 | eklt_tracks_file_txt: ""
135 |
136 | eklt_patch_timestamp_assignment: accumulated-events-center
137 |
138 | eklt_enable_outlier_removal: false
139 | eklt_outlier_method: 8
140 | eklt_outlier_param1: 0.6
141 | eklt_outlier_param2: 0.99
142 |
143 | eklt_ekf_feature_interpolation: linear-no-limit
144 | eklt_ekf_feature_extrapolation_limit: -1.0
145 | eklt_ekf_update_every_n: 2200
146 | eklt_ekf_update_strategy: every-n-events
147 | eklt_ekf_update_timestamp: patches-maximum
148 | eklt_harris_block_size: 3
149 | eklt_detection_min_distance: 8 # 10
150 | eklt_harris_k: 0
151 | eklt_harris_quality_level: 0.15 # 0.01
152 |
153 | #########
154 | # HASTE #
155 | #########
156 |
157 | haste_tracker_type: haste-correlation-star
158 | haste_patch_size: 31
159 | haste_max_corners: 60
160 | haste_min_corners: 45
161 | haste_bootstrap_from_frames: false
162 | haste_bootstrap_with_unit_length_of: true
163 |
164 | haste_enable_outlier_removal: true
165 | haste_outlier_method: 8
166 | haste_outlier_param1: 1.3
167 | haste_outlier_param2: 0.98
168 |
169 | haste_ekf_feature_interpolation: nearest-neighbor
170 | haste_ekf_feature_extrapolation_limit: -1.0
171 | haste_ekf_update_every_n: 9200
172 | haste_ekf_update_strategy: every-n-events
173 | haste_ekf_update_timestamp: patches-maximum
174 | haste_harris_block_size: 3
175 | haste_detection_min_distance: 8
176 | haste_harris_k: 0.1
177 | haste_harris_quality_level: 0.01
178 |
179 | ##############
180 | # SLAM-MSCKF #
181 | ##############
182 |
183 | # Number of poses in the sliding window
184 | n_poses_max: 10
185 |
186 | # Number of SLAM features
187 | n_slam_features_max: 15
188 |
189 | # Initial inverse depth of SLAM features [1/m]
190 | # (default: 1 / (2 * d_min) [Montiel, 2006])
191 | rho_0: 1.3
192 |
193 | # Initial standard deviation of SLAM inverse depth [1/m]
194 | # (default: 1 / (4 * d_min) [Montiel, 2006])
195 | sigma_rho_0: 0.65
196 |
197 | # Number of IEKF iterations (1 <=> EKF)
198 | iekf_iter: 1
199 |
200 | # Minimum baseline to trigger MSCKF measurement (pixels)
201 | msckf_baseline: 30.0
202 |
203 | # Minimum track length for a visual feature to be processed
204 | min_track_length: 10
205 |
206 | #######
207 | # EKF #
208 | #######
209 |
210 | # State buffer size (default: 250 states)
211 | state_buffer_size: 500
212 |
213 | # Expected difference between successive IMU sequence IDs.
214 | delta_seq_imu: 1
215 |
216 | # Maximum time difference allowed between a state request and the closest buffered state before an invalid state is returned. [s]
217 | state_buffer_time_margin: 0.02
218 |
219 | # Gravity vector in world frame [m/s^2]
220 | g: [0.0, 0.0, -9.81] # [x,y,z]
221 |
222 | ########
223 | # Misc #
224 | ########
225 |
226 | # Timeout before 3D trajectory disappears in GUI (s)
227 | traj_timeout_gui: 3
228 |
229 | # Flags used when playing back rosbags
230 | /use_sim_time: True
231 |
232 | # Initialize at startup
233 | init_at_startup: False
234 |
235 | ##########
236 | # Events #
237 | ##########
238 |
239 | event_cam1_fx: 0.829551527
240 | event_cam1_fy: 1.104604558
241 | event_cam1_cx: 0.550800297
242 | event_cam1_cy: 0.615070333
243 | event_cam1_dist_model: radial-tangential
244 | event_cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
245 | event_cam1_img_width: 240
246 | event_cam1_img_height: 180
247 | event_cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
248 | event_cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
249 | event_cam1_time_offset: 0.0
250 |
251 |
252 | event_accumulation_methode: 0 # NOTE(review): key is spelled "methode" — presumably matches the consumer's expected key; do not rename without updating the code that reads it
253 | event_accumulation_period: 0.1
254 | n_events_min: 15000
255 | n_events_max: 40000
256 | n_events_noise_detection_min: 40
257 | noise_event_rate: 20000
258 | event_norm_factor: 4.0
259 | correct_event_motion: False
260 |
--------------------------------------------------------------------------------
/params_rpg_fpv.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | p: [0.3973783621825099, 1.4634966189993928, 1.277009988555527]
6 | v: [0.0, 0.0, 0.0] # [x,y,z]
7 | q: [0.0073897794554689862, -0.06762916364373475, 0.8856791184114311, -0.4592866061561111]
8 |
9 | # Initial IMU bias estimates
10 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
11 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
12 |
13 | # Initial standard deviation estimates [x,y,z]
14 | sigma_dp: [0.0, 0.0, 0.0] # [m]
15 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
16 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
17 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
18 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
19 |
20 | ###############
21 | # Calibration #
22 | ###############
23 |
24 | cam1_fx: 0.5002309156507843
25 | cam1_fy: 0.6656671073341341
26 | cam1_cx: 0.4719951933816623
27 | cam1_cy: 0.5192265112426236
28 | cam1_dist_model: equidistant
29 | cam1_dist_params: [-0.03252275347038443, 0.0010042799356776398, -0.0048537750326187136, 0.0014604134198771906]
30 | cam1_img_width: 346
31 | cam1_img_height: 260
32 | cam1_q_ic: [ -0.99999034, 0.00117482, -0.00392135, -0.00160356] # [w, x, y, z]
33 | cam1_p_ic: [-0.00109897, 0.00261366, 0.02223472] # [x,y,z]
34 | cam1_time_offset: 0.005733889300479769
35 |
36 | cam2_fx: 0.8307143111366176
37 | cam2_fy: 1.1094881524187095
38 | cam2_cx: 0.5573099196496173
39 | cam2_cy: 0.6332898330570906
40 | cam2_dist_model: FOV
41 | cam2_dist_params: [0.950711]
42 | cam2_img_width: 240
43 | cam2_img_height: 180
44 | cam2_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
45 | cam2_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
46 | cam2_time_offset: 0.0
47 |
48 | # Feature noise (normalized coordinate standard deviation)
49 | sigma_img: 0.028888392540119216
50 |
51 |
52 | #######
53 | # IMU #
54 | #######
55 |
56 | # calculated from Kalibr output
57 | n_a: 0.0031622776601683794 # Accel noise spectral density [m/s^2/sqrt(Hz)]
58 | n_ba: 6.324555320336759e-05 # Accel bias random walk [m/s^3/sqrt(Hz)]
59 | n_w: 0.0015811388300841897 # Gyro noise spectral density [rad/s/sqrt(Hz)]
60 | n_bw: 1.2649110640673519e-06 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
61 |
62 | # Maximum specific force norm threshold; accelerometer readings above this value are treated as spikes. [m/s^2]
63 | a_m_max: 88.29 # 9g should be enough https://www.youtube.com/watch?v=_0nbRYIBVDQ
64 |
65 | #######
66 | # LRF #
67 | #######
68 |
69 | # Noise (standard deviation in m)
70 | sigma_range: 0.05
71 |
72 | ##############
73 | # Sun Sensor #
74 | ##############
75 |
76 | # Currently unused
77 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
78 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
79 |
80 | ###########
81 | # Tracker #
82 | ###########
83 |
84 | fast_detection_delta: 20
85 | non_max_supp: True
86 | block_half_length: 6
87 | margin: 6
88 | n_feat_min: 400
89 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
90 | outlier_method: 8
91 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
92 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
93 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
94 | # image resolution, and the image noise.
95 | outlier_param1: 0.3
96 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
97 | # (probability) that the estimated matrix is correct.
98 | outlier_param2: 0.99
99 | # 1 <=> no tiling
100 | n_tiles_h: 1
101 | n_tiles_w: 1
102 | max_feat_per_tile: 10000
103 |
104 | ########
105 | # EKLT #
106 | ########
107 |
108 | eklt_batch_size: 500
109 | eklt_update_every_n_events: 20
110 | eklt_displacement_px: 0.6
111 | eklt_patch_size: 17
112 | eklt_max_num_iterations: 10
113 | eklt_tracking_quality: 0.6
114 | eklt_bootstrap: "klt"
115 | eklt_lk_window_size: 20
116 | eklt_num_pyramidal_layers: 3
117 | eklt_scale: 4
118 | eklt_display_features: true
119 | eklt_display_feature_id: true
120 | eklt_display_feature_patches: true
121 | eklt_arrow_length: 5
122 |
123 | eklt_max_corners: 60
124 | eklt_min_corners: 45
125 | eklt_log_eps: 0.01
126 | eklt_use_linlog_scale: false
127 |
128 | eklt_first_image_t: -1
129 | eklt_tracks_file_txt: ""
130 |
131 | eklt_patch_timestamp_assignment: accumulated-events-center
132 |
133 | eklt_enable_outlier_removal: false
134 | eklt_outlier_method: 8
135 | eklt_outlier_param1: 0.6
136 | eklt_outlier_param2: 0.99
137 |
138 | eklt_ekf_feature_interpolation: linear-no-limit
139 | eklt_ekf_feature_extrapolation_limit: -1.0
140 | eklt_ekf_update_every_n: 2200
141 | eklt_ekf_update_strategy: every-n-events
142 | eklt_ekf_update_timestamp: patches-maximum
143 | eklt_harris_block_size: 3
144 | eklt_detection_min_distance: 8 # 10
145 | eklt_harris_k: 0 # 0.04
146 | eklt_harris_quality_level: 0.3 # 0.01
147 |
148 | #########
149 | # HASTE #
150 | #########
151 |
152 | haste_tracker_type: haste-correlation-star
153 | haste_patch_size: 31
154 | haste_max_corners: 60
155 | haste_min_corners: 45
156 | haste_bootstrap_from_frames: false
157 | haste_bootstrap_with_unit_length_of: true
158 |
159 | haste_enable_outlier_removal: true
160 | haste_outlier_method: 8
161 | haste_outlier_param1: 0.6
162 | haste_outlier_param2: 0.95
163 |
164 | haste_ekf_feature_interpolation: nearest-neighbor
165 | haste_ekf_feature_extrapolation_limit: -1.0
166 | haste_ekf_update_every_n: 3200
167 | haste_ekf_update_strategy: every-n-events
168 | haste_ekf_update_timestamp: patches-maximum
169 | haste_harris_block_size: 3
170 | haste_detection_min_distance: 8
171 | haste_harris_k: 0
172 | haste_harris_quality_level: 0.3
173 |
174 | ##############
175 | # SLAM-MSCKF #
176 | ##############
177 |
178 | # Number of poses in the sliding window
179 | n_poses_max: 10
180 |
181 | # Number of SLAM features
182 | n_slam_features_max: 25
183 |
184 | # Initial inverse depth of SLAM features [1/m]
185 | # (default: 1 / (2 * d_min) [Montiel, 2006])
186 | rho_0: 1.3
187 |
188 | # Initial standard deviation of SLAM inverse depth [1/m]
189 | # (default: 1 / (4 * d_min) [Montiel, 2006])
190 | sigma_rho_0: 0.65
191 |
192 | # Number of IEKF iterations (1 <=> EKF)
193 | iekf_iter: 1
194 |
195 | # Minimum baseline to trigger MSCKF measurement (pixels)
196 | msckf_baseline: 15.0
197 |
198 | # Minimum track length for a visual feature to be processed
199 | min_track_length: 10
200 |
201 | #######
202 | # EKF #
203 | #######
204 |
205 | # State buffer size (default: 250 states)
206 | state_buffer_size: 500
207 |
208 | # Expected difference between successive IMU sequence IDs.
209 | delta_seq_imu: 1
210 |
211 | # Tolerance for how far in the future/past the closest state request can be without returning an invalid state. [s]
212 | state_buffer_time_margin: 0.02
213 |
214 | # Gravity vector in world frame [m/s^2]
215 | g: [0.0, 0.0, -9.81] # [x,y,z]
216 |
217 | ########
218 | # Misc #
219 | ########
220 |
221 | # Timeout before 3D trajectory disappears in GUI (s)
222 | traj_timeout_gui: 3
223 |
224 | # flags to play rosbags
225 | /use_sim_time: True
226 |
227 | # Initialize at startup
228 | init_at_startup: False
229 |
230 | ##########
231 | # Events #
232 | ##########
233 |
234 | event_cam1_fx: 0.829551527
235 | event_cam1_fy: 1.104604558
236 | event_cam1_cx: 0.550800297
237 | event_cam1_cy: 0.615070333
238 | event_cam1_dist_model: radial-tangential
239 | event_cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
240 | event_cam1_img_width: 240
241 | event_cam1_img_height: 180
242 | event_cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
243 | event_cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
244 | event_cam1_time_offset: 0.0
245 |
246 |
247 | event_accumulation_methode: 0
248 | event_accumulation_period: 0.1
249 | n_events_min: 15000
250 | n_events_max: 40000
251 | n_events_noise_detection_min: 40
252 | noise_event_rate: 20000
253 | event_norm_factor: 4.0
254 | correct_event_motion: False
255 |
--------------------------------------------------------------------------------
/params_wells.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | p: [0.0, 0.0, 0.0]
6 | v: [0.0, 0.0, 0.0] # [x,y,z]
7 | q: [1.0, 0.0, 0.0, 0.0] # [w, x, y, z]
8 |
9 | # Initial IMU bias estimates
10 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
11 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
12 |
13 | # Initial standard deviation estimates [x,y,z]
14 | sigma_dp: [0.0, 0.0, 0.0] # [m]
15 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
16 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
17 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
18 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
19 |
20 | ###############
21 | # Calibration #
22 | ###############
23 |
# Camera calibration for the DAVIS JPL experiments (July 29th)
25 | cam1_fx: 0.768355396282
26 | cam1_fy: 1.01195919222
27 | cam1_cx: 0.51616891455
28 | cam1_cy: 0.427496165375
29 | cam1_dist_model: fov
30 | cam1_dist_params: [1.0055850862383553]
31 | cam1_img_width: 346
32 | cam1_img_height: 260
33 | cam1_q_ic: [-0.9999606049044857, -0.008547063359323525, 0.0009018869256839605, 0.002218771497939202] #[w,x,y,z]
34 | cam1_p_ic: [-0.002467518063158655, -0.017257545834253946, -0.061161457247268326] #[x,y,z]
35 | cam1_time_offset: 0.00247114850605
36 |
37 | cam2_fx: 0.8307143111366176
38 | cam2_fy: 1.1094881524187095
39 | cam2_cx: 0.5573099196496173
40 | cam2_cy: 0.6332898330570906
41 | cam2_dist_model: FOV
42 | cam2_dist_params: [0.950711]
43 | cam2_img_width: 240
44 | cam2_img_height: 180
45 | cam2_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
46 | cam2_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
47 | cam2_time_offset: 0.0
48 |
49 | # Feature noise (normalized coordinate standard deviation)
50 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
51 |
52 | #######
53 | # IMU #
54 | #######
55 |
56 | ## Optimized for RPG Davis Data
57 | n_a: 0.004316 # Accel noise spectral density [m/s^2/sqrt(Hz)]
58 | n_ba: 0.0004316 # Accel bias random walk [m/s^3/sqrt(Hz)]
59 | n_w: 0.0002 # Gyro noise spectral density [rad/s/sqrt(Hz)]
60 | n_bw: 0.00002 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
61 |
62 | #n_a: 0.004316
63 | #n_ba: 0.0004316
64 | #n_w: 0.000225
65 | #n_bw: 0.0000225
66 |
67 | # Max specific force norm threshold, after which accelerometer readings are detected as spikes. [m/s^2]
68 | a_m_max: 88.29 # 9g should be enough https://www.youtube.com/watch?v=_0nbRYIBVDQ
69 |
70 | #######
71 | # LRF #
72 | #######
73 |
74 | # Noise (standard deviation in m)
75 | sigma_range: 0.05
76 |
77 | ##############
78 | # Sun Sensor #
79 | ##############
80 |
81 | # Currently unused
82 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
83 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
84 |
85 | ###########
86 | # Tracker #
87 | ###########
88 |
89 | fast_detection_delta: 20
90 | non_max_supp: True
91 | block_half_length: 6
92 | margin: 6
93 | n_feat_min: 400
94 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
95 | outlier_method: 8
96 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
97 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
98 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
99 | # image resolution, and the image noise.
100 | outlier_param1: 0.6
101 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
102 | # (probability) that the estimated matrix is correct.
103 | outlier_param2: 0.99
104 | # 1 <=> no tiling
105 | n_tiles_h: 1
106 | n_tiles_w: 1
107 | max_feat_per_tile: 10000
108 |
109 | ########
110 | # EKLT #
111 | ########
112 |
113 | eklt_batch_size: 800
114 | eklt_update_every_n_events: 20
115 | eklt_displacement_px: 0.6
116 | eklt_patch_size: 31
117 | eklt_max_num_iterations: 25
118 | eklt_tracking_quality: 0.3
119 | eklt_bootstrap: "klt"
120 | eklt_lk_window_size: 20
121 | eklt_num_pyramidal_layers: 3
122 | eklt_scale: 4
123 | eklt_display_features: false
124 | eklt_display_feature_id: false
125 | eklt_display_feature_patches: false
126 | eklt_arrow_length: 5
127 |
128 | eklt_max_corners: 60
129 | eklt_min_corners: 45
130 | eklt_log_eps: 0.01
131 | eklt_use_linlog_scale: false
132 |
133 | eklt_first_image_t: -1
134 | eklt_tracks_file_txt: ""
135 |
136 | eklt_patch_timestamp_assignment: accumulated-events-center
137 |
138 | eklt_enable_outlier_removal: false
139 | eklt_outlier_method: 8
140 | eklt_outlier_param1: 0.6
141 | eklt_outlier_param2: 0.99
142 |
143 | eklt_ekf_feature_interpolation: linear-no-limit
144 | eklt_ekf_feature_extrapolation_limit: -1.0
145 | eklt_ekf_update_every_n: 2200
146 | eklt_ekf_update_strategy: every-n-events
147 | eklt_ekf_update_timestamp: patches-maximum
148 | eklt_harris_block_size: 3
149 | eklt_detection_min_distance: 9 # 10
150 | eklt_harris_k: 0.04 # 0.04
151 | eklt_harris_quality_level: 0.3 # 0.01
152 |
153 | #########
154 | # HASTE #
155 | #########
156 |
157 | haste_tracker_type: haste-correlation-star
158 | haste_patch_size: 31
159 | haste_max_corners: 60
160 | haste_min_corners: 45
161 | haste_bootstrap_from_frames: false
162 | haste_bootstrap_with_unit_length_of: true
163 |
164 | haste_enable_outlier_removal: true
165 | haste_outlier_method: 8
166 | haste_outlier_param1: 1.3
167 | haste_outlier_param2: 0.95
168 |
169 | haste_ekf_feature_interpolation: nearest-neighbor
170 | haste_ekf_feature_extrapolation_limit: -1.0
171 | haste_ekf_update_every_n: 3200
172 | haste_ekf_update_strategy: every-n-events
173 | haste_ekf_update_timestamp: patches-maximum
174 | haste_harris_block_size: 3
175 | haste_detection_min_distance: 8
176 | haste_harris_k: 0
177 | haste_harris_quality_level: 0.3
178 |
179 | ##############
180 | # SLAM-MSCKF #
181 | ##############
182 |
183 | # Number of poses in the sliding window
184 | n_poses_max: 10
185 |
186 | # Number of SLAM features
187 | n_slam_features_max: 15
188 |
189 | # Initial inverse depth of SLAM features [1/m]
190 | # (default: 1 / (2 * d_min) [Montiel, 2006])
191 | rho_0: 0.7
192 |
193 | # Initial standard deviation of SLAM inverse depth [1/m]
194 | # (default: 1 / (4 * d_min) [Montiel, 2006])
195 | sigma_rho_0: 0.35
196 |
197 | # Number of IEKF iterations (1 <=> EKF)
198 | iekf_iter: 1
199 |
200 | # Minimum baseline to trigger MSCKF measurement (pixels)
201 | msckf_baseline: 30.0
202 |
203 | # Minimum track length for a visual feature to be processed
204 | min_track_length: 10
205 |
206 | #######
207 | # EKF #
208 | #######
209 |
210 | # State buffer size (default: 250 states)
211 | state_buffer_size: 500
212 |
213 | # Expected difference between successive IMU sequence IDs.
214 | delta_seq_imu: 1
215 |
216 | # Tolerance for how far in the future/past the closest state request can be without returning an invalid state. [s]
217 | state_buffer_time_margin: 0.02
218 |
219 | # Gravity vector in world frame [m/s^2]
220 | g: [0.0, 0.0, -9.81] # [x,y,z]
221 |
222 | ########
223 | # Misc #
224 | ########
225 |
226 | # Timeout before 3D trajectory disappears in GUI (s)
227 | traj_timeout_gui: 3
228 |
229 | # flags to play rosbags
230 | /use_sim_time: True
231 |
232 | # Initialize at startup
233 | init_at_startup: False
234 |
235 | ##########
236 | # Events #
237 | ##########
238 |
239 | event_cam1_fx: 0.829551527
240 | event_cam1_fy: 1.104604558
241 | event_cam1_cx: 0.550800297
242 | event_cam1_cy: 0.615070333
243 | event_cam1_dist_model: radial-tangential
244 | event_cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
245 | event_cam1_img_width: 240
246 | event_cam1_img_height: 180
247 | event_cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
248 | event_cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
249 | event_cam1_time_offset: 0.0
250 |
251 |
252 | event_accumulation_methode: 0
253 | event_accumulation_period: 0.1
254 | n_events_min: 15000
255 | n_events_max: 40000
256 | n_events_noise_detection_min: 40
257 | noise_event_rate: 20000
258 | event_norm_factor: 4.0
259 | correct_event_motion: False
260 |
--------------------------------------------------------------------------------
/params_wells_gt.yaml:
--------------------------------------------------------------------------------
1 | ###########################
2 | # Initial state estimates #
3 | ###########################
4 |
5 | p: [0.0, 0.0, 0.0]
6 | v: [0.0, 0.0, 0.0] # [x,y,z]
7 | q: [1.0, 0.0, 0.0, 0.0] # [w, x, y, z]
8 |
9 | # Initial IMU bias estimates
10 | b_w: [0.0, 0.0, 0.0] # [x,y,z]
11 | b_a: [0.0, 0.0, 0.0] # [x,y,z]
12 |
13 | # Initial standard deviation estimates [x,y,z]
14 | sigma_dp: [0.0, 0.0, 0.0] # [m]
15 | sigma_dv: [0.05, 0.05, 0.05] # [m/s]
16 | sigma_dtheta: [3.0, 3.0, 3.0] # [deg]
17 | sigma_dbw: [6.0, 6.0, 6.0] # [deg/s]
18 | sigma_dba: [0.3, 0.3, 0.3] # [m/s^2]
19 |
20 | ###############
21 | # Calibration #
22 | ###############
23 |
# Camera calibration for the DAVIS JPL experiments (July 29th)
25 | cam1_fx: 0.768355396282
26 | cam1_fy: 1.01195919222
27 | cam1_cx: 0.51616891455
28 | cam1_cy: 0.427496165375
29 | cam1_dist_model: fov
30 | cam1_dist_params: [1.0055850862383553]
31 | cam1_img_width: 346
32 | cam1_img_height: 260
33 | cam1_q_ic: [-0.9999606049044857, -0.008547063359323525, 0.0009018869256839605, 0.002218771497939202] #[w,x,y,z]
34 | cam1_p_ic: [-0.002467518063158655, -0.017257545834253946, -0.061161457247268326] #[x,y,z]
35 | cam1_time_offset: 0.00247114850605
36 |
37 | cam2_fx: 0.8307143111366176
38 | cam2_fy: 1.1094881524187095
39 | cam2_cx: 0.5573099196496173
40 | cam2_cy: 0.6332898330570906
41 | cam2_dist_model: FOV
42 | cam2_dist_params: [0.950711]
43 | cam2_img_width: 240
44 | cam2_img_height: 180
45 | cam2_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
46 | cam2_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
47 | cam2_time_offset: 0.0
48 |
49 | # Feature noise (normalized coordinate standard deviation)
50 | sigma_img: 0.02541924711215672 # sqrt(0.00012922762474977756 * 5.0)
51 |
52 | #######
53 | # IMU #
54 | #######
55 |
56 | ## Optimized for RPG Davis Data
57 | n_a: 0.004316 # Accel noise spectral density [m/s^2/sqrt(Hz)]
58 | n_ba: 0.0004316 # Accel bias random walk [m/s^3/sqrt(Hz)]
59 | n_w: 0.0002 # Gyro noise spectral density [rad/s/sqrt(Hz)]
60 | n_bw: 0.00002 # Gyro bias random walk [rad/s^2/sqrt(Hz)]
61 |
62 | #n_a: 0.004316
63 | #n_ba: 0.0004316
64 | #n_w: 0.000225
65 | #n_bw: 0.0000225
66 |
67 | # Max specific force norm threshold, after which accelerometer readings are detected as spikes. [m/s^2]
68 | a_m_max: 88.29 # 9g should be enough https://www.youtube.com/watch?v=_0nbRYIBVDQ
69 |
70 | #######
71 | # LRF #
72 | #######
73 |
74 | # Noise (standard deviation in m)
75 | sigma_range: 0.05
76 |
77 | ##############
78 | # Sun Sensor #
79 | ##############
80 |
81 | # Currently unused
82 | q_sc: [1.0, 0.0, 0.0, 0.0] # [w,x,y,z]
83 | w_s: [0.0, 0.0, 1.0] # [x,y,z]
84 |
85 | ###########
86 | # Tracker #
87 | ###########
88 |
89 | fast_detection_delta: 20
90 | non_max_supp: True
91 | block_half_length: 6
92 | margin: 6
93 | n_feat_min: 400
94 | # RANSAC: 8 / LMEDS: 4 (see OpenCV doc)
95 | outlier_method: 8
96 | # Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels,
97 | # beyond which the point is considered an outlier and is not used for computing the final fundamental
98 | # matrix. It can be set to something like 1-3, depending on the accuracy of the point localization,
99 | # image resolution, and the image noise.
100 | outlier_param1: 0.6
101 | # Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of confidence
102 | # (probability) that the estimated matrix is correct.
103 | outlier_param2: 0.99
104 | # 1 <=> no tiling
105 | n_tiles_h: 1
106 | n_tiles_w: 1
107 | max_feat_per_tile: 10000
108 |
109 | ########
110 | # EKLT #
111 | ########
112 |
113 | eklt_batch_size: 800
114 | eklt_update_every_n_events: 20
115 | eklt_displacement_px: 0.6
116 | eklt_patch_size: 31
117 | eklt_max_num_iterations: 25
118 | eklt_tracking_quality: 0.3
119 | eklt_bootstrap: "klt"
120 | eklt_lk_window_size: 20
121 | eklt_num_pyramidal_layers: 3
122 | eklt_scale: 4
123 | eklt_display_features: false
124 | eklt_display_feature_id: false
125 | eklt_display_feature_patches: false
126 | eklt_arrow_length: 5
127 |
128 | eklt_max_corners: 60
129 | eklt_min_corners: 45
130 | eklt_log_eps: 0.01
131 | eklt_use_linlog_scale: false
132 |
133 | eklt_first_image_t: -1
134 | eklt_tracks_file_txt: ""
135 |
136 | eklt_patch_timestamp_assignment: accumulated-events-center
137 |
138 | eklt_enable_outlier_removal: false
139 | eklt_outlier_method: 8
140 | eklt_outlier_param1: 0.6
141 | eklt_outlier_param2: 0.99
142 |
143 | eklt_ekf_feature_interpolation: linear-no-limit
144 | eklt_ekf_feature_extrapolation_limit: -1.0
145 | eklt_ekf_update_every_n: 12
146 | eklt_ekf_update_strategy: every-n-msec
147 | eklt_ekf_update_timestamp: latest-event
148 | eklt_harris_block_size: 3
149 | eklt_detection_min_distance: 9 # 10
150 | eklt_harris_k: 0.04 # 0.04
151 | eklt_harris_quality_level: 0.3 # 0.01
152 |
153 | #########
154 | # HASTE #
155 | #########
156 |
157 | haste_tracker_type: haste-correlation-star
158 | haste_patch_size: 31
159 | haste_max_corners: 60
160 | haste_min_corners: 45
161 | haste_bootstrap_from_frames: false
162 | haste_bootstrap_with_unit_length_of: true
163 |
164 | haste_enable_outlier_removal: true
165 | haste_outlier_method: 8
166 | haste_outlier_param1: 1.3
167 | haste_outlier_param2: 0.95
168 |
169 | haste_ekf_feature_interpolation: nearest-neighbor
170 | haste_ekf_feature_extrapolation_limit: -1.0
171 | haste_ekf_update_every_n: 3200
172 | haste_ekf_update_strategy: every-n-events
173 | haste_ekf_update_timestamp: patches-maximum
174 | haste_harris_block_size: 3
175 | haste_detection_min_distance: 8
176 | haste_harris_k: 0
177 | haste_harris_quality_level: 0.3
178 |
179 | ##############
180 | # SLAM-MSCKF #
181 | ##############
182 |
183 | # Number of poses in the sliding window
184 | n_poses_max: 10
185 |
186 | # Number of SLAM features
187 | n_slam_features_max: 15
188 |
189 | # Initial inverse depth of SLAM features [1/m]
190 | # (default: 1 / (2 * d_min) [Montiel, 2006])
191 | rho_0: 0.7
192 |
193 | # Initial standard deviation of SLAM inverse depth [1/m]
194 | # (default: 1 / (4 * d_min) [Montiel, 2006])
195 | sigma_rho_0: 0.35
196 |
197 | # Number of IEKF iterations (1 <=> EKF)
198 | iekf_iter: 1
199 |
200 | # Minimum baseline to trigger MSCKF measurement (pixels)
201 | msckf_baseline: 30.0
202 |
203 | # Minimum track length for a visual feature to be processed
204 | min_track_length: 10
205 |
206 | #######
207 | # EKF #
208 | #######
209 |
210 | # State buffer size (default: 250 states)
211 | state_buffer_size: 500
212 |
213 | # Expected difference between successive IMU sequence IDs.
214 | delta_seq_imu: 1
215 |
216 | # Tolerance for how far in the future/past the closest state request can be without returning an invalid state. [s]
217 | state_buffer_time_margin: 0.02
218 |
219 | # Gravity vector in world frame [m/s^2]
220 | g: [0.0, 0.0, -9.81] # [x,y,z]
221 |
222 | ########
223 | # Misc #
224 | ########
225 |
226 | # Timeout before 3D trajectory disappears in GUI (s)
227 | traj_timeout_gui: 3
228 |
229 | # flags to play rosbags
230 | /use_sim_time: True
231 |
232 | # Initialize at startup
233 | init_at_startup: False
234 |
235 | ##########
236 | # Events #
237 | ##########
238 |
239 | event_cam1_fx: 0.829551527
240 | event_cam1_fy: 1.104604558
241 | event_cam1_cx: 0.550800297
242 | event_cam1_cy: 0.615070333
243 | event_cam1_dist_model: radial-tangential
244 | event_cam1_dist_params: [-0.3684363117977873, 0.1509472435566583, -0.0002961305343848646, -0.000759431726241032, 0]
245 | event_cam1_img_width: 240
246 | event_cam1_img_height: 180
247 | event_cam1_q_ic: [0.999965419952174, 0.004660206319180, -0.003190841088148, -0.006104089619662]
248 | event_cam1_p_ic: [0.00674696422488, 0.0007279224709, 0.0342573613538] # [x,y,z]
249 | event_cam1_time_offset: 0.0
250 |
251 |
252 | event_accumulation_methode: 0
253 | event_accumulation_period: 0.1
254 | n_events_min: 15000
255 | n_events_max: 40000
256 | n_events_noise_detection_min: 40
257 | noise_event_rate: 20000
258 | event_norm_factor: 4.0
259 | correct_event_motion: False
260 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.19.2
2 | matplotlib>=3.3.4
3 | envyaml>=1.7
4 | pyyaml>=5.4.1
5 | pandas>=1.2.3
6 | gitpython
7 | pycryptodomex
8 | gnupg
9 | tqdm
10 | scipy>=1.6.2
11 | klampt>=0.8.7
12 | evo>=1.13.5
13 | pygame>=2.0.1
14 | unrealcv>=0.3.10
--------------------------------------------------------------------------------
/scripts/analyze_rt_factor_with_event_rate.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path
3 | from typing import Dict
4 |
5 | import numpy as np
6 | import x_evaluate.plots
7 | from matplotlib import pyplot as plt
8 |
9 | from x_evaluate.comparisons import identify_common_datasets
10 | from x_evaluate.evaluation_data import FrontEnd, EvaluationDataSummary
11 | from x_evaluate.performance_evaluation import plot_realtime_factor, plot_events_per_second, RT_FACTOR_RESOLUTION
12 | from x_evaluate.plots import PlotContext, plot_evo_trajectory_with_euler_angles, time_series_plot
13 | from x_evaluate.rpg_tracking_analysis.plot_tracks import plot_num_features
14 | from x_evaluate.scriptlets import find_evaluation_files_recursively, read_evaluation_pickle
15 | from x_evaluate.tracking_evaluation import plot_xvio_num_features, plot_eklt_num_features_comparison
16 | from x_evaluate.utils import name_to_identifier
17 |
18 |
def main():
    # CLI entry point: loads every evaluation pickle found below --input_folder,
    # groups the runs by frontend (EKLT/HASTE/XVIO), and plots realtime factor
    # per tracked feature against the event rate, per dataset and overall.
    parser = argparse.ArgumentParser(description='RT factor vs event rate')
    parser.add_argument('--input_folder', type=str, required=True, help='Input folder containing evaluation.pickle '
                                                                        'files in subdirs, with ideally EKLT, '
                                                                        'HASTE and XVIO frontend performance runs')

    args = parser.parse_args()

    # All plots are written to <input_folder>/results
    output_folder = os.path.join(args.input_folder, "results")

    eval_files = find_evaluation_files_recursively(args.input_folder)



    evaluations = [read_evaluation_pickle(os.path.dirname(f)) for f in eval_files]

    # One summary per frontend; when several runs share a frontend, the dict
    # comprehension keeps the last one encountered (warned about below)
    summaries: Dict[FrontEnd, EvaluationDataSummary] = {e.frontend: e for e in evaluations}

    for k in summaries.keys():
        if len([e for e in evaluations if e.frontend == k]) > 1:
            print(F"WARNING: multiple evaluation files for frontend '{k}' found, using {summaries[k].name}")

    frontends = list(summaries.keys())
    frontend_labels = [str(f) for f in frontends]
    summary_list = [summaries[f] for f in frontends]
    # Only datasets present in every frontend's summary are compared
    common_datasets = list(identify_common_datasets(summary_list))
    common_datasets.sort()

    x_evaluate.plots.use_paper_style_plots = True

    rtf_per_event_dict = dict()
    event_rate_dict = dict()

    # Accumulators for the across-all-datasets scatter plot
    rtf_per_event_overall = np.array([])
    event_rate_overall = np.array([])

    def filename_overall(plot_name):
        # Output path for plots aggregating all datasets
        return os.path.join(output_folder, F"rtf_overall_{plot_name}")

    for dataset in common_datasets:
        print(F"Plotting for sequence {dataset}")

        def filename_dataset(plot_name):
            # Output path for per-dataset plots
            return os.path.join(output_folder, F"rtf_{plot_name}_{name_to_identifier(dataset)}")

        # NOTE(review): an EKLT run is assumed to be present (KeyError otherwise) -- confirm
        eklt_eval = summaries[FrontEnd.EKLT].data[dataset]
        evaluations = [s.data[dataset] for s in summary_list]
        # assumes events_per_sec_sim is sampled once per second -- TODO confirm
        t_er = np.arange(0.0, len(eklt_eval.eklt_performance_data.events_per_sec_sim))

        # for combined plots...
        t_rtf = np.arange(0.0, len(eklt_eval.performance_data.rt_factors)) * RT_FACTOR_RESOLUTION
        t_nf = eklt_eval.feature_data.df_eklt_num_features['t']
        # Resample the feature count onto the realtime-factor time grid
        nf = np.interp(t_rtf, t_nf, eklt_eval.feature_data.df_eklt_num_features['num_features'])
        # Clamp feature count to >= 1 so the division below cannot divide by zero
        nf_min_1 = np.max(np.vstack((nf, np.ones_like(nf))), axis=0)
        rt_factor = eklt_eval.performance_data.rt_factors
        rt_factor_per_feature = rt_factor / nf_min_1
        event_rate = np.interp(t_rtf, t_er, eklt_eval.eklt_performance_data.events_per_sec_sim)

        # k-sample moving average to smooth both curves before plotting
        k = 5
        event_rate_smooth = np.convolve(event_rate, np.ones(k), 'same') / k
        rtf_per_feature_smooth = np.convolve(rt_factor_per_feature, np.ones(k), 'same') / k

        rtf_per_event_overall = np.hstack((rtf_per_event_overall, rtf_per_feature_smooth))
        event_rate_overall = np.hstack((event_rate_overall, event_rate_smooth))

        rtf_per_event_dict[dataset] = rtf_per_feature_smooth
        event_rate_dict[dataset] = event_rate_smooth

        # 3x2 per-dataset overview figure: RT factor, feature counts, event rate,
        # RTF/feature over time, and two event-rate-vs-RTF/feature scatters
        with PlotContext(filename_dataset("overview"), subplot_cols=3, subplot_rows=2) as pc:
            plot_realtime_factor(pc, evaluations, frontend_labels)
            plot_eklt_num_features_comparison(pc, [eklt_eval], ["EKLT"], dataset)
            time_series_plot(pc, t_er, [eklt_eval.eklt_performance_data.events_per_sec_sim], ["events/s"])

            time_series_plot(pc, t_rtf, [rt_factor_per_feature], ["RTF / feature"])

            time_series_plot(pc, [event_rate], [rt_factor_per_feature], ["event rate - RTF/F"],
                             xlabel="Event rate", ylabel="RTF per feature", use_scatter=True)

            time_series_plot(pc, [event_rate_smooth], [rtf_per_feature_smooth], ["event rate - RTF/F"],
                             xlabel="Event rate", ylabel="RTF per feature", use_scatter=True, title="Same but smooth")

        # smooth_rt_factor = np.convolve(eklt_eval.performance_data.rt_factors, np.ones(10), 'same') / 10
        # time_series_plot(pc, t_rtf, [smooth_rt_factor], ["Smooth RT factor EKLT"])

        with PlotContext(filename_dataset("rtfpf_vs_event_rate")) as pc:
            plot_rtfpf_vs_event_rate(pc.get_axis(), event_rate_smooth, rtf_per_feature_smooth)

    # Single scatter across all common datasets
    with PlotContext(filename_overall("rtfpf_vs_event_rate")) as pc:
        ax = pc.get_axis()
        plot_rtfpf_vs_event_rate(ax, event_rate_overall, rtf_per_event_overall)

    # plt.show()
112 |
def plot_rtfpf_vs_event_rate(ax, event_rate, rtf_per_feature):
    """Scatter the realtime factor per tracked feature over the event rate.

    The event rate is converted from events/s to mega-events/s for the x-axis.
    Points are drawn semi-transparent so dense regions remain readable.
    """
    megaevents_per_sec = event_rate / 1e6
    ax.set_xlabel("Event rate [Me/s]")
    ax.set_ylabel("Realtime factor / feature")
    ax.scatter(megaevents_per_sec, rtf_per_feature, alpha=0.5)
121 |
122 |
# Run the CLI entry point when executed as a script.
if __name__ == '__main__':
    main()
125 |
--------------------------------------------------------------------------------
/scripts/analyze_tracks_angles.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from x_evaluate.plots import PlotContext
9 | from x_evaluate.utils import timestamp_to_rosbag_time_zero
10 |
11 |
def main():
    """Compare optimized per-track flow angles against angles estimated from
    differential feature positions, for up to 100 sufficiently long tracks.

    Reads ``tracks.csv`` (and ``realtime.csv``) from --input_folder and writes
    one angle-comparison plot per analyzed track into --output_folder.
    """
    parser = argparse.ArgumentParser(description='Angle correlation analyzer')
    parser.add_argument('--output_folder', type=str, required=True)
    parser.add_argument('--input_folder', type=str, required=True)
    args = parser.parse_args()
    tracks_file = os.path.join(args.input_folder, 'tracks.csv')
    time_file = os.path.join(args.input_folder, 'realtime.csv')
    print(F"opening {tracks_file}")
    tracks = pd.read_csv(tracks_file, delimiter=";")
    rt = pd.read_csv(time_file, delimiter=";")  # read to validate the file exists; currently unused

    analyzed_tracks = 0
    cur_id = 0
    # FIX: bound the search by the largest track id. Previously the while-loop
    # ran forever when fewer than 100 tracks satisfied the length/duration
    # criteria, because cur_id kept incrementing past all existing ids.
    max_id = int(tracks.id.max()) if len(tracks) > 0 else 0

    while analyzed_tracks < 100 and cur_id < max_id:
        cur_id += 1
        track = tracks.loc[tracks.id == cur_id]

        if len(track) > 10:
            # Timestamps relative to the first patch update of this track
            track_times = track['patch_t_current'].to_numpy() - track.iloc[0]['patch_t_current']
            delta_t = track_times[-1] - track_times[0]

            if delta_t < 0.1:
                print(F"Delta t too small: {delta_t}")
                continue

            print(F"Analyzing track with {len(track)} updates, tracked over {delta_t:.1f}s")
            print(F" had the following updates: {track['update_type'].unique()}")

            # Sanity check: a finished track is expected to end with its single 'Lost' update
            last_row_is_lost = track.loc[track.update_type == 'Lost'].index[0] == track.iloc[-1:].index[0]
            assert last_row_is_lost, "Assuming only last update to be 'lost'"

            # Angle estimated from consecutive feature-position differences
            features_pos = track[['center_x', 'center_y']].to_numpy()
            diff = features_pos[1:, :] - features_pos[:-1, :]

            angle_estimates = np.arctan2(diff[:, 1], diff[:, 0])
            angle_estimates = np.mod(angle_estimates, 2 * np.pi)  # map to [0, 2*pi)
            angle = track['flow_angle'].to_numpy()[1:]

            print(F" flow_angle in [{np.min(angle)}, {np.max(angle)}]"
                  F" estimate in [{np.min(angle_estimates)},{np.max(angle_estimates)}]")

            filename = F"track_{cur_id}"
            file = os.path.join(args.output_folder, filename)
            with PlotContext(file) as f:
                ax = f.get_axis()

                ax.plot(track_times[1:-1], np.rad2deg(angle_estimates[:-1]), label="Differential feature position "
                                                                                   "angle")
                ax.plot(track_times[1:-1], np.rad2deg(angle[:-1]), label="Optimized angle")
                ax.set_ylabel("angle [deg]")
                ax.legend()

            analyzed_tracks += 1
66 |
67 |
# Run the CLI entry point when executed as a script.
if __name__ == '__main__':
    main()
70 |
--------------------------------------------------------------------------------
/scripts/compare_odometry.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | from typing import List, Optional
4 |
5 | import pandas as pd
6 |
7 | from x_evaluate.evaluation_data import EvaluationDataSummary
8 | from x_evaluate.plots import plot_evo_trajectory_with_euler_angles, PlotContext
9 | from x_evaluate.scriptlets import read_evaluation_pickle, find_evaluation_files_recursively
10 | from x_evaluate.utils import name_to_identifier
11 |
12 |
def prepare_output_folder(output_folder: Optional[str], fall_back: Optional[str]):
    """Resolve and create an output folder.

    Falls back to ``fall_back`` when ``output_folder`` is None; returns None
    when both are None. The chosen path is normalized and created (including
    parents) if it does not exist yet.

    :param output_folder: preferred output directory, or None
    :param fall_back: directory to use when ``output_folder`` is None, or None
    :return: the normalized path of the (now existing) folder, or None
    """
    if output_folder is None:
        if fall_back is None:
            return None
        output_folder = fall_back
    output_folder = os.path.normpath(output_folder)
    # FIX: exist_ok avoids the race between a separate exists() check and the
    # creation (another process could create the folder in between).
    os.makedirs(output_folder, exist_ok=True)
    return output_folder
21 | return output_folder
22 |
23 |
def main():
    # CLI entry point: loads every evaluation pickle found below --input_folder
    # and, for one hardcoded dataset, plots each run's aligned estimate against
    # ground truth as an XYZ / Euler-angle subplot grid.
    parser = argparse.ArgumentParser(description="Prints odometry-specific comparison plots")
    parser.add_argument('--input_folder', type=str, required=True)

    args = parser.parse_args()

    # Unlimited column width / auto-detected terminal width for dataframe printing
    pd.options.display.max_colwidth = None
    pd.options.display.width = 0

    evaluation_files = find_evaluation_files_recursively(args.input_folder)

    output_folder = os.path.join(args.input_folder, "results")

    # Creates <input_folder>/results if missing; the normalized return value is ignored here
    prepare_output_folder(output_folder, None)

    print(F"Found {evaluation_files}")

    evaluations: List[EvaluationDataSummary] = []

    for f in evaluation_files:

        e = read_evaluation_pickle(os.path.dirname(f), os.path.basename(f))

        evaluations.append(e)

    # can be converted to for loop in common datasets when needed
    dataset = "Wells Test 5"  # NOTE(review): hardcoded; every loaded evaluation must contain this dataset

    for e in evaluations:
        gt_trajectory = e.data[dataset].trajectory_data.traj_gt
        estimate = e.data[dataset].trajectory_data.traj_est_aligned

        # One 3x3 figure per evaluation run, named after dataset + run identifier
        with PlotContext(os.path.join(output_folder, F"{name_to_identifier(dataset)}_"
                                                     F"{name_to_identifier(e.name)}_xyz_euler"),
                         subplot_cols=3, subplot_rows=3) as pc:
            print(F"Length GT {gt_trajectory.timestamps[-1]-gt_trajectory.timestamps[0]:.1f}s Estimate "
                  F"{estimate.timestamps[-1]-estimate.timestamps[0]:.1f}s")
            plot_evo_trajectory_with_euler_angles(pc, estimate, e.name, gt_trajectory)
62 |
63 |
64 |
65 | if __name__ == '__main__':
66 | main()
67 |
--------------------------------------------------------------------------------
/scripts/convert_tracks_csv_to_txt.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from x_evaluate.utils import convert_to_tracks_txt, read_eklt_output_files
5 |
6 |
def main():
    """Convert an EKLT tracks.csv from an output folder into the EKLT-compatible txt format."""
    parser = argparse.ArgumentParser(description='Converting tracks.csv to EKLT compatible txt file')
    parser.add_argument('--input_folder', type=str, required=True)
    parser.add_argument('--output', type=str)
    args = parser.parse_args()

    # default the output file to live next to the input data
    output_file = args.output if args.output is not None else os.path.join(args.input_folder, "tracks.txt")

    _, _, df_tracks = read_eklt_output_files(args.input_folder)
    convert_to_tracks_txt(df_tracks, output_file)


if __name__ == '__main__':
    main()
24 |
--------------------------------------------------------------------------------
/scripts/create_speedup_table.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import numpy as np
5 | import pandas as pd
6 |
7 | from x_evaluate.evaluation_data import EvaluationDataSummary
8 | from x_evaluate.scriptlets import read_evaluation_pickle
9 | from x_evaluate.trajectory_evaluation import create_trajectory_result_table_wrt_traveled_dist
10 | from x_evaluate.utils import get_common_stats_functions, merge_tables
11 |
12 |
def create_rtf_table(s: EvaluationDataSummary):
    """Build a per-dataset real-time-factor (RTF) table (min / median / max / mean).

    :param s: evaluation summary whose data entries provide performance_data.rt_factors
    :return: DataFrame indexed by dataset name with a (run name, metric) MultiIndex column
    """
    datasets = list(s.data.keys())

    # loop-invariant: fetch the stats functions once instead of per dataset
    stats_func = get_common_stats_functions()

    # np.float was a deprecated alias for the builtin float and was removed in NumPy 1.24
    data = np.empty((len(datasets), 4), dtype=float)
    for i, k in enumerate(datasets):
        rtfs = s.data[k].performance_data.rt_factors
        data[i, :] = [stats_func['min'](rtfs), stats_func['median'](rtfs),
                      stats_func['max'](rtfs), stats_func['mean'](rtfs)]

    index_columns = [(s.name, "RTF Min"), (s.name, "RTF Median"),
                     (s.name, "RTF Max"), (s.name, "RTF Mean")]
    index = pd.MultiIndex.from_tuples(index_columns, names=["Evaluation Run", "Metric"])
    result_table = pd.DataFrame(data, index=datasets, columns=index)

    return result_table
33 |
34 |
def main():
    """Merge several hard-coded speedup evaluation runs into one summary and
    print/export a table of position/yaw errors and real-time factors.

    NOTE(review): the merged run paths below are machine-specific absolute paths
    and the cropped columns are keyed to the 'Poster 6DOF Speedup run' dataset.
    """
    parser = argparse.ArgumentParser(description='Speedup through event filtering analysis')
    parser.add_argument('--input_file', type=str, required=True, help='Full path to evaluation file')

    args = parser.parse_args()

    # NOTE(review): only the directory of --input_file is used here; presumably
    # read_evaluation_pickle falls back to a default filename — confirm in scriptlets.py
    evaluation = read_evaluation_pickle(os.path.dirname(args.input_file))

    # pull additional hard-coded runs into the same summary (later runs overwrite equal dataset keys)
    merge_with_new_eval_run(evaluation, '/storage/data/projects/nasa-eve/experiments/speedup/local/2022-05-08-poster-6dof-speedup/000-speedup-run')
    merge_with_new_eval_run(evaluation, '/storage/data/projects/nasa-eve/experiments/speedup/local/2022-05-09-poster'
                                        '-6dof-speedup/000-speedup-run-konstantin')
    merge_with_new_eval_run(evaluation, '/storage/data/projects/nasa-eve/experiments/speedup/local/2022-05-10-poster'
                                        '-6dof-speedup/000-speedup-run-konstantin')
    merge_with_new_eval_run(evaluation, '/storage/data/projects/nasa-eve/experiments/speedup/local/2022-05-11-poster'
                                        '-6dof-speedup/000-speedup-run-konstantin')

    # render pandas tables full-width on the console
    pd.options.display.max_colwidth = None
    pd.options.display.width = 0

    error_table = create_trajectory_result_table_wrt_traveled_dist(evaluation)
    rtf_table = create_rtf_table(evaluation)

    table = merge_tables([error_table, rtf_table])

    # keep only the metrics of interest for the speedup comparison
    cropped_table = table[[('Poster 6DOF Speedup run', 'Mean Position Error [%]'), ('Poster 6DOF Speedup run',
                                                                                   'Mean Yaw error [deg/m]'),
                           ('Poster 6DOF Speedup run', 'RTF Max'), ('Poster 6DOF Speedup run', 'RTF Mean'),
                           ('Poster 6DOF Speedup run', 'RTF Median')]]
    print()
    print(cropped_table.round(2))
    cropped_table.round(2).to_excel('speedup.xlsx')
66 |
67 |
def merge_with_new_eval_run(evaluation, other_name):
    """Copy every dataset entry from another evaluation pickle into 'evaluation',
    overwriting entries that share the same dataset key."""
    other_run = read_evaluation_pickle(other_name)
    for dataset_key in list(other_run.data.keys()):
        evaluation.data[dataset_key] = other_run.data[dataset_key]


if __name__ == '__main__':
    main()
77 |
--------------------------------------------------------------------------------
/scripts/customize_rpe_error_arrays.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import numpy as np
5 | import tqdm
6 |
7 | from x_evaluate.scriptlets import read_evaluation_pickle, write_evaluation_pickle
8 | from x_evaluate.trajectory_evaluation import calculate_rpe_errors_for_pairs_at_different_distances
9 |
10 |
def main():
    """Re-calculate RPE error arrays for additional pose-pair distances and write the pickle back."""
    parser = argparse.ArgumentParser(description='Reads evaluation.pickle and re-calculates RPE error arrays for new '
                                                 'pose pair distances')
    parser.add_argument('--input', type=str, required=True)
    parser.add_argument('--distances', nargs='+', required=True, type=float)
    parser.add_argument('--force_recalculations', default=False, action='store_true')

    args = parser.parse_args()

    output_root = os.path.dirname(args.input)
    filename = os.path.basename(args.input)

    print(F"Reading {args.input}")
    summary = read_evaluation_pickle(output_root, filename)

    for dataset_key, evaluation in tqdm.tqdm(summary.data.items()):
        rpe_error_t = evaluation.trajectory_data.rpe_error_t
        rpe_error_r = evaluation.trajectory_data.rpe_error_r

        # recompute everything when forced, otherwise only distances missing from either error dict
        if args.force_recalculations:
            to_calculate = list(args.distances)
        else:
            to_calculate = [d for d in args.distances if d not in rpe_error_t or d not in rpe_error_r]

        error_t, error_r = calculate_rpe_errors_for_pairs_at_different_distances(
            to_calculate,
            evaluation.trajectory_data.traj_gt_synced,
            evaluation.trajectory_data.traj_est_aligned)

        # merge the freshly calculated arrays into the existing dicts
        rpe_error_t.update(error_t)
        rpe_error_r.update(error_r)

    print("Writing evaluation pickle")
    write_evaluation_pickle(summary, output_root, filename)


if __name__ == '__main__':
    main()
52 |
--------------------------------------------------------------------------------
/scripts/estimate_q0_from_imu_a.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import unittest
3 |
4 | import numpy as np
5 | from scipy.spatial.transform import Rotation as R
6 | from rosbag import Bag
7 |
8 | from x_evaluate.utils import get_ros_topic_name_from_msg_type
9 |
10 |
def calculate_smallest_rotation_to_z_up_frame(gravity_observation: np.ndarray, imu_acc_bias: np.ndarray) -> R:
    """Compute the smallest rotation mapping a bias-corrected gravity observation onto a z-up frame.

    Uses the shortest-arc quaternion between the observation and the target gravity
    [0, 0, 9.81]: vector part = cross(obs, target), scalar part = |obs|*|target| + dot(obs, target).
    The result is invariant to the observation's magnitude.

    :param gravity_observation: measured specific force (3-vector), assumed to observe pure gravity
    :param imu_acc_bias: accelerometer bias (3-vector), subtracted from the observation
    :return: rotation r such that r.apply(obs / |obs|) == [0, 0, 1]
    :note: degenerate when the corrected observation points exactly opposite the target
           (zero quaternion); R.from_quat raises in that case.
    """
    gravity_target_frame = np.array([0.0, 0.0, 9.81])

    # accept plain lists as well as arrays (main() passes args.ba_xyz as a list)
    gravity_observation = np.asarray(gravity_observation, dtype=float)
    imu_acc_bias = np.asarray(imu_acc_bias, dtype=float)

    # Bias is subtracted - see in x library: state.cpp:209
    gravity_observation = gravity_observation - imu_acc_bias

    xyzw = np.zeros(4)
    xyzw[:3] = np.cross(gravity_observation, gravity_target_frame)
    # |obs| * |target| + obs . target  (sqrt of the product of squared norms == product of norms)
    xyzw[3] = np.sqrt((np.linalg.norm(gravity_observation) ** 2) * (np.linalg.norm(gravity_target_frame) ** 2)) \
        + np.dot(gravity_observation, gravity_target_frame)

    # from_quat normalizes its input, so the un-normalized shortest-arc quaternion is fine here
    return R.from_quat(xyzw)
27 |
28 |
class TestAttitudeEstimationFromImu(unittest.TestCase):
    """Unit tests for calculate_smallest_rotation_to_z_up_frame."""

    def setUp(self):
        # generate 10 random rotations (always the same with seed 76419)
        self.random_rotations = R.random(10, 76419).as_matrix()
        np.random.seed(732094)
        self.random_biases = np.random.random((10, 3))
        self.expected_gravity = np.array([0, 0, 9.81])

    def general_test(self, gravity_observation, imu_acc_bias, expected_rotation):
        # the estimated rotation must match the expected one up to float tolerance
        estimated = calculate_smallest_rotation_to_z_up_frame(gravity_observation, imu_acc_bias)
        self.assertTrue(np.allclose(estimated.as_matrix(), expected_rotation.as_matrix()))

    def general_test_with_biases(self, gravity_observation, expected_result):
        # zero bias first, then the same observation corrupted by each random bias
        self.general_test(gravity_observation, [0, 0, 0], expected_result)
        for bias in self.random_biases:
            self.general_test(gravity_observation + bias, bias, expected_result)

    def test_perfect_imu_measurement(self):
        self.general_test_with_biases(self.expected_gravity, R.identity())
        # a scaled gravity observation must still yield the identity rotation
        self.general_test(self.expected_gravity * 2, [0, 0, 0], R.identity())

    def test_z_alignment(self):
        for random_rotation in self.random_rotations:
            for bias in self.random_biases:
                rotated_observation = random_rotation @ self.expected_gravity
                result_rot = calculate_smallest_rotation_to_z_up_frame(rotated_observation + bias, bias)

                # scaling of observation must not influence result
                result_rot_scaled = calculate_smallest_rotation_to_z_up_frame(rotated_observation * 10 + bias * 10,
                                                                              bias * 10)
                scaling_invariant = np.allclose(result_rot.as_matrix(), result_rot_scaled.as_matrix())

                # the estimated rotation must map the observed gravity direction onto +z
                z_up_body = rotated_observation / np.linalg.norm(rotated_observation)
                z_aligned = np.allclose(result_rot.as_matrix() @ z_up_body, [0, 0, 1])
                self.assertTrue(z_aligned and scaling_invariant)
65 |
66 |
def main():
    """Estimate the initial attitude q0 from the first IMU message of a rosbag and,
    when pose groundtruth is present, report how far the estimated z-axis is off."""
    #
    # # RUN TESTS
    # unittest.main()

    parser = argparse.ArgumentParser(description="Reads first IMU measurements from bag and calculates the attitude q0 "
                                                 "that would bring us closest to a z-up gravity-down "
                                                 "reference frame. This is done by assuming zero body acceleration in "
                                                 "the first IMU message, i.e. assuming that the gravity vector can be"
                                                 "observed. If pose groundtruth is available in the bag, results are "
                                                 "compared to it")
    parser.add_argument('--input_bag', type=str, required=True)
    # NOTE(review): help text is a placeholder ("my help message")
    parser.add_argument("--ba_xyz", nargs=3, metavar=('ba_x', 'ba_y', 'ba_z'), help="my help message", type=float,
                        default=[0.0, 0.0, 0.0])

    args = parser.parse_args()

    input_bag = Bag(args.input_bag, 'r')

    # t_0 = input_bag.get_start_time()
    # NOTE(review): 'bias' is computed but never used — the raw args.ba_xyz list is
    # passed to the estimator below instead
    bias = np.array(args.ba_xyz)
    imu_topic = None
    gt_topic = None
    try:
        imu_topic = get_ros_topic_name_from_msg_type(input_bag, 'sensor_msgs/Imu')
    except LookupError:
        print("No IMU topic found in file...")
        exit(1)

    try:
        gt_topic = get_ros_topic_name_from_msg_type(input_bag, 'geometry_msgs/PoseStamped')
    except LookupError:
        pass  # ok for gt_topic to be non existent

    # the very first IMU message provides the gravity observation
    _, imu_message, t_imu = next(input_bag.read_messages([imu_topic]))

    imu_acc = np.array([imu_message.linear_acceleration.x, imu_message.linear_acceleration.y,
                        imu_message.linear_acceleration.z])

    R_hat = calculate_smallest_rotation_to_z_up_frame(imu_acc, args.ba_xyz)

    print(F"q0_hat in xyzw format: {list(R_hat.as_quat())}")
    print()
    print("For xVIO parameter file this means:")
    #q: [-0.329112656567, -0.0699724433216, 0.0214667765852, 0.941449889249] # [w,x,y,z]
    # np.roll moves the scalar to the front: xyzw -> wxyz
    print(F"q: {list(np.roll(R_hat.as_quat(), 1))} # [w,x,y,z]")

    if gt_topic is not None:
        _, gt_message, t_gt = next(input_bag.read_messages([gt_topic]))
        print()
        print("Here is how well we did:")

        t_offset_in_ms = (t_gt.to_sec() - t_imu.to_sec()) * 1000

        print(F"GT minus IMU message times is: {t_offset_in_ms:.2f}ms")

        orientation = gt_message.pose.orientation

        R_gt = R.from_quat([orientation.x, orientation.y, orientation.z, orientation.w])

        # rotations go from from inertial reference frame to body frame, we flip this around and compare the z-up
        # vectors in the body frame
        e_z_body = R_gt.inv().as_matrix() @ np.array([0, 0, 1])
        e_z_body_hat = R_hat.inv().as_matrix() @ np.array([0, 0, 1])
        angle = np.arccos(np.dot(e_z_body, e_z_body_hat))  # those are unit vectors, so norm = 1

        print(F"z-direction in body frame [GT]: {list(e_z_body)}")
        print(F"z-direction in body frame [EST]: {list(e_z_body_hat)}")
        print(F"Z-direction vectors are {np.rad2deg(angle):.2f}° off")


if __name__ == '__main__':
    main()
140 |
--------------------------------------------------------------------------------
/scripts/evaluate_uslam.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import shutil
4 |
5 | import pandas as pd
6 |
7 | import roslaunch
8 |
9 | from x_evaluate.evaluation_data import EvaluationData, EvaluationDataSummary
10 | from x_evaluate.scriptlets import write_evaluation_pickle
11 | from x_evaluate.trajectory_evaluation import evaluate_trajectory, plot_trajectory_plots, plot_summary_plots, \
12 | create_summary_info
13 |
14 |
def main():
    """Run USLAM on a fixed list of simulated datasets, then evaluate and plot each
    resulting trajectory with x_evaluate and store everything as an evaluation pickle.

    NOTE(review): dataset names and bag paths are hard-coded for a specific
    experiment; the commented-out lines document earlier configurations.
    """
    parser = argparse.ArgumentParser(description='Hand-made script for getting all result tables')
    parser.add_argument('--uslam_folder', type=str, required=True)
    parser.add_argument('--catkin_root', type=str, required=True)
    parser.add_argument('--cfg_filename', type=str, default=None)
    parser.add_argument('--calib_filename', type=str, default="DAVIS-IJRR17.yaml")
    parser.add_argument('--name', type=str, default="Debug")

    args = parser.parse_args()

    # datasets = ["Boxes 6DOF", "Boxes Translation", "Dynamic 6DOF", "Dynamic Translation", "HDR Boxes",
    #             "HDR Poster", "Poster 6DOF", "Poster Translation", "Shapes 6DOF", "Shapes Translation"]
    #
    # must_datasets = [x.lower().replace(' ', '_') for x in datasets]
    # print(must_datasets)

    # NOTE(review): this empty list is immediately overwritten below
    datasets = []

    # human-readable display names; paired by index with 'must_datasets' below
    datasets = [
        # "Mars Straight Vmax 3.2 Offset 2.5",
        "Mars Circle Vmax 7.2 Offset 2.5",
        "Mars Circle Vmax 7.2 Offset 2.5 no bootsrapping",
        # "Mars Circle Vmax 7.2 Offset 5.0",
        # "Mars Circle Vmax 7.2 Offset 10.0",
        "Mars Vertical Circle Vmax 2.4 Offset 2.5",
        "Mars Vertical Circle Vmax 2.4 Offset 2.5 no bootstrapping",
        "Mars Mellon Vmax 12.4 Offset 10",
        "Mars Mellon Vmax 12.4 Offset 10 no bootsrapping",
        # "Mars Circle Vmax 7.2 Offset 10", "Mars Circle Vmax 16.6 Offset 10"
    ]

    # file-system identifiers (bag / config / gt names); order must line up with
    # 'datasets' above, since both are indexed with the same loop counter i
    must_datasets = [
        # "neuro_bem_esim_straight_vmax_3.2_offset_2.5",
        "neuro_bem_esim_circle_vmax_7.2_offset_2.5",
        "neuro_bem_esim_circle_vmax_7.2_offset_2.5_no_bootstrapping",
        # "neuro_bem_esim_circle_vmax_7.2_offset_5",
        # "neuro_bem_esim_circle_vmax_7.2_offset_10",
        "neuro_bem_esim_vcircle_vmax_2.4_offset_2.5",
        "neuro_bem_esim_vcircle_vmax_2.4_offset_2.5_no_bootstrapping",
        # "neuro_bem_esim_circle_vmax_7.2_offset_10_no_bootstrapping",
        # "neuro_bem_esim_circle_vmax_16.6_offset_10_no_bootstrapping",
        "neuro_bem_esim_mellon_vmax_12.4_offset_10",
        "neuro_bem_esim_mellon_vmax_12.4_offset_10_no_bootstrapping",
    ]

    # output_folders = [os.path.join(args.uslam_folder, "run_ijrr_" + x) for x in must_datasets]

    output_folders = [os.path.join(args.uslam_folder, "run_" + args.name.lower().replace(' ', '_'))]

    for output_folder in output_folders:

        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        # for d in must_datasets:
        #     print()

        s = EvaluationDataSummary()
        s.name = os.path.basename(output_folder)

        for i, d in enumerate(must_datasets):
            sequence_folder = os.path.join(output_folder, F"{i+1:>03}_{d}")

            if not os.path.exists(sequence_folder):
                os.makedirs(sequence_folder)

            # if i >= 1:
            #     break

            # NOTE(review): the rpg_davis_data path is dead code — it is immediately
            # overwritten by the sim/ path on the next line
            bag_filename = F"rpg_davis_data/{d}.bag"
            bag_filename = F"sim/{d}.bag"
            cfg_filename = F"{d}.conf"

            if args.cfg_filename:
                cfg_filename = args.cfg_filename

            run_uslam(bag_filename, args.catkin_root, cfg_filename, args.calib_filename, sequence_folder)

            gt_file = os.path.join(args.uslam_folder, F"gt/{d}/gt.csv")
            output_file = "/tmp/uslam_pose.csv"

            # shutil.copy(output_file, os.path.join(sequence_folder, "pose.csv"))  # COMMENT ME OUT
            # continue

            # output_file = os.path.join(sequence_folder, "pose.csv")

            # adapt USLAM's traj_es.csv to the column layout x_evaluate expects
            uslam_traj_es = pd.read_csv(os.path.join(sequence_folder, "traj_es.csv"))
            # timestamps are divided by 1e9 (presumably nanoseconds -> seconds)
            uslam_traj_es['timestamp'] /= 1e9
            uslam_traj_es['update_modality'] = "USLAM OUTPUT"
            # note the leading spaces: USLAM writes ' x', ' qx', ... as column headers
            mapping = {'timestamp': 't', ' x': 'estimated_p_x', ' y': 'estimated_p_y', ' z': 'estimated_p_z',
                       ' qx': 'estimated_q_x', ' qy': 'estimated_q_y', ' qz': 'estimated_q_z', ' qw': 'estimated_q_w'}
            uslam_traj_es = uslam_traj_es.rename(columns=mapping)
            uslam_traj_es = uslam_traj_es.drop([' vx', ' vy', ' vz', ' bgx', ' bgy', ' bgz', ' bax', ' bay', ' baz'],
                                               axis=1)

            # df_poses = pd.read_csv(output_file, delimiter=";")
            df_poses = uslam_traj_es
            df_groundtruth = pd.read_csv(gt_file, delimiter=";")

            # NOTE(review): 'd' shadows the dataset-name loop variable from here on
            d = EvaluationData()
            d.name = must_datasets[i]

            print("Analyzing output trajectory...")

            d.trajectory_data = evaluate_trajectory(df_poses, df_groundtruth)

            print("Plotting trajectory plots...")

            plot_trajectory_plots(d.trajectory_data, "USLAM", sequence_folder)

            # keyed by the human-readable name, not the folder identifier
            s.data[datasets[i]] = d

        plot_summary_plots(s, output_folder)
        create_summary_info(s, output_folder)

        write_evaluation_pickle(s, output_folder)
133 |
134 |
def run_uslam(bag_filename, catkin_root, cfg_filename, calib_filename, log_dir):
    """Launch USLAM via its xvio_datasets.launch file and block until it finishes."""
    launch_file = "src/rpg_ultimate_slam_pro/applications/ze_vio_ceres/launch/xvio_datasets.launch"
    cli_args = [os.path.join(catkin_root, launch_file), F"bag_filename:={bag_filename}",
                F"cfg_filename:={cfg_filename}", F'calib_filename:={calib_filename}', F"log_dir:={log_dir}"]

    resolved_launch_file = roslaunch.rlutil.resolve_launch_arguments(cli_args)[0]
    launch_uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
    parent = roslaunch.parent.ROSLaunchParent(launch_uuid, [(resolved_launch_file, cli_args[1:])])
    parent.start()
    parent.spin()  # blocks until the launched nodes terminate


if __name__ == '__main__':
    main()
149 |
--------------------------------------------------------------------------------
/scripts/extract_initial_state_from_bag.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import sys
4 |
5 | import numpy as np
6 | import rospy
7 |
8 | from rosbag import Bag
9 |
10 |
def main():
    """Read the first ~0.1 s of geometry_msgs/PoseStamped messages from a rosbag and
    print an initial state (p, v, q) block for the XVIO parameter file."""
    parser = argparse.ArgumentParser(description="Reads geometry_msgs/PoseStamped messages and calculates initial "
                                                 "state for XVIO filter")
    parser.add_argument('--input', type=str, required=True)
    parser.add_argument('--t_from', type=float, default=None)
    parser.add_argument('--no_average', dest='use_average', default=True, action='store_false')
    # NOTE(review): --t_avg is declared but never used; the averaging window is hard-coded to 0.1 s below
    parser.add_argument('--t_avg', type=float, default=0.05, help="timespan on which to calculate average velocity [s]")

    args = parser.parse_args()

    input_bag = Bag(args.input, 'r')

    topic_info = input_bag.get_type_and_topic_info()

    # all topics in the bag that publish PoseStamped messages
    pose_topics = [k for k, t in topic_info.topics.items() if t.msg_type == 'geometry_msgs/PoseStamped']

    if len(pose_topics) > 1:
        print(F"Warning, multiple pose topics found ({pose_topics}), taking first: '{pose_topics[0]}'")
    elif len(pose_topics) == 0:
        print("No geometry_msgs/PoseStamped found in bag")
        sys.exit()

    pose_topic = pose_topics[0]

    # optional start time for reading (None = beginning of the bag)
    t_from = None
    if args.t_from is not None:
        t_from = rospy.Time.from_sec(args.t_from)

    pose_messages = []

    # collect messages until the 0.1 s window (relative to the first collected message) is exceeded
    for topic, msg, t in input_bag.read_messages([pose_topic], start_time=t_from):
        if len(pose_messages) > 0 and msg.header.stamp.to_sec() - pose_messages[0].header.stamp.to_sec() >= 0.1:
            break
        pose_messages.append(msg)

    # unpack positions, orientations and timestamps
    # NOTE(review): at least two messages are required below (t[1], xyz[1, :]); fewer raise an IndexError
    x = [p.pose.position.x for p in pose_messages]
    y = [p.pose.position.y for p in pose_messages]
    z = [p.pose.position.z for p in pose_messages]
    qx = [p.pose.orientation.x for p in pose_messages]
    qy = [p.pose.orientation.y for p in pose_messages]
    qz = [p.pose.orientation.z for p in pose_messages]
    qw = [p.pose.orientation.w for p in pose_messages]
    t = [p.header.stamp.to_sec() for p in pose_messages]

    xyz = np.array([x, y, z]).T

    wxyz = np.array([qw, qx, qy, qz]).T

    # finite-difference velocities
    # NOTE(review): delta_t of the first pair is applied to all pairs, i.e. uniform sampling is assumed
    delta_t = t[1] - t[0]
    start_velocity = (xyz[1, :] - xyz[0, :]) / delta_t
    avg_velocity = np.mean((xyz[1:, :] - xyz[:-1, :]) / delta_t, axis=0)

    velocity = avg_velocity if args.use_average else start_velocity

    init_v = list(velocity)
    init_p = list(xyz[0, :])
    init_q = list(wxyz[0, :])

    print(F" # initial state computed from {pose_topic} at {t[0]}s in {os.path.basename(args.input)}:")
    print(F" p: {init_p}")
    print(F" v: {init_v}")
    print(F" q: {init_q} #[w,x,y,z]")


if __name__ == '__main__':
    main()
77 |
--------------------------------------------------------------------------------
/scripts/plot_event_rate_from_bag.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 | from matplotlib import pyplot as plt
5 |
6 | from rosbag import Bag
7 | from x_evaluate.plots import PlotContext
8 | from x_evaluate.utils import get_ros_topic_name_from_msg_type, read_all_ros_msgs_from_topic_into_dict
9 |
10 |
def main():
    """Plot the per-second event rate of the dvs_msgs/EventArray topic in a rosbag."""
    parser = argparse.ArgumentParser(description="Plots event rate from BAG")
    parser.add_argument('--input', type=str, required=True)

    args = parser.parse_args()

    input_bag = Bag(args.input, 'r')

    event_topic = get_ros_topic_name_from_msg_type(input_bag, 'dvs_msgs/EventArray')
    event_array_messages = read_all_ros_msgs_from_topic_into_dict(event_topic, input_bag)

    # flatten all event timestamps (absolute epoch seconds) into one array
    event_times = np.array([e.ts.to_sec() for ea in event_array_messages.values() for e in ea.events])

    start = input_bag.get_start_time()
    end = input_bag.get_end_time()

    # shift event times to be relative to the bag start
    event_times -= start

    # BUGFIX: bins must live in the same bag-relative time base as event_times;
    # previously they ran from 'start' to 'end' in absolute epoch time, so the
    # shifted events fell outside the histogram range and (almost) nothing was counted
    bins = np.arange(0.0, end - start, 1.0)
    events_per_sec, t = np.histogram(event_times, bins=bins)

    with PlotContext() as pc:
        ax = pc.get_axis()
        ax.set_title(F"Events per second")
        ax.plot(t[1:], events_per_sec)
        ax.set_xlabel("time")
        ax.set_ylabel("events/s")

    # block for visualization
    plt.show()


if __name__ == '__main__':
    main()
45 |
--------------------------------------------------------------------------------
/scripts/plot_summaries.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import yaml
5 |
6 | from x_evaluate.utils import name_to_identifier
7 | from x_evaluate.scriptlets import read_evaluation_pickle
8 |
9 | import x_evaluate.tracking_evaluation as fe
10 | import x_evaluate.performance_evaluation as pe
11 | import x_evaluate.trajectory_evaluation as te
12 |
13 |
def main():
    """Read an evaluation.pickle and re-create all per-dataset and summary plots next to it."""
    parser = argparse.ArgumentParser(description='Reads evaluation.pickle and plots all summary plots')
    parser.add_argument('--input', type=str, required=True)

    args = parser.parse_args()

    output_root = os.path.dirname(args.input)
    filename = os.path.basename(args.input)

    s = read_evaluation_pickle(output_root, filename)

    for i, (key, evaluation) in enumerate(s.data.items(), start=1):
        subfolder = F"{i:>03}_{name_to_identifier(key)}"
        print(F"Plotting summary plots for '{key}' in subfolder '{subfolder}'")
        output_folder = os.path.join(output_root, subfolder)

        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        # keep a copy of the run parameters next to the plots (only written once)
        params_yaml_file = os.path.join(output_folder, "params.yaml")
        if not os.path.exists(params_yaml_file):
            with open(params_yaml_file, 'w') as tmp_yaml_file:
                yaml.dump(evaluation.params, tmp_yaml_file)

        # per-dataset plots: performance, trajectory, feature tracking
        pe.plot_performance_plots(evaluation, output_folder)
        te.plot_trajectory_plots(evaluation.trajectory_data, evaluation.name, output_folder)
        fe.plot_feature_plots(evaluation, output_folder)

    # cross-dataset summary plots and info at the root
    te.plot_summary_plots(s, output_root)
    te.create_summary_info(s, output_root)
    pe.plot_summary_plots(s, output_root)
    pe.print_realtime_factor_summary(s)
    fe.plot_summary_plots(s, output_root)


if __name__ == '__main__':
    main()
55 |
--------------------------------------------------------------------------------
/scripts/rename.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | from x_evaluate.scriptlets import read_evaluation_pickle, write_evaluation_pickle
5 |
6 |
def main():
    """Rename an evaluation run in its pickle and normalize known dataset keys in-place."""
    parser = argparse.ArgumentParser(description='Reads evaluation.pickle and plots all summary plots')
    parser.add_argument('--input', type=str, required=True)
    parser.add_argument('--new_name', type=str, required=True)

    args = parser.parse_args()

    output_root = os.path.dirname(args.input)
    filename = os.path.basename(args.input)

    print(F"Reading {args.input}")
    s = read_evaluation_pickle(output_root, filename)

    # Naming quick fix for Ultimate SLAM
    rpg_davis_data = ["Boxes 6DOF", "Boxes Translation", "Dynamic 6DOF", "Dynamic Translation", "HDR Boxes",
                      "HDR Poster", "Poster 6DOF", "Poster Translation", "Shapes 6DOF", "Shapes Translation"]
    dataset_mapping = {name.lower().replace(' ', '_'): name for name in rpg_davis_data}

    dataset_mapping.update({
        "Mars Circle Vmax 7.2 Offset 2.5": "Mars Circle",
        "Mars Vertical Circle Vmax 2.4 Offset 2.5": "Mars Vertical Circle",
        "Mars Mellon Vmax 12.4 Offset 10": "Mars Mellon",
    })

    # iterate over a snapshot of the keys, since entries are re-keyed while renaming
    for old_key in list(s.data.keys()):
        new_key = dataset_mapping.get(old_key)
        if new_key is not None:
            print(F"Renaming dataset '{old_key}' --> {new_key}")
            s.data[new_key] = s.data.pop(old_key)

    print(F"Renaming '{s.name}' to '{args.new_name}'")
    s.name = args.new_name
    print("Writing evaluation pickle")
    write_evaluation_pickle(s, output_root, filename)


if __name__ == '__main__':
    main()
47 |
--------------------------------------------------------------------------------
/scripts/search_evaluations.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import glob
3 | import os
4 | import pickle
5 |
6 | from tqdm import tqdm
7 |
8 | from x_evaluate.evaluation_data import FrontEnd
9 | from x_evaluate.scriptlets import find_evaluation_files_recursively, read_evaluation_pickle
10 | from x_evaluate.trajectory_evaluation import create_trajectory_result_table_wrt_traveled_dist, \
11 | create_trajectory_completion_table
12 |
13 |
def main():
    """Scan a folder tree for evaluation pickles and collect per-frontend result tables.

    For every evaluation run that covers all required datasets, a trajectory error
    table (merged with a completion table) is stored under its file path, grouped by
    frontend (EKLT / HASTE / XVIO). Tables are dumped to /tmp/*.pickle incrementally
    so that partial results survive an interrupted scan.
    """
    parser = argparse.ArgumentParser(description='Hand-made script for getting all result tables')
    parser.add_argument('--root_folder', type=str, required=True)
    parser.add_argument('--path_match', type=str, default=None)

    args = parser.parse_args()

    evaluation_files = find_evaluation_files_recursively(args.root_folder)

    # optionally keep only paths containing the given substring
    if args.path_match is not None:
        evaluation_files = [e for e in evaluation_files if args.path_match in e]

    print(evaluation_files)

    # NOTE(review): the successive re-assignments document earlier experiment
    # configurations; only the final assignment of 'must_datasets' takes effect.
    must_datasets = {"Boxes 6DOF", "Boxes Translation", "Dynamic 6DOF", "Dynamic Translation", "HDR Boxes",
                     "HDR Poster", "Poster 6DOF", "Poster Translation", "Shapes 6DOF", "Shapes Translation"}

    must_datasets = {"Mars Straight Vmax 3.2 Offset 2.5", "Mars Eight Vmax 3.5 Offset 2.5",
                     "Mars Circle Vmax 7.2 Offset 2.5", "Mars Vertical Circle Vmax 2.4 Offset 2.5",
                     "Mars Straight Vmax 3.2 Offset 5", "Mars Eight Vmax 3.5 Offset 5",
                     "Mars Circle Vmax 7.2 Offset 5", "Mars Vertical Circle Vmax 2.4 Offset 5",
                     "Mars Straight Vmax 3.2 Offset 10", "Mars Eight Vmax 3.5 Offset 10",
                     "Mars Circle Vmax 7.2 Offset 10", "Mars Vertical Circle Vmax 2.4 Offset 10"}

    must_datasets = {"Mars Straight Vmax 3.2 Offset 2.5", "Mars Circle Vmax 7.2 Offset 2.5", "Mars Vertical Circle Vmax 2.4 Offset 2.5", "Mars Circle Vmax 7.2 Offset 10", "Mars Circle Vmax 16.6 Offset 10"}

    must_datasets = {
        "Mars Vertical Circle Vmax 2.4 Offset 2.5",
        "Mars Circle Vmax 7.2 Offset 2.5",
        "Mars Mellon Vmax 12.4 Offset 10",
    }

    eklt_tables = {}
    haste_tables = {}
    xvio_tables = {}

    for f in tqdm(evaluation_files):
        print(F"Reading {f}")
        s = read_evaluation_pickle(os.path.dirname(f), os.path.basename(f))

        try:
            # accept only runs that contain every required dataset
            if len(must_datasets.intersection(s.data.keys())) == len(must_datasets):
                table = create_trajectory_result_table_wrt_traveled_dist(s)
                completion_table = create_trajectory_completion_table(s)
                table = table.merge(completion_table, left_index=True, right_index=True)
                # incremental dumps: a long scan can be interrupted without losing results
                if s.frontend == FrontEnd.EKLT:
                    eklt_tables[f] = table
                    with open("/tmp/eklt-tables.pickle", 'wb') as file:
                        pickle.dump(eklt_tables, file, pickle.HIGHEST_PROTOCOL)
                elif s.frontend == FrontEnd.HASTE:
                    haste_tables[f] = table
                    with open("/tmp/haste-tables.pickle", 'wb') as file:
                        pickle.dump(haste_tables, file, pickle.HIGHEST_PROTOCOL)
                elif s.frontend == FrontEnd.XVIO:
                    xvio_tables[f] = table
                    with open("/tmp/xvio-tables.pickle", 'wb') as file:
                        pickle.dump(xvio_tables, file, pickle.HIGHEST_PROTOCOL)
        except Exception:
            # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt and
            # SystemExit, making the scan impossible to abort cleanly. The best-effort
            # skipping of broken/incompatible pickles is kept for ordinary exceptions.
            pass

    print("Saving all tables to pickle")
    with open("/tmp/eklt-tables.pickle", 'wb') as file:
        pickle.dump(eklt_tables, file, pickle.HIGHEST_PROTOCOL)
    with open("/tmp/haste-tables.pickle", 'wb') as file:
        pickle.dump(haste_tables, file, pickle.HIGHEST_PROTOCOL)
    with open("/tmp/xvio-tables.pickle", 'wb') as file:
        pickle.dump(xvio_tables, file, pickle.HIGHEST_PROTOCOL)


if __name__ == '__main__':
    main()
102 |
--------------------------------------------------------------------------------
/scripts/showcase_mars_environment.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import cv2
4 | import numpy as np
5 | import pandas as pd
6 | import tqdm
7 | from scipy import interpolate
8 | from unrealcv import client
9 | import matplotlib.pyplot as plt
10 |
11 |
def main():
    """Renders a fly-through video 'mars.avi' of the UnrealCV (Mars) environment.

    Reads the camera trajectory recorded in 'unreal_panoramic_tour.csv',
    resamples it at a fixed frame rate via spline interpolation, and requests
    one rendered frame per pose from a running UnrealCV server.
    """
    # --- Recording code kept for reference: it polled the camera pose once per
    # --- second for 60 s and wrote it to 'unreal_panoramic_tour.csv'.
    # print('Connecting to UnrealCV client')
    # client.connect()
    #
    # unreal_trajectory = pd.DataFrame(columns=["t", "unreal_p_x", "unreal_p_y", "unreal_p_z", "unreal_rot_x",
    #                                           "unreal_rot_y", "unreal_rot_z"])
    #
    # t_0 = time.time()
    #
    # if not client.isconnected():  # Check if the connection is successfully established
    #     print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
    # else:
    #     time.sleep(0.03)
    #     response = client.request('vset /camera/1/fov 70')
    #     print(response)
    #     time.sleep(0.03)
    #     response = client.request('vrun r.setres 720x560')
    #     print(response)
    #     time.sleep(0.03)
    #     file = client.request('vget /camera/1/lit png')
    #     image = np.frombuffer(file, np.uint8)
    #     image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    #
    #     print(F"Got image {image.shape}")
    #
    #     t = time.time() - t_0
    #
    #     while t < 60:
    #         t = time.time() - t_0
    #         location = client.request("vget /camera/0/location")
    #         rotation = client.request("vget /camera/0/rotation")
    #         new_row = eval(F"[{location.replace(' ', ', ')}, " F"{rotation.replace(' ', ', ')}]")
    #         new_row = [t] + new_row
    #         print(new_row)
    #         unreal_trajectory.loc[len(unreal_trajectory)] = new_row
    #         print(response)
    #         time.sleep(1)
    #
    #
    #     cv2.imshow("REPLY", image)
    #     cv2.waitKey()
    #
    #     print(F"image saved to {file}")
    #     print
    #     'Image is saved to %s' % filename
    #     for gt_type in ['normal', 'object_mask']:
    #         filename = client.request('vget /camera/0/%s' % gt_type)
    #         print
    #         '%s is saved to %s' % (gt_type, filename)
    #     filename = client.request('vget /camera/0/depth depth.exr')
    #     print
    #     'depth is saved to %s' % filename
    #     # Depth needs to be saved to HDR image to ensure numerical accuracy
    #     unreal_trajectory.to_csv("unreal_panoramic_tour.csv")

    # READ, INTERPOLATE, RENDER

    unrealcv_trajectory = pd.read_csv("unreal_panoramic_tour.csv")

    # plt.figure()
    # plt.plot(unrealcv_trajectory['t'], unrealcv_trajectory['unreal_rot_y'])
    # plt.show()
    fps = 25  # output video frame rate

    # Unwrap the single 360 deg jump in the recorded yaw angle before spline
    # fitting (supposes it changes only once)
    changing_pair = np.flatnonzero(np.array(abs(unrealcv_trajectory['unreal_rot_y'].diff()) > 300))
    unrealcv_trajectory.loc[changing_pair[0]:(changing_pair[1]-1), 'unreal_rot_y'] += 360

    # 60 s render timeline sampled at the target frame rate
    t = np.arange(0, 60.0, 1.0/fps)

    rot_x = normalize_unreal_angle(interpolate_column('unreal_rot_x', t, unrealcv_trajectory))
    rot_y = normalize_unreal_angle(interpolate_column('unreal_rot_y', t, unrealcv_trajectory))
    p_x = interpolate_column('unreal_p_x', t, unrealcv_trajectory)
    p_y = interpolate_column('unreal_p_y', t, unrealcv_trajectory)
    p_z = interpolate_column('unreal_p_z', t, unrealcv_trajectory) + 100  # lift camera by 100 Unreal units
    plt.figure()
    plt.plot(t, rot_y)
    plt.show()
    plt.figure()
    plt.plot(t, rot_x)
    plt.show()

    # NOTE(review): the writer is opened at 1920x1080 while the render
    # resolution is set to 720x560 below; OpenCV silently drops frames whose
    # size does not match the writer -- confirm the actual capture resolution.
    video_writer = cv2.VideoWriter("mars.avi", cv2.VideoWriter_fourcc(*'DIVX'), fps, (1920, 1080))

    print('Connecting to UnrealCV client')
    client.connect()

    if not client.isconnected():  # Check if the connection is successfully established
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
    else:
        time.sleep(0.03)
        response = client.request('vset /camera/1/fov 70')
        print(response)
        time.sleep(0.03)
        response = client.request('vrun r.setres 720x560')
        print(response)

        # Move the camera along the interpolated trajectory, one frame per pose;
        # small sleeps give the server time to apply each request
        for i in tqdm.tqdm(range(len(p_x))):
            response = client.request(F"vset /camera/1/location {p_x[i]} {p_y[i]} {p_z[i]}")
            # print(response)
            time.sleep(0.03)
            response = client.request(F"vset /camera/1/rotation {rot_x[i]} {rot_y[i]} {0.0}")
            # print(response)

            time.sleep(0.03)
            file = client.request('vget /camera/1/lit png')
            image = np.frombuffer(file, np.uint8)
            image = cv2.imdecode(image, cv2.IMREAD_COLOR)

            # print(F"Got image {image.shape}")

            video_writer.write(image)

    print("Saving video...")
    video_writer.release()

    print("Done")
133 |
def interpolate_column(column, time, unrealcv_trajectory):
    """Evaluates a cubic interpolating spline of one trajectory column.

    Every 5th recorded sample is used as spline support point (s=0, i.e. an
    interpolating spline through those points).

    :param column: name of the column in `unrealcv_trajectory` to interpolate
    :param time: array of query timestamps
    :param unrealcv_trajectory: DataFrame with at least columns 't' and `column`
    :return: interpolated values at `time`
    """
    support_times = unrealcv_trajectory['t'][::5]
    support_values = unrealcv_trajectory[column][::5]
    spline = interpolate.splrep(support_times, support_values, s=0)
    return interpolate.splev(time, spline, der=0)
138 |
139 |
def normalize_unreal_angle(angle):
    """Wraps angles (degrees) into [-180, 180].

    Bug fix: np.fmod keeps the sign of its argument, so negative inputs can
    land in (-360, -180); previously only values above 180 were corrected,
    leaving those negative angles un-normalized. They are now shifted up by
    360 as well.

    :param angle: np.ndarray of angles in degrees (input is not modified)
    :return: wrapped copy of the angles
    """
    angle = np.fmod(angle, 360)
    angle[angle > 180] -= 360
    angle[angle < -180] += 360
    return angle
144 |
145 |
# Allow running this file directly as a script
if __name__ == '__main__':
    main()
148 |
149 |
--------------------------------------------------------------------------------
/scripts/visualize_esim_imu.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import pandas as pd
4 | import matplotlib.pyplot as plt
5 |
6 | from x_evaluate.plots import plot_evo_trajectory_with_euler_angles, PlotContext, time_series_plot
7 | from x_evaluate.utils import read_esim_trajectory_csv
8 |
9 |
def main():
    """Plots the dumped ESIM spline trajectory and its simulated IMU signals."""
    parser = argparse.ArgumentParser(description='Visualizing ESIM simulated IMU')

    parser.add_argument('--esim_imu_dump', type=str, default="/tmp/esim_imu_dump.csv")
    parser.add_argument('--esim_spline_dump', type=str, default="/tmp/esim_spline_dump.csv")
    args = parser.parse_args()

    spline_trajectory = read_esim_trajectory_csv(args.esim_spline_dump)

    with PlotContext(subplot_cols=3, subplot_rows=3) as pc:
        plot_evo_trajectory_with_euler_angles(pc, spline_trajectory, "ESIM Spline")

    imu = pd.read_csv(args.esim_imu_dump)

    # one subplot per signal group: actual/corrupted accelerometer and gyroscope
    signal_groups = [
        ['acc_actual_x', 'acc_actual_y', 'acc_actual_z'],
        ['acc_corrupted_x', 'acc_corrupted_y', 'acc_corrupted_z'],
        ['ang_vel_actual_x', 'ang_vel_actual_y', 'ang_vel_actual_z'],
        ['ang_vel_corrupted_x', 'ang_vel_corrupted_y', 'ang_vel_corrupted_z'],
    ]

    with PlotContext(subplot_rows=2, subplot_cols=2) as pc:
        timestamps = imu['t']
        for group in signal_groups:
            time_series_plot(pc, timestamps, imu[group].to_numpy().T, group)

    plt.show()
36 |
# Allow running this file directly as a script
if __name__ == '__main__':
    main()
39 |
--------------------------------------------------------------------------------
/scripts/visualize_esim_spline_derivatives.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import tqdm
6 | from matplotlib import pyplot as plt
7 | from scipy.linalg import expm
8 | from scipy.spatial.transform import Rotation as R
9 |
10 | from x_evaluate.plots import plot_evo_trajectory_with_euler_angles, PlotContext, time_series_plot
11 | from x_evaluate.trajectory_evaluation import plot_trajectory_with_gt_and_euler_angles
12 | from x_evaluate.utils import read_esim_trajectory_csv, convert_t_xyz_wxyz_to_evo_trajectory
13 |
14 |
def vec_to_skew_mat(x):
    """Returns the 3x3 skew-symmetric matrix [x]_x with [x]_x @ v == cross(x, v)."""
    a, b, c = x[0], x[1], x[2]
    return np.array([[0, -c, b],
                     [c, 0, -a],
                     [-b, a, 0]])
19 |
20 |
class DoubleIntegrator:
    """Propagates position/velocity under piecewise-constant acceleration.

    Bug fix: the constructor now stores float *copies* of the initial state.
    Previously the passed-in numpy arrays were kept by reference and then
    mutated in place by the '+=' updates in propagate(), silently corrupting
    the caller's data.
    """

    def __init__(self, p_start, v_start):
        # copy so that propagate() never aliases the caller's arrays
        self.p = np.array(p_start, dtype=float)
        self.v = np.array(v_start, dtype=float)

    def propagate(self, a, delta_t):
        """Advances state by delta_t assuming constant acceleration a over the step."""
        self.p += self.v * delta_t + 1/2 * delta_t**2 * a
        self.v += a * delta_t
29 |
30 |
def main():
    """Sanity-checks the dumped ESIM spline derivatives by dead-reckoning.

    Loads spline samples (pose, linear acceleration, angular velocity),
    re-integrates acceleration twice and angular velocity once starting from
    the initial state, and plots the re-integrated trajectory against the
    spline reference.
    """
    parser = argparse.ArgumentParser(description='Visualizing ESIM simulated IMU')

    parser.add_argument('--esim_spline_derivative', type=str, default="/tmp/esim_spline_derivative.csv")

    args = parser.parse_args()

    spline_derivatives = pd.read_csv(args.esim_spline_derivative)
    # placeholder columns for the re-integrated ("estimated") pose
    spline_derivatives[['qx_est', 'qy_est', 'qz_est', 'qw_est']] = [0, 0, 0, 1]
    spline_derivatives[['x_est', 'y_est', 'z_est']] = [0, 0, 0]

    p_start = spline_derivatives.loc[0, ['x', 'y', 'z']].to_numpy()
    delta_t = spline_derivatives.loc[1, ['t']].to_numpy() - spline_derivatives.loc[0, ['t']].to_numpy()
    # initial velocity from the first finite difference of the positions
    v_start = (spline_derivatives.loc[1, ['x', 'y', 'z']].to_numpy() - p_start) / delta_t

    integrator = DoubleIntegrator(p_start, v_start)
    #
    # # This solves the issue
    # # spline_derivatives[['wx']] = -spline_derivatives[['wx']]
    #
    # spline_derivatives[['wx', 'wy', 'wz']] = -spline_derivatives[['wx', 'wy', 'wz']]

    current_rotation = R.from_quat(spline_derivatives.loc[0, ['qx', 'qy', 'qz', 'qw']])
    spline_derivatives.loc[0, ['qx_est', 'qy_est', 'qz_est', 'qw_est']] = current_rotation.as_quat()
    spline_derivatives.loc[0, ['x_est', 'y_est', 'z_est']] = p_start

    # Forward-integrate: rotate acceleration with R^T, propagate the double
    # integrator, and advance the attitude with the matrix exponential of the
    # angular velocity. NOTE(review): the R^T rotation of 'a' implies an
    # assumption about the frame the dumped acceleration is expressed in --
    # confirm against the ESIM dump format.
    for i in tqdm.tqdm(range(len(spline_derivatives)-1)):
        delta_t = spline_derivatives.loc[i+1, ['t']].to_numpy() - spline_derivatives.loc[i, ['t']].to_numpy()
        a = spline_derivatives.loc[i, ['ax', 'ay', 'az']].to_numpy()
        integrator.propagate(np.matmul(current_rotation.as_matrix().T, a), delta_t)
        omega = spline_derivatives.loc[i, ['wx', 'wy', 'wz']].to_numpy()
        omega_cross = vec_to_skew_mat(omega)
        rot_mat = expm(omega_cross * delta_t)
        current_rotation = R.from_matrix(np.matmul(current_rotation.as_matrix(), rot_mat))
        spline_derivatives.loc[i + 1, ['qx_est', 'qy_est', 'qz_est', 'qw_est']] = current_rotation.as_quat()
        spline_derivatives.loc[i + 1, ['x_est', 'y_est', 'z_est']] = integrator.p

    # pack reference and estimate into evo trajectories for plotting
    t_xyz_wxyz = spline_derivatives[['t', 'x', 'y', 'z', 'qw', 'qx', 'qy', 'qz']].to_numpy()
    reference_trajectory = convert_t_xyz_wxyz_to_evo_trajectory(t_xyz_wxyz)

    t_xyz_wxyz_est = spline_derivatives[['t', 'x_est', 'y_est', 'z_est', 'qw_est', 'qx_est', 'qy_est',
                                         'qz_est']].to_numpy()
    estimated_trajectory = convert_t_xyz_wxyz_to_evo_trajectory(t_xyz_wxyz_est)

    with PlotContext(subplot_cols=3, subplot_rows=3) as pc:
        plot_evo_trajectory_with_euler_angles(pc, estimated_trajectory, "ESIM spline", reference_trajectory)

    # imu = pd.read_csv(args.esim_imu_dump)
    #
    # with PlotContext(subplot_rows=2, subplot_cols=2) as pc:
    #     t = imu['t']
    #     fields = ['acc_actual_x', 'acc_actual_y', 'acc_actual_z']
    #     time_series_plot(pc, t, imu[fields].to_numpy().T, fields)
    #     fields = ['acc_corrupted_x', 'acc_corrupted_y', 'acc_corrupted_z']
    #     time_series_plot(pc, t, imu[fields].to_numpy().T, fields)
    #     fields = ['ang_vel_actual_x', 'ang_vel_actual_y', 'ang_vel_actual_z']
    #     time_series_plot(pc, t, imu[fields].to_numpy().T, fields)
    #     fields = ['ang_vel_corrupted_x', 'ang_vel_corrupted_y', 'ang_vel_corrupted_z']
    #     time_series_plot(pc, t, imu[fields].to_numpy().T, fields)
    #
    # plt.show()
    print('Done')
    plt.show()
94 |
95 |
# Allow running this file directly as a script
if __name__ == '__main__':
    main()
98 |
--------------------------------------------------------------------------------
/scripts/visualize_frame_output.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import numpy as np
5 | import pandas as pd
6 | import tqdm
7 |
8 | from x_evaluate.visualizer.dataset_player import DatasetPlayer
9 | from x_evaluate.visualizer.renderer import RgbFrameRenderer, BlankRenderer
10 |
11 |
def main():
    """Plays back frames dumped by several x_evaluate runs side by side.

    Each input folder must contain a 'dumped_frames.csv' (columns 't', 'type',
    'filename'). The frames of all runs are re-sampled onto a common 1 kHz
    time grid via forward-fill and rendered next to each other into
    '/tmp/out.avi'.

    Bug fix: pandas removed the `method` argument of `Index.get_loc` in 2.0;
    the forward-fill lookup now uses `Index.get_indexer(..., method='ffill')`.
    """
    parser = argparse.ArgumentParser(description="Visualizes the output frames provided by ")
    parser.add_argument('--input_folders', nargs='+', required=True, type=str)

    args = parser.parse_args()

    frame_tables = []
    input_folders = []

    # collect the frame index tables of all folders that actually contain dumps
    for f in args.input_folders:

        frames_csv = os.path.join(f, "dumped_frames.csv")
        if not os.path.exists(frames_csv):
            print(F"Warning no 'dumped_frames.csv' found in {f}")
            continue

        df_frames_csv = pd.read_csv(frames_csv, delimiter=";")

        if len(df_frames_csv) <= 0:
            print(F"Empty 'dumped_frames.csv' found in {f}")
            continue

        frame_tables.append(df_frames_csv)
        input_folders.append(f)

    renderer = dict()

    image_type_tables = dict()
    master_image_types = dict()

    for i in range(len(input_folders)):
        input_folder = input_folders[i]
        t = frame_tables[i]

        print(F"Considering {input_folder}")

        # one sub-table per image type
        image_type_tables[input_folder] = {image_type: t.loc[t.type == image_type] for image_type in t['type'].unique()}

        # sort them by time and use time as index
        image_type_tables[input_folder] = {k: v.sort_values('t') for k, v in image_type_tables[input_folder].items()}
        image_type_tables[input_folder] = {k: v.drop_duplicates('t') for k, v in
                                           image_type_tables[input_folder].items()}
        image_type_tables[input_folder] = {k: v.set_index('t') for k, v in image_type_tables[input_folder].items()}
        lengths = [len(table) for table in image_type_tables[input_folder].values()]
        # the most frequently dumped image type defines this run's native time base
        master_image_types[input_folder] = list(image_type_tables[input_folder].keys())[np.argmax(lengths)]

        print(
            F'Found following image types {image_type_tables[input_folder].keys()} with this amount of entries: {lengths}')

    # NOTE: hardcoded playback window [20 s, 30 s) sampled at 1 kHz
    master_time = np.arange(20, 30, 0.001)

    for input_folder in input_folders:

        file_lists = {k: [] for k in image_type_tables[input_folder].keys()}

        # forward-fill: pick the latest frame at or before each master timestamp
        for t in tqdm.tqdm(master_time):
            for image_type, table in image_type_tables[input_folder].items():
                # get_indexer returns -1 (i.e. the last row via iloc) when t
                # precedes the first frame; master_time is assumed to start
                # after the first dump
                row = table.index.get_indexer([t], method='ffill')[0]
                file_lists[image_type].append(table.iloc[row]['filename'])

        # run name = name of the parent directory, see https://stackoverflow.com/a/52115793
        name = os.path.basename(os.path.normpath(os.path.join(input_folder, "..")))

        frames_folder = os.path.join(input_folder, "frames")
        renderer[name] = {
            image_type: RgbFrameRenderer(F"{name}".upper(), file_lists[image_type], frames_folder) for image_type in
            image_type_tables[input_folder].keys()
        }

    # expects runs named 'xvio', 'eklt' and 'haste' -- raises KeyError otherwise
    render_list = [renderer['xvio']['feature_img'], renderer['eklt']['feature_img'], renderer['haste']['feature_img']]

    dataset_player = DatasetPlayer(render_list, 100, scale=2,
                                   output_video_file="/tmp/out.avi")
    dataset_player.run()
102 |
103 |
# Allow running this file directly as a script
if __name__ == '__main__':
    main()
106 |
--------------------------------------------------------------------------------
/scripts/visualize_trajectory_animation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import cv2
5 | import numpy as np
6 | import pandas as pd
7 | import tqdm
8 | from x_evaluate.plots import PlotContext
9 |
10 | from x_evaluate.utils import read_x_evaluate_gt_csv, convert_to_evo_trajectory
11 |
12 | from x_evaluate.visualizer.dataset_player import DatasetPlayer
13 | from x_evaluate.visualizer.renderer import RgbFrameRenderer, BlankRenderer
14 |
15 |
def main():
    """Renders an animated 2D feature-track / trajectory video.

    NOTE(review): this script looks like work in progress -- several names
    used below (`input_folder`, `SlidingWindowFeatureTracksPlotter`,
    `input_frames`, `df_backend_tracks`, `df_features`, `df_realtime`) are
    never defined or imported in this file, so running it as-is raises
    NameError. Confirm against the intended plotter implementation.
    """
    parser = argparse.ArgumentParser(description="Visualizes trajectories as animation")
    parser.add_argument('--input_folders', nargs='+', required=True, type=str)

    args = parser.parse_args()

    input_folders = []
    ref_trajectory = None  # ground truth of the first folder, reused for all runs
    trajectories = []

    for f in args.input_folders:

        if ref_trajectory is None:
            ref_trajectory = read_x_evaluate_gt_csv(os.path.join(f, "gt.csv"))

        df_poses = pd.read_csv(os.path.join(f, "pose.csv"), delimiter=";")
        traj_est, _ = convert_to_evo_trajectory(df_poses, prefix="estimated_")

        # shift the estimate to the ground-truth time origin
        traj_est.timestamps -= ref_trajectory.timestamps[0]
        trajectories.append(traj_est)

        input_folders.append(f)

    print(trajectories)

    # NOTE(review): 'input_folder' (singular) is undefined here -- probably
    # input_folders[0] was intended
    video_writer = cv2.VideoWriter(os.path.join(input_folder, "feature_tracks_2d.avi"), cv2.VideoWriter_fourcc(
        *'DIVX'), 25, (1280, 720))

    with PlotContext(base_width_inch=12.8, base_height_inch=7.2) as pc:
        # NOTE(review): SlidingWindowFeatureTracksPlotter and the df_* inputs
        # are not defined or imported in this file
        plotter = SlidingWindowFeatureTracksPlotter(pc, input_folder, input_frames, df_backend_tracks,
                                                    df_features, df_realtime, 240, 180, 0.1)
        step = 0.001
        current = 1

        # def press(event):
        #     if event.key == 'escape':
        #         plt.close(pc.figure)
        #     elif event.key == 'right':
        #         press.current += step
        #         plotter.plot_till_time(press.current)
        # press.current = current

        # render the window [30 s, 40 s) frame by frame into the video
        for t in tqdm.tqdm(np.arange(30, 40, step)):
            plotter.plot_till_time(t)
            # grab the rendered matplotlib canvas as an RGBA byte buffer
            buffer = np.asarray(pc.figure.canvas.buffer_rgba())
            buffer = buffer.reshape(pc.figure.canvas.get_width_height()[::-1] + (4,))

            # img = cv2.cvtColor(buffer, cv2.COLOR_RGBA cv2.COLOR_RGBA2BGRA)
            # print(img.shape)

            # buffer = cv2.cvtColor(buffer, cv2.COLOR_RGBA2BGRA)
            # buffer = cv2.cvtColor(buffer, cv2.COLOR_BGRA2RGBA)
            buffer = cv2.cvtColor(buffer, cv2.COLOR_RGB2BGR)
            # print(buffer.shape)
            # cv2.imshow("plot", buffer)
            # cv2.waitKey(1)
            video_writer.write(buffer)

        print("Saving video...")
        video_writer.release()


    # --- Alternative playback based on dumped frames, kept for reference ---
    # renderer = dict()
    #
    # image_type_tables = dict()
    # master_image_types = dict()
    #
    # for i in range(len(input_folders)):
    #     input_folder = input_folders[i]
    #     t = frame_tables[i]
    #
    #     print(F"Considering {input_folder}")
    #
    #     image_type_tables[input_folder] = {image_type: t.loc[t.type == image_type] for image_type in t['type'].unique()}
    #
    #     # sort them by time and use time as index
    #     image_type_tables[input_folder] = {k: v.sort_values('t') for k, v in image_type_tables[input_folder].items()}
    #     image_type_tables[input_folder] = {k: v.drop_duplicates('t') for k, v in
    #                                        image_type_tables[input_folder].items()}
    #     image_type_tables[input_folder] = {k: v.set_index('t') for k, v in image_type_tables[input_folder].items()}
    #     lengths = [len(table) for table in image_type_tables[input_folder].values()]
    #     master_image_types[input_folder] = list(image_type_tables[input_folder].keys())[np.argmax(lengths)]
    #
    #     print(
    #         F'Found following image types {image_type_tables[input_folder].keys()} with this amount of entries: {lengths}')
    #
    # master_folder = input_folders[0]
    #
    # master_time = image_type_tables[master_folder][master_image_types[master_folder]].index
    #
    # master_time = np.arange(np.min(master_time)+0.1, np.max(master_time)-0.1, 0.001)
    #
    # master_time = master_time[35000:40000]
    #
    # for input_folder in input_folders:
    #
    #     file_lists = {k: [] for k in image_type_tables[input_folder].keys()}
    #
    #     # for loop for now:
    #     for t in tqdm.tqdm(master_time):
    #         for image_type, table in image_type_tables[input_folder].items():
    #             row = table.index.get_loc(t, 'ffill')
    #             file_lists[image_type].append(table.iloc[row]['filename'])
    #
    #     # see https://stackoverflow.com/a/52115793
    #
    #     name = os.path.basename(os.path.normpath(os.path.join(input_folder, "..")))
    #
    #     frames_folder = os.path.join(input_folder, "frames")
    #     renderer[name] = {
    #         image_type: RgbFrameRenderer(F"{name}".upper(), file_lists[image_type], frames_folder) for image_type in
    #         image_type_tables[input_folder].keys()
    #     }
    #     #
    #     # file_list = t.loc[t.type == 'input_img']['filename']
    #     #
    #     # test_renderer = RgbFrameRenderer("TEST", file_list, frames_folder)
    #
    # render_list = [renderer['eklt']['feature_img'], renderer['eklt']['tracker_img'],
    # renderer['haste']['feature_img'], BlankRenderer(len(renderer['haste']['feature_img'].file_lists[0]), (240, 180)),
    # renderer['xvio']['feature_img'], renderer['xvio']['tracker_img']]
    #
    # dataset_player = DatasetPlayer(render_list, 100, scale=2, grid_size=(2, 3), row_first=False)
    # # output_video_file="/tmp/out.avi")
    # dataset_player.run()
141 |
142 |
# Allow running this file directly as a script
if __name__ == '__main__':
    main()
145 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 |
# Package metadata and dependencies for the x_evaluate python package.
# NOTE(review): distutils is deprecated (removed from the stdlib in Python
# 3.12) -- consider migrating to setuptools.setup with identical arguments.
setup(
    name='x-evaluate',
    version='1.0.0',
    scripts=['test/evaluate.py'],  # installed as a command-line script
    packages=['x_evaluate', 'x_evaluate.rpg_tracking_analysis'],
    package_dir={'x_evaluate': 'src/x_evaluate'},
    install_requires=['numpy>=1.19.2',
                      'matplotlib>=3.3.4',
                      'envyaml>=1.7',
                      'evo>=1.13.4',
                      'gitpython>=3.1.14',
                      'pyyaml>=5.4.1']
)
16 |
--------------------------------------------------------------------------------
/src/x_evaluate/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jpl-x/x_evaluate/5d95accbed53c21709725b5fff9ffeb05ccbb391/src/x_evaluate/__init__.py
--------------------------------------------------------------------------------
/src/x_evaluate/comparisons.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | import numpy as np
4 | import pandas as pd
5 |
6 | from x_evaluate.evaluation_data import EvaluationDataSummary
7 |
8 |
def identify_common_datasets(summaries: List[EvaluationDataSummary]):
    """Returns the set of dataset names present in every summary.

    Bug fix: an empty `summaries` list previously returned None, which crashed
    callers iterating over the result; it now returns an empty set (still
    falsy, so truthiness checks keep working).

    :param summaries: evaluation run summaries to intersect
    :return: set of dataset names common to all summaries
    """
    common_datasets = None
    for s in summaries:
        dataset_names = set(s.data.keys())
        if common_datasets is None:
            common_datasets = dataset_names
        else:
            common_datasets = common_datasets.intersection(dataset_names)
    return common_datasets if common_datasets is not None else set()
16 |
17 |
def identify_changing_parameters(summaries: List[EvaluationDataSummary], filter=True, filter_keys=None,
                                 common_datasets=None):
    """Finds parameter keys whose values differ between the given runs.

    :param summaries: evaluation run summaries to compare
    :param filter: when True, keys in `filter_keys` are removed from the result
    :param filter_keys: keys to exclude; defaults to the initial-state keys
    :param common_datasets: datasets to consider; computed when not provided
    :return: set of changing parameter keys
    """
    if not common_datasets:
        common_datasets = identify_common_datasets(summaries)

    if filter and not filter_keys:
        # by default filter initial states
        filter_keys = {'p', 'v', 'q', 'b_w', 'b_a'}

    params = [s.data[d].params for s in summaries for d in common_datasets]
    changing_parameters = get_changing_parameter_keys(params)

    return changing_parameters.difference(filter_keys) if filter else changing_parameters
36 |
37 |
def create_parameter_changes_table(summaries: List[EvaluationDataSummary], common_datasets=None):
    """Builds a table of parameters that change between evaluation runs.

    Rows are parameter keys (suffixed with 'DIRTY' when a parameter does not
    change consistently over all datasets), columns are the run names.

    Bug fix: the default `common_datasets=None` previously crashed with
    'NoneType is not iterable'; it is now computed from the summaries.

    :param summaries: evaluation run summaries to compare
    :param common_datasets: datasets to consider; computed when not provided
    :return: pd.DataFrame with one column per run
    """
    if common_datasets is None:
        common_datasets = identify_common_datasets(summaries)

    changes = dict()
    equally_change_among_datasets = dict()

    for d in common_datasets:
        params = [s.data[d].params for s in summaries]

        changing_parameters = get_changing_parameter_keys(params)

        for k in changing_parameters:
            all_params = [p[k] for p in params]
            if k in changes:
                # the same key was already found changing on another dataset:
                # verify it changes in exactly the same way there
                is_same = np.all([are_list_entries_equal(list(t)) for t in zip(changes[k], all_params)])
                if not is_same:
                    print(F"WARNING parameter '{k}' does not change consistently over datasets ("
                          F"{list(common_datasets)[0]}, {d})")
                    equally_change_among_datasets[k] = False
            else:
                changes[k] = all_params  # = changing_parameters.union({k})
                equally_change_among_datasets[k] = True

    data = {'Evaluation Runs': [s.name for s in summaries]}
    for k, v in changes.items():
        if equally_change_among_datasets[k]:
            data[k] = v
        else:
            data[F"{k} DIRTY"] = v

    # transpose so that parameters are rows and runs are columns
    parameter_changes_table = pd.DataFrame(data)
    parameter_changes_table = parameter_changes_table.T
    parameter_changes_table.columns = parameter_changes_table.iloc[0]
    parameter_changes_table = parameter_changes_table.iloc[1:]
    return parameter_changes_table
71 |
72 |
def get_changing_parameter_keys(params):
    """Returns the keys (common to all dicts in `params`) whose values differ.

    Prints a warning when the dicts do not all share the same key set; only
    keys present in every dict are compared.

    :param params: list of parameter dicts
    :return: set of keys with non-constant values
    """
    common_keys = None
    for p in params:
        if common_keys:
            missing = common_keys.difference(p)
            if len(missing) > 0:
                print(F"WARNING: different parameter types used when comparing parameters: {missing}")
            common_keys = common_keys.intersection(p.keys())
        else:
            common_keys = set(p.keys())

    # keep only the keys whose value list is not constant across all dicts
    return {k for k in common_keys if not are_list_entries_equal([p[k] for p in params])}
89 |
90 |
def are_list_entries_equal(all_params):
    """True when every entry of `all_params` equals the first one.

    List entries are compared element-wise via numpy; hashable scalars are
    compared by collapsing them into a set.
    """
    first = all_params[0]
    if isinstance(first, list):
        return np.all(np.array(all_params) == first)
    return len(set(all_params)) <= 1
97 |
98 |
99 |
100 |
101 |
102 |
103 |
--------------------------------------------------------------------------------
/src/x_evaluate/evaluation_data.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from typing import Dict, List, Optional
6 |
7 | from evo.core.trajectory import PoseTrajectory3D
8 |
9 | from x_evaluate.utils import nanrms
10 |
11 |
class FrontEnd(Enum):
    """Feature-tracking front-ends an evaluation run can be configured with.

    The enum value doubles as the human-readable name (see __str__).
    """
    XVIO = 'XVIO'
    EKLT = 'EKLT'
    EVIO = 'EVIO'
    HASTE = 'HASTE'

    def __str__(self):
        return self.value
20 |
21 |
class AlignmentType(Enum):
    """Trajectory alignment applied to the estimate before computing errors."""
    Disabled = 1  # compare raw trajectories without alignment
    PosYaw = 2    # position + yaw alignment
    SE3 = 3       # rigid-body alignment (rotation + translation)
    SIM3 = 4      # similarity alignment (rotation + translation + scale)
27 |
28 |
class DistributionSummary:
    """Compact statistical summary (moments, quantiles, histograms) of a sample.

    NaN entries are ignored by the scalar statistics and counted in `nans`.
    """
    N_BINS = 50
    QUANTILES = np.round(np.arange(0, 1.01, 0.05), 2)  # 0.0, 0.05, ..., 1.0

    def __init__(self, data: np.ndarray):
        self.n = np.size(data)
        self.mean = np.nanmean(data)
        self.max = np.nanmax(data)
        self.min = np.nanmin(data)
        self.rms = nanrms(data)
        self.std = np.nanstd(data)
        self.nans = np.count_nonzero(np.isnan(data))
        # convenience dict mapping quantile level -> quantile value
        self.quantiles = dict(zip(self.QUANTILES, np.nanquantile(data, self.QUANTILES)))

        # small-range integer data gets one bin per integer value, everything
        # else N_BINS equally spaced bins over [min, max]
        if issubclass(data.dtype.type, np.integer) and self.max - self.min < self.N_BINS:
            lower = int(self.min)
            upper = int(self.max)
            edges = np.linspace(lower, upper, upper - lower + 1)
        else:
            edges = np.linspace(self.min, self.max, self.N_BINS)
        self.hist, self.bins = np.histogram(data, bins=edges)

        # a log-spaced histogram only makes sense for strictly positive samples
        if np.all(data > 0):
            log_edges = np.logspace(np.log10(self.min), np.log10(self.max), self.N_BINS)
            self.hist_log, self.bins_log = np.histogram(data, log_edges)
56 |
57 |
class TrajectoryError:
    """A labelled array of per-sample trajectory errors."""
    description: str         # human-readable label of the error metric
    error_array: np.ndarray  # per-sample error values
61 |
62 |
class TrajectoryData:
    """Ground truth, estimate and error data of one trajectory evaluation."""
    imu_bias: Optional[pd.DataFrame]  # estimated IMU biases, None when not dumped
    raw_est_t_xyz_wxyz: np.ndarray    # raw estimate rows [t, x, y, z, qw, qx, qy, qz]
    traj_gt: PoseTrajectory3D         # full ground-truth trajectory

    # ground truth / estimate after timestamp association
    traj_gt_synced: PoseTrajectory3D
    traj_est_synced: PoseTrajectory3D

    alignment_type: AlignmentType  # how the estimate was aligned to ground truth
    alignment_frames: int          # number of frames used for the alignment

    traj_est_aligned: PoseTrajectory3D  # estimate after alignment

    ate_errors: Dict[str, np.ndarray]  # absolute errors keyed by metric name

    # relative pose errors (translation / rotation), presumably keyed by
    # sub-trajectory length -- confirm with the code filling these dicts
    rpe_error_t: Dict[float, np.ndarray]
    rpe_error_r: Dict[float, np.ndarray]

    def __init__(self):
        # only containers and optionals are initialized; the remaining fields
        # are populated by the evaluation pipeline
        self.imu_bias = None
        self.ate_errors = dict()
        self.rpe_error_t = dict()
        self.rpe_error_r = dict()
86 |
87 |
class FeatureTrackingData:
    """Feature tracks and tracking-error evaluation results of one run."""
    # xVIO (frame-based) tracker outputs
    df_xvio_num_features: pd.DataFrame      # number of tracked features over time
    df_xvio_tracks: Optional[pd.DataFrame]  # individual feature tracks, if dumped
    xvio_tracks_gt: np.ndarray              # ground-truth reference for the tracks
    xvio_tracks_error: np.ndarray           # tracking errors w.r.t. ground truth
    xvio_tracking_evaluation_config: Dict   # config used for the track evaluation

    # EKLT (event-based) tracker outputs; None when the run did not use EKLT
    df_eklt_num_features: Optional[pd.DataFrame]
    df_eklt_feature_age: Optional[pd.DataFrame]
    df_eklt_tracks: Optional[pd.DataFrame]
    eklt_tracks_gt: Optional[np.ndarray]
    eklt_tracks_error: Optional[np.ndarray]
    eklt_tracking_evaluation_config: Optional[Dict]

    def __init__(self):
        # all optional fields default to None until filled by the evaluation
        self.df_eklt_num_features = None
        self.df_eklt_feature_age = None
        self.df_eklt_tracks = None
        self.eklt_tracks_gt = None
        self.eklt_tracks_error = None
        self.eklt_tracking_evaluation_config = None
        self.df_xvio_tracks = None
110 |
111 |
class PerformanceData:
    """
    Holds computational performance data useful for plots and comparisons
    """
    df_realtime: pd.DataFrame   # per-step timing log (presumably processing vs. wall time)
    df_resources: pd.DataFrame  # resource usage samples (presumably CPU / memory)
    rt_factors: np.ndarray      # realtime factors derived from the timing log
119 |
120 |
class EKLTPerformanceData:
    """EKLT-specific computational performance metrics."""
    events_per_sec: np.ndarray       # processed event rate
    events_per_sec_sim: np.ndarray   # event rate w.r.t. simulated/sensor time -- confirm
    optimizations_per_sec: np.ndarray
    optimization_iterations: DistributionSummary  # iterations per optimization
    event_processing_times: DistributionSummary   # per-event processing time
127 |
128 |
class EvaluationData:
    """All evaluation outputs of a single run on one dataset."""
    name: str
    params: Dict         # full parameter set used for the run
    command: str         # presumably the command line that produced the run
    configuration: Dict
    trajectory_data: Optional[TrajectoryData]  # None when no pose output is available
    performance_data: PerformanceData
    feature_data: FeatureTrackingData
    eklt_performance_data: Optional[EKLTPerformanceData]  # only set for EKLT runs

    df_ekf_updates: Optional[pd.DataFrame]  # EKF update log, if dumped

    def __init__(self):
        self.trajectory_data = None
        self.eklt_performance_data = None
144 |
145 |
class GitInfo:
    """Snapshot of a git repository's state at evaluation time."""
    branch: str          # name of the checked-out branch
    last_commit: str     # identifier of the most recent commit
    files_changed: bool  # whether the working tree had uncommitted changes

    def __init__(self, branch, last_commit, files_changed):
        self.branch, self.last_commit, self.files_changed = branch, last_commit, files_changed
155 |
156 |
class EvaluationDataSummary:
    """Aggregates the results of one evaluation run over multiple datasets."""
    name: str
    data: Dict[str, EvaluationData]  # per-dataset results keyed by dataset name

    trajectory_summary_table: pd.DataFrame  # summary metrics across datasets

    configuration: Dict
    frontend: FrontEnd  # front-end used by this run

    # state of the involved git repositories when the evaluation ran
    x_git_info: GitInfo
    x_vio_ros_git_info: GitInfo

    def __init__(self):
        self.data = dict()
171 |
--------------------------------------------------------------------------------
/src/x_evaluate/math_utils.py:
--------------------------------------------------------------------------------
1 | import evo.core
2 | import numpy as np
3 | from evo.core.transformations import quaternion_matrix
4 | import evo.core.trajectory
5 |
6 |
def vec_to_skew_mat(x):
    """Returns the skew-symmetric (cross-product) matrix of a 3-vector x."""
    x0, x1, x2 = x[0], x[1], x[2]
    return np.array([[0, -x2, x1],
                     [x2, 0, -x0],
                     [-x1, x0, 0]])
11 |
12 |
def quat_to_rot_vel_mat(quat_wxyz):
    """Builds the 3x4 matrix H = 2 * [-q_v | [q_v]_x + q_w * I] for q = (w, x, y, z).

    Presumably maps the quaternion time derivative to angular velocity
    (omega = H @ q_dot) -- confirm with callers.
    """
    scalar, vector = quat_wxyz[0], quat_wxyz[1:]
    right_block = vec_to_skew_mat(vector) + scalar * np.eye(3)
    return 2 * np.hstack((-vector.reshape(3, 1), right_block))
19 |
20 |
def quat_left_mat(quat_wxyz):
    """Builds the 4x4 left-multiplication matrix L(q) of a (w, x, y, z) quaternion.

    Presumably L(q0) @ q1 equals the quaternion product q0 * q1 (see quat_mul).
    """
    scalar, vector = quat_wxyz[0], quat_wxyz[1:]
    mat = np.empty((4, 4))
    mat[:, 0] = quat_wxyz  # first column is the quaternion itself
    mat[0, 1:] = -vector
    mat[1:, 1:] = vec_to_skew_mat(vector) + scalar * np.eye(3)
    return mat
27 |
28 |
def quat_mul(q0, q1):
    """Quaternion product q0 * q1, both given in (w, x, y, z) convention."""
    return quat_left_mat(q0).dot(q1)
31 |
32 |
def quat_inv(quat_wxyz):
    """Returns the conjugate of a (w, x, y, z) quaternion.

    For unit quaternions the conjugate equals the inverse (NOTE: no
    normalization is performed here).

    Bug fix: previously the vector part of the *input* array was negated in
    place and the same object returned, silently corrupting the caller's
    quaternion. A new array is now returned and the input left untouched.
    """
    result = np.array(quat_wxyz, dtype=float)
    result[1:] *= -1
    return result
36 |
37 |
def quaternion_sequence_to_rotational_velocity(t_wxyz):
    """Estimates angular speed from a timed unit-quaternion sequence.

    :param t_wxyz: array with rows [t, qw, qx, qy, qz]; quaternions must be unit norm
    :return: array of shape (N-1, 3). NOTE(review): each row is filled with the
             *scalar* angular-speed norm broadcast into all three columns --
             probably a shape (N-1,) result (or the omega vector itself) was
             intended; confirm with callers.
    """
    # q_wxyz_from = q_wxyz_from[::5, :]
    # q_wxyz_to = q_wxyz_to[::5, :]
    # time_diff = time_diff[::5]
    q_wxyz_from = t_wxyz[:-1, 1:]
    q_wxyz_to = t_wxyz[1:, 1:]
    time_diff = t_wxyz[1:, 0] - t_wxyz[:-1, 0]

    assert np.allclose(np.linalg.norm(q_wxyz_from, axis=1), 1)

    # q_wxyz_from_inv = np.apply_along_axis(quaternion_inverse, 1, q_wxyz_from)
    # quat_from_mat = np.apply_along_axis(quat_left_mat, 1, q_wxyz_from_inv)

    # calc_angular_speed()

    # calc_angular_speed()
    # np.apply_along_axis(calc_angular_speed, 1, q_wxyz_from, q_wxyz_to, t_wxyz[:-1, 0], t_wxyz[1:, 0])
    # q_diff = np.matmul(quat_from_mat, q_wxyz_to[:, :, None]).squeeze(-1)
    # q_dot = q_diff
    # q_dot = np.empty_like(q_diff)
    #
    # for i in range(len(q_dot)):
    #     theta_half = np.arccos(q_diff[i, 0])
    #     n = np.array([1, 0, 0])
    #     if np.abs(np.sin(theta_half)) > 1e-5:
    #         n = q_diff[i, 1:] / np.sin(theta_half)
    #
    #     q_dot[i, :] = theta_half * n
    #     # s = Slerp([0, time_diff[i]], R.from_quat([[1.0, 0, 0, 0], q_diff[i, :]]))
    #     # q_dot[i, :] = s(1).as_quat()

    # https://fgiesen.wordpress.com/2012/08/24/quaternion-differentiation/


    # expm()

    # q_diff = q_wxyz_to - q_wxyz_from
    # q_diff[:, 0] /= time_diff
    # q_diff[:, 1] /= time_diff
    # q_diff[:, 2] /= time_diff
    # mapping_matrices = np.apply_along_axis(quat_to_rot_vel_mat, 1, q_wxyz_from)
    # omegas = np.matmul(mapping_matrices, q_dot[:, :, None]).squeeze(-1)
    # rot_vel = np.linalg.norm(omegas, axis=1)

    # rot_vel = rot_vel / time_diff
    #
    rot_vel = np.empty((len(q_wxyz_from), 3))
    for i, (q1, q2, delta_t) in enumerate(zip(q_wxyz_from, q_wxyz_to, time_diff)):
        # quaternion_matrix returns homogeneous 4x4 matrices; only the upper
        # 3x3 rotation part is relevant for the entries indexed below
        R_1 = quaternion_matrix(q1)
        R_2 = quaternion_matrix(q2)

        # finite-difference derivative of the rotation matrix
        R_dot = (R_2 - R_1) / delta_t

        # [omega]_x ~= R^T R_dot; re-antisymmetrize to suppress numerical noise
        omega_skew = R_1.T @ R_dot
        omega_skew = 1/2 * (omega_skew - omega_skew.T)
        # NOTE(review): component order/sign differs from the usual skew-matrix
        # convention; harmless here since only the norm is used below
        omega = np.array([omega_skew[1, 2], omega_skew[0, 2], omega_skew[0, 1]])
        rot_vel[i, :] = np.linalg.norm(omega)
        # rot_vel[i] = calc_angular_speed(quaternion_matrix(q1), quaternion_matrix(q2), t1, t2)
    return rot_vel
97 |
98 |
def calculate_velocities(trajectory: evo.core.trajectory.Trajectory):
    """Finite-difference linear velocity and rotational speed of a trajectory.

    :param trajectory: evo trajectory with timestamps, positions and wxyz quaternions
    :return: (timestamps[:-1], linear velocity (N-1, 3), rotational speed array)
    """
    dt = np.diff(trajectory.timestamps)
    velocity = np.diff(trajectory.positions_xyz, axis=0) / dt[:, np.newaxis]
    stamped_quats = np.hstack((trajectory.timestamps[:, np.newaxis], trajectory.orientations_quat_wxyz))
    rot_vel = quaternion_sequence_to_rotational_velocity(stamped_quats)
    return trajectory.timestamps[:-1], velocity, rot_vel
108 |
109 |
def moving_average(t, data, time_window=10):
    """
    Moving average of `data` over a fixed time window, assuming ~uniform sampling.

    :param t: 1-D array of timestamps
    :param data: 1-D array of samples aligned with t
    :param time_window: averaging window length, in the same units as t
    :return: (t_out, averaged) — timestamps trimmed to the valid region and the
             window-averaged data
    :raises NotImplementedError: if the timestamp spacing is too irregular
    """
    delta_t = t[1:] - t[:-1]

    # 3-sigma region of difference should be within 1% of our time window to be able to assume constant delta_t
    if 3*np.std(delta_t) > 0.01*time_window:
        # BUG FIX: `raise NotImplemented(...)` raised a TypeError, since
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError("Moving average has not been implemented for variable delta_t timesteps")

    # convert the time window into a sample count using the mean spacing
    fixed_n = int(np.round(time_window / np.mean(delta_t)))

    data = moving_average_fixed_n(data, fixed_n)
    t_out = t[fixed_n - 1:]
    return t_out, data
122 |
123 |
def moving_average_fixed_n(data: np.array, n: int):
    """Simple moving average with a fixed window of n samples.

    :param data: 1-D array of samples
    :param n: window size in samples
    :return: array of length len(data) - n + 1 with the window means
    """
    running = np.cumsum(data)
    # after this subtraction, running[i] (for i >= n-1) holds the sum of the
    # n samples ending at index i
    running[n:] -= running[:-n]
    return running[n - 1:] / n
129 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jpl-x/x_evaluate/5d95accbed53c21709725b5fff9ffeb05ccbb391/src/x_evaluate/rpg_tracking_analysis/__init__.py
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/bag2dataframe.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | try:
5 | import rosbag
6 | except:
7 | pass
8 | import numpy as np
9 | from cv_bridge import CvBridge
10 |
11 | from x_evaluate.rpg_tracking_analysis.tracker_utils import q_to_R
12 |
13 |
class Bag2Images:
    """Loads every image (as BGR) and its header timestamp from one rosbag topic."""

    def __init__(self, path_to_bag, topic):
        self.path_to_bag = path_to_bag

        self.times = []
        self.images = []

        self.bridge = CvBridge()

        with rosbag.Bag(path_to_bag) as bag:
            available = bag.get_type_and_topic_info().topics
            if topic not in available:
                raise ValueError("The topic with name %s was not found in bag %s" % (topic, path_to_bag))

            for _, msg, _ in bag.read_messages(topics=[topic]):
                stamp = msg.header.stamp
                self.times.append(stamp.secs + 1e-9 * stamp.nsecs)
                self.images.append(self.bridge.imgmsg_to_cv2(msg, "bgr8"))
35 |
36 |
class Bag2CameraInfo:
    """Reads the 3x3 intrinsic matrix K and the image size from the first
    message on a camera-info topic."""

    def __init__(self, path_to_bag, topic):
        self.K = None
        self.img_size = None

        with rosbag.Bag(path_to_bag) as bag:
            available = bag.get_type_and_topic_info().topics
            if topic not in available:
                raise ValueError("The topic with name %s was not found in bag %s" % (topic, path_to_bag))

            for _, msg, _ in bag.read_messages(topics=[topic]):
                # only the first message is needed; intrinsics do not change
                self.K = np.array(msg.K).reshape((3, 3))
                self.img_size = (msg.width, msg.height)
                break
52 |
53 |
class Bag2DepthMap:
    """Loads every depth map (32-bit float image) and its header timestamp from
    one rosbag topic."""

    def __init__(self, path_to_bag, topic):
        self.times = []
        self.depth_maps = []

        self.bridge = CvBridge()

        with rosbag.Bag(path_to_bag) as bag:
            available = bag.get_type_and_topic_info().topics
            if topic not in available:
                raise ValueError("The topic with name %s was not found in bag %s" % (topic, path_to_bag))

            for _, msg, _ in bag.read_messages(topics=[topic]):
                stamp = msg.header.stamp
                self.times.append(stamp.secs + 1e-9 * stamp.nsecs)
                self.depth_maps.append(self.bridge.imgmsg_to_cv2(msg, "32FC1"))
73 |
74 |
class Bag2Trajectory:
    """Loads a pose trajectory from a rosbag topic as 4x4 homogeneous transforms
    plus the corresponding wxyz quaternions and header timestamps."""

    def __init__(self, path_to_bag, topic):
        self.times = []
        self.poses = []
        self.quaternions = []

        # NOTE(review): bridge is never used in this class — kept only so the
        # attribute surface matches the other loaders; confirm before removing
        self.bridge = CvBridge()

        with rosbag.Bag(path_to_bag) as bag:
            available = bag.get_type_and_topic_info().topics
            if topic not in available:
                raise ValueError("The topic with name %s was not found in bag %s" % (topic, path_to_bag))

            for _, msg, _ in bag.read_messages(topics=[topic]):
                stamp = msg.header.stamp
                self.times.append(stamp.secs + 1e-9 * stamp.nsecs)

                orientation = msg.pose.orientation
                position = msg.pose.position
                quaternion = np.array([orientation.w, orientation.x, orientation.y, orientation.z])
                translation = np.array([[position.x], [position.y], [position.z]])

                rotation = q_to_R(quaternion)[0]

                # assemble [R | t; 0 0 0 1]
                transform = np.vstack([np.hstack([rotation, translation]), np.array([0, 0, 0, 1])])

                self.quaternions.append(quaternion)
                self.poses.append(transform)
104 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/compare_tracks.py:
--------------------------------------------------------------------------------
1 | """
2 | File that takes dataset.yaml path and generates an object containing images and timestamps
3 | """
4 | import argparse
5 | import os
6 | from os.path import isdir, isfile
7 |
8 | from plot_tracks import plot_num_features, format_summary
9 |
# CLI: aggregate and compare tracking errors / feature ages of several methods
# found under --root, using plot styles from --config.
parser = argparse.ArgumentParser(description='''Script that generates a plot and average age/error files for several methods.
The user must supply the root directory where the methods are stored, a config file
and an error threshold.''')

parser.add_argument('--results_directory', help='Directory where results will be stored.', default="")
parser.add_argument('--root', help='Directory where tracks_directories are found', default="", required=True)
parser.add_argument('--config', help="Config file with label and colors for each method.", default="", required=True)

parser.add_argument('--error_threshold', help="All tracks with an error higher than this threshold will be discarded."
                                              "error_threshold < 0 will not discard any tracks.",
                    type=float, default=10)

args = parser.parse_args()

# check that directories and config exist
assert isdir(args.root), "Root directory '%s' is not a directory." % args.root
# default the results directory to the root directory when none was given
args.results_directory = args.results_directory or args.root
assert isdir(args.results_directory), "Results directory '%s' is not a directory." % args.results_directory
assert isfile(args.config), "Config file '%s' is not a file." % args.config
assert args.config.endswith(".yaml") or args.config.endswith(
    ".yml"), "Config file '%s' is not a yaml file." % args.config

print("Plotting errors for methods in '%s'" % args.root)
print("Will save all data in %s." % args.results_directory)
# f="" means: scan all methods under root instead of a single tracks file
fig3, ax3, summary = plot_num_features(f="", root=args.root,
                                       config_path=args.config,
                                       error_threshold=args.error_threshold)

base_dir = os.path.basename(args.results_directory)
print("Saving error plot in location %s/errors.pdf" % base_dir)
fig3.savefig(os.path.join(args.results_directory, "errors.pdf"), bbox_inches="tight")

# write summary into a text file
print("Saving error and age table in locations %s/errors.txt and %s/feature_age.txt" % (base_dir, base_dir))
error_table, age_table = format_summary(summary)
with open(os.path.join(args.results_directory, "errors.txt"), "w") as f:
    f.write(error_table)
with open(os.path.join(args.results_directory, "feature_age.txt"), "w") as f:
    f.write(age_table)
49 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/dataset.py:
--------------------------------------------------------------------------------
1 | """
2 | File that takes dataset.yaml path and generates an object containing images and timestamps
3 | """
4 | import numpy as np
5 | from os.path import join, isfile
6 |
7 | from x_evaluate.rpg_tracking_analysis.tracker_utils import slerp, q_to_R, interpolate
8 | from x_evaluate.rpg_tracking_analysis.bag2dataframe import Bag2Images, Bag2DepthMap, Bag2Trajectory, Bag2CameraInfo
9 |
10 |
class Dataset:
    # Generic accessor for timestamped rosbag data (camera frames, poses, depth
    # maps or camera info), with interpolation and iterator support.
    def __init__(self, root, config, dataset_type="images"):
        """
        :param root: directory containing the bag file named in config["name"]
        :param config: dataset config dict; needs "name", "type" and the topic
                       key required by the selected loader
        :param dataset_type: one of "frames", "poses", "depth", "camera_info".
            NOTE(review): the default "images" matches none of the branches
            below and would raise ValueError — confirm whether "frames" was meant.
        """
        self.root = root
        self.dataset_type = dataset_type

        if dataset_type == "frames":
            load = self.load_images
        elif dataset_type == "poses":
            load = self.load_poses
        elif dataset_type == "depth":
            load = self.load_depth
        elif dataset_type == "camera_info":
            load = self.load_camera_info
        else:
            raise ValueError

        # queue: list of payloads, times: matching timestamps (None for camera info)
        self.queue, self.times, self.config = load(root, config)

        # iteration cursor used by current(), __len__ and __next__
        self.it = 0

    def _load(self, root, config, topic, cls, *attrs):
        """
        Load the bag at root/config["name"] with loader class `cls` and copy each
        attribute named in `attrs` from the loader onto this object.

        :return: (first requested attribute, np.array of timestamps or None, config).
            NOTE(review): implicitly returns None if config["type"] != "bag".
        """

        path = join(root, config["name"])
        assert isfile(path), "The dataset '%s' does not exist." % join(root, config["name"])

        if config["type"] == "bag":
            assert topic in config, "The dataset config at needs a key with name '%s'." % topic
            bag = cls(path, topic=config[topic])

            # mirror the loader's attributes (e.g. K, img_size, poses) on self
            for attr in attrs:
                setattr(self, attr, getattr(bag, attr))

            # loaders without per-message timestamps (camera info) yield times=None
            if not hasattr(bag, "times"):
                return getattr(bag, attrs[0]), None, config
            return getattr(bag, attrs[0]), np.array(bag.times), config

    def load_camera_info(self, root, dataset_yaml):
        # intrinsic matrix K and image size; no timestamps
        return self._load(root, dataset_yaml, "camera_info_topic", Bag2CameraInfo, "K", "img_size")

    def load_poses(self, root, dataset_yaml):
        # 4x4 homogeneous poses plus wxyz quaternions
        return self._load(root, dataset_yaml, "pose_topic", Bag2Trajectory, "poses", "quaternions")

    def load_images(self, root, dataset_yaml):
        # camera frames
        return self._load(root, dataset_yaml, "image_topic", Bag2Images, "images")

    def load_depth(self, root, dataset_yaml):
        # float depth maps
        return self._load(root, dataset_yaml, "depth_map_topic", Bag2DepthMap, "depth_maps")

    def interpolate_pose(self, t):
        """
        Interpolate poses at times t: slerp for rotation, linear for translation.

        :param t: array of query times
        :return: N x 4 x 4 array of interpolated homogeneous transforms
        """
        concatenated_poses = np.stack(self.queue)
        T = concatenated_poses[:, :3, 3]

        # convert R to q
        q = np.stack(self.quaternions)

        q_interp = slerp(t, self.times, q)

        # find R
        R_interp = q_to_R(q_interp)

        # find T
        T_interp = interpolate(t, self.times, T)

        # put together into N x 4 x 4 matrix
        interp_pose = np.zeros((T_interp.shape[0], 4, 4))
        interp_pose[:, :3, 3] = T_interp
        interp_pose[:, :3, :3] = R_interp
        interp_pose[:, 3, 3] = 1

        return interp_pose

    def get_interpolated(self, t):
        """
        Get the queue values interpolated at times t (slerp-based for poses,
        plain interpolation otherwise).
        """
        if self.dataset_type == "poses":
            return self.interpolate_pose(t)

        values = np.stack(self.queue)
        values_interp = interpolate(t, self.times, values)

        return values_interp

    def current(self):
        # (time, payload) at the current cursor position
        return self[self.it]

    def set_to_first_after(self, t):
        # move the cursor to the first sample at t or later
        # (the 1e-5 tolerance also accepts a sample just before t)
        assert t <= self.times[-1]
        mask = (self.times - t >= 0) | (np.abs(self.times - t) < 1e-5)
        self.it = np.where(mask)[0][0]

    def get_first_after(self, t):
        # like set_to_first_after, but returns the sample without moving the cursor
        assert t <= self.times[-1]
        mask = (self.times - t >= 0) | (np.abs(self.times - t) < 1e-5)
        return self[np.where(mask)[0][0]]

    def get_closest(self, t):
        # (time, payload) of the sample whose timestamp is nearest to t
        idx = np.argmin(np.abs(self.times - t)).astype(int)
        return self[idx]

    def __getitem__(self, idx):
        # (time, payload) pair at absolute index idx (ignores the cursor)
        return self.times[idx], self.queue[idx]

    def __len__(self):
        # number of samples remaining from the current cursor position
        return len(self.queue) - self.it

    def __iter__(self):
        return self

    def __next__(self):
        # yields (time, payload) pairs, advancing the cursor set by set_to_first_after
        if self.it == len(self.queue):
            raise StopIteration
        else:
            self.it += 1
            return self[self.it - 1]
132 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/evaluate_tracks.py:
--------------------------------------------------------------------------------
1 | """
2 | File that takes dataset.yaml path and generates an object containing images and timestamps
3 | """
4 | import yaml
5 | import numpy as np
6 | import argparse
7 | import os
8 | from os.path import isfile, join, dirname, abspath
9 |
10 | from x_evaluate.rpg_tracking_analysis.tracker import Tracker
11 | from x_evaluate.rpg_tracking_analysis.tracker_init import TrackerInitializer
12 | from x_evaluate.rpg_tracking_analysis.tracker_utils import get_track_data, compare_tracks
13 | from x_evaluate.rpg_tracking_analysis.dataset import Dataset
14 | from x_evaluate.rpg_tracking_analysis.plot_tracks import plot_tracks_3d, plot_num_features, format_summary
15 | from x_evaluate.rpg_tracking_analysis.visualizer import FeatureTracksVisualizer
16 |
17 |
def rpg_evaluate_tracks(args, dataset_config=None, tracker_config=None):
    """
    Ground-truth tracking evaluation pipeline:
      1. load dataset / tracker configs (from args when not supplied),
      2. generate ground-truth tracks next to the input ("<file>.gt.txt"),
      3. compute per-feature errors ("<file>.errors.txt"),
      4. optionally save a 3D plot, a video preview and error plots/tables into
         a "results" folder next to args.file.

    :param args: argparse namespace (see the parser in __main__ for the fields)
    :param dataset_config: optional pre-loaded dataset config dict
    :param tracker_config: optional pre-loaded tracker config dict
    :return: (tracked_features, error_data) numpy arrays
    """
    if dataset_config is None:
        with open(args.dataset, "r") as f:
            dataset_config = yaml.load(f, Loader=yaml.Loader)

    if tracker_config is None:
        if args.tracker_params == "":
            # check what the type is; fall back to the bundled default params file
            assert args.tracker_type != "", "Either tracker_type or tracker_params need to be given."
            assert args.tracker_type in ["KLT", "reprojection"], 'Tracker type must be one of [reprojection, KLT].'
            config_dir = join(dirname(abspath(__file__)), "config")
            args.tracker_params = join(config_dir, "%s_params.yaml" % args.tracker_type)
        else:
            assert isfile(args.tracker_params), "Tracker params do not exist."
            assert args.tracker_params.endswith(".yaml") or args.tracker_params.endswith(".yml"), \
                "Tracker params '%s 'must be a yaml file." % args.tracker_params

        with open(args.tracker_params, "r") as f:
            tracker_config = yaml.load(f, Loader=yaml.Loader)

    assert os.path.isfile(args.file), "Tracks file '%s' does not exist." % args.file
    assert "type" in tracker_config, "Tracker parameters must contain a type tag, which can be one of [reprojection, KLT]."
    # KLT re-tracks on camera frames; reprojection needs depth-derived landmarks
    tracker_init_config = {
        "tracks_csv": args.file,
        "type": "tracks" if tracker_config["type"] == "KLT" else "depth_from_tracks"
    }

    print("Evaluating ground truth for %s in folder %s." % (os.path.basename(args.file), os.path.dirname(args.file)))

    # make feature init
    tracks_init = TrackerInitializer(args.root, dataset_config, config=tracker_init_config)

    # init tracker
    tracker = Tracker(tracker_config)

    # get tracks
    tracked_features = tracker.track(tracks_init.tracks_obj, tracks_init.tracker_params)

    # save tracks as rows of [id, t, x, y]
    out_csv = args.file + ".gt.txt"
    print("Saving ground truth files to %s" % os.path.basename(out_csv))
    np.savetxt(out_csv, tracked_features, fmt=["%i", "%.8f", "%.4f", "%.4f"])

    # load both gt and normal tracks
    print("Computing errors")
    est_tracks_data = get_track_data(args.file, filter_too_short=True)
    gt_tracks_data = get_track_data(out_csv)

    # compute errors
    error_data = compare_tracks(est_tracks_data, gt_tracks_data)
    errors_csv = args.file + ".errors.txt"
    print("Saving error files to %s" % os.path.basename(errors_csv))
    np.savetxt(errors_csv, error_data, fmt=["%i", "%.8f", "%.4f", "%.4f"])

    # plotting
    folder = os.path.dirname(args.file)
    results_folder = os.path.join(folder, "results")
    if not os.path.isdir(results_folder):
        os.mkdir(results_folder)

    if args.plot_3d:
        # create 3D plot of tracks (only first features) and with gt
        print(
            "Saving 3d space-time curves to results/3d_plot.pdf and results/3d_plot_with_gt.pdf. This may take some time.")
        dataset = Dataset(args.root, dataset_config, dataset_type="frames")
        fig1, ax1 = plot_tracks_3d(dataset, args.file, out_csv, t_max=2)
        # fig1.savefig(join(results_folder, "3d_plot_with_gt.pdf"), bbox_inches="tight") # BUG: matplotlib API changed?
        fig1.savefig(join(results_folder, "3d_plot_with_gt.pdf"))

        # a fresh Dataset instance — NOTE(review): presumably because plotting
        # consumes the dataset iterator; confirm against plot_tracks_3d
        dataset = Dataset(args.root, dataset_config, dataset_type="frames")
        fig2, ax2 = plot_tracks_3d(dataset, args.file, t_max=2)
        # fig2.savefig(os.path.join(results_folder, "3d_plot.pdf"), bbox_inches="tight") # BUG: matplotlib API changed?
        fig2.savefig(os.path.join(results_folder, "3d_plot.pdf"))

    if args.video_preview:
        # preview of video
        dataset = Dataset(args.root, dataset_config, dataset_type="frames")
        out_video = os.path.join(results_folder, "preview.avi")

        video_params = {
            'track_history_length': 0.4,
            'scale': 4.0,
            'framerate': 80,
            'speed': 1.0,
            'marker': "circle",
            "error_threshold": args.error_threshold,
            "contrast_brightness": [1, 0],
            "crop_to_predictions": True,
        }

        # load dataset
        print("Saving video preview to results/preview.avi.")
        viz = FeatureTracksVisualizer(file=args.file, dataset=dataset, params=video_params)
        viz.write_to_video_file(out_video)

    if args.plot_errors:
        print("Saving error plot to errors.pdf")
        fig3, ax3, summary = plot_num_features(args.file, error_threshold=args.error_threshold)
        fig3.savefig(os.path.join(results_folder, "errors.pdf"), bbox_inches="tight")

        # write summary into a text file
        print("Saving error and age table in locations results/errors.txt and results/feature_age.txt")
        error_table, age_table = format_summary(summary)
        with open(os.path.join(results_folder, "errors.txt"), "w") as f:
            f.write(error_table)
        with open(os.path.join(results_folder, "feature_age.txt"), "w") as f:
            f.write(age_table)

    return tracked_features, error_data
127 |
128 |
if __name__ == '__main__':
    # Command-line entry point wrapping rpg_evaluate_tracks().
    parser = argparse.ArgumentParser(description='''Generates ground truth tracks for a given set of feature tracks.
    The user needs to specify the root where the rosbag containing the images/poses + depth maps are found.
    Additionally, a configuration file for the dataset and tracker must be provided.''')
    parser.add_argument('--tracker_params', help='Params yaml-file for tracker algorithm.', default="")
    parser.add_argument('--tracker_type', help='Tracker type. Can be one of [reprojection, KLT].', default="")
    parser.add_argument('--root', help='Directory where datasets are found.', default="", required=True)
    parser.add_argument('--dataset', help="Params yaml-file for dataset.", default="", required=True)
    parser.add_argument('--file', help="Tracks file for ground truth computation.", default="", required=True)

    parser.add_argument('--plot_3d', help="Whether to do a 3d plot.", action="store_true", default=False)
    parser.add_argument('--plot_errors', help="Tracks file giving KLT initialization.", action="store_true",
                        default=False)
    parser.add_argument('--error_threshold', help="Error threshold. Tracks which exceed this threshold are discarded.",
                        type=float, default=10)
    parser.add_argument('--video_preview', help="Whether to create a video preview.", action="store_true",
                        default=False)

    arguments = parser.parse_args()
    rpg_evaluate_tracks(arguments)
149 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/plot_track.py:
--------------------------------------------------------------------------------
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import tqdm

# CLI: plot estimated vs. ground-truth feature tracks and their errors, one
# figure per track id, saved as pdf into results/tracks/ next to the tracks file.
parser = argparse.ArgumentParser("")
parser.add_argument("--file", default="")
parser.add_argument("--id", type=int, default=-1)

args = parser.parse_args()

assert os.path.isfile(args.file), "Tracks file must exist."

# companion files produced by evaluate_tracks.py
data = np.genfromtxt(args.file)
gt = np.genfromtxt(args.file + ".gt.txt")
errors = np.genfromtxt(args.file + ".errors.txt")

# plot all track ids unless a single one was requested via --id
ids = np.unique(data[:, 0]).astype(int)
if args.id != -1:
    assert args.id in ids
    ids = [args.id]

folder = os.path.dirname(args.file)

results_folder = os.path.join(folder, "results")
if not os.path.isdir(results_folder):
    os.mkdir(results_folder)
tracks_folder = os.path.join(results_folder, "tracks")
if not os.path.isdir(tracks_folder):
    os.mkdir(tracks_folder)

for i in tqdm.tqdm(ids):
    # get one track (columns after the id: t, x, y)
    est_one_track = data[data[:, 0] == i, 1:]
    gt_one_track = gt[gt[:, 0] == i, 1:]
    errors_one_track = errors[errors[:, 0] == i, 1:]

    fig, ax = plt.subplots(nrows=3)
    # plot times relative to the first estimated sample
    t0 = est_one_track[0, 0]

    # plot x coordinates
    ax[0].plot(est_one_track[:, 0] - t0, est_one_track[:, 1], color="b", label="estimate")
    ax[0].plot(gt_one_track[:, 0] - t0, gt_one_track[:, 1], color="g", label="ground truth")
    ax[0].set_ylabel("X coords. [px]")
    ax[0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                 ncol=2, borderaxespad=0., frameon=False)

    ax[0].tick_params(
        axis='x',  # changes apply to the x-axis
        which='both',  # both major and minor ticks are affected
        bottom=False,  # ticks along the bottom edge are off
        top=False,  # ticks along the top edge are off
        labelbottom=False)

    # y coordinates share the legend from the first axis
    ax[1].plot(est_one_track[:, 0] - t0, est_one_track[:, 2], color="b")
    ax[1].plot(gt_one_track[:, 0] - t0, gt_one_track[:, 2], color="g")
    ax[1].set_ylabel("Y coords. [px]")

    ax[1].tick_params(
        axis='x',  # changes apply to the x-axis
        which='both',  # both major and minor ticks are affected
        bottom=False,  # ticks along the bottom edge are off
        top=False,  # ticks along the top edge are off
        labelbottom=False)

    # per-axis errors plus the total euclidean error over time
    ax[2].plot(errors_one_track[:, 0] - t0, errors_one_track[:, 1], color="c", label="error x")
    ax[2].plot(errors_one_track[:, 0] - t0, errors_one_track[:, 2], color="r", label="error y")
    ax[2].plot(errors_one_track[:, 0] - t0, np.sqrt(errors_one_track[:, 2] ** 2 + errors_one_track[:, 1] ** 2),
               color="m", label="total error")
    ax[2].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                 ncol=3, borderaxespad=0., frameon=False)

    ax[2].set_xlabel("Time [s]")
    ax[2].set_ylabel("Error [px]")

    plt.subplots_adjust(hspace=0.3)

    fig.savefig(os.path.join(tracks_folder, "track_%s.pdf" % i), bbox_inches="tight")
    plt.close()
81 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/tracker.py:
--------------------------------------------------------------------------------
1 | try:
2 | import cv2
3 | except:
4 | pass
5 | import numpy as np
6 | from x_evaluate.rpg_tracking_analysis.tracker_utils import project_landmarks
7 | import tqdm
8 |
9 |
class Tracker:
    # Dispatches to a KLT-based or a reprojection-based feature tracker,
    # selected by config["type"].
    def __init__(self, config):
        """
        :param config: dict with key "type" in {"KLT", "reprojection"}; the KLT
                       variant additionally needs "window_size" and
                       "num_pyramidal_layers"
        """
        self.config = config

        if self.config["type"] == "KLT":
            assert "window_size" in self.config, "The tracker config of type KLT needs the key window_size."
            assert "num_pyramidal_layers" in self.config, "The tracker config of type KLT needs the key num_pyramidal_layers."
            self.track = self.track_features_on_klt
        elif self.config["type"] == "reprojection":
            self.track = self.track_features_with_landmarks
        else:
            raise ValueError

    def track_features_on_klt(self, tracks_obj, tracker_params):
        """
        tracks features in feature_init using the dataset
        tracks must be dict with keys as ids and values as 1 x 3 array with x,y,t
        returns a dict with keys as track ids, and values as N x 3 arrays, with x,y,t.
        If collate is true, returns N x 4 array with id,x,y,t .
        """
        assert "reference_track" in tracker_params
        assert "frame_dataset" in tracker_params

        window_size = self.config["window_size"]
        num_pyramidal_layers = self.config["num_pyramidal_layers"]

        dataset = tracker_params["frame_dataset"]
        # start iterating at the first frame at/after the tracks' start time
        dataset.set_to_first_after(tracks_obj.t)

        print("Tracking with KLT parameters: [window_size=%s num_pyramidal_layers=%s]" % (
            window_size, num_pyramidal_layers))
        for i, (t, img) in enumerate(tqdm.tqdm(dataset)):
            if i == 0:
                # first frame only provides the reference image
                first_img = img
                continue

            second_img = img

            # stop early once every feature has been lost
            if len(tracks_obj.active_features) == 0:
                break

            new_features, status, err = \
                cv2.calcOpticalFlowPyrLK(first_img, second_img, tracks_obj.active_features,
                                         None, winSize=(window_size, window_size), maxLevel=num_pyramidal_layers)

            # status[:, 0] == 1 marks features OpenCV tracked successfully
            tracks_obj.update(status[:, 0] == 1, new_features, t)

            first_img = second_img

        tracks = tracks_obj.collate()
        return tracks

    def track_features_with_landmarks(self, tracks_obj, tracker_params):
        """
        Track features feature_init by projecting it onto frames at every time step.
        dataset here is a pose dataset
        """
        # get camera calibration
        assert "img_size" in tracker_params
        assert "K" in tracker_params
        assert "landmarks" in tracker_params
        assert "pose_dataset" in tracker_params

        W, H = tracker_params["img_size"]
        K = tracker_params["K"]
        landmarks = tracker_params["landmarks"]
        pose_dataset = tracker_params["pose_dataset"]

        print("Tracking via reprojection")
        for i, (t, pose) in enumerate(tqdm.tqdm(pose_dataset)):
            # get closest image
            if i == 0:
                continue

            # all features lost
            if len(tracks_obj.active_ids) == 0:
                break

            active_landmarks = np.stack([landmarks[i] for i in tracks_obj.active_ids])
            new_features = project_landmarks(active_landmarks, pose, K)

            # drop features that reproject outside the image bounds
            x_new, y_new = new_features[:, 0, 0], new_features[:, 0, 1]
            status = (x_new >= 0) & (y_new >= 0) & (x_new < W) & (y_new < H)

            tracks_obj.update(status, new_features, t)

        tracks = tracks_obj.collate()

        return tracks
99 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/tracker_init.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | from x_evaluate.rpg_tracking_analysis.dataset import Dataset
3 | import numpy as np
4 | from x_evaluate.rpg_tracking_analysis.tracker_utils import back_project_features, grid_sample, filter_tracks
5 | from x_evaluate.rpg_tracking_analysis.tracks import Tracks
6 | import os
7 |
8 |
class TrackerInitializer:
    """
    Builds the initial feature tracks and tracker parameters for a Tracker run.

    Depending on config["type"]:
      - "tracks": seed KLT tracking from the first position of each estimated track
      - "depth_from_tracks": back-project first feature positions to 3D landmarks
        (using depth maps and poses) for reprojection-based tracking
    """

    def __init__(self, root, dataset_config, config_yaml=None, config=None):
        """
        :param root: directory containing the dataset bag
        :param dataset_config: dataset config dict (topics, bag name)
        :param config_yaml: path to an initializer config yaml (used when config is None)
        :param config: initializer config dict, takes precedence over config_yaml
        """
        if config is None:
            with open(config_yaml, "r") as f:
                self.config = yaml.load(f, Loader=yaml.Loader)
        else:
            self.config = config

        print("Constructing initializer with type '%s'" % self.config["type"])

        if self.config["type"] == "tracks":
            self.tracks_obj, self.tracker_params = self.init_on_track(root, self.config, dataset_config)
        elif self.config["type"] == "depth_from_tracks":
            self.tracks_obj, self.tracker_params = self.init_by_reprojection(root, self.config, dataset_config)
        else:
            raise ValueError

        print("Done")

    def init_by_reprojection(self, root, config, dataset_config):
        """
        Inits tracks by taking the first position of each estimated track, sampling
        its depth from interpolated depth maps and back-projecting to a 3D landmark.

        :return: (tracks_obj, tracker_params) for Tracker.track_features_with_landmarks
        """
        print("[1/3] Loading tracks in %s" % os.path.basename(config["tracks_csv"]))
        tracks = np.genfromtxt(config["tracks_csv"])
        valid_ids, tracks = filter_tracks(tracks, filter_too_short=True, only_first_frame=False)

        tracks_dict = {i: tracks[tracks[:, 0] == i, 1:] for i in valid_ids}
        # first row of each track ([t, x, y]), reshaped to N x 1 x 3
        features = np.stack([tracks_dict[i][0] for i in valid_ids]).reshape(-1, 1, 3)

        print("[2/3] Loading depths, poses, frames and camera info")
        depth_dataset = Dataset(root, dataset_config, dataset_type="depth")
        pose_dataset = Dataset(root, dataset_config, dataset_type="poses")
        camera_info_dataset = Dataset(root, dataset_config, dataset_type="camera_info")

        K = camera_info_dataset.K
        img_size = camera_info_dataset.img_size

        # find poses and depths at features
        print("[3/3] Backprojecting first feature positions")
        depth_map_times = np.unique(features[:, 0, 0])
        depth_maps_interp = depth_dataset.get_interpolated(depth_map_times)
        depths = grid_sample(features[:, 0, 0], features[:, 0, 1:], depth_map_times, depth_maps_interp)

        poses = pose_dataset.get_interpolated(features[:, 0, 0])
        landmarks = back_project_features(K, features[:, :, 1:], depths, poses)

        # rewind the pose dataset to the earliest feature time for tracking
        t_min = np.min(features[:, 0, 0])
        pose_dataset.set_to_first_after(t_min)

        # parse into per-id dictionaries
        landmarks_dict = {i: landmarks[j] for j, i in enumerate(valid_ids)}
        features_dict = {i: features[j] for j, i in enumerate(valid_ids)}

        # create dict of features
        tracks_obj = Tracks(features_dict)

        # params for tracker
        tracker_params = {"landmarks": landmarks_dict,
                          "pose_dataset": pose_dataset, "img_size": img_size, "K": K, "reference_track": tracks}

        return tracks_obj, tracker_params

    def init_on_track(self, root, config, dataset_config):
        """
        Inits KLT tracking: for each estimated track, interpolate its position at
        the first camera frame at/after the track start and use that as the seed.

        :return: (tracks_obj, tracker_params) for Tracker.track_features_on_klt
        """
        print("[1/3] Loading tracks in %s." % os.path.basename(config["tracks_csv"]))
        tracks = np.genfromtxt(self.config["tracks_csv"])

        valid_ids, tracks = filter_tracks(tracks, filter_too_short=True, only_first_frame=False)

        tracks_dict = {i: tracks[tracks[:, 0] == i, 1:] for i in valid_ids}

        print("[2/3] Loading frame dataset to find positions of initial tracks.")
        frame_dataset = Dataset(root, dataset_config, dataset_type="frames")

        # find dataset indices for each start
        tracks_init = {}
        print("[3/3] Initializing tracks")
        for track_id, track in tracks_dict.items():
            # skip tracks outside of our range
            if frame_dataset.times[-1] < track[0, 0]:
                continue
            frame_dataset.set_to_first_after(track[0, 0])
            t_dataset, _ = frame_dataset.current()

            # interpolate the track position at the chosen frame timestamp
            x_interp = np.interp(t_dataset, track[:, 0], track[:, 1])
            y_interp = np.interp(t_dataset, track[:, 0], track[:, 2])

            tracks_init[track_id] = np.array([[t_dataset, x_interp, y_interp]])

        tracks_obj = Tracks(tracks_init)

        return tracks_obj, {"frame_dataset": frame_dataset, "reference_track": tracks}
131 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_tracking_analysis/tracks.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from copy import deepcopy
3 | import matplotlib.pyplot as plt
4 |
5 |
class Tracks:
    """Container for feature tracks keyed by integer track id.

    Each track is an array of rows [t, x, y]. The object keeps:
      - tracks_dict: full history per id (grows as update() is called),
      - active_ids / active_features: the tracks currently being tracked,
      - all_ids / all_features: the initial observations of every track,
        used to activate tracks whose start time is reached later.
    """
    def __init__(self, track_inits):
        # track_inits: {id: array of [t, x, y] rows}
        # NOTE(review): np.stack requires all inits to have the same shape —
        # presumably each init is a single [t, x, y] row; confirm with callers.
        self.all_ids = np.array(list(track_inits.keys()), dtype=int)
        self.all_features = np.stack([track_inits[i] for i in self.all_ids])
        # current time = earliest init timestamp
        self.t = np.min(self.all_features[:, 0, 0])

        # activate only tracks whose init timestamp matches the start time
        close = np.abs(self.all_features[:, 0, 0] - self.t) < 1e-4
        self.active_ids = self.all_ids[close]
        self.active_features = np.stack([track_inits[i][:, 1:] for i in self.active_ids]).astype(np.float32)

        self.tracks_dict = track_inits

    def update(self, mask, tracks, t):
        """Advance to time t with new positions for the active tracks.

        mask: boolean array over active tracks; False entries are dropped.
        tracks: updated feature positions for the (pre-mask) active tracks.
        """
        # update tracks
        self.t = t
        self.active_features = tracks

        # remove all deletes
        self.active_features = self.active_features[mask]
        self.active_ids = self.active_ids[mask]

        # append the surviving positions to each track's history
        for i, feature in zip(self.active_ids, self.active_features):
            self.tracks_dict[i] = np.concatenate([self.tracks_dict[i],
                                                  np.concatenate([[[t]], feature], 1)], 0)

        # add new tracks
        # activate tracks whose init timestamp matches t (within 1e-4)
        close = np.abs(self.all_features[:, 0, 0] - t) < 1e-4
        new_ids = self.all_ids[close]
        new_tracks = self.all_features[close, :, 1:].astype(np.float32)

        self.active_ids = np.concatenate([self.active_ids, new_ids])
        self.active_features = np.concatenate([self.active_features, new_tracks])

    def collate(self):
        """Flatten all track histories to one array of [id, t, x, y] rows,
        sorted by timestamp."""
        collated_features = None
        for i, vals in self.tracks_dict.items():
            # prepend the id as the first column of every row
            id_block = np.concatenate([np.ones((len(vals), 1)) * i, vals], 1)

            if collated_features is None:
                collated_features = id_block
            else:
                collated_features = np.concatenate(
                    [collated_features, id_block])

        # sort by timestamps
        collated_features = collated_features[collated_features[:, 1].argsort(
        )]
        return collated_features

    def get_ref_tracks_before_t(self, tracks, t, ids):
        """For each id, return its latest reference position within
        (t - 0.1, t + 1e-3]; ids with no sample in that window are 'lost'.

        Returns (positions as (-1, 1, 2) array or None, list of lost ids).
        """
        ref_tracks = []
        lost_ids = []
        # keep only reference rows in a small window just before/at t
        tracks = tracks[(tracks[:, 1] <= t + 1e-3) & (tracks[:, 1] > t - 0.1)]
        for i in ids:
            track_before_t = tracks[tracks[:, 0] == i]
            if len(track_before_t) > 0:
                ref_tracks.append(track_before_t[-1, 2:])
            else:
                lost_ids.append(i)
        if len(ref_tracks) > 0:
            return np.stack(ref_tracks).reshape((-1, 1, 2)), lost_ids
        return None, lost_ids

    def viz(self, img, handles=None, ref_tracks=None, wait_ms=1000.0):
        """Plot the active tracks (and optional reference tracks) over img.

        On first call (handles=None) a new figure is created; on subsequent
        calls the returned matplotlib handles are updated in place so the
        same window animates. Returns the handles list to pass back in.
        """
        lost_ids = []
        if ref_tracks is not None:
            ref_tracks, lost_ids = self.get_ref_tracks_before_t(ref_tracks, self.t, self.active_ids)

        # filter out lost ids from gt
        mask = np.full(self.active_ids.shape, True, dtype=bool)
        for i in lost_ids:
            mask = mask & (self.active_ids != i)
        active_tracks = self.active_features[mask]
        # take out all tracks that are in plot_idx
        if handles is None:
            fig, ax = plt.subplots()

            # handles layout: [image, gt line, figure, (optional) ref line]
            handles = []
            handles += [ax.imshow(img)]
            handles += [ax.plot(active_tracks[:, 0, 0], active_tracks[:, 0, 1],
                                color='b', marker='x', ls="", ms=4, label="gt")]
            handles += [fig]

            if ref_tracks is not None:
                handles += [ax.plot(ref_tracks[:, 0, 0], ref_tracks[:, 0, 1],
                                    color='g', marker='x', ls="", ms=4, label="reference")]

            ax.legend()

            plt.title("T = %.4f" % self.t)
            plt.show(block=False)
        else:
            # reuse the existing figure: update data instead of re-plotting
            plt.title("T = %.4f" % self.t)
            handles[0].set_data(img)
            handles[1][0].set_data(active_tracks[:, 0, 0], active_tracks[:, 0, 1])
            handles[2].canvas.draw()

            if ref_tracks is not None:
                handles[3][0].set_data(ref_tracks[:, 0, 0], ref_tracks[:, 0, 1])

        plt.pause(wait_ms / 1000.0)

        return handles
111 |
--------------------------------------------------------------------------------
/src/x_evaluate/rpg_trajectory_evaluation.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from typing import List, Tuple
3 |
4 | import numpy as np
5 | from evo.core.trajectory import PoseTrajectory3D
6 | from evo.core import lie_algebra as lie
7 | from evo.core import sync
8 |
9 | from x_evaluate.evaluation_data import AlignmentType
10 |
11 |
def get_best_yaw(C):
    """Return the yaw angle theta maximizing trace(Rz(theta) * C)."""
    assert C.shape == (3, 3)

    off_diag = C[0, 1] - C[1, 0]
    on_diag = C[0, 0] + C[1, 1]
    return np.pi / 2 - np.arctan2(on_diag, off_diag)
23 |
24 |
def rot_z(theta):
    """Rotation matrix about the z-axis by angle theta (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    R = np.identity(3)
    R[0, 0], R[0, 1] = c, -s
    R[1, 0], R[1, 1] = s, c
    return R
34 |
35 |
def rpg_get_alignment_umeyama(ground_truth: PoseTrajectory3D, trajectory_estimate: PoseTrajectory3D, known_scale=False, yaw_only=False):
    """Implementation of the paper: S. Umeyama, Least-Squares Estimation
    of Transformation Parameters Between Two Point Patterns,
    IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.

    model = s * R * data + t

    Input:
    model -- first trajectory (nx3), numpy array type
    data -- second trajectory (nx3), numpy array type
    known_scale -- if True, s is fixed to 1
    yaw_only -- if True, R is restricted to a rotation about the z-axis

    Output:
    s -- scale factor (scalar)
    R -- rotation matrix (3x3)
    t -- translation vector (3x1)

    """

    # subtract mean
    mu_M = ground_truth.positions_xyz.mean(0)
    mu_D = trajectory_estimate.positions_xyz.mean(0)
    model_zerocentered = ground_truth.positions_xyz - mu_M
    data_zerocentered = trajectory_estimate.positions_xyz - mu_D
    n = np.shape(ground_truth.positions_xyz)[0]

    # correlation
    C = 1.0/n*np.dot(model_zerocentered.transpose(), data_zerocentered)
    sigma2 = 1.0/n*np.multiply(data_zerocentered, data_zerocentered).sum()
    # FIX: np.linalg.linalg was a private alias removed in NumPy 2.0;
    # use the public np.linalg.svd instead.
    U_svd, D_svd, V_svd = np.linalg.svd(C)
    D_svd = np.diag(D_svd)
    V_svd = np.transpose(V_svd)

    # reflection correction: force det(R) = +1
    S = np.eye(3)
    if(np.linalg.det(U_svd)*np.linalg.det(V_svd) < 0):
        S[2, 2] = -1

    if yaw_only:
        # restrict the rotation to yaw by maximizing trace(Rz(theta) * rot_C)
        rot_C = np.dot(data_zerocentered.transpose(), model_zerocentered)
        theta = get_best_yaw(rot_C)
        R = rot_z(theta)
    else:
        R = np.dot(U_svd, np.dot(S, np.transpose(V_svd)))

    if known_scale:
        s = 1
    else:
        s = 1.0/sigma2*np.trace(np.dot(D_svd, S))

    t = mu_M-s*np.dot(R, mu_D)

    return s, R, t
89 |
90 |
def rpg_align(ground_truth, trajectory_estimate, alignment_type: AlignmentType = AlignmentType.Disabled,
              use_subtrajectory=False, t_left=3, t_right=8):
    """Align trajectory_estimate to ground_truth in place.

    The Umeyama alignment (yaw-only / SE3 / SIM3, selected by alignment_type)
    is computed on the full trajectories, or — when use_subtrajectory is set —
    only on the window [t_left, t_right] seconds after the first ground-truth
    timestamp. The resulting scale and rigid transform are always applied to
    the full trajectory_estimate.
    """
    if use_subtrajectory:
        window_start = ground_truth.timestamps[0] + t_left
        window_end = ground_truth.timestamps[0] + t_right
        gt = copy.deepcopy(ground_truth)
        gt.reduce_to_time_range(window_start, window_end)

        gt, est = sync.associate_trajectories(gt, trajectory_estimate)
    else:
        gt, est = ground_truth, trajectory_estimate

    if alignment_type == AlignmentType.Disabled:
        # identity transform: scale(1) / transform(I) below are no-ops
        s, r, t = 1, np.identity(3), np.zeros((3, ))
    elif alignment_type in (AlignmentType.PosYaw, AlignmentType.SE3, AlignmentType.SIM3):
        # PosYaw: fixed scale, yaw-only; SE3: fixed scale; SIM3: free scale
        s, r, t = rpg_get_alignment_umeyama(gt, est,
                                            known_scale=(alignment_type != AlignmentType.SIM3),
                                            yaw_only=(alignment_type == AlignmentType.PosYaw))
    else:
        raise ValueError(F"Invalid alignment type {alignment_type}")

    trajectory_estimate.scale(s)
    trajectory_estimate.transform(lie.se3(r, t))
119 |
120 |
def get_split_distances_at_percentages(ground_truth: PoseTrajectory3D, percentages):
    """Split distances at the given fractions of the total trajectory length,
    rounded to 2 decimals."""
    total_length = ground_truth.distances[-1]
    return np.round([total_length * p for p in percentages], 2)
125 |
126 |
def get_split_distances_on_equal_parts(ground_truth: PoseTrajectory3D, num_parts=5):
    """Split distances dividing the trajectory into num_parts equal parts
    (endpoints excluded)."""
    fractions = [k / num_parts for k in range(1, num_parts)]
    return get_split_distances_at_percentages(ground_truth, fractions)
130 |
131 |
def get_split_distances_equispaced(ground_truth: PoseTrajectory3D, step_size=5):
    """Split distances every step_size meters from 0 up to (excluding) the
    total trajectory length."""
    total_length = ground_truth.distances[-1]
    return np.arange(0, total_length, step_size)
135 |
136 |
def rpg_sub_trajectories(ground_truth: PoseTrajectory3D, trajectory_estimate: PoseTrajectory3D, split_distances,
                         max_diff=0.01) -> List[Tuple[PoseTrajectory3D, PoseTrajectory3D, float]]:
    """Cut both trajectories into consecutive sub-trajectories at the given
    travelled distances.

    Returns one (sub_ground_truth, sub_estimate, start_distance) tuple per
    segment, where segments span [split_times[i], split_times[i+1]] and the
    trajectories are time-associated within max_diff seconds.

    NOTE(review): split_distances is mutated (the total trajectory length is
    appended) — callers reusing the list should pass a copy.
    """
    # index of the ground-truth pose closest to each requested distance
    split_points = [-1] * len(split_distances)

    for i in range(len(split_distances)):
        split_points[i] = np.argmin(np.abs(ground_truth.distances-split_distances[i]))

    split_times = [ground_truth.timestamps[index] for index in split_points]

    # bracket the splits with the trajectory's first and last timestamps
    split_times.insert(0, ground_truth.timestamps[0])
    split_distances.append(ground_truth.distances[-1])
    split_times.append(ground_truth.timestamps[-1])

    trajectory_tuples = []

    # one segment per consecutive pair of split times
    for i in range(len(split_distances)):
        sub_ground_truth = copy.deepcopy(ground_truth)
        sub_ground_truth.reduce_to_time_range(split_times[i], split_times[i+1])

        sub_trajectory = copy.deepcopy(trajectory_estimate)
        sub_trajectory.reduce_to_time_range(split_times[i], split_times[i+1])

        # match timestamps between ground truth and estimate
        sub_ground_truth, sub_trajectory = sync.associate_trajectories(sub_ground_truth, sub_trajectory, max_diff)

        trajectory_tuples.append((sub_ground_truth, sub_trajectory, np.round(split_distances[i], 2)))

    return trajectory_tuples
165 |
166 |
167 |
--------------------------------------------------------------------------------
/src/x_evaluate/visualizer/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jpl-x/x_evaluate/5d95accbed53c21709725b5fff9ffeb05ccbb391/src/x_evaluate/visualizer/__init__.py
--------------------------------------------------------------------------------
/src/x_evaluate/visualizer/renderer.py:
--------------------------------------------------------------------------------
1 | import os
2 | from abc import ABCMeta, abstractmethod
3 | from typing import List
4 |
5 | import cv2
6 | import numpy as np
7 | import pygame
8 |
9 |
class AbstractFrameRenderer(metaclass=ABCMeta):
    """Base class for renderers that turn per-frame input files into pygame
    surfaces. Subclasses implement render()."""

    names: List[str]  # display name per rendered stream
    file_lists: List[List[str]]  # one file list per input stream
    requires_prev_files: bool  # presumably render() also needs the previous frame's files — confirm with callers
    number_of_outputs: int  # number of surfaces render() returns

    def __init__(self, name: List[str], file_lists: List[List[str]], requires_prev_files: bool = False,
                 number_of_output_surfaces: int = 1):
        # NOTE(review): parameter is called 'name' but holds a list of names
        self.names = name
        self.file_lists = file_lists
        self.requires_prev_files = requires_prev_files
        self.number_of_outputs = number_of_output_surfaces

    @abstractmethod
    def render(self, files: List[str]) -> List[pygame.SurfaceType]:
        # files: one path per entry in file_lists for the current frame
        pass
27 |
28 |
class BlankRenderer(AbstractFrameRenderer):
    """Placeholder renderer producing an empty surface of a fixed size for
    every frame."""

    def __init__(self, file_list_length: int, frame_size):
        self.frame_size = frame_size
        # dummy file list: one empty filename per frame
        super().__init__([""], [[""] * file_list_length])

    def render(self, files: List[str]) -> List[pygame.SurfaceType]:
        # files are ignored; always yield a blank surface
        return [pygame.Surface(self.frame_size)]
40 |
41 |
class RgbFrameRenderer(AbstractFrameRenderer):
    """Renders one RGB image file per frame as a pygame surface."""

    def __init__(self, name: str, file_list: List[str], root_folder=None):
        # optionally resolve all files relative to a common root folder
        if root_folder:
            file_list = [os.path.join(root_folder, f) for f in file_list]

        super().__init__([name], [file_list])

    def render(self, files: List[str]) -> List[pygame.SurfaceType]:
        # THIS SHOULD BE UNIT TESTED IN ABSTRACT FRAME RENDERER: assert len(files) == 1
        img = cv2.imread(files[0])
        if img is None:
            # cv2.imread silently returns None on a missing/unreadable file
            raise FileNotFoundError(F"Could not read image file '{files[0]}'")
        # cv2.imread returns BGR, pygame expects RGB: flip the channel order
        # (same as the commented-out renderers below do)
        img = np.ascontiguousarray(img[:, :, ::-1])
        im_surface = pygame.surfarray.make_surface(img.swapaxes(0, 1))
        return [im_surface]
57 |
58 | #
59 | # class DepthFrameRenderer(RgbFrameRenderer):
60 | #
61 | # def __init__(self, dataset_path, sensor="depth", sub_folder="frames"):
62 | # super().__init__(dataset_path, sensor, sub_folder=sub_folder)
63 | #
64 | # def render(self, files: List[str]) -> List[pygame.SurfaceType]:
65 | # img = cv2.imread(files[0], cv2.COLOR_RGB2GRAY)
66 | # # im_surface = pygame.image.load(files[0])
67 | # # img = img[:, :, :3]
68 | # # img = img[:, :, ::-1]
69 | # im_surface = pygame.surfarray.make_surface(img.swapaxes(0, 1))
70 | # return [im_surface]
71 | #
72 | #
73 | # class RgbFrameProjectionRenderer(RgbFrameRenderer):
74 | #
75 | # def update_points_to_project(self, points_3d):
76 | # assert np.shape(points_3d)[0] == 3, "expecting 3xn 3d points here"
77 | # self._points_3d = points_3d
78 | #
79 | # def __init__(self, dataset_path, projection_matrix, sensor="rgb", sub_folder="frames"):
80 | # super().__init__(dataset_path, sensor, sub_folder=sub_folder)
81 | # self._points_3d = np.array([[], [], []])
82 | # self.projection_matrix = projection_matrix
83 | #
84 | # def render(self, files: List[str]) -> List[pygame.SurfaceType]:
85 | # img = cv2.imread(files[0])
86 | #
87 | # # im_surface = pygame.image.load(files[0])
88 | #
89 | # xy = np.matmul(self.projection_matrix, self._points_3d)
90 | # uv = xy[0:2, :] / xy[2, :] # the flip is taken care of later
91 | #
92 | # img = img[:, :, :3]
93 | # img = img[:, :, ::-1]
94 | # img = np.ascontiguousarray(img, dtype=np.uint8) # avoids issue with cv2.drawMarker
95 | #
96 | # for i in range(np.shape(uv)[1]):
97 | # cv2.drawMarker(img, (int(round(uv[0, i])), int(round(uv[1, i]))), (255, 0, 0), markerType=cv2.MARKER_STAR,
98 | # markerSize=10, thickness=2, line_type=cv2.LINE_AA)
99 | #
100 | # im_surface = pygame.surfarray.make_surface(img.swapaxes(0, 1))
101 | # return [im_surface]
102 |
--------------------------------------------------------------------------------
/src/x_evaluate/visualizer/utils.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import pygame
4 |
5 |
def draw_arrow_with_cv2(img_out, p, vel):
    """Draw a flow arrow (shaft plus two head strokes) onto img_out in place.

    :param img_out: BGR/RGB image array drawn on with cv2.line
    :param p: (x, y) integer pixel position of the arrow origin
    :param vel: (vx, vy) flow vector; arrows shorter than 1 px are skipped
    """
    vel_norm = np.linalg.norm(vel)
    if vel_norm < 1:
        return  # too small to be visible
    # head stroke length proportional to speed
    # (was 0.3 * np.linalg.norm(vel_norm) — norm of a non-negative scalar
    # is the scalar itself, so this is identical but clearer)
    spin_size = 0.3 * vel_norm
    color = (220, 220, 220)
    tip = tuple(np.round([p[0] + vel[0], p[1] + vel[1]]).astype(int))
    # arrow shaft
    cv2.line(img_out, p, tip, color, thickness=1, lineType=cv2.LINE_AA)
    # the tip-to-origin delta equals vel, so the arrow angle is:
    angle = np.arctan2(vel[1], vel[0])
    # two head strokes at +/- 45 degrees from the shaft direction
    for offset in (np.pi / 4, -np.pi / 4):
        tail = (tip[0] - spin_size * np.cos(angle + offset),
                tip[1] - spin_size * np.sin(angle + offset))
        tail = tuple(np.round(tail).astype(int))
        cv2.line(img_out, tail, tip, color, thickness=1, lineType=cv2.LINE_AA)
26 |
27 |
def get_pygame_font(desired_font='ubuntumono'):
    """Return a size-24 pygame Font, preferring desired_font and falling back
    to the first available system font."""
    available = list(pygame.font.get_fonts())
    chosen = desired_font if desired_font in available else available[0]
    return pygame.font.Font(pygame.font.match_font(chosen), 24)
33 |
34 |
def render_optical_flow_data(data):
    """Render a dense optical-flow field (H x W x 2) as an RGB image.

    Flow direction is mapped to hue, log-scaled magnitude to value (HSV),
    and a sparse grid of arrows is overlaid on top.
    """
    intensity = np.linalg.norm(data, axis=2)
    angle = np.arctan2(data[:, :, 0], data[:, :, 1])
    # magnitudes are clipped at this value before normalization
    max_intensity = 100
    # N.B.: an intensity of exactly 1.0 makes the output black (perhaps showing the over-saturation), so keep it < 1
    intensity = np.clip(intensity, 0, max_intensity - 1) / max_intensity
    # log scaling
    basis = 30
    intensity = np.log1p((basis - 1) * intensity) / np.log1p(basis - 1)
    # for the angle they use 360° scale, see https://stackoverflow.com/a/57203497/14467327
    angle = (np.pi + angle) * 360 / (2 * np.pi)
    # print(F"Ranges, angle: [{np.min(angle)}, {np.max(angle)}], "
    #       F"intensity: [{np.min(intensity)}, {np.max(intensity)}]")
    intensity = intensity[:, :, np.newaxis]
    angle = angle[:, :, np.newaxis]
    # HSV: hue = direction, saturation = 1, value = scaled magnitude
    hsv_img = np.concatenate((angle, np.ones_like(intensity), intensity), axis=2)
    img_out = np.array(cv2.cvtColor(np.array(hsv_img, dtype=np.float32), cv2.COLOR_HSV2RGB) * 256,
                       dtype=np.dtype("uint8"))
    # overlay arrows on a 25-px grid, starting at (12, 12)
    for y in range(12, len(data), 25):
        for x in range(12, len(data[0]), 25):
            vel = (data[y][x][0], data[y][x][1])

            p = (x, y)
            draw_arrow_with_cv2(img_out, p, vel)
    return img_out
60 |
61 |
62 |
--------------------------------------------------------------------------------
/test/evaluate.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import argparse
5 | import traceback
6 |
7 | from envyaml import EnvYAML
8 | # import orjson
9 | from x_evaluate.evaluation_data import EvaluationDataSummary, FrontEnd
10 | import x_evaluate.performance_evaluation as pe
11 | import x_evaluate.trajectory_evaluation as te
12 | import x_evaluate.tracking_evaluation as fe
13 | from x_evaluate.utils import envyaml_to_archive_dict, name_to_identifier, \
14 | ArgparseKeyValueAction
15 | from x_evaluate.scriptlets import get_git_info, process_dataset, write_evaluation_pickle
16 |
17 |
def main():
    """Run the full X-library evaluation pipeline.

    Parses command-line arguments, locates the C++ 'evaluate' executable,
    reads the YAML evaluation configuration, processes every dataset listed
    there (optionally plotting per-dataset and summary analyses), and always
    writes the collected results to an evaluation pickle on exit.
    """
    print("Python evaluation script for X-library")
    print()
    print(F"Python executable: {sys.executable}")
    script_dir = os.path.dirname(os.path.realpath(__file__))
    print(F"Script located here: {script_dir}")
    print()

    parser = argparse.ArgumentParser(description='Automatic evaluation of X library according to evaluate.yaml')
    parser.add_argument('--evaluate', type=str, default="", help='location of c++ evaluate executable')
    parser.add_argument('--configuration', type=str, default="", help="YAML file specifying what to run")
    parser.add_argument('--name', type=str, help="optional name, if not the output folder name is used")
    parser.add_argument('--dataset_dir', type=str, default="", help="substitutes XVIO_DATASET_DIR in yaml file")
    parser.add_argument('--skip_feature_tracking', action="store_true", default=False)
    parser.add_argument('--skip_analysis', action="store_true", default=False)
    parser.add_argument('--output_folder', type=str, required=True)
    parser.add_argument('--frontend', type=FrontEnd, choices=list(FrontEnd), required=True)
    parser.add_argument('--overrides', nargs='*', action=ArgparseKeyValueAction)
    parser.add_argument('--dump_input_frames', help="Whether to dump input frames to disk.", action="store_true",
                        default=False)
    parser.add_argument('--dump_debug_frames', help="Whether to dump debug frames to disk.", action="store_true",
                        default=False)

    args = parser.parse_args()

    cmdline_override_params = dict()
    if args.overrides is not None:
        cmdline_override_params = args.overrides

    # debug-frame dumping implies these EKLT display settings
    if args.dump_debug_frames:
        cmdline_override_params['eklt_display_features'] = True
        cmdline_override_params['eklt_display_feature_id'] = False
        cmdline_override_params['eklt_display_feature_patches'] = True

    # no explicit executable given: ask catkin for it
    if len(args.evaluate) == 0:
        try:
            print("Calling catkin_find x_evaluate evaluate")
            stream = os.popen('catkin_find x_evaluate evaluate')
            # NOTE(review): popen output is not stripped, so args.evaluate
            # likely ends with a newline — confirm downstream handling
            args.evaluate = stream.read()
        finally:
            if len(args.evaluate) == 0:
                print("Error: could not find 'evaluate' executable")
                return

    if len(args.configuration) == 0:
        # default to evaluate.yaml in same folder as this script
        args.configuration = os.path.join(script_dir, "evaluate.yaml")

    # environment variables below are substituted inside the EnvYAML config
    if len(args.dataset_dir) > 0:
        os.environ['XVIO_DATASET_DIR'] = args.dataset_dir

    if 'X_EVALUATE_SRC_ROOT' not in os.environ:
        src_root = os.path.normpath(os.path.join(script_dir, '..'))
        print(F"Assuming '{src_root}' to be the x_evaluate source root")
        os.environ['X_EVALUATE_SRC_ROOT'] = src_root

    print(F"Reading '{args.configuration}'")

    eval_config = EnvYAML(args.configuration)
    tmp_yaml_filename = os.path.join(args.output_folder, 'tmp.yaml')

    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)

    print(F"Using the following 'evaluate' executable: {args.evaluate}")
    print(F"Processing the following datasets: {str.join(', ', (d['name'] for d in eval_config['datasets']))}")
    print()

    n = len(eval_config['datasets'])

    summary = EvaluationDataSummary()

    conf = envyaml_to_archive_dict(eval_config)

    summary.configuration = conf
    summary.frontend = args.frontend
    summary.name = args.name
    if summary.name is None:
        # fall back to the output folder's basename as the summary name
        summary.name = os.path.basename(os.path.normpath(args.output_folder))

    try:
        for i, dataset in enumerate(eval_config['datasets']):
            # a failure on one dataset is logged but does not stop the others
            try:
                output_folder = F"{i+1:>03}_{name_to_identifier(dataset['name'])}"
                print(F"Processing dataset {i+1} of {n}, writing to {output_folder}")
                output_folder = os.path.join(args.output_folder, output_folder)

                d = process_dataset(args.evaluate, dataset, output_folder, tmp_yaml_filename, eval_config,
                                    cmdline_override_params, args.frontend, args.skip_feature_tracking, args.skip_analysis,
                                    args.dump_input_frames, args.dump_debug_frames)

                if not args.skip_analysis:
                    pe.plot_performance_plots(d, output_folder)
                    te.plot_trajectory_plots(d.trajectory_data, d.name, output_folder)
                    fe.plot_feature_plots(d, output_folder)
                    print(F"Analysis of output {i + 1} of {n} completed")

                summary.data[dataset['name']] = d

            except Exception as e:
                print(F"Processing '{dataset}' failed: {e}")
                print(traceback.format_exc())
                print("\n\n\n")

        # cross-dataset summary plots and info
        if not args.skip_analysis:
            te.plot_summary_plots(summary, args.output_folder)
            te.create_summary_info(summary, args.output_folder)
            pe.plot_summary_plots(summary, args.output_folder)
            pe.print_realtime_factor_summary(summary)
            fe.plot_summary_plots(summary, args.output_folder)

        # record git state of both the x library and this evaluation repo
        x_evaluate_root = os.environ['X_EVALUATE_SRC_ROOT']
        x_root = os.path.normpath(os.path.join(x_evaluate_root, "../x"))

        print(F"Assuming '{x_root}' to be the x source root")

        summary.x_git_info = get_git_info(x_root)
        summary.x_vio_ros_git_info = get_git_info(x_evaluate_root)

    finally:
        # always persist whatever was collected, even after a failure
        if summary is not None:
            write_evaluation_pickle(summary, args.output_folder)

        if os.path.exists(tmp_yaml_filename):
            os.remove(tmp_yaml_filename)


if __name__ == '__main__':
    main()
147 |
148 |
--------------------------------------------------------------------------------
/test/evaluate_mars_yard.yaml:
--------------------------------------------------------------------------------
1 |
2 | common_params:
3 | eklt_bootstrap: "klt"
4 |
5 | datasets:
6 |
7 | - name: Mars Yard
8 | rosbag: ${XVIO_DATASET_DIR}/mars_yard_dvx/mars_yard_dvx_03_all_filtered_full.bag
9 | pose_topic:
10 | events_topic: /dvs/events_cleaned
11 | image_topic: /camera/image_raw
12 | imu_topic: /mavros/imu/data_raw
13 | params: ${X_EVALUATE_SRC_ROOT}/params_mars_yard.yaml
14 | # to: 1455212408.526946783
15 | override_params:
16 | p: [0.0, 0.0, 0.0]
17 | v: [0.0, 0.0, 0.0]
18 |
--------------------------------------------------------------------------------
/test/evaluate_rpg_davis_rotation.yaml:
--------------------------------------------------------------------------------
1 |
2 | common_params:
3 | eklt_bootstrap: "klt"
4 |
5 | datasets:
6 | -
7 | name: Boxes Rotation
8 | rosbag: ${XVIO_DATASET_DIR}/rpg_davis_data/boxes_rotation.bag
9 | events_topic: /dvs/events
10 | image_topic: /dvs/image_raw
11 | pose_topic: /optitrack/davis
12 | imu_topic: /dvs/imu
13 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_davis_rotation.yaml
14 | override_params:
15 | # initial state computed from topic /optitrack/davis over 100ms:
16 | p: [0.30291768098742905, 1.430519451034191, 1.3521637770392119]
17 | v: [-0.003301541735693611, 0.005962067389282077, -0.010828869392393976]
18 | q: [0.013829174831521022, -0.03468748297254869, 0.8955545300641503, -0.4433820205309269] # [w,x,y,z]
19 |
20 | -
21 | name: Dynamic Rotation
22 | rosbag: ${XVIO_DATASET_DIR}/rpg_davis_data/dynamic_rotation.bag
23 | events_topic: /dvs/events
24 | image_topic: /dvs/image_raw
25 | pose_topic: /optitrack/davis
26 | imu_topic: /dvs/imu
27 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_davis_rotation.yaml
28 | override_params:
29 | # initial state computed from topic /optitrack/davis over 100ms:
30 | p: [1.6859278477713469, -1.4658295591389563, 1.298812520634158]
31 | v: [0.0026893406797981628, 0.010057065949884923, -0.007002804190208332]
32 | q: [0.346405045682845, -0.5720589852495133, 0.6355752910378475, -0.3857409897106071] # [w,x,y,z]
33 |
34 |
35 | -
36 | name: Shapes Rotation
37 | rosbag: ${XVIO_DATASET_DIR}/rpg_davis_data/shapes_rotation.bag
38 | events_topic: /dvs/events
39 | image_topic: /dvs/image_raw
40 | pose_topic: /optitrack/davis
41 | imu_topic: /dvs/imu
42 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_davis_rotation.yaml
43 | override_params:
44 | # initial state computed from topic /optitrack/davis over 100ms:
45 | p: [4.461422921154449, 1.5512600588125605, 1.4175439815328836]
46 | v: [0.007208901640846718, 0.009767413656861793, 0.008575371720832243]
47 | q: [-0.4775463051309417, 0.5201921377433283, -0.4965123080459334, 0.5048021337561005] # [w,x,y,z]
48 |
49 | -
50 | name: Poster Rotation
51 | rosbag: ${XVIO_DATASET_DIR}/rpg_davis_data/poster_rotation.bag
52 | events_topic: /dvs/events
53 | image_topic: /dvs/image_raw
54 | pose_topic: /optitrack/davis
55 | imu_topic: /dvs/imu
56 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_davis_rotation.yaml
57 | override_params:
58 | # initial state computed from topic /optitrack/davis over 100ms:
59 | p: [4.526394089455742, -0.5973192737165077, 1.4715116414139455]
60 | v: [0.003121604446743642, 0.005615239263361253, -0.010553776392296934]
61 | q: [-0.4840108206676073, 0.4998577950717239, -0.5032014415263013, 0.5125075798742454] # [w,x,y,z]
62 |
--------------------------------------------------------------------------------
/test/evaluate_rpg_fpv.yaml:
--------------------------------------------------------------------------------
1 |
2 | common_params:
3 | fast_detection_delta: 20
4 |
5 |
6 | datasets:
7 | - name: Indoor 45 Seq 2
8 | rosbag: ${XVIO_DATASET_DIR}/rpg_fpv/indoor_45_2_davis_with_gt.bag
9 | from: 1545305731.5095596
10 | to: 1545305770.6105537
11 | events_topic: /dvs/events
12 | image_topic: /dvs/image_raw
13 | pose_topic: /groundtruth/colmap
14 | imu_topic: /dvs/imu
15 | depth_map_topic:
16 | camera_info_topic:
17 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_fpv.yaml
18 | override_params:
19 | # initial state computed from topic /groundtruth/colmap over 100ms:
20 | p: [ -3.44464409301, 4.65997703584, -0.801343059478 ]
21 | v: [ 0.5435364022578789, 1.1860373128293273, 1.923599302722088 ]
22 | q: [ -0.306097713576, 0.93164031599, -0.183486788497, 0.068433250789 ] #[w,x,y,z]
23 |
24 | - name: Indoor 45 Seq 12
25 | rosbag: ${XVIO_DATASET_DIR}/rpg_fpv/indoor_45_12_davis_with_gt.bag
26 | from: 1545313504.2094511
27 | to: 1545313548.7104790
28 | events_topic: /dvs/events
29 | image_topic: /dvs/image_raw
30 | pose_topic: /groundtruth/colmap
31 | imu_topic: /dvs/imu
32 | depth_map_topic:
33 | camera_info_topic:
34 | params: ${X_EVALUATE_SRC_ROOT}/params_rpg_fpv.yaml
35 | override_params:
36 | # initial state computed from topic /groundtruth/colmap over 100ms:
37 | p: [ -4.82944522786, 4.32989302502, -1.00369279693 ]
38 | v: [ -0.009830929637316676, 0.000989647805908094, 0.005835679570204378 ]
39 | q: [ -0.339599726372, 0.828465062019, -0.408797665913, 0.176641261335 ] #[w,x,y,z]
40 |
--------------------------------------------------------------------------------
/test/evaluate_sim_ral.yaml:
--------------------------------------------------------------------------------
1 |
2 | common_params:
3 | eklt_bootstrap: "klt"
4 |
5 | datasets:
6 | - name: Mars Vertical Circle
7 | rosbag: ${XVIO_DATASET_DIR}/sim/neuro_bem_esim_vcircle_vmax_2.4_offset_2.5.bag
8 | events_topic: /cam0/events
9 | image_topic: /cam0/image_raw
10 | pose_topic: /cam0/pose
11 | imu_topic: /imu
12 | depth_map_topic: /cam0/depthmap
13 | camera_info_topic: /cam0/camera_info
14 | params: ${X_EVALUATE_SRC_ROOT}/params_esim_davis346.yaml
15 | from: 10.0
16 | override_params:
17 | # initial state computed from /cam0/pose at 10.000999999s in neuro_bem_esim_vcircle_vmax_2.4_offset_2.5.bag:
18 | p: [ -0.0069972414333245615, 1.5892308541504607, 1.1513837585978979 ]
19 | v: [ 0.005836730616047017, 0.054023725912278366, 0.05297598693292569 ]
20 | q: [ -0.005574070453602605, 0.9997277664218892, 0.022566816247777906, 0.0020153382142657204 ] #[w,x,y,z]
21 |
22 | - name: Mars Circle
23 | rosbag: ${XVIO_DATASET_DIR}/sim/neuro_bem_esim_circle_vmax_7.2_offset_10.bag
24 | events_topic: /cam0/events
25 | image_topic: /cam0/image_raw
26 | pose_topic: /cam0/pose
27 | imu_topic: /imu
28 | depth_map_topic: /cam0/depthmap
29 | camera_info_topic: /cam0/camera_info
30 | params: ${X_EVALUATE_SRC_ROOT}/params_esim_davis346.yaml
31 | from: 10.0
32 | override_params:
33 | # initial state computed from /cam0/pose at 10.0s in neuro_bem_esim_circle_vmax_7.2_offset_10.bag:
34 | p: [ 3.509625004236487, 4.98771195648192, 2.050382598568867 ]
35 | v: [ 0.00817558596590592, -0.005691303876924701, -0.003433784376483093 ]
36 | q: [ -0.0014086850617686271, 0.4086417379214404, 0.9126879129698076, 0.0032740118751688634 ] #[w,x,y,z]
37 |
38 | - name: Mars Mellon
39 | rosbag: ${XVIO_DATASET_DIR}/sim/neuro_bem_esim_mellon_vmax_12.4_offset_10.bag
40 | events_topic: /cam0/events
41 | image_topic: /cam0/image_raw
42 | pose_topic: /cam0/pose
43 | imu_topic: /imu
44 | depth_map_topic: /cam0/depthmap
45 | camera_info_topic: /cam0/camera_info
46 | params: ${X_EVALUATE_SRC_ROOT}/params_esim_davis346.yaml
47 | from: 10.0
48 | override_params:
49 | # initial state computed from /cam0/pose at 10.0s in neuro_bem_esim_mellon_vmax_12.4_offset_10.bag:
50 | p: [ 2.979753345384094, 4.504408753737893, 3.626556145329341 ]
51 | v: [ 0.0620656286043175, 0.008858529848176285, -0.0017457572183846729 ]
52 | q: [ 0.0009804549968166192, 0.9992723717439144, 0.03624555703544209, -0.011833231700668188 ] #[w,x,y,z]
53 |
--------------------------------------------------------------------------------
/test/evaluate_wells_gt.yaml:
--------------------------------------------------------------------------------
1 |
2 | common_params:
3 | eklt_bootstrap: "klt"
4 |
5 | datasets:
6 | - name: Wells Test 5
7 | rosbag: ${XVIO_DATASET_DIR}/wells_cave/wells_test5_with_pose.bag
8 | pose_topic: /optitrack/davis
9 | events_topic: /dvs/events
10 | image_topic: /dvs/image_reconstructions
11 | imu_topic: /dvs/imu
12 | params: ${X_EVALUATE_SRC_ROOT}/params_wells_gt.yaml
13 | to: 1624056447
14 | override_params:
15 | # initial state computed from /optitrack/davis at 1624056435.57245s in wells_test5_with_pose.bag:
16 | p: [ -0.659018001066486, -2.62335194192339, -0.496722059903016 ]
17 | v: [ -0.11597827226708635, 0.22625134715447395, -0.08980424022087258 ]
18 | q: [ 0.385059137030963, -0.921542386688921, 0.0465619020689208, 0.0179186997402452 ] #[w,x,y,z]
19 | # from file: ./test5_15000/detector_parameters.yaml
20 | eklt_harris_block_size: 2
21 | eklt_harris_k: 0.2
22 | eklt_detection_min_distance: 72
23 | eklt_harris_quality_level: 0.05
24 |
25 | # - name: Wells Test 9
26 | # rosbag: ${XVIO_DATASET_DIR}/wells_cave/wells_test9_with_pose.bag
27 | # pose_topic: /optitrack/davis
28 | # events_topic: /dvs/events
29 | # image_topic: /dvs/image_reconstructions
30 | # imu_topic: /dvs/imu
31 | # params: ${X_EVALUATE_SRC_ROOT}/params_wells_gt.yaml
32 | # override_params:
33 | # # initial state computed from /optitrack/davis at 1624057502.45797s in wells_test9_with_pose.bag:
34 | # p: [ 2.41088499494861, 3.69978663101385, 0.594266045537711 ]
35 | # v: [ 1.8459367262211968, -6.540950997854848, -0.16677142409649845 ]
36 | # q: [ 0.718568240752693, -0.695029129938346, -0.0243657090590247, -0.000710028715432968 ] #[w,x,y,z]
37 | # # from file: ./test9_15000/detector_parameters.yaml
38 | # eklt_harris_block_size: 6
39 | # eklt_harris_k: 0.2
40 | # eklt_detection_min_distance: 68
41 | # eklt_harris_quality_level: 0.05
42 |
--------------------------------------------------------------------------------