├── .gitignore
├── LICENSE
├── README.md
├── affine_flow.py
├── affine_flow_time_to_contact.py
├── apriltag_odometry.py
├── phi_depth_integrals_accel_bias.py
├── phi_pose_observer.py
├── robomaster_pickle_mat.py
├── ros
│   ├── README.md
│   ├── VINS-Mono
│   │   ├── Dockerfile
│   │   ├── attach.sh
│   │   ├── build.sh
│   │   ├── realsense_435i.launch
│   │   ├── realsense_435i_config.yaml
│   │   ├── run.sh
│   │   └── vins_rviz.launch
│   ├── ros_convert_rovio.py
│   ├── ros_convert_vins_mono.py
│   ├── ros_publish_folder_recording.py
│   └── rovio
│       ├── .dockerignore
│       ├── Dockerfile
│       ├── attach.sh
│       ├── build.sh
│       ├── d435i_cam0.yaml
│       ├── d435i_rovio.info
│       ├── rovio_node.launch
│       ├── rovio_rosbag_node_ttc.launch
│       └── run.sh
├── rpg_align_trajectory
│   ├── LICENSE
│   ├── __init__.py
│   └── rpg_align_trajectory.py
├── ttc_depth.py
├── ttc_depth_calc_error.py
├── ttc_depth_from_folder.py
├── ttc_depth_integrals_accel_bias.py
├── ttc_depth_plot_live.py
├── ttc_depth_realsense.py
├── ttc_depth_robomaster.py
├── ttc_pose_observer.py
├── utilities.py
└── zmq_vector_publisher.py
/.gitignore:
--------------------------------------------------------------------------------
1 | results
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | *.py,cover
52 | .hypothesis/
53 | .pytest_cache/
54 | cover/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | .pybuilder/
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | # For a library or package, you might want to ignore these files since the code is
89 | # intended to run in multiple environments; otherwise, check them in:
90 | # .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # poetry
100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101 | # This is especially recommended for binary packages to ensure reproducibility, and is more
102 | # commonly ignored for libraries.
103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104 | #poetry.lock
105 |
106 | # pdm
107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108 | #pdm.lock
109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110 | # in version control.
111 | # https://pdm.fming.dev/#use-with-ide
112 | .pdm.toml
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2022 Levi Burner
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TTCDist: Fast Distance Estimation From an Active Monocular Camera Using Time-to-Contact
2 |
3 | This repository contains code for *TTCDist: Fast Distance Estimation From an Active Monocular Camera Using Time-to-Contact* by Levi Burner, Nitin J. Sanket, Cornelia Fermuller, and Yiannis Aloimonos.
4 |
5 | You can find a pdf of the paper [here](https://arxiv.org/abs/2203.07530).
6 |
7 | [TTCDist video](https://www.youtube.com/watch?v=CGS2FIZujnQ)
8 |
9 | ## Setup
10 | We use standard scientific Python (`>=3.8`) libraries like NumPy, SciPy, Matplotlib, and Numba. Make sure these are installed.
11 |
12 | The python bindings of the official [`apriltag`](https://github.com/AprilRobotics/apriltag) library are needed to use the `--april` flag in any of the scripts below.
13 |
14 | The Python package `pyrealsense2` is needed to run the algorithms in realtime with a RealSense D435i camera. The following links may be helpful when trying to install `pyrealsense2` on Ubuntu 22.04 and newer. Older versions of Ubuntu should use the standard instructions.
15 |
16 | * https://github.com/IntelRealSense/librealsense/blob/master/doc/installation.md
17 | * https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python
18 |
19 | ## Running from a recording in a folder
20 | ### Data
21 | We provide the data for the 10 sequences considered in TTCDist [here](https://drive.google.com/file/d/1XPYIrPPVfR7nqbWSclclwrIp4igqRGyS/view?usp=sharing).
22 |
23 | Download and extract to `TTCDist/recordings_21_11_12`
24 |
25 | ### Running on one sequence
26 | ```bash
27 | python3 ttc_depth_from_folder.py --dir recordings_21_11_12/record_000000 --visualize
28 | ```
29 |
30 | ### Benchmark on a single sequence
31 |
32 | *Note:* On the first run, the Numba functions will be compiled which will significantly affect the runtime. However, when run a second time, a compilation cache will be used instead.
33 |
34 | ```bash
35 | python3 ttc_depth_from_folder.py --dir recordings_21_11_12/record_000000 --bench
36 | ```
37 |
38 | ### Benchmark on all sequences
39 |
40 | *Note:* On the first run, the Numba functions will be compiled which will significantly affect the runtime. However, when run a second time, a compilation cache will be used instead.
41 |
42 | ```bash
43 | python3 ttc_depth_from_folder.py --dir recordings_21_11_12 --all --bench
44 | ```
45 |
46 | ### Calculate Average Trajectory Error
47 | We provide the data produced by all methods on the sequences in TTCDist [here](https://drive.google.com/file/d/1bO09vdTp8yazhRv755r_PQv2GPik6mY1/view?usp=sharing).
48 |
49 | Download and extract to `TTCDist/results`
50 |
51 | Then run the following to regenerate the results for the Tau, Phi, and AprilTag methods and calculate error.
52 |
53 | ```bash
54 | python3 ttc_depth_from_folder.py --dir recordings_21_11_12 --all --save --april
55 | python3 ttc_depth_calc_error.py --dir results --calc --latex
56 | ```
57 |
58 | To use the `--april` flag, the Python bindings for the official apriltag package must be installed from [here](https://github.com/AprilRobotics/apriltag).
59 |
60 | If you wish to reevaluate ROVIO and VINS-Mono on the recorded sequences, the scripts in the `ros/` folder may be used.
61 |
62 | ## Running in realtime with an Intel Realsense D435i
63 | ### Without apriltag package
64 | Assuming the `pyrealsense2` package is installed just run the following
65 |
66 | ```bash
67 | python3 ttc_depth_realsense.py --visualize
68 | ```
69 |
70 | A live view will open showing the Z distance between the camera and the tracked object. The top number is from the Tau constraint and the bottom is from the Phi constraint.
71 |
72 | The `r` key can be used to reset the patch being tracked. The `q` key will close the application.
73 |
74 | ### With apriltag ground truth
75 | If the Python bindings for the official apriltag package are installed from [here](https://github.com/AprilRobotics/apriltag), then the following will work:
76 |
77 | ```bash
78 | python3 ttc_depth_realsense.py --visualize --april
79 | ```
80 |
81 | In another terminal, run:
82 |
83 |
84 | ```bash
85 | python3 ttc_depth_plot_live.py
86 | ```
87 |
88 | A live plot will appear. The top row shows the X, Y, and Z positions of the tracked object. `x_hat` is from the Tau constraint, `x_gt` is from the AprilTag, and `phi_x_hat` is from the Phi constraint.
89 |
90 |
91 | ## Running the RoboMaster experiments
92 | Install the [RoboMaster-SDK Python package](https://github.com/dji-sdk/RoboMaster-SDK). Make sure your RoboMaster robot works with some of the examples included in the SDK. Then to run the experiments from TTCDist use the command:
93 |
94 | ```bash
95 | python3 ttc_depth_robomaster.py --visualize --b 2.0 --efference
96 | ```
97 |
98 | Remove the `--efference` flag to use measured acceleration as described in the paper.
99 |
100 |
101 | ## Acknowledgements
102 | Part of the [rpg_trajectory_evaluation](https://github.com/uzh-rpg/rpg_trajectory_evaluation) toolbox is reproduced in `rpg_align_trajectory/` and is available under its respective license.
103 |
--------------------------------------------------------------------------------
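
A minimal install sketch for the dependencies listed in the README's Setup section (assuming a standard PyPI environment; the `apriltag` bindings are instead built from the repository linked above, and `pyrealsense2` is only needed for the live demos):

```bash
pip3 install numpy scipy matplotlib numba
pip3 install pyrealsense2   # optional: live RealSense D435i demos
```
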
/affine_flow.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: affine_flow.py
4 | # Available under MIT license
5 | #
6 | # Estimate affine flow for a predetermined patch
7 | #
8 | # History:
9 | # 04-23-20 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import time
15 |
16 | import cv2
17 | import numpy as np
18 | import numba as nb
19 | from scipy.spatial.transform import Rotation as R
20 |
21 | def derotate_image(frame, K, q_c_to_fc):
22 | R_c_to_fc = R.from_quat((q_c_to_fc[1], q_c_to_fc[2], q_c_to_fc[3], q_c_to_fc[0])).as_matrix()
23 | R_fc_to_c = R_c_to_fc.T
24 |
25 | # Derive this by considering p1 = (K R K_inv) (Z(X)/Z(RX)) p0
26 | top_two_rows = (K @ R_fc_to_c @ np.linalg.inv(K))[0:2, :]
27 | bottom_row = (R_fc_to_c @ np.linalg.inv(K))[2, :]
28 |
29 | map_pixel_c_to_fc = np.vstack((top_two_rows, bottom_row))
30 | map_pixel_c_to_fc_opencv = np.float32(map_pixel_c_to_fc.flatten().reshape(3,3))
31 |
32 | frame_derotated = cv2.warpPerspective(frame, map_pixel_c_to_fc_opencv, (frame.shape[1], frame.shape[0]), flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR)
33 | return frame_derotated
34 |
35 | def calculate_dW_dp_mults(rect, stride):
36 | region_size = (int((rect[2] - rect[0])/stride), int((rect[3] - rect[1])/stride))
37 |
38 | x_mult = stride*np.tile(np.arange(0, region_size[0]), region_size[1]) + rect[0]
39 | y_mult = stride*np.repeat(np.arange(0, region_size[1]), region_size[0]) + rect[1]
40 | return x_mult, y_mult
41 |
42 | # From eq 35 of LK 20 years on
43 | def invert_delta_p(last_p, T_considered, I_warped_back_considered, grad_T_dW, H_T_dW_inv):
44 | diff_T_I_shaped = (T_considered - I_warped_back_considered).flatten()
45 |
46 | grad_J_to_p = diff_T_I_shaped @ grad_T_dW
47 | delta_p_inv = H_T_dW_inv @ grad_J_to_p
48 |
49 | p = -delta_p_inv
50 |
51 | p_inv_unscaled = np.array(
52 | (-p[0] - p[0]*p[3] + p[1] * p[2],
53 | -p[1],
54 | -p[2],
55 | -p[3] - p[0]*p[3] + p[1]*p[2],
56 | -p[4] - p[3]*p[4] + p[2]*p[5],
57 | -p[5] - p[0]*p[5] + p[1]*p[4]),
58 | dtype=np.float32)
59 | scale = 1.0 / ((1+p[0])*(1+p[3])-p[1]*p[2])
60 | delta_p = scale * p_inv_unscaled
61 |
62 | last_p = compose_warp(last_p, delta_p)
63 |
64 | return delta_p, last_p
65 | invert_delta_p = nb.jit(nopython = True, cache = True, fastmath=True)(invert_delta_p)
66 |
67 | # Calculate p_c s.t. W(p_c, x) = W(p, W_(dp, x))
68 | # from eq 18 of LK 20 years on
69 | def compose_warp(p, dp):
70 | p_c = np.array(
71 | (p[0] + dp[0] + p[0] * dp[0] + p[2] * dp[1],
72 | p[1] + dp[1] + p[1] * dp[0] + p[3] * dp[1],
73 | p[2] + dp[2] + p[0] * dp[2] + p[2] * dp[3],
74 | p[3] + dp[3] + p[1] * dp[2] + p[3] * dp[3],
75 | p[4] + dp[4] + p[0] * dp[4] + p[2] * dp[5],
76 | p[5] + dp[5] + p[1] * dp[4] + p[3] * dp[5]))
77 | return p_c
78 | compose_warp = nb.jit(nopython = True, cache = True, fastmath=True)(compose_warp)
79 |
80 | def make_rot_times_affine(stride, p, rect, R_fc_to_c, K, K_inv):
81 | # Affine matrix parameterized by p
82 | A_p = np.array([[stride*(1.0+p[0]), stride*p[2], p[4] + (1.0+p[0])*rect[0] + p[2]*rect[1]],
83 | [stride*(p[1]), stride*(1.0+p[3]), p[5] + p[1]*rect[0] + (1.0+p[3])*rect[1]],
84 | [ 0.0, 0.0, 1.0]], dtype=np.float32)
85 | # Derived directly from perspective projection equations
86 | tmp = R_fc_to_c @ K_inv @ A_p
87 |
88 | K_cropped = np.ascontiguousarray(K[0:2, :])
89 | top = K_cropped @ tmp
90 | bot = tmp[2, :]
91 |
92 | rot_times_affine = np.vstack((top, np.atleast_2d(bot)))
93 |
94 | return rot_times_affine
95 | make_rot_times_affine = nb.jit(nopython = True, cache = True, fastmath=True)(make_rot_times_affine)
96 |
97 | def nb_norm(p, delta_p_stop):
98 | return np.linalg.norm(p) < delta_p_stop
99 | nb_norm = nb.jit(nopython = True, cache = True, fastmath=True)(nb_norm)
100 |
101 | def affineLKTracker(frame,
102 | T_considered,
103 | rect,
104 | region_size,
105 | p,
106 | K, K_inv,
107 | R_fc_to_c,
108 | dW_dp_x_mult=None, dW_dp_y_mult=None,
109 | stride=1,
110 | inverse=True,
111 | grad_T_dW=None,
112 | H_T_dW_inv=None):
113 | #if dW_dp_x_mult is None or dW_dp_y_mult is None:
114 |     #    dW_dp_x_mult, dW_dp_y_mult = calculate_dW_dp_mults(rect, stride)
115 | #start = time.time()
116 | rot_times_affine = make_rot_times_affine(stride, p, rect, R_fc_to_c, K, K_inv)
117 | I_warped_back_considered = cv2.warpPerspective(frame, rot_times_affine, region_size, flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR)
118 |
119 | #mid = time.time()
120 |
121 | #bit_start = time.time()
122 |
123 | if not inverse:
124 | #bit_start = time.time()
125 |
126 | normalization_factor = 0.125 # 1/8 for sobel of size 3
127 | sobel_x = cv2.Sobel(I_warped_back_considered, cv2.CV_32F, 1, 0, ksize=3, scale=normalization_factor).flatten()
128 | sobel_y = cv2.Sobel(I_warped_back_considered, cv2.CV_32F, 0, 1, ksize=3, scale=normalization_factor).flatten()
129 |
130 |
131 | x_mult_sobel_x = dW_dp_x_mult * sobel_x
132 | x_mult_sobel_y = dW_dp_x_mult * sobel_y
133 | y_mult_sobel_x = dW_dp_y_mult * sobel_x
134 | y_mult_sobel_y = dW_dp_y_mult * sobel_y
135 | # vstack and transpose is faster than column stack
136 | grad_I_dW = np.vstack((x_mult_sobel_x, x_mult_sobel_y, y_mult_sobel_x, y_mult_sobel_y, sobel_x, sobel_y)).transpose()
137 |
138 | # TODO this may not be exploiting symmetry, is almost half the computation time
139 | H = np.einsum('ij,ik->jk', grad_I_dW, grad_I_dW, optimize=True)
140 |
141 | grad_J_to_p = diff_T_I_shaped @ grad_I_dW
142 |
143 | delta_p = np.linalg.solve(H, grad_J_to_p).reshape((6,))
144 |
145 | #bit_end = time.time()
146 | else:
147 | #bit_start = time.time()
148 |
149 |
150 |
151 | #delta_p_inv = np.sum(diff_T_I_shaped) * (H_T_dW_inv @ grad_T_dW)
152 | #print(delta_p_inv.shape)
153 |
154 | # Negative because we used T - I instead of I - T as in LK 20 years on
155 | #bit_end = time.time()
156 |
157 | delta_p, p = invert_delta_p(p, T_considered, I_warped_back_considered, grad_T_dW, H_T_dW_inv)
158 | #bit_end = time.time()
159 |
160 | #end = time.time()
161 | #print('end {:.2f} bit {:.2f} mid {:.2f}'.format(1000000*(end-bit_end), 1000000*(bit_end-mid), 1000000*(mid-start)))
162 |
163 | return delta_p, p, I_warped_back_considered
164 |
165 |
166 | def draw_warped_patch_location(frame, rect, p, q_c_to_fc, K):
167 | points = np.array([
168 | (rect[0], rect[1], 1),
169 | (rect[2], rect[1], 1),
170 | (rect[2], rect[3], 1),
171 | (rect[0], rect[3], 1)
172 | ])
173 |
174 | A_p = np.array([[1+p[0], p[2], p[4]],
175 | [p[1], 1+p[3], p[5]],
176 | [ 0, 0, 1]], dtype=np.float32)
177 |
178 | # Derived directly from perspective projection equations
179 | R_fc_to_c = R.from_quat([q_c_to_fc[1], q_c_to_fc[2], q_c_to_fc[3], q_c_to_fc[0]]).as_matrix().astype(np.float32).transpose()
180 | K_inv = np.linalg.inv(K)
181 |
182 | tmp = R_fc_to_c @ K_inv @ A_p
183 | rot_times_affine = np.vstack((K[0:2, :] @ tmp, tmp[2, :]))
184 |
185 | points_warped = []
186 | for point in points:
187 | point = rot_times_affine @ point
188 | px = point[0]/point[2]
189 | py = point[1]/point[2]
190 | points_warped.append([px, py])
191 | points_warped = np.array(points_warped)
192 |
193 | points_warped = points_warped.reshape((-1, 1, 2)).astype(np.int32)
194 |
195 | cv2.polylines(frame, [points_warped], isClosed=True, color=255, thickness=2)
196 |
197 | def draw_derotated(frame, q_c_to_fc, K):
198 | R_fc_to_c = R.from_quat([q_c_to_fc[1], q_c_to_fc[2], q_c_to_fc[3], q_c_to_fc[0]]).as_matrix().astype(np.float32).transpose()
199 | K_inv = np.linalg.inv(K)
200 |
201 | tmp = R_fc_to_c @ K_inv
202 | rot = np.vstack((K[0:2, :] @ tmp, tmp[2, :]))
203 | frame_derotated = cv2.warpPerspective(frame, rot, (frame.shape[1], frame.shape[0]), flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR)
204 | return frame_derotated
205 |
206 | def draw_full_reverse_warp(frame, rect, p, q_c_to_fc, K):
207 | R_fc_to_c = R.from_quat([q_c_to_fc[1], q_c_to_fc[2], q_c_to_fc[3], q_c_to_fc[0]]).as_matrix().astype(np.float32).transpose()
208 | K_inv = np.linalg.inv(K)
209 |
210 | # Affine matrix parameterized by p
211 | A_p = np.array([[1+p[0], p[2], p[4]],
212 | [p[1], 1+p[3], p[5]],
213 | [ 0, 0, 1]], dtype=np.float32)
214 |
215 | # Derived directly from perspective projection equations
216 | tmp = R_fc_to_c @ K_inv @ A_p
217 | rot_times_affine = np.vstack((K[0:2, :] @ tmp, tmp[2, :]))
218 |
219 | frame_warped_back = cv2.warpPerspective(frame, rot_times_affine, (frame.shape[1], frame.shape[0]), flags=cv2.WARP_INVERSE_MAP+cv2.INTER_LINEAR)
220 |
221 | points = np.array([
222 | (rect[0], rect[1]),
223 | (rect[2], rect[1]),
224 | (rect[2], rect[3]),
225 | (rect[0], rect[3])
226 | ])
227 | points = points.reshape((-1, 1, 2)).astype(np.int32)
228 | cv2.polylines(frame_warped_back, [points], isClosed=True, color=(0, 255, 0), thickness=1)
229 |
230 | return frame_warped_back
231 |
232 | class AffineTrackRotInvariant:
233 | def __init__(self,
234 | patch_coordinates,
235 | template_image,
236 | template_q_c_to_fc,
237 | K,
238 | delta_p_stop=0.1,
239 | delta_p_mult=1.0,
240 | visualize=False,
241 | visualize_verbose=False,
242 | wait_key=0,
243 | stride=1.0,
244 | inverse=True,
245 | max_update_time=None):
246 | self._patch_coordinates = patch_coordinates
247 | self._K = K.astype(np.float32)
248 | self._K_inv = np.linalg.inv(K).astype(np.float32)
249 | self._delta_p_stop = delta_p_stop
250 | self._delta_p_mult = delta_p_mult
251 | self._stride = stride
252 | self._inverse = inverse
253 | self._max_update_time = max_update_time
254 |
255 | self._visualize = visualize
256 | self._visualize_verbose = visualize_verbose
257 | self._wait_key = wait_key
258 |
259 | if template_image is not None:
260 | self.set_template(patch_coordinates, template_image, template_q_c_to_fc)
261 |
262 | def set_template(self, patch_coordinates, template_image, template_q_c_to_fc):
263 | dW_dp_x_mult, dW_dp_y_mult = calculate_dW_dp_mults(patch_coordinates, self._stride)
264 | self._dW_dp_x_mult = dW_dp_x_mult
265 | self._dW_dp_y_mult = dW_dp_y_mult
266 |
267 | self._patch_coordinates = patch_coordinates
268 | self._template_image = template_image
269 | self._template_q_c_to_fc = template_q_c_to_fc
270 |
271 | # Get the image in the fixed orientation frame
272 | template_image_derotated = derotate_image(self._template_image, self._K, template_q_c_to_fc)
273 |
274 | self._template_image_derotated = template_image_derotated[self._patch_coordinates[1]:self._patch_coordinates[3], self._patch_coordinates[0]:self._patch_coordinates[2]]
275 |
276 | size_y = int((self._patch_coordinates[3] - self._patch_coordinates[1])/self._stride)
277 | size_x = int((self._patch_coordinates[2] - self._patch_coordinates[0])/self._stride)
278 |
279 | self._template_image_derotated = cv2.resize(self._template_image_derotated, (size_x, size_y))
280 |
281 | if self._inverse:
282 | normalization_factor = 0.125 # 1/8 for sobel of size 3
283 | sobel_x = cv2.Sobel(self._template_image_derotated, cv2.CV_32F, 1, 0, ksize=3, scale=normalization_factor).flatten()
284 | sobel_y = cv2.Sobel(self._template_image_derotated, cv2.CV_32F, 0, 1, ksize=3, scale=normalization_factor).flatten()
285 |
286 | x_mult_sobel_x = self._dW_dp_x_mult * sobel_x
287 | x_mult_sobel_y = self._dW_dp_x_mult * sobel_y
288 | y_mult_sobel_x = self._dW_dp_y_mult * sobel_x
289 | y_mult_sobel_y = self._dW_dp_y_mult * sobel_y
290 | # vstack and transpose is faster than column stack
291 | self._grad_T_dW = np.vstack((x_mult_sobel_x, x_mult_sobel_y, y_mult_sobel_x, y_mult_sobel_y, sobel_x, sobel_y)).transpose().astype(np.float32)
292 |
293 | # TODO this may not be exploiting symmetry, is almost half the computation time
294 | self._H_T_dW_inv = np.linalg.inv(np.einsum('ij,ik->jk', self._grad_T_dW, self._grad_T_dW, optimize=True)).astype(np.float32)
295 | else:
296 | self._grad_T_dW = None
297 | self._H_T_dW_inv = None
298 |
299 | def update(self, p, frame_gray, R_fc_to_c):
300 | steps = 0
301 |
302 | rect = self._patch_coordinates
303 | region_size = (int((rect[2] - rect[0])/self._stride), int((rect[3] - rect[1])/self._stride))
304 |
305 | p, I_warped_back_considered = affine_flow_loop(
306 | frame_gray,
307 | self._template_image_derotated,
308 | self._patch_coordinates,
309 | region_size,
310 | p,
311 | self._K,
312 | self._K_inv,
313 | R_fc_to_c,
314 | dW_dp_x_mult=self._dW_dp_x_mult,
315 | dW_dp_y_mult=self._dW_dp_y_mult,
316 | stride=self._stride,
317 | inverse=self._inverse,
318 | grad_T_dW=self._grad_T_dW,
319 | H_T_dW_inv=self._H_T_dW_inv,
320 | delta_p_stop = self._delta_p_stop,
321 | max_update_time = self._max_update_time)
322 |
323 | if self._visualize:
324 | cv2.imshow('affine flow progress', np.hstack((I_warped_back_considered, self._template_image_derotated)))
325 | #cv2.waitKey(self._wait_key)
326 |
327 | #print('Update time {} steps {} seconds'.format(steps, t_current - t_start))
328 | return p
329 |
330 | def affine_flow_loop(frame,
331 | T_considered,
332 | rect,
333 | region_size,
334 | p,
335 | K, K_inv,
336 | R_fc_to_c,
337 | dW_dp_x_mult=None, dW_dp_y_mult=None,
338 | stride=1,
339 | inverse=True,
340 | grad_T_dW=None,
341 | H_T_dW_inv=None,
342 | delta_p_stop=None,
343 | max_update_time=None):
344 | steps = 0
345 | t_start = time.time()
346 | old_p = p
347 | while True:
348 | #start = time.time()
349 | delta_p, p, I_warped_back_considered = affineLKTracker(
350 | frame,
351 | T_considered,
352 | rect,
353 | region_size,
354 | p,
355 | K,
356 | K_inv,
357 | R_fc_to_c,
358 | dW_dp_x_mult,
359 | dW_dp_y_mult,
360 | stride,
361 | inverse,
362 | grad_T_dW,
363 | H_T_dW_inv)
364 |
365 | #p = compose_warp(p, delta_p)
366 |
367 | #if self._visualize_verbose:
368 | # cv2.imshow('affine flow progress verbose', np.hstack((I_warped_back_considered, self._template_image_derotated)))
369 | #cv2.waitKey(self._wait_key)
370 |
371 | #mid = time.time()
372 | steps += 1
373 | t_current = time.time()
374 |
375 | #if np.linalg.norm(delta_p) < delta_p_stop:
376 | #if nb_norm(delta_p) < delta_p_stop:
377 | if not nb_norm(p, 1e4): # sanity
378 | p = old_p
379 | break
380 | if nb_norm(delta_p, delta_p_stop):
381 | #print('Small delta_p {} steps {:.01f} hz'.format(steps, steps/(t_current - t_start)))
382 | break
383 | #end = time.time()
384 | #print(end-mid, mid-start)
385 |
386 |
387 | if max_update_time is not None:
388 | if t_current - t_start > max_update_time:
389 | print('Max update time {} steps {} seconds'.format(steps, t_current - t_start))
390 | break
391 | return p, I_warped_back_considered
392 |
--------------------------------------------------------------------------------
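
A minimal usage sketch for `AffineTrackRotInvariant` above. The intrinsics, patch, frames, and orientation here are placeholders (random noise and an identity rotation), not values from the repository:

```python
import numpy as np
from affine_flow import AffineTrackRotInvariant

# Placeholder pinhole intrinsics (fx = fy = 420, principal point near center)
K = np.array([[420.0,   0.0, 424.0],
              [  0.0, 420.0, 240.0],
              [  0.0,   0.0,   1.0]], dtype=np.float32)

rect = (400, 200, 500, 300)                  # (x0, y0, x1, y1) template patch
q_identity = np.array([1.0, 0.0, 0.0, 0.0])  # (w, x, y, z), no rotation

# Placeholder grayscale template; a real caller passes a camera frame
template = np.random.rand(480, 848).astype(np.float32)
tracker = AffineTrackRotInvariant(rect, template, q_identity, K)

p = np.zeros(6, dtype=np.float32)            # identity affine warp
frame = template                             # placeholder "next" frame
R_fc_to_c = np.eye(3, dtype=np.float32)      # camera orientation (e.g. from an IMU)
p = tracker.update(p, frame, R_fc_to_c)      # refined warp parameters
```
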
/affine_flow_time_to_contact.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: affine_flow_time_to_contact.py
4 | # Available under MIT license
5 | #
6 | # Calculate time to contact from affine flow parameters
7 | #
8 | # History:
9 | # 04-23-20 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import numpy as np
15 |
16 | class AffineFlowTimeToContactEstimator(object):
17 | def __init__(self, patch_coordinates, K, stride=None):
18 | self._patch_coordinates = patch_coordinates
19 | self._K = K
20 | self._K_inv = np.linalg.inv(self._K)
21 | self._patch_center_xy = np.array([(self._patch_coordinates[2] + self._patch_coordinates[0]) / 2.0,
22 | (self._patch_coordinates[3] + self._patch_coordinates[1]) / 2.0])
23 | self._X_0_ttc = np.array([self._patch_center_xy[0], self._patch_center_xy[1], 1])
24 | self._last_p = None
25 |
26 | def estimate_ttc(self, p):
27 | if self._last_p is None:
28 | self._last_p = p
29 | return False
30 |
31 | A_0 = self._K_inv @ np.array([[1+self._last_p[0], self._last_p[2], self._last_p[4]],
32 | [self._last_p[1], 1+self._last_p[3], self._last_p[5]],
33 | [ 0, 0, 1]]) @ self._K
34 |
35 | A_1 = self._K_inv @ np.array([[1+p[0], p[2], p[4]],
36 | [p[1], 1+p[3], p[5]],
37 | [ 0, 0, 1]]) @ self._K
38 | self._last_p = p
39 |
40 | try:
41 | # dp = I - A_0 A_1^-1
42 | dp = np.linalg.solve(A_1.transpose(), (A_1 - A_0).transpose()).transpose()
43 | except np.linalg.LinAlgError:
44 | return False
45 |
46 | a = np.array([0, 0, dp[0, 0], dp[0, 1], dp[0, 2], dp[1, 0], dp[1, 1], dp[1, 2]])
47 |
48 | # Select a point to return the ttc for
49 | X_ttc = A_1 @ self._K_inv @ self._X_0_ttc
50 |
51 | EPS_XY = 1E-3
52 | x_dot_over_z, y_dot_over_z, _ = a_to_frequency_of_contact(a, X_ttc, EPS_XY)
53 |
54 | EPS_Z = 2E-3
55 | _, _, z_dot_over_z = a_to_frequency_of_contact(a, X_ttc, EPS_Z)
56 |
57 | return x_dot_over_z, y_dot_over_z, z_dot_over_z, X_ttc
58 |
59 | def a_to_frequency_of_contact(a, X_ttc, EPS):
60 | if abs(a[4]) < EPS and abs(a[7]) < EPS: # Don't divide by a[4], a[7], assume U,V=0
61 | W_c = (a[2] + a[6]) / 2
62 | U_a = -(a[2] - W_c)
63 | V_b = -(a[6] - W_c)
64 |
65 | W_a = 0
66 | W_b = 0
67 |
68 | elif abs(a[4]) < EPS: # Don't divide by a[4], assume U=0
69 | U_a = -a[5]*(a[4]/a[7])
70 | W_c = a[2] + U_a
71 | V_b = -(a[6] - W_c)
72 |
73 | W_a = W_c * a[5] / a[7]
74 | W_b = 0
75 |
76 | elif abs(a[7]) < EPS: # Don't divide by a[7], assume V=0
77 | V_b = -a[3]*(a[7]/a[4])
78 | W_c = a[6] + V_b
79 | U_a = -(a[2] - W_c)
80 |
81 | W_a = 0
82 | W_b = W_c * a[3] / a[4]
83 |
84 | else:
85 | U_a = -a[5]*(a[4]/a[7])
86 | V_b = -a[3]*(a[7]/a[4])
87 | W_c = ((a[2] + U_a) + (a[6] + V_b)) / 2
88 |
89 | W_a = W_c * a[5] / a[7]
90 | W_b = W_c * a[3] / a[4]
91 |
92 | x_dot_over_z = U_a * X_ttc[0] - a[3] * X_ttc[1] - a[4]
93 | y_dot_over_z = -a[5] * X_ttc[0] + V_b * X_ttc[1] - a[7]
94 | z_dot_over_z = W_a * X_ttc[0] + W_b * X_ttc[1] + W_c
95 |
96 | return x_dot_over_z, y_dot_over_z, z_dot_over_z
97 |
--------------------------------------------------------------------------------
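
A sketch of how `AffineFlowTimeToContactEstimator` turns successive warp parameters into scaled velocities; the warp here is a synthetic 1% isotropic expansion rather than tracker output:

```python
import numpy as np
from affine_flow_time_to_contact import AffineFlowTimeToContactEstimator

K = np.array([[420.0,   0.0, 424.0],
              [  0.0, 420.0, 240.0],
              [  0.0,   0.0,   1.0]])
est = AffineFlowTimeToContactEstimator((400, 200, 500, 300), K)

est.estimate_ttc(np.zeros(6))  # first call only stores p and returns False

p = np.array([0.01, 0.0, 0.0, 0.01, 0.0, 0.0])  # 1% isotropic expansion
x_dot_over_z, y_dot_over_z, z_dot_over_z, X_ttc = est.estimate_ttc(p)

# z_dot_over_z is the fractional depth change per frame; dividing by the frame
# period gives the frequency of contact, whose inverse is the time to contact.
print(z_dot_over_z)  # ~0.0099 for this synthetic expansion
```
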
/apriltag_odometry.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: apriltag_odometry.py
4 | # Available under MIT license
5 | #
6 | # Find an apriltag in an image and estimate pose relative to it
7 | #
8 | # History:
9 | # 04-23-20 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import cv2
15 | import numpy as np
16 |
17 | from apriltag import apriltag
18 |
19 | class AprilPose(object):
20 | def __init__(self, K, family='tagStandard41h12', marker_size_m=(4.5 * 2.54 / 100)):
21 | # Hard code a bunch of defaults for now, they are not important
22 | family = family
23 | threads = 8
24 | max_hamming = 0
25 | decimate = 1
26 | blur = 0.8
27 | refine_edges = False
28 | debug = False
29 |
30 | # Camera intrinsics
31 | self._K = K
32 |
33 | self._detector = apriltag(family, threads, max_hamming, decimate, blur, refine_edges, debug)
34 |
35 | m_half_size = marker_size_m / 2
36 |
37 | marker_center = np.array((0, 0, 0))
38 | marker_points = []
39 | marker_points.append(marker_center + (-m_half_size, m_half_size, 0))
40 | marker_points.append(marker_center + ( m_half_size, m_half_size, 0))
41 | marker_points.append(marker_center + ( m_half_size, -m_half_size, 0))
42 | marker_points.append(marker_center + (-m_half_size, -m_half_size, 0))
43 | self._marker_points = np.array(marker_points)
44 |
45 | def find_tags(self, frame_gray):
46 | detections = self._detector.detect(frame_gray)
47 | return detections
48 |
49 | def find_detection(self, detections, id):
50 | for (i, detection) in enumerate(detections):
51 | if detection['id'] == id:
52 | return detection
53 | return None
54 |
55 | def find_pose_from_tag(self, detection):
56 | object_points = self._marker_points
57 | image_points = detection['lb-rb-rt-lt']
58 |
59 | pnp_ret = cv2.solvePnP(object_points, image_points, self._K, distCoeffs=None,flags=cv2.SOLVEPNP_IPPE_SQUARE)
60 |         if not pnp_ret[0]:
61 | raise Exception('Error solving PnP')
62 |
63 | r = pnp_ret[1]
64 | p = pnp_ret[2]
65 |
66 | return p.reshape((3,)), r.reshape((3,))
67 |
68 | def draw_detections(self, frame, detections):
69 | for detection in detections:
70 | pts = detection['lb-rb-rt-lt'].reshape((-1, 1, 2)).astype(np.int32)
71 | frame = cv2.polylines(frame, [pts], isClosed=True, color=(0, 0, 255), thickness=5)
72 | cv2.circle(frame, tuple(detection['center'].astype(np.int32)), 5, (0, 0, 255), -1)
73 |
--------------------------------------------------------------------------------
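
A sketch of the `AprilPose` workflow (requires the apriltag bindings; the blank image here is a placeholder, so `find_tags` returns no detections and the pose branch is skipped):

```python
import numpy as np
from apriltag_odometry import AprilPose

K = np.array([[420.0,   0.0, 424.0],
              [  0.0, 420.0, 240.0],
              [  0.0,   0.0,   1.0]])
april = AprilPose(K)

frame_gray = np.zeros((480, 848), dtype=np.uint8)  # placeholder image
detections = april.find_tags(frame_gray)
detection = april.find_detection(detections, id=0)
if detection is not None:
    # translation and Rodrigues rotation of the camera relative to the tag
    p, r = april.find_pose_from_tag(detection)
```
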
/phi_depth_integrals_accel_bias.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: phi_depth_integrals_accel_bias.py
4 | # Available under MIT license
5 | #
6 | # Estimate distance using the Phi constraint in the presence of
7 | # a constant acceleration bias
8 | #
9 | # History:
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import numpy as np
15 | import numba as nb
16 |
17 | def cumulative_int(dt, x):
18 | return dt * np.cumsum(x)
19 |
20 | def accel_z_phi_constraint(times, z_over_z0, accel_z):
21 | z_over_z0 = z_over_z0 / z_over_z0[0]
22 |
23 | # TODO all times are needed but they are assumed to be evenly spaced
24 | dt = times[1] - times[0]
25 | int_accel = cumulative_int(dt, accel_z)
26 | iint_accel = cumulative_int(dt, int_accel)
27 |
28 | E = z_over_z0 - 1
29 | R = -(times - times[0])
30 | D1 = 0.5 * np.square(R)
31 | Da = iint_accel
32 |
33 | A = np.stack((E, R, D1), axis=1)
34 | b = Da.reshape((A.shape[0], 1))
35 |
36 | z, res, rank, s = np.linalg.lstsq(A, b, rcond=-1)
37 | return z.reshape((3,)), res
38 |
39 | def accel_z_phi_constraint_tf(times, z_over_z0, accel_z):
40 | z_star, res = accel_z_phi_constraint(np.flip(times), np.flip(z_over_z0), np.flip(accel_z))
41 | return z_star, res
42 |
43 | def accel_x_phi_constraint(times, x_over_z, z_over_z0, accel_x):
44 | dx_over_z0 = x_over_z * (z_over_z0 / z_over_z0[0])
45 | dx_over_z0 -= dx_over_z0[0]
46 |
47 | # TODO all times are needed but they are assumed to be evenly spaced
48 | dt = times[1] - times[0]
49 | int_accel = cumulative_int(dt, accel_x)
50 | iint_accel = cumulative_int(dt, int_accel)
51 |
52 | P = dx_over_z0
53 | R = -(times - times[0])
54 | D1 = 0.5 * np.square(R)
55 | Da = iint_accel
56 |
57 | A = np.stack((P, R, D1), axis=1)
58 | b = Da.reshape((A.shape[0], 1))
59 |
60 | x, res, rank, s = np.linalg.lstsq(A, b, rcond=-1)
61 |
62 | return x.reshape((3,)), res
63 |
64 | def accel_x_phi_constraint_tf(times, x_over_z, z_over_z0, accel_x):
65 | x_star, res = accel_x_phi_constraint(np.flip(times), np.flip(x_over_z), np.flip(z_over_z0), np.flip(accel_x))
66 | return x_star, res
67 |
68 | # Setup numba
69 | # TODO: fastmath is disabled because many of the values are very small and may be subnormal (not verified)
70 | cumulative_int = nb.jit(nopython = True, cache = True, fastmath=False)(cumulative_int)
71 | accel_z_phi_constraint = nb.jit(nopython = True, cache = True, fastmath=False)(accel_z_phi_constraint)
72 | accel_z_phi_constraint_tf = nb.jit(nopython = True, cache = True, fastmath=False)(accel_z_phi_constraint_tf)
73 | accel_x_phi_constraint = nb.jit(nopython = True, cache = True, fastmath=False)(accel_x_phi_constraint)
74 | accel_x_phi_constraint_tf = nb.jit(nopython = True, cache = True, fastmath=False)(accel_x_phi_constraint_tf)
75 |
--------------------------------------------------------------------------------
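
A synthetic sanity check (a sketch, not from the repository) of the least squares above: simulate 1D motion with a sinusoidal acceleration plus a constant bias, and recover the depth, velocity, and bias at the first sample with `accel_z_phi_constraint`:

```python
import numpy as np
from phi_depth_integrals_accel_bias import accel_z_phi_constraint

dt = 0.005
t = np.arange(0.0, 1.0, dt)
z0, v0, bias = -2.0, 0.3, 0.05
a_true = 2.0 * np.sin(2.0 * np.pi * 3.0 * t)  # exciting acceleration

# Rectangle-rule integration of the true motion (matches cumulative_int)
v = v0 + dt * np.cumsum(a_true)
z = z0 + dt * np.cumsum(v)
a_meas = a_true + bias                        # biased accelerometer signal

(z0_hat, v0_hat, bias_hat), res = accel_z_phi_constraint(t, z / z0, a_meas)
print(z0_hat, v0_hat, bias_hat)  # roughly (-2.0, 0.3, 0.05)
```
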
/phi_pose_observer.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: phi_pose_observer.py
4 | # Available under MIT license
5 | #
6 | # Position observer from phi and acceleration
7 | #
8 | # History:
9 | # 08-18-22 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import numpy as np
15 | from phi_depth_integrals_accel_bias import accel_x_phi_constraint_tf, accel_z_phi_constraint_tf
16 |
17 | def feedback_good(accel_power, accel_z, max_z, accel_power_thresh):
18 |     return (not np.isnan(accel_z) and accel_power > accel_power_thresh and accel_z < max_z)
19 |
20 | class PhiPoseObserver(object):
21 | def __init__(self,
22 | z_0=None,
23 | z_0_dot=0.0,
24 | dt=0.005,
25 | max_z=-0.1,
26 | z_hat_gain=2.0,
27 | seconds_to_keep = 1.0,
28 | accel_power_thresh = 2.0):
29 | self._dt = dt
30 | self._z_0 = None
31 | self._z_hat = self._z_0
32 | self._max_z = max_z
33 | self._min_initial_z = -7.5
34 |
35 | self._seconds_to_keep = seconds_to_keep
36 | self._num_samples_to_keep = int(self._seconds_to_keep / self._dt)
37 |
38 | self._t_list = []
39 | self._phi_list = []
40 | self._a_fc_list = []
41 |
42 | self._A = np.array(((0, 1), (0, 0)))
43 | self._B = np.array((0, 1))
44 | self._L = np.array(((2, 0), (0, 20)))
45 |
46 | self._accel_power_thresh = accel_power_thresh
47 |
48 | def reset_ic(self):
49 | self._z_hat = self._z_0
50 |
51 | def update(self, t, phi, a_fc):
52 | self._t_list.append(t)
53 | self._phi_list.append(phi.tolist())
54 | self._a_fc_list.append(a_fc.tolist())
55 |
56 | accel_z_bias = None
57 | if len(self._t_list) == self._num_samples_to_keep:
58 | self._t_list = self._t_list[1:]
59 | self._phi_list = self._phi_list[1:]
60 | self._a_fc_list = self._a_fc_list[1:]
61 |
62 | # TODO NO!
63 | t = np.array(self._t_list)
64 |
65 | phis = np.array(self._phi_list)
66 |
67 | phis_x = phis[:, 0]
68 | phis_y = phis[:, 1]
69 | phis_z = phis[:, 2]
70 |
71 | a_fc = np.array(self._a_fc_list)
72 | a_x = a_fc[:, 0]
73 | a_y = a_fc[:, 1]
74 | a_z = a_fc[:, 2]
75 |
76 | (accel_x_z_tf, accel_x_dz_tf, accel_x_bias), x_res = accel_x_phi_constraint_tf(t, phis_x, phis_z, a_x)
77 |             if not np.isnan(accel_x_z_tf):
78 | a_x_power = np.linalg.norm(a_x-accel_x_bias) / np.sqrt(self._num_samples_to_keep)
79 | else:
80 | a_x_power = 0.0
81 | print('x singular')
82 |
83 | (accel_y_z_tf, accel_y_dz_tf, accel_y_bias), y_res = accel_x_phi_constraint_tf(t, phis_y, phis_z, a_y)
84 |             if not np.isnan(accel_y_z_tf):
85 | a_y_power = np.linalg.norm(a_y-accel_y_bias) / np.sqrt(self._num_samples_to_keep)
86 | else:
87 | a_y_power = 0.0
88 | print('y singular')
89 |
90 | (accel_z_z_tf, accel_z_dz_tf, accel_z_bias), z_res = accel_z_phi_constraint_tf(t, phis_z, a_z)
91 |             if not np.isnan(accel_z_z_tf):
92 | a_z_power = np.linalg.norm(a_z-accel_z_bias) / np.sqrt(self._num_samples_to_keep)
93 | else:
94 | a_z_power = 0.0
95 | print('z singular')
96 | else:
97 | return None
98 |
99 | accel_z_hat = 0.0
100 | accel_z_dot_hat = 0.0
101 | num_feedback = 0
102 | if feedback_good(a_x_power, accel_x_z_tf, self._max_z, self._accel_power_thresh):
103 | accel_z_hat += accel_x_z_tf
104 | accel_z_dot_hat += accel_x_dz_tf
105 | num_feedback += 1
106 | else:
107 | accel_x_z_tf = 0.0
108 |
109 | if feedback_good(a_y_power, accel_y_z_tf, self._max_z, self._accel_power_thresh):
110 | accel_z_hat += accel_y_z_tf
111 | accel_z_dot_hat += accel_y_dz_tf
112 | num_feedback += 1
113 | else:
114 | accel_y_z_tf = 0.0
115 |
116 | if feedback_good(a_z_power, accel_z_z_tf, self._max_z, self._accel_power_thresh):
117 | accel_z_hat += accel_z_z_tf
118 | accel_z_dot_hat += accel_z_dz_tf
119 | num_feedback += 1
120 | else:
121 | accel_z_z_tf = 0.0
122 |
123 | if num_feedback > 0 and accel_z_bias is not None and self._z_hat is not None:
124 | accel_z_hat /= num_feedback
125 | accel_z_dot_hat /= num_feedback
126 |
127 | e = np.array((accel_z_hat, accel_z_dot_hat)) - self._z_hat
128 |
129 | z_hat_dot = self._A @ self._z_hat + self._B * (a_z[-1]-accel_z_bias) + self._L @ e
130 |
131 | self._z_hat = self._z_hat + z_hat_dot * self._dt
132 | elif num_feedback > 0 and self._z_hat is None:
133 | accel_z_hat /= num_feedback
134 | accel_z_dot_hat /= num_feedback
135 |
136 | # _min_initial_z is to filter an insane spike at the beginning of one sequence
137 | if accel_z_hat > self._min_initial_z:
138 | self._z_hat = np.array((accel_z_hat, accel_z_dot_hat))
139 | else:
140 | return None
141 |
142 | elif self._z_hat is not None:
143 | new_z = self._z_hat[0] * phi[2] / phis[-2, 2]
144 | new_dz = (new_z - self._z_hat[0]) / self._dt
145 | self._z_hat = np.array((new_z, new_dz))
146 | else:
147 | return None
148 |
149 | if self._z_hat[0] > self._max_z:
150 | self._z_hat[0] = self._max_z
151 |
152 | return self._z_hat[0], accel_x_z_tf, accel_y_z_tf, accel_z_z_tf
153 |
154 |
--------------------------------------------------------------------------------
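
When the residual acceleration is strong enough (`feedback_good`), the update above blends the batch least-squares estimates with integrated acceleration through a Luenberger-style observer. As a sketch, in continuous time and with the constants set in `__init__`, the correction step is:

```latex
\dot{\hat{x}} = A\hat{x} + B\,\bigl(a_z - \hat{b}_z\bigr) + L\,\bigl(x_{\mathrm{meas}} - \hat{x}\bigr),
\qquad
A = \begin{pmatrix} 0 & 1 \\ 0 & 0 \end{pmatrix},\quad
B = \begin{pmatrix} 0 \\ 1 \end{pmatrix},\quad
L = \begin{pmatrix} 2 & 0 \\ 0 & 20 \end{pmatrix}
```

where `x_hat = (z_hat, z_dot_hat)`, `b_z` is the accelerometer bias from the least squares, and `x_meas` stacks the batch depth and depth-rate estimates; the code integrates this with a forward-Euler step of length `dt`.
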
/robomaster_pickle_mat.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: robomaster_pickle_mat.py
4 | # Available under MIT license
5 | #
6 | # Export recordings made by ttc_depth_robomaster.py to Matlab .mat files
7 | # for plotting
8 | #
9 | # History:
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import pickle
15 | from scipy.io import savemat
16 | import os
17 | import glob
18 | import numpy as np
19 |
20 | BASE_RESULTS_PATH = '../robomaster_recordings'
21 |
22 | if __name__ == '__main__':
23 | recordings = sorted(glob.glob(os.path.join(BASE_RESULTS_PATH, 'record_*')))
24 |
25 | for recording in recordings:
26 | print('Processing: {}'.format(recording))
27 | with open(os.path.join(recording, 'imu.pickle'), 'rb') as f:
28 | p = pickle.load(f)
29 |
30 | dictionary = {}
31 | for key in p:
32 | dictionary[key] = np.array(p[key])
33 |
34 | savemat(os.path.join(recording, 'imu.mat'), dictionary)
--------------------------------------------------------------------------------
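
A quick round-trip check (a sketch; the `record_000000` path is hypothetical) that the exported `.mat` file matches the source pickle:

```python
import pickle
from scipy.io import loadmat

rec = '../robomaster_recordings/record_000000'  # hypothetical recording dir
with open(rec + '/imu.pickle', 'rb') as f:
    p = pickle.load(f)

m = loadmat(rec + '/imu.mat')
print(sorted(p.keys()))
print(sorted(k for k in m if not k.startswith('__')))  # same keys expected
```
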
/ros/README.md:
--------------------------------------------------------------------------------
1 | # Docker containers for other VIO methods
2 |
3 | ## Host setup
4 | * Install docker
5 |   * https://docs.docker.com/engine/install/ubuntu/
6 |   * https://docs.docker.com/engine/install/linux-postinstall/
7 | * Install rocker https://github.com/osrf/rocker
8 | * Install nvidia-docker 2 https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker
9 |
10 | ## To run
11 | * ./build.sh
12 | * ./run.sh
13 |
--------------------------------------------------------------------------------
/ros/VINS-Mono/Dockerfile:
--------------------------------------------------------------------------------
1 | # Adapted from:
2 | # https://github.com/HKUST-Aerial-Robotics/VINS-Mono/blob/master/docker/Dockerfile
3 |
4 | FROM ros:kinetic-perception
5 |
6 | ENV CERES_VERSION="1.12.0"
7 | ENV CATKIN_WS=/root/catkin_ws
8 |
9 | # set up thread number for building
10 | RUN if [ "x$(nproc)" = "x1" ] ; then export USE_PROC=1 ; \
11 | else export USE_PROC=$(($(nproc)/2)) ; fi && \
12 | apt-get update && apt-get install -y \
13 | cmake \
14 | libatlas-base-dev \
15 | libeigen3-dev \
16 | libgoogle-glog-dev \
17 | libsuitesparse-dev \
18 | python-catkin-tools \
19 | ros-${ROS_DISTRO}-cv-bridge \
20 | ros-${ROS_DISTRO}-image-transport \
21 | ros-${ROS_DISTRO}-message-filters \
22 | ros-${ROS_DISTRO}-tf && \
23 | rm -rf /var/lib/apt/lists/* && \
24 | # Build and install Ceres
25 | git clone https://ceres-solver.googlesource.com/ceres-solver && \
26 | cd ceres-solver && \
27 | git checkout tags/${CERES_VERSION} && \
28 | mkdir build && cd build && \
29 | cmake .. && \
30 |     make -j${USE_PROC} install && \
31 | rm -rf ../../ceres-solver && \
32 | mkdir -p $CATKIN_WS/src
33 |
34 | # Clone VINS-Mono
35 | WORKDIR $CATKIN_WS
36 |
37 | RUN cd src && \
38 | git clone --depth 1 https://github.com/HKUST-Aerial-Robotics/VINS-Mono.git
39 |
40 | # Build VINS-Mono
41 | ENV TERM xterm
42 | ENV PYTHONIOENCODING UTF-8
43 | RUN catkin config \
44 | --extend /opt/ros/$ROS_DISTRO \
45 | --cmake-args \
46 | -DCMAKE_BUILD_TYPE=Release && \
47 | catkin build && \
48 | sed -i '/exec "$@"/i \
49 | source "/root/catkin_ws/devel/setup.bash"' /ros_entrypoint.sh
50 |
51 | RUN apt-get update && \
52 | apt-get install -y ros-${ROS_DISTRO}-rviz python-numpy python-scipy python3-numpy python3-scipy python3-yaml
53 |
--------------------------------------------------------------------------------
/ros/VINS-Mono/attach.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker exec -it vins_mono bash
--------------------------------------------------------------------------------
/ros/VINS-Mono/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build --tag ros:vins-mono -f ./Dockerfile .
3 |
--------------------------------------------------------------------------------
/ros/VINS-Mono/realsense_435i.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/ros/VINS-Mono/realsense_435i_config.yaml:
--------------------------------------------------------------------------------
1 | %YAML:1.0
2 |
3 | #common parameters
4 | imu_topic: "/imu"
5 | image_topic: "/frames"
6 | output_path: "/home/levi/NormalFlyt/time_to_contact_depth/recordings/vins_mono_out"
7 |
8 | #camera calibration
9 | model_type: PINHOLE
10 | camera_name: camera
11 | image_width: 848
12 | image_height: 480
13 | distortion_parameters:
14 | k1: 0.0
15 | k2: 0.0
16 | p1: 0.0
17 | p2: 0.0
18 | projection_parameters:
19 | fx: 423.5357
20 | fy: 423.5357
21 | cx: 429.74478
22 | cy: 239.89632
23 |
24 | # Extrinsic parameter between IMU and Camera.
25 | estimate_extrinsic: 0 # 0 Accurate extrinsic parameters are provided. The following imu^R_cam, imu^T_cam will be trusted; don't change them.
26 |                        # 1 An initial guess of the extrinsic parameters is provided. We will optimize around your initial guess.
27 |                        # 2 Nothing is known about the extrinsic parameters. You don't need to give R,T; we will try to calibrate them. Do some rotational movement at the beginning.
28 | #If you choose 0 or 1, you should write down the following matrix.
29 | #Rotation from camera frame to imu frame, imu^R_cam
30 |
31 | # Self-reported extrinsics from the RealSense, from IMU to infrared
32 | # accel extrinsics rotation: [1, 0, 0, 0, 1, 0, 0, 0, 1]
33 | # translation: [0.00552, -0.0051, -0.01174]
34 | # gyro extrinsics rotation: [1, 0, 0, 0, 1, 0, 0, 0, 1]
35 | # translation: [0.00552, -0.0051, -0.01174]
36 | extrinsicRotation: !!opencv-matrix
37 | rows: 3
38 | cols: 3
39 | dt: d
40 | data: [1, 0, 0,
41 | 0, 1, 0,
42 | 0, 0, 1]
43 | #Translation from camera frame to imu frame, imu^T_cam
44 | extrinsicTranslation: !!opencv-matrix
45 | rows: 3
46 | cols: 1
47 | dt: d
48 | data: [-0.00552, 0.0051, 0.01174]
49 |
50 | #feature tracker parameters
51 | max_cnt: 120 # max feature number in feature tracking
52 | min_dist: 30 # min distance between two features
53 | freq: 100 # frequency (Hz) at which the tracking result is published. At least 10 Hz for good estimation. If set to 0, the frequency will be the same as the raw image
54 | F_threshold: 1.0 # ransac threshold (pixel)
55 | show_track: 0 # publish tracking image as topic
56 | equalize: 0 # if the image is too dark or too bright, turn on equalize to find enough features
57 | fisheye: 0 # if using a fisheye lens, turn this on. A circular mask will be loaded to remove noisy edge points
58 |
59 | #optimization parameters
60 | max_solver_time: 0.04 # max solver iteration time (ms), to guarantee real time
61 | max_num_iterations: 8 # max solver iterations, to guarantee real time
62 | keyframe_parallax: 10.0 # keyframe selection threshold (pixel)
63 |
64 | #imu parameters. The more accurate the parameters you provide, the better the performance
65 | acc_n: 0.08 # accelerometer measurement noise standard deviation. #0.2 0.04
66 | gyr_n: 0.004 # gyroscope measurement noise standard deviation. #0.05 0.004
67 | acc_w: 0.00004 # accelerometer bias random walk noise standard deviation. #0.02
68 | gyr_w: 2.0e-6 # gyroscope bias random walk noise standard deviation. #4.0e-5
69 | g_norm: 9.805 # gravity magnitude
70 |
71 | #loop closure parameters
72 | loop_closure: 0 # start loop closure
73 | fast_relocalization: 1 # useful in real-time and large projects
74 | load_previous_pose_graph: 0 # load and reuse previous pose graph; load from 'pose_graph_save_path'
75 | pose_graph_save_path: "/home/levi/NormalFlyt/time_to_contact_depth/recordings/vins_mono_out/pose_graph" # save and load path
76 |
77 | #unsynchronization parameters
78 | estimate_td: 0 # online estimate time offset between camera and imu
79 | td: 0.000 # initial value of time offset. unit: s. read image clock + td = real image clock (IMU clock)
80 |
81 | #rolling shutter parameters
82 | rolling_shutter: 0 # 0: global shutter camera, 1: rolling shutter camera
83 | rolling_shutter_tr: 0 # unit: s. rolling shutter read out time per frame (from data sheet).
84 |
85 | #visualization parameters
86 | save_image: 0 # save images in the pose graph for visualization purposes; set to 0 to disable
87 | visualize_imu_forward: 0 # output imu forward propagation to achieve low-latency and high-frequency results
88 | visualize_camera_size: 0.4 # size of camera marker in RVIZ
89 |
--------------------------------------------------------------------------------
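
The `projection_parameters` above are the pinhole intrinsics that the Python scripts in this repository consume as a 3x3 matrix `K`; a sketch of the correspondence:

```python
import numpy as np

# fx, fy, cx, cy from realsense_435i_config.yaml
K = np.array([[423.5357,      0.0, 429.74478],
              [     0.0, 423.5357, 239.89632],
              [     0.0,      0.0,       1.0]])
```
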
/ros/VINS-Mono/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | rocker --volume ../../../../time_to_contact_depth --nvidia --x11 --name vins_mono ros:vins-mono bash
4 |
5 | # docker run -it --rm ros:vins-mono \
6 | # /bin/bash -c \
7 | # "cd /root/catkin_ws/; \
8 | # source devel/setup.bash; \
9 | # roslaunch vins_estimator ${1}"
10 |
--------------------------------------------------------------------------------
/ros/VINS-Mono/vins_rviz.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/ros/ros_convert_rovio.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ros_convert_rovio.py
4 | #
5 | # Record ROVIO output to a pickle file for comparison
6 | #
7 | # History:
8 | # 11-09-20 - Levi Burner - Created file
9 | #
10 | ###############################################################################
11 |
12 | import argparse
13 | import json
14 | import os
15 | import glob
16 | import pickle
17 | import time
18 |
19 | import numpy as np
20 |
21 | import rospy
22 | from nav_msgs.msg import Odometry
23 | import rosbag
24 | import pickle
25 |
26 | last_timestamp = None
27 | all_recorded = False
28 |
29 | def odometry_callback(odom):
30 | global last_timestamp
31 | global all_recorded
32 |
33 | if all_recorded:
34 | print('All recorded')
35 | return
36 |
37 | p = [odom.pose.pose.position.x,
38 | odom.pose.pose.position.y,
39 | odom.pose.pose.position.z]
40 | t = odom.header.stamp.to_sec()
41 |
42 | if last_timestamp is None:
43 | last_timestamp = t
44 |
45 | if t < last_timestamp:
46 | all_recorded = True
47 |
48 | print(t, p)
49 |
50 | if __name__ == '__main__':
51 | parser = argparse.ArgumentParser()
52 | parser.add_argument('--rec', dest='recording', type=str, help='Bag file')
53 | #parser.add_argument('--allspeed', dest='allspeed', type=str, help='Run over all to calculate average speed')
54 | parser.add_argument('--allspeed', dest='allspeed', action='store_true', help='Run over all to calculate average speed')
55 |
56 | args = parser.parse_args()
57 |
58 |
59 | if not args.allspeed:
60 | record_dir = os.path.dirname(args.recording)
61 | recording = os.path.basename(args.recording)
62 |
63 | bag = rosbag.Bag(os.path.join(record_dir, recording))
64 |
65 | poses = []
66 | message_t = []
67 | for topic, odom, t in bag.read_messages(topics=['/rovio/odometry']):
68 | x = [odom.header.stamp.to_sec(),
69 | odom.pose.pose.position.x,
70 | odom.pose.pose.position.y,
71 | odom.pose.pose.position.z]
72 | poses.append(x)
73 | message_t.append(t.to_sec())
74 |
75 | poses = np.array(poses)
76 | message_t = np.array(message_t)
77 |
78 | print('================================')
79 | print(1.0 / np.mean(np.gradient(message_t)))
80 | print(1.0 / np.mean(np.gradient(poses[:, 0])))
81 |
82 |
83 | with open(os.path.join(record_dir, 'results.pickle'), 'wb') as file:
84 | pickle.dump({'poses': np.array(poses)}, file, protocol=2)
85 |
86 | bag.close()
87 | else:
88 | BASE_RESULTS_PATH = 'results'
89 | bags = sorted(glob.glob('results/record_*/rovio/*.bag'))
90 |
91 | print(bags)
92 |
93 | total_poses = 0.0
94 | total_pose_time_span = 0.0
95 | total_message_time_span = 0.0
96 | for bag_name in bags:
97 | bag = rosbag.Bag(bag_name)
98 |
99 | poses = []
100 | message_t = []
101 | for topic, odom, t in bag.read_messages(topics=['/rovio/odometry']):
102 | x = [odom.header.stamp.to_sec(),
103 | odom.pose.pose.position.x,
104 | odom.pose.pose.position.y,
105 | odom.pose.pose.position.z]
106 | poses.append(x)
107 | message_t.append(t.to_sec())
108 | bag.close()
109 |
110 | poses = np.array(poses)
111 | message_t = np.array(message_t)
112 |
113 | total_poses += poses.shape[0]
114 | total_pose_time_span += poses[-1,0]-poses[0,0]
115 | total_message_time_span += message_t[-1] - message_t[0]
116 |
117 | print('================================')
118 | #print('Pose HZ'.format((poses[-1,0]-poses[0,0]) / poses.shape[0]))
119 | #print('Pose HZ'.format((poses[-1,0]-poses[0,0]) / poses.shape[0]))
120 | #print(1.0 / np.mean(np.gradient(message_t)))
121 | #print(1.0 / np.mean(np.gradient(poses[:, 0])))
122 |
123 |
124 | print('============ Overall ==============')
125 | print('Total pose messages {}'.format(total_poses))
126 | print('Total wall time of poses {}'.format(total_pose_time_span))
127 | print('Total time to generate poses {}'.format(total_message_time_span))
128 | print('Times realtime {}'.format(total_pose_time_span/total_message_time_span))
129 | #print(1.0 / np.mean(np.gradient(message_t)))
130 | #print(1.0 / np.mean(np.gradient(poses[:, 0])))
--------------------------------------------------------------------------------
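
A sketch for consuming the `results.pickle` written above; rows of `poses` are `[t, x, y, z]` and the path is hypothetical:

```python
import pickle
import numpy as np

with open('results/record_000000/rovio/results.pickle', 'rb') as f:
    poses = pickle.load(f)['poses']

t, xyz = poses[:, 0], poses[:, 1:4]
print('duration [s]:', t[-1] - t[0])
print('mean rate [Hz]:', 1.0 / np.mean(np.diff(t)))
```
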
/ros/ros_convert_vins_mono.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ros_convert_vins_mono.py
4 | #
5 | # Record VINS-Mono output to a pickle file for comparison
6 | #
7 | # History:
8 | # 11-09-20 - Levi Burner - Created file
9 | #
10 | ###############################################################################
11 |
12 | import argparse
13 | import json
14 | import os
15 | import glob
16 | import pickle
17 | import time
18 |
19 | import numpy as np
20 |
21 | import rospy
22 | from nav_msgs.msg import Odometry
23 | import rosbag
24 | import pickle
25 |
26 | last_timestamp = None
27 | all_recorded = False
28 |
29 | def odometry_callback(odom):
30 | global last_timestamp
31 | global all_recorded
32 |
33 | if all_recorded:
34 | print('All recorded')
35 | return
36 |
37 | p = [odom.pose.pose.position.x,
38 | odom.pose.pose.position.y,
39 | odom.pose.pose.position.z]
40 | t = odom.header.stamp.to_sec()
41 |
42 | if last_timestamp is None:
43 | last_timestamp = t
44 |
45 | if t < last_timestamp:
46 | all_recorded = True
47 |
48 | print(t, p)
49 |
50 | if __name__ == '__main__':
51 | parser = argparse.ArgumentParser()
52 | parser.add_argument('--rec', dest='recording', type=str, help='Bag file')
53 | #parser.add_argument('--allspeed', dest='allspeed', type=str, help='Run over all to calculate average speed')
54 | parser.add_argument('--allspeed', dest='allspeed', action='store_true', help='Run over all to calculate average speed')
55 |
56 | args = parser.parse_args()
57 |
58 |
59 | if not args.allspeed:
60 | record_dir = os.path.dirname(args.recording)
61 | recording = os.path.basename(args.recording)
62 |
63 | bag = rosbag.Bag(os.path.join(record_dir, recording))
64 |
65 | poses = []
66 | message_t = []
67 | for topic, odom, t in bag.read_messages(topics=['/vins_estimator/odometry']):
68 | x = [odom.header.stamp.to_sec(),
69 | odom.pose.pose.position.x,
70 | odom.pose.pose.position.y,
71 | odom.pose.pose.position.z]
72 | poses.append(x)
73 | message_t.append(t.to_sec())
74 |
75 | poses = np.array(poses)
76 | message_t = np.array(message_t)
77 |
78 | print('================================')
79 | print(1.0 / np.mean(np.gradient(message_t)))
80 | print(1.0 / np.mean(np.gradient(poses[:, 0])))
81 |
82 |
83 | with open(os.path.join(record_dir, 'results.pickle'), 'wb') as file:
84 | pickle.dump({'poses': np.array(poses)}, file, protocol=2)
85 |
86 | bag.close()
87 | else:
88 | BASE_RESULTS_PATH = 'results'
89 | bags = sorted(glob.glob('results/record_*/vins_mono/*.bag'))
90 |
91 | print(bags)
92 |
93 |         total_poses = 0
94 | total_pose_time_span = 0.0
95 | total_message_time_span = 0.0
96 | for bag_name in bags:
97 | bag = rosbag.Bag(bag_name)
98 |
99 | poses = []
100 | message_t = []
101 | for topic, odom, t in bag.read_messages(topics=['/vins_estimator/odometry']):
102 | x = [odom.header.stamp.to_sec(),
103 | odom.pose.pose.position.x,
104 | odom.pose.pose.position.y,
105 | odom.pose.pose.position.z]
106 | poses.append(x)
107 | message_t.append(t.to_sec())
108 | bag.close()
109 |
110 | poses = np.array(poses)
111 | message_t = np.array(message_t)
112 |
113 | total_poses += poses.shape[0]
114 | total_pose_time_span += poses[-1,0]-poses[0,0]
115 | total_message_time_span += message_t[-1] - message_t[0]
116 |
117 | print('================================')
118 |         #print('Pose period {} s'.format((poses[-1,0]-poses[0,0]) / poses.shape[0]))
119 |         #print('Pose Hz {}'.format(poses.shape[0] / (poses[-1,0]-poses[0,0])))
120 | #print(1.0 / np.mean(np.gradient(message_t)))
121 | #print(1.0 / np.mean(np.gradient(poses[:, 0])))
122 |
123 |
124 | print('============ Overall ==============')
125 | print('Total pose messages {}'.format(total_poses))
126 | print('Total wall time of poses {}'.format(total_pose_time_span))
127 | print('Total time to generate poses {}'.format(total_message_time_span))
128 | print('Times realtime {}'.format(total_pose_time_span/total_message_time_span))
129 | #print(1.0 / np.mean(np.gradient(message_t)))
130 | #print(1.0 / np.mean(np.gradient(poses[:, 0])))
--------------------------------------------------------------------------------
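Note: the --allspeed pass reports "Times realtime", the span of the pose timestamps (sensor
time covered) divided by the span of the bag-message receive times (wall time spent producing
them); a ratio above 1 means poses were generated faster than real time. A minimal sketch of
the same ratio, using hypothetical timestamp arrays:

    import numpy as np

    poses_t = np.linspace(0.0, 60.0, 1200)       # hypothetical pose timestamps (sensor time) [s]
    message_t = np.linspace(100.0, 130.0, 1200)  # hypothetical receive times (wall clock) [s]

    times_realtime = (poses_t[-1] - poses_t[0]) / (message_t[-1] - message_t[0])
    print(times_realtime)  # 2.0 -> poses were produced twice as fast as real time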
/ros/ros_publish_folder_recording.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ros_publish_folder_recording.py
4 | #
5 | # Publish a recording to ROS
6 | #
7 | # History:
8 | # 11-08-20 - Levi Burner - Created file
9 | #
10 | ###############################################################################
11 |
12 | import argparse
13 | import json
14 | import os
15 | import glob
16 | import pickle
17 | import time
18 |
19 | import numpy as np
20 | from scipy import interpolate
21 |
22 | from utilities import latest_recording_dir
23 |
24 | import rospy
25 | from sensor_msgs.msg import Image, Imu
26 | from cv_bridge import CvBridge, CvBridgeError
27 |
28 | class RecordedIMUSource(object):
29 | def __init__(self, record_dir):
30 | sensor_data = pickle.load(open(os.path.join(record_dir, 'imu.pickle'), 'rb'))
31 | self._accel_samples = sensor_data['accel']
32 | self._gyro_samples = sensor_data['gyro']
33 | self._sample_index = 0
34 |
35 | # TODO resampling like this should occur live as well
36 | interpolater = interpolate.interp1d(self._accel_samples[:, 0],
37 | self._accel_samples[:, 1:4],
38 | axis=0,
39 | bounds_error=False,
40 | fill_value='extrapolate')
41 | accel_samples_interpolated = interpolater(self._gyro_samples[:, 0])
42 |
43 | self._imu_samples = np.hstack((np.atleast_2d(self._gyro_samples[:, 0]).transpose(),
44 | accel_samples_interpolated,
45 | self._gyro_samples[:, 1:4]))
46 |
47 | def earliest_timestamp(self):
48 | if self._sample_index >= len(self._imu_samples):
49 | return None
50 | return self._imu_samples[0, 0]
51 |
52 | def latest_timestamp(self):
53 | if self._sample_index >= len(self._imu_samples):
54 | return None
55 | return self._imu_samples[-1, 0]
56 |
57 | def next_sample(self):
58 | if self._sample_index >= len(self._imu_samples):
59 | return None
60 |
61 | sample = self._imu_samples[self._sample_index]
62 | self._sample_index += 1
63 | return sample
64 |
65 | def preprocess_image(frame):
66 | return frame
67 |
68 | class RecordedFrameSource(object):
69 | def __init__(self, record_dir, preload=False, preprocess=False):
70 | self._preload = preload
71 | self._preprocess = preprocess
72 |
73 | self._frame_names = sorted(list(glob.glob(os.path.join(record_dir, 'images/frame_*.npy'))))
74 |
75 | frame_metadata = pickle.load(open(os.path.join(record_dir, 'frame_metadata.pickle'), 'rb'))
76 | self._frame_ts = frame_metadata['ts']
77 |
78 | self._sample_index = 0
79 |
80 | if self._preload:
81 | if self._preprocess:
82 | self._preloaded_frames = [preprocess_image(np.load(frame_name)) for frame_name in self._frame_names]
83 | else:
84 | self._preloaded_frames = [np.load(frame_name) for frame_name in self._frame_names]
85 |
86 | def earliest_timestamp(self):
87 | return self._frame_ts[0]
88 |
89 | def latest_timestamp(self):
90 | if self._sample_index >= len(self._frame_ts):
91 | return None
92 | return self._frame_ts[-1]
93 |
94 | def next_sample(self):
95 | if self._sample_index >= len(self._frame_ts):
96 | return None
97 |
98 | if not self._preload:
99 | frame = np.load(self._frame_names[self._sample_index])
100 | frame_gray = preprocess_image(frame)
101 | sample = (self._frame_ts[self._sample_index], frame_gray)
102 | else:
103 | if not self._preprocess:
104 | sample = (self._frame_ts[self._sample_index], preprocess_image(self._preloaded_frames[self._sample_index]))
105 | else:
106 | sample = (self._frame_ts[self._sample_index], self._preloaded_frames[self._sample_index])
107 |
108 | self._sample_index += 1
109 | return sample
110 |
111 | def free_sample(self):
112 | pass
113 |
114 | class ROSFrameIMUPublisher(object):
115 |     def __init__(self, frame_source, imu_source):
116 | self._frame_source = frame_source
117 | self._imu_source = imu_source
118 |
119 | self._image_pub = rospy.Publisher('frames', Image, queue_size=10)
120 | self._imu_pub = rospy.Publisher('imu', Imu, queue_size=10)
121 |
122 | frame_first_timestamp = self._frame_source.earliest_timestamp()
123 | imu_first_timestamp = self._imu_source.earliest_timestamp()
124 | self._first_timestamp = min(frame_first_timestamp, imu_first_timestamp)
125 |
126 |
127 | frame_last_timestamp = self._frame_source.latest_timestamp()
128 | imu_last_timestamp = self._imu_source.latest_timestamp()
129 | self._last_timestamp = max(frame_last_timestamp, imu_last_timestamp)
130 |
131 | self._first_ros_time = rospy.get_rostime().to_sec()
132 | self._time_delta = self._first_ros_time - self._first_timestamp
133 | self._rate_limiter = rospy.Rate(1000)
134 |
135 | self._next_frame = None
136 | self._next_imu = None
137 |
138 | def run(self):
139 | while not rospy.is_shutdown():
140 | now = rospy.get_rostime().to_sec()
141 | sensor_time = now - self._time_delta
142 |
143 | if sensor_time > self._last_timestamp:
144 | break
145 |
146 | if self._next_frame is None:
147 | self._next_frame = self._frame_source.next_sample()
148 |
149 | if self._next_frame is not None:
150 | if self._next_frame[0] < sensor_time:
151 | image_msg = CvBridge().cv2_to_imgmsg(self._next_frame[1])
152 | image_msg.header.stamp = rospy.Time.from_sec(self._next_frame[0])
153 | self._image_pub.publish(image_msg)
154 |
155 | self._next_frame = None
156 |
157 | if self._next_imu is None:
158 | self._next_imu = self._imu_source.next_sample()
159 |
160 | if self._next_imu is not None:
161 | if self._next_imu[0] < sensor_time:
162 | imu_msg = Imu()
163 | imu_msg.header.stamp = rospy.Time.from_sec(self._next_imu[0])
164 | imu_msg.angular_velocity.x = self._next_imu[4]
165 | imu_msg.angular_velocity.y = self._next_imu[5]
166 | imu_msg.angular_velocity.z = self._next_imu[6]
167 | imu_msg.linear_acceleration.x = self._next_imu[1]
168 | imu_msg.linear_acceleration.y = self._next_imu[2]
169 | imu_msg.linear_acceleration.z = self._next_imu[3]
170 |
171 | self._imu_pub.publish(imu_msg)
172 |
173 | self._next_imu = None
174 |
175 | self._rate_limiter.sleep()
176 |
177 | if __name__ == '__main__':
178 | parser = argparse.ArgumentParser()
179 | parser.add_argument('--dir', dest='dir', type=str, help='Directory to load data from. If not specified the latest in the default location is used')
180 | parser.add_argument('--visualize', dest='visualize', action='store_true', help='Visualize')
181 | parser.add_argument('--wait', dest='wait', action='store_true', help='Wait for key press when visualizing')
182 | parser.add_argument('--april', dest='april', action='store_true', help='Use apriltag for ground truth')
183 | parser.add_argument('--preload', dest='preload', action='store_true', help='Read all images before processing for benchmarking')
184 | parser.add_argument('--preprocess', dest='preprocess', action='store_true', help='Preprocess all images when preloading')
185 |     parser.add_argument('--nopublish', dest='nopublish', action='store_true', help='Do not publish with ZMQ')
186 |     parser.add_argument('--bench', dest='bench', action='store_true', help='Enable preload, nopublish, and other benchmarking options')
187 | parser.add_argument('--realtime', dest='realtime', action='store_true', help='Test ability to run in realtime with april feedback')
188 |
189 | args = parser.parse_args()
190 |
191 | if not args.dir:
192 | record_dir = latest_recording_dir()
193 | else:
194 | record_dir = args.dir
195 |
196 | rospy.init_node('ttc_recording_pub')
197 |
198 | frame_source = RecordedFrameSource(record_dir, preload=args.preload, preprocess=args.preprocess)
199 | imu_source = RecordedIMUSource(record_dir)
200 | ros_publisher = ROSFrameIMUPublisher(frame_source, imu_source)
201 |
202 | ros_publisher.run()
203 |
--------------------------------------------------------------------------------
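Note: RecordedIMUSource interpolates the accelerometer samples onto the gyroscope timestamps
so that each published Imu message carries both measurements at a single time. A minimal
sketch of that resampling step, using hypothetical sample arrays:

    import numpy as np
    from scipy import interpolate

    # Hypothetical samples: column 0 is time [s], columns 1:4 are the measurement
    accel = np.array([[0.00, 0.0, 0.0, 9.8],
                      [0.10, 0.1, 0.0, 9.8],
                      [0.20, 0.2, 0.0, 9.8]])
    gyro_t = np.array([0.00, 0.05, 0.10, 0.15, 0.20])

    interp = interpolate.interp1d(accel[:, 0], accel[:, 1:4], axis=0,
                                  bounds_error=False, fill_value='extrapolate')
    accel_on_gyro_t = interp(gyro_t)  # one interpolated accel row per gyro timestamp
    print(accel_on_gyro_t.shape)      # (5, 3)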
/ros/rovio/.dockerignore:
--------------------------------------------------------------------------------
1 | recordings/*
2 |
--------------------------------------------------------------------------------
/ros/rovio/Dockerfile:
--------------------------------------------------------------------------------
1 | # Adapted from:
2 | # https://github.com/HKUST-Aerial-Robotics/VINS-Mono/blob/master/docker/Dockerfile
3 |
4 | FROM ros:kinetic-perception
5 |
6 | #ENV CERES_VERSION="1.12.0"
7 | ENV CATKIN_WS=/root/catkin_ws
8 |
9 | # set up thread number for building
10 | RUN if [ "x$(nproc)" = "x1" ] ; then export USE_PROC=1 ; \
11 | else export USE_PROC=$(($(nproc)/2)) ; fi && \
12 | apt-get update && apt-get install -y \
13 | cmake \
14 | libatlas-base-dev \
15 | libeigen3-dev \
16 | libgoogle-glog-dev \
17 | libsuitesparse-dev \
18 | python-catkin-tools \
19 | ros-${ROS_DISTRO}-cv-bridge \
20 | ros-${ROS_DISTRO}-image-transport \
21 | ros-${ROS_DISTRO}-message-filters \
22 | ros-${ROS_DISTRO}-tf && \
23 | rm -rf /var/lib/apt/lists/* && \
24 | mkdir -p $CATKIN_WS/src
25 |
26 | # Build and install Ceres
27 | #git clone https://ceres-solver.googlesource.com/ceres-solver && \
28 | #cd ceres-solver && \
29 | #git checkout tags/${CERES_VERSION} && \
30 | #mkdir build && cd build && \
31 | #cmake .. && \
32 | #make -j$(USE_PROC) install && \
33 | #rm -rf ../../ceres-solver && \
34 |
35 | # Clone Rovio, Kindr
36 | WORKDIR $CATKIN_WS
37 |
38 | RUN cd src && \
39 | git clone --depth 1 https://github.com/ethz-asl/rovio.git && \
40 | cd rovio && \
41 | git submodule update --init --recursive && \
42 | cd .. && \
43 | git clone --depth 1 https://github.com/ethz-asl/kindr.git
44 |
45 | # Build Rovio
46 | ENV TERM xterm
47 | ENV PYTHONIOENCODING UTF-8
48 | RUN catkin config \
49 | --extend /opt/ros/$ROS_DISTRO \
50 | --cmake-args \
51 | -DCMAKE_BUILD_TYPE=Release -DMAKE_SCENE=OFF && \
52 | catkin build && \
53 | sed -i '/exec "$@"/i \
54 | source "/root/catkin_ws/devel/setup.bash"' /ros_entrypoint.sh
55 |
56 | #RUN catkin build rovio --cmake-args -DCMAKE_BUILD_TYPE=Release
57 |
58 | RUN apt-get update && \
59 | apt-get install -y ros-${ROS_DISTRO}-rviz python-numpy python-scipy python3-numpy python3-scipy python3-yaml
60 |
--------------------------------------------------------------------------------
/ros/rovio/attach.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker exec -it rovio bash
--------------------------------------------------------------------------------
/ros/rovio/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | docker build --tag ros:rovio -f ./Dockerfile .
3 |
--------------------------------------------------------------------------------
/ros/rovio/d435i_cam0.yaml:
--------------------------------------------------------------------------------
1 | ###### Camera Calibration File For D435i ######
2 | image_width: 848
3 | image_height: 480
4 | camera_name: cam0
5 | camera_matrix:
6 | rows: 3
7 | cols: 3
8 | data: [423.5357, 0.0, 429.74478, 0.0, 423.5357, 239.89632, 0.0, 0.0, 1.0]
9 | distortion_model: plumb_bob
10 | distortion_coefficients:
11 | rows: 1
12 | cols: 5
13 | data: [0.0, 0.0, 0.0, 0.0, 0.0]
14 |
15 |
16 |
--------------------------------------------------------------------------------
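Note: with all distortion coefficients zero, the calibration above is a pure pinhole model, so
a 3-D point (X, Y, Z) in the camera frame projects to pixel (fx*X/Z + cx, fy*Y/Z + cy). A
minimal sketch using the matrix from the YAML and a hypothetical point:

    import numpy as np

    # Intrinsics from d435i_cam0.yaml (848x480, plumb_bob with zero distortion)
    K = np.array([[423.5357,   0.0,     429.74478],
                  [  0.0,     423.5357, 239.89632],
                  [  0.0,       0.0,      1.0    ]])

    X = np.array([0.1, -0.05, 2.0])  # hypothetical point in the camera frame [m]
    uvw = K @ X
    u, v = uvw[:2] / uvw[2]          # perspective divide
    print(u, v)                      # pixel coordinates, here ~(450.9, 229.3)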
/ros/rovio/d435i_rovio.info:
--------------------------------------------------------------------------------
1 | ; You can partially override parameters set in this file by creating your own subset of parameters in a separate info-file and including it with:
2 | ; #include "/home/user/workspace/rovio/cfg/rovio_custom.info"
3 | Common
4 | {
5 | doVECalibration true; Should the camera-IMU extrinsics be calibrated online
6 | depthType 1; Type of depth parametrization (0: normal, 1: inverse depth, 2: log, 3: hyperbolic)
7 |     verbose false; Is verbose output active
8 | }
9 | Camera0
10 | {
11 | CalibrationFile ; Camera-Calibration file for intrinsics
12 |     qCM_x 0.0; X-entry of IMU to Camera quaternion (Hamilton)
13 |     qCM_y 0.0; Y-entry of IMU to Camera quaternion (Hamilton)
14 |     qCM_z 0.0; Z-entry of IMU to Camera quaternion (Hamilton)
15 |     qCM_w 1.0; W-entry of IMU to Camera quaternion (Hamilton)
16 | MrMC_x 0.00552; X-entry of IMU to Camera vector (expressed in IMU CF) [m]
17 | MrMC_y -0.0051; Y-entry of IMU to Camera vector (expressed in IMU CF) [m]
18 | MrMC_z -0.01174; Z-entry of IMU to Camera vector (expressed in IMU CF) [m]
19 | }
20 | Camera1
21 | {
22 | CalibrationFile ; Camera-Calibration file for intrinsics
23 |     qCM_x 0.00151329637706; X-entry of IMU to Camera quaternion (Hamilton)
24 |     qCM_y -0.0123329249764; Y-entry of IMU to Camera quaternion (Hamilton)
25 |     qCM_z -0.702657352863; Z-entry of IMU to Camera quaternion (Hamilton)
26 |     qCM_w 0.711419885414; W-entry of IMU to Camera quaternion (Hamilton)
27 | MrMC_x -0.0091929160801; X-entry of IMU to Camera vector (expressed in IMU CF) [m]
28 | MrMC_y 0.0540646643676; Y-entry of IMU to Camera vector (expressed in IMU CF) [m]
29 | MrMC_z 0.0190387660614; Z-entry of IMU to Camera vector (expressed in IMU CF) [m]
30 | }
31 | Init
32 | {
33 | State
34 | {
35 | pos_0 0; X-entry of initial position (world to IMU in world) [m]
36 | pos_1 0; Y-entry of initial position (world to IMU in world) [m]
37 | pos_2 0; Z-entry of initial position (world to IMU in world) [m]
38 | vel_0 0; X-entry of initial velocity (robocentric, IMU) [m/s]
39 | vel_1 0; Y-entry of initial velocity (robocentric, IMU) [m/s]
40 | vel_2 0; Z-entry of initial velocity (robocentric, IMU) [m/s]
41 | acb_0 0; X-entry of accelerometer bias [m/s^2]
42 | acb_1 0; Y-entry of accelerometer bias [m/s^2]
43 | acb_2 0; Z-entry of accelerometer bias [m/s^2]
44 | gyb_0 0; X-entry of gyroscope bias [rad/s]
45 | gyb_1 0; Y-entry of gyroscope bias [rad/s]
46 | gyb_2 0; Z-entry of gyroscope bias [rad/s]
47 | att_x 0; X-entry of initial attitude (IMU to world, Hamilton)
48 | att_y 0; Y-entry of initial attitude (IMU to world, Hamilton)
49 | att_z 0; Z-entry of initial attitude (IMU to world, Hamilton)
50 | att_w 1; W-entry of initial attitude (IMU to world, Hamilton)
51 | }
52 | Covariance
53 | {
54 | pos_0 0.0001; X-Covariance of initial position [m^2]
55 | pos_1 0.0001; Y-Covariance of initial position [m^2]
56 | pos_2 0.0001; Z-Covariance of initial position [m^2]
57 | vel_0 1.0; X-Covariance of initial velocity [m^2/s^2]
58 | vel_1 1.0; Y-Covariance of initial velocity [m^2/s^2]
59 | vel_2 1.0; Z-Covariance of initial velocity [m^2/s^2]
60 | acb_0 4e-4; X-Covariance of initial accelerometer bias [m^2/s^4]
61 | acb_1 4e-4; Y-Covariance of initial accelerometer bias [m^2/s^4]
62 | acb_2 4e-4; Z-Covariance of initial accelerometer bias [m^2/s^4]
63 | gyb_0 3e-4; X-Covariance of initial gyroscope bias [rad^2/s^2]
64 | gyb_1 3e-4; Y-Covariance of initial gyroscope bias [rad^2/s^2]
65 | gyb_2 3e-4; Z-Covariance of initial gyroscope bias [rad^2/s^2]
66 | vep 0.0001; Covariance of initial linear camera-IMU extrinsics, same for all entries [m^2]
67 | att_0 0.1; X-Covariance of initial attitude [rad^2]
68 | att_1 0.1; Y-Covariance of initial attitude [rad^2]
69 | att_2 0.1; Z-Covariance of initial attitude [rad^2]
70 | vea 0.01; Covariance of initial rotational camera-IMU extrinsics, same for all entries [rad^2]
71 | }
72 | }
73 | ImgUpdate
74 | {
75 | updateVecNormTermination 1e-4;
76 | maxNumIteration 20;
77 | doPatchWarping true; Should the patches be warped
78 | doFrameVisualisation false; Should the frames be visualized
79 | visualizePatches false; Should the patches be visualized
80 | useDirectMethod true; Should the EKF-innovation be based on direct intensity error (o.w. reprojection error)
81 | startLevel 2; Highest patch level which is being employed (must be smaller than the hardcoded template parameter)
82 | endLevel 1; Lowest patch level which is being employed
83 | nDetectionBuckets 100; Number of discretization buckets used during the candidates selection
84 |     MahalanobisTh 9.21; Mahalanobis threshold for the update, 5.8858356
85 | UpdateNoise
86 | {
87 | pix 2; Covariance used for the reprojection error, 1/focal_length is roughly 1 pixel std [rad] (~1/f ~ 1/400^2 = 1/160000)
88 | int 400; Covariance used for the photometric error [intensity^2]
89 | }
90 | initCovFeature_0 0.5; Covariance for the initial distance (Relative to initialization depth [m^2/m^2])
91 | initCovFeature_1 1e-5; Covariance for the initial bearing vector, x-component [rad^2]
92 | initCovFeature_2 1e-5; Covariance for the initial bearing vector, y-component [rad^2]
93 | initDepth 0.5; Initial value for the initial distance parameter
94 | startDetectionTh 0.8; Threshold for detecting new features (min: 0, max: 1)
95 | scoreDetectionExponent 0.25; Exponent used for weighting the distance between candidates
96 | penaltyDistance 100; Maximal distance which gets penalized during bucketing [pix]
97 | zeroDistancePenalty 100; Penalty for zero distance (max: nDetectionBuckets)
98 | statLocalQualityRange 10; Number of frames for local quality evaluation
99 | statLocalVisibilityRange 100; Number of frames for local visibility evaluation
100 | statMinGlobalQualityRange 100; Minimum number of frames for obtaining maximal global quality
101 | trackingUpperBound 0.9; Threshold for local quality for min overall global quality
102 | trackingLowerBound 0.8; Threshold for local quality for max overall global quality
103 |     minTrackedAndFreeFeatures 0.75; Minimum amount of features which are either tracked or free
104 | removalFactor 1.1; Factor for enforcing feature removal if not enough free
105 | minRelativeSTScore 0.75; Minimum relative ST-score for extracting new feature patch
106 | minAbsoluteSTScore 5.0; Minimum absolute ST-score for extracting new feature patch
107 |     minTimeBetweenPatchUpdate 1.0; Minimum time between new multilevel patch extraction [s]
108 |     fastDetectionThreshold 5; Fast corner detector threshold while adding new features
109 |     alignConvergencePixelRange 10; Assumed convergence range for image alignment (gets scaled with the level) [pixels]
110 |     alignCoverageRatio 2; How many sigma of the uncertainty should be covered in the adaptive alignment
111 |     alignMaxUniSample 1; Maximal number of alignment seeds on one side -> total number of samples = 2n+1. Careful, this can get very costly if diverging!
112 |     patchRejectionTh 50.0; If the average intensity error is larger than this the feature is rejected [intensity], if smaller than 0 no check is performed
113 | alignmentHuberNormThreshold 10; Intensity error threshold for Huber norm (enabled if > 0)
114 | alignmentGaussianWeightingSigma -1; Width of Gaussian which is used for pixel error weighting (enabled if > 0)
115 | alignmentGradientExponent 0.0; Exponent used for gradient based weighting of residuals
116 | useIntensityOffsetForAlignment true; Should an intensity offset between the patches be considered
117 |     useIntensitySqewForAlignment true; Should an intensity skew between the patches be considered
118 | removeNegativeFeatureAfterUpdate true; Should feature with negative distance get removed
119 | maxUncertaintyToDepthRatioForDepthInitialization 0.3; If set to 0.0 the depth is initialized with the standard value provided above, otherwise ROVIO attempts to figure out a median depth in each frame
120 |     useCrossCameraMeasurements true; Should cross measurements between frames be used. Might be turned off in the calibration phase.
121 | doStereoInitialization true; Should a stereo match be used for feature initialization.
122 | noiseGainForOffCamera 10.0; Factor added on update noise if not main camera
123 | discriminativeSamplingDistance 0.02; Sampling distance for checking discriminativity of patch (if <= 0.0 no check is performed).
124 | discriminativeSamplingGain 1.1; Gain for threshold above which the samples must lie (if <= 1.0 the patchRejectionTh is used).
125 | MotionDetection
126 | {
127 | isEnabled 0; Is the motion detection enabled
128 | rateOfMovingFeaturesTh 0.5; Amount of feature with motion for overall motion detection
129 | pixelCoordinateMotionTh 1.0; Threshold for motion detection for patched [pixels]
130 | minFeatureCountForNoMotionDetection 5; Min feature count in frame for motion detection
131 | }
132 | ZeroVelocityUpdate
133 | {
134 | UpdateNoise
135 | {
136 | vel_0 0.01; X-Covariance of zero velocity update [m^2/s^2]
137 | vel_1 0.01; Y-Covariance of zero velocity update [m^2/s^2]
138 | vel_2 0.01; Z-Covariance of zero velocity update [m^2/s^2]
139 | }
140 |         MahalanobisTh0 7.689997599999999; Mahalanobis distance threshold for zero velocity updates
141 | minNoMotionTime 1.0; Min duration of no-motion
142 | isEnabled 0; Should zero velocity update be applied, only works if MotionDetection.isEnabled is true
143 | }
144 | }
145 | Prediction
146 | {
147 | PredictionNoise
148 | {
149 | pos_0 1e-4; X-covariance parameter of position prediction [m^2/s]
150 | pos_1 1e-4; Y-covariance parameter of position prediction [m^2/s]
151 | pos_2 1e-4; Z-covariance parameter of position prediction [m^2/s]
152 | vel_0 4e-6; X-covariance parameter of velocity prediction [m^2/s^3]
153 | vel_1 4e-6; Y-covariance parameter of velocity prediction [m^2/s^3]
154 | vel_2 4e-6; Z-covariance parameter of velocity prediction [m^2/s^3]
155 | acb_0 1e-8; X-covariance parameter of accelerometer bias prediction [m^2/s^5]
156 | acb_1 1e-8; Y-covariance parameter of accelerometer bias prediction [m^2/s^5]
157 | acb_2 1e-8; Z-covariance parameter of accelerometer bias prediction [m^2/s^5]
158 | gyb_0 3.8e-7; X-covariance parameter of gyroscope bias prediction [rad^2/s^3]
159 | gyb_1 3.8e-7; Y-covariance parameter of gyroscope bias prediction [rad^2/s^3]
160 | gyb_2 3.8e-7; Z-covariance parameter of gyroscope bias prediction [rad^2/s^3]
161 | vep 1e-8; Covariance parameter of linear extrinsics prediction [m^2/s]
162 | att_0 7.6e-7; X-Covariance parameter of attitude prediction [rad^2/s]
163 | att_1 7.6e-7; Y-Covariance parameter of attitude prediction [rad^2/s]
164 | att_2 7.6e-7; Z-Covariance parameter of attitude prediction [rad^2/s]
165 | vea 1e-8; Covariance parameter of rotational extrinsics prediction [rad^2/s]
166 | dep 0.0001; Covariance parameter of distance prediction [m^2/s]
167 | nor 0.00001; Covariance parameter of bearing vector prediction [rad^2/s]
168 | }
169 | MotionDetection
170 | {
171 |         inertialMotionRorTh 0.1; Threshold on rotational rate for motion detection [rad/s]
172 |         inertialMotionAccTh 0.5; Threshold on acceleration for motion detection [m/s^2]
173 | }
174 | }
175 | PoseUpdate
176 | {
177 | UpdateNoise
178 | {
179 | pos_0 0.01; X-Covariance of linear pose measurement update [m^2]
180 | pos_1 0.01; Y-Covariance of linear pose measurement update [m^2]
181 | pos_2 0.01; Z-Covariance of linear pose measurement update [m^2]
182 | att_0 0.01; X-Covariance of rotational pose measurement update [rad^2]
183 | att_1 0.01; Y-Covariance of rotational pose measurement update [rad^2]
184 | att_2 0.01; Z-Covariance of rotational pose measurement update [rad^2]
185 | }
186 | init_cov_IrIW 1; Covariance of initial pose between inertial frames, linear part [m^2]
187 | init_cov_qWI 1; Covariance of initial pose between inertial frames, rotational part [rad^2]
188 | init_cov_MrMV 1; Covariance of initial pose between inertial frames, linear part [m^2]
189 | init_cov_qVM 1; Covariance of initial pose between inertial frames, rotational part [rad^2]
190 | pre_cov_IrIW 1e-4; Covariance parameter of pose between inertial frames, linear part [m^2/s]
191 | pre_cov_qWI 1e-4; Covariance parameter of pose between inertial frames, rotational part [rad^2/s]
192 | pre_cov_MrMV 1e-4; Covariance parameter of pose between inertial frames, linear part [m^2/s]
193 | pre_cov_qVM 1e-4; Covariance parameter of pose between inertial frames, rotational part [rad^2/s]
194 |     MahalanobisTh0 12.6511204; Mahalanobis distance threshold of pose measurement
195 |     doVisualization false; Should the measured poses be visualized
196 |     enablePosition true; Should the linear part be used during the update
197 |     enableAttitude true; Should the rotation part be used during the update (e.g. set to false if feeding GPS measurements)
198 |     noFeedbackToRovio true; By default the pose update is only used for aligning coordinate frames. Set to false if ROVIO should benefit from the pose measurements.
199 | doInertialAlignmentAtStart true; Should the transformation between I and W be explicitly computed and set with the first pose measurement.
200 | timeOffset 0.0; Time offset added to the pose measurement timestamps
201 | useOdometryCov false; Should the UpdateNoise position covariance be scaled using the covariance in the Odometry message
202 | qVM_x 0; X-entry of quaterion representing IMU to reference body coordinate frame of pose measurement (Hamilton)
203 | qVM_y 0; Y-entry of quaterion representing IMU to reference body coordinate frame of pose measurement (Hamilton)
204 | qVM_z 0; Z-entry of quaterion representing IMU to reference body coordinate frame of pose measurement (Hamilton)
205 | qVM_w 1; W-entry of quaterion representing IMU to reference body coordinate frame of pose measurement (Hamilton)
206 | MrMV_x 0; X-entry of vector representing IMU to reference body coordinate frame of pose measurement
207 | MrMV_y 0; Y-entry of vector representing IMU to reference body coordinate frame of pose measurement
208 | MrMV_z 0; Z-entry of vector representing IMU to reference body coordinate frame of pose measurement
209 | qWI_x 0; X-entry of quaterion representing World to reference inertial coordinate frame of pose measurement (Hamilton)
210 | qWI_y 0; Y-entry of quaterion representing World to reference inertial coordinate frame of pose measurement (Hamilton)
211 | qWI_z 0; Z-entry of quaterion representing World to reference inertial coordinate frame of pose measurement (Hamilton)
212 | qWI_w 1; W-entry of quaterion representing World to reference inertial coordinate frame of pose measurement (Hamilton)
213 | IrIW_x 0; X-entry of vector representing World to reference inertial coordinate frame of pose measurement
214 | IrIW_y 0; Y-entry of vector representing World to reference inertial coordinate frame of pose measurement
215 | IrIW_z 0; Z-entry of vector representing World to reference inertial coordinate frame of pose measurement
216 | }
217 | VelocityUpdate
218 | {
219 | UpdateNoise
220 | {
221 | vel_0 0.0001
222 | vel_1 0.0001
223 | vel_2 0.0001
224 | }
225 | MahalanobisTh0 7.689997599999999
226 | qAM_x 0
227 | qAM_y 0
228 | qAM_z 0
229 | qAM_w 1
230 | }
231 |
--------------------------------------------------------------------------------
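Note: depthType 1 above stores each feature's distance as inverse depth rho = 1/d, which keeps
distant features numerically well behaved (d -> infinity maps to rho -> 0). The MahalanobisTh
value of 9.21 matches the 99% chi-square quantile for a 2-DOF residual, the usual gating
interpretation (an assumption; the file itself does not state it). A minimal sketch:

    import numpy as np
    from scipy.stats import chi2

    d = np.array([0.5, 2.0, 50.0])  # feature depths [m]
    rho = 1.0 / d                   # inverse-depth parametrization (depthType 1)

    print(chi2.ppf(0.99, df=2))     # ~9.2103, cf. MahalanobisTh 9.21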
/ros/rovio/rovio_node.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/ros/rovio/rovio_rosbag_node_ttc.launch:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/ros/rovio/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | rocker --volume ../../../../time_to_contact_depth --nvidia --x11 --name rovio ros:rovio bash
4 |
5 | # docker run -it --rm ros:vins-mono \
6 | # /bin/bash -c \
7 | # "cd /root/catkin_ws/; \
8 | # source devel/setup.bash; \
9 | # roslaunch vins_estimator ${1}"
10 |
--------------------------------------------------------------------------------
/rpg_align_trajectory/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Zichao Zhang (Robotics and Perception Group, University of Zurich, Switzerland)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/rpg_align_trajectory/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/prgumd/TTCDist/c76dff31c96ff97a97a8e5d4d99133cbc9bd2d23/rpg_align_trajectory/__init__.py
--------------------------------------------------------------------------------
/rpg_align_trajectory/rpg_align_trajectory.py:
--------------------------------------------------------------------------------
1 | # Downloaded from:
2 | # https://github.com/uzh-rpg/rpg_trajectory_evaluation/blob/master/src/rpg_trajectory_evaluation/align_trajectory.py
3 | # Available under MIT license
4 |
5 | #!/usr/bin/env python2
6 | # -*- coding: utf-8 -*-
7 |
8 | import numpy as np
9 | #import transformations as tfs
10 |
11 |
12 | def get_best_yaw(C):
13 | '''
14 | maximize trace(Rz(theta) * C)
15 | '''
16 | assert C.shape == (3, 3)
17 |
18 | A = C[0, 1] - C[1, 0]
19 | B = C[0, 0] + C[1, 1]
20 | theta = np.pi / 2 - np.arctan2(B, A)
21 |
22 | return theta
23 |
24 |
25 | def rot_z(theta):
26 |     # Rotation about the z-axis, implemented directly in numpy so the commented-out
27 |     # 'transformations' dependency above is not needed
28 |     c, s = np.cos(theta), np.sin(theta)
29 |     return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
30 |
31 |
32 | def align_umeyama(model, data, known_scale=False, yaw_only=False):
33 | """Implementation of the paper: S. Umeyama, Least-Squares Estimation
34 | of Transformation Parameters Between Two Point Patterns,
35 | IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.
36 |
37 | model = s * R * data + t
38 |
39 | Input:
40 | model -- first trajectory (nx3), numpy array type
41 | data -- second trajectory (nx3), numpy array type
42 |
43 | Output:
44 | s -- scale factor (scalar)
45 | R -- rotation matrix (3x3)
46 | t -- translation vector (3x1)
47 | t_error -- translational error per point (1xn)
48 |
49 | """
50 |
51 |     # subtract mean
52 | mu_M = model.mean(0)
53 | mu_D = data.mean(0)
54 | model_zerocentered = model - mu_M
55 | data_zerocentered = data - mu_D
56 | n = np.shape(model)[0]
57 |
58 | # correlation
59 | C = 1.0/n*np.dot(model_zerocentered.transpose(), data_zerocentered)
60 | sigma2 = 1.0/n*np.multiply(data_zerocentered, data_zerocentered).sum()
61 |     U_svd, D_svd, V_svd = np.linalg.svd(C)
62 | D_svd = np.diag(D_svd)
63 | V_svd = np.transpose(V_svd)
64 |
65 | S = np.eye(3)
66 | if(np.linalg.det(U_svd)*np.linalg.det(V_svd) < 0):
67 | S[2, 2] = -1
68 |
69 | if yaw_only:
70 | rot_C = np.dot(data_zerocentered.transpose(), model_zerocentered)
71 | theta = get_best_yaw(rot_C)
72 | R = rot_z(theta)
73 | else:
74 | R = np.dot(U_svd, np.dot(S, np.transpose(V_svd)))
75 |
76 | if known_scale:
77 | s = 1
78 | else:
79 | s = 1.0/sigma2*np.trace(np.dot(D_svd, S))
80 |
81 | t = mu_M-s*np.dot(R, mu_D)
82 |
83 | return s, R, t
84 |
--------------------------------------------------------------------------------
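Note: align_umeyama returns the similarity transform (s, R, t) mapping the second trajectory
onto the first under model = s * R * data + t. A minimal usage sketch with a synthetic
trajectory (all values hypothetical; run from the repository root so the import resolves):

    import numpy as np
    from rpg_align_trajectory.rpg_align_trajectory import align_umeyama

    rng = np.random.default_rng(0)
    data = rng.normal(size=(100, 3))               # second trajectory (n x 3)

    theta = 0.3                                    # known yaw rotation
    R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                       [np.sin(theta),  np.cos(theta), 0.0],
                       [0.0,            0.0,           1.0]])
    s_true, t_true = 2.0, np.array([1.0, -2.0, 0.5])
    model = s_true * (R_true @ data.T).T + t_true  # first trajectory

    s, R, t = align_umeyama(model, data)
    assert np.isclose(s, s_true)
    assert np.allclose(R, R_true) and np.allclose(t, t_true)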
/ttc_depth_calc_error.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_calc_error.py
4 | # Available under MIT license
5 | #
6 | # Calculate the error for all sequences released with the TTCDist paper
7 | #
8 | # History:
9 | # 09-26-22 - Levi Burner - Open source release
10 | #
11 | ###############################################################################
12 |
13 | import argparse
14 | import csv
15 | import glob
16 | import os
17 | os.environ['OPENBLAS_NUM_THREADS'] = '1' # Attempt to disable OpenBLAS multithreading, which makes the script slower
18 | import pickle
19 |
20 | import numpy as np
21 |
22 | import matplotlib.pyplot as plt
23 |
24 | from scipy.interpolate import interp1d
25 | from scipy.io import savemat
26 |
27 | from rpg_align_trajectory.rpg_align_trajectory import align_umeyama
28 |
29 | def signal_to_array(signal):
30 | times = [t for t, d in signal]
31 | data = [d for t, d in signal]
32 |
33 | times_array = np.array(times)
34 | data_array = np.vstack(data)
35 |
36 | return np.hstack((np.atleast_2d(times_array).transpose(), data_array))
37 |
38 | def load_april(directory):
39 | results_name = os.path.join(directory, 'results.pickle')
40 | ttc_data = pickle.load(open(results_name, 'rb'))['results']
41 |
42 | if 'ground_truth_pose' in ttc_data.keys():
43 | april_signal = ttc_data['ground_truth_pose']
44 | april_data = signal_to_array(april_signal)
45 | return april_data
46 | else:
47 | return None
48 |
49 | def load_ttc(directory):
50 | results_name = os.path.join(directory, 'results.pickle')
51 | ttc_data = pickle.load(open(results_name, 'rb'))['results']
52 |
53 | pose_signal = ttc_data['pose_hat']
54 | pose_data = signal_to_array(pose_signal)
55 |
56 | return pose_data
57 |
58 | def load_phi(directory):
59 | results_name = os.path.join(directory, 'results.pickle')
60 | ttc_data = pickle.load(open(results_name, 'rb'))['results']
61 |
62 | pose_signal = ttc_data['phi_pose_hat']
63 | pose_data = signal_to_array(pose_signal)
64 |
65 | return pose_data
66 |
67 | def load_vins_mono(directory):
68 | results_name = os.path.join(directory, 'results.pickle')
69 | vins_mono_data = pickle.load(open(results_name, 'rb'), encoding='latin1')
70 |
71 | vins_mono_data = vins_mono_data['poses']
72 |
73 | return vins_mono_data
74 |
75 | def load_rovio(directory):
76 | results_name = os.path.join(directory, 'results.pickle')
77 |
78 | if not os.path.exists(results_name):
79 | return None
80 |
81 | rovio_data = pickle.load(open(results_name, 'rb'), encoding='latin1')
82 |
83 | rovio_data = rovio_data['poses']
84 |
85 | return rovio_data
86 |
87 | def load_vicon(directory):
88 | results_name = os.path.join(directory, 'results.csv')
89 |
90 | # Unfortunately cannot use genfromtxt because vicon leaves out
91 | # delimiters when data is bad
92 | #vicon_data = np.genfromtxt(results_name, skip_header=5, delimiter=',', filling_values=np.nan, invalid_raise=False)
93 |
94 | vicon_data_list = []
95 | with open(results_name) as results_file:
96 | csv_reader = csv.reader(results_file, delimiter=',')
97 | for i in range(5):
98 | csv_reader.__next__()
99 |
100 | for row in csv_reader:
101 | if len(row) == 9:
102 | row_float = [float(x) for x in row]
103 | t = (row_float[0] - 1) * (1.0/200.0)
104 | vicon_data_list.append([t,] + row_float[6:9])
105 |
106 | vicon_data = np.array(vicon_data_list)
107 | vicon_data[:, 1:4] /= 1000.0
108 | return vicon_data
109 |
110 | def plot_all_naive(data, abs=False):
111 | plt.figure()
112 | data_abs = {}
113 | for method in data.keys():
114 | if abs:
115 | data_abs[method] = np.abs(data[method])
116 | else:
117 | data_abs = data
118 |
119 | plt.subplot(3, 1, 1)
120 |
121 | t = data_abs[method][:, 0]
122 |
123 | x = data_abs[method][:, 1]
124 | plt.plot(t, x)
125 | plt.legend(data.keys())
126 | plt.grid(True)
127 |
128 | plt.subplot(3, 1, 2)
129 | y = data_abs[method][:, 2]
130 | plt.plot(t, y)
131 | plt.legend(data.keys())
132 | plt.grid(True)
133 |
134 | plt.subplot(3, 1, 3)
135 | z = data_abs[method][:, 3]
136 | plt.plot(t, z)
137 | plt.legend(data.keys())
138 | plt.grid(True)
139 |
140 |
141 | def select_data(data, min_t, max_t):
142 | t = data[:, 0]
143 | data_selected = data[(t >= min_t) & (t <= max_t)]
144 | return data_selected
145 |
146 | def calc_valid_path_length(data, ground_truth='vicon'):
147 | ground_truth_data = data[ground_truth]
148 |
149 | min_t = None
150 | max_t = None
151 | for method in data.keys():
152 | if method == ground_truth:
153 | continue
154 | data_times = data[method][:, 0]
155 | if min_t is None or data_times[0] > min_t:
156 | min_t = data_times[0]
157 | if max_t is None or data_times[-1] < max_t:
158 | max_t = data_times[-1]
159 |
160 | ground_truth_times = ground_truth_data[:, 0]
161 | ground_truth_trimmed = ground_truth_data[(ground_truth_times >= min_t) * (ground_truth_times <= max_t), :]
162 |
163 | dist_traveled_samples = np.linalg.norm(ground_truth_trimmed[1:, 1:4] - ground_truth_trimmed[:-1, 1:4], axis=1)
164 | path_length = np.sum(dist_traveled_samples)
165 | return path_length
166 |
167 |
168 | def calc_error(data, ground_truth='april'):
169 | ground_truth_data = data[ground_truth]
170 |
171 | min_t = None
172 | max_t = None
173 | for method in data.keys():
174 | if method == ground_truth:
175 | continue
176 | data_times = data[method][:, 0]
177 | if min_t is None or data_times[0] > min_t:
178 | min_t = data_times[0]
179 | if max_t is None or data_times[-1] < max_t:
180 | max_t = data_times[-1]
181 |
182 | error_data = {}
183 | for method in data.keys():
184 | if method == ground_truth:
185 | continue
186 |
187 | data_times = data[method][:, 0]
188 | data_selected = select_data(data[method], min_t, max_t)
189 |
190 | data_interp = interp1d(data_selected[:, 0], data_selected[:, 1:4], axis=0)
191 |
192 | data_times_min = data_selected[:, 0].min()
193 | data_times_max = data_selected[:, 0].max()
194 |
195 | ground_truth_times = ground_truth_data[:, 0]
196 | ground_truth_trimmed = ground_truth_data[(ground_truth_times >= data_times_min) * (ground_truth_times <= data_times_max), :]
197 |
198 | data_resampled = data_interp(ground_truth_trimmed[:, 0])
199 |
200 | error = data_resampled - ground_truth_trimmed[:, 1:4]
201 |
202 |
203 | name = method + '-' + ground_truth
204 | error_data[name] = np.hstack((np.atleast_2d(ground_truth_trimmed[:, 0]).transpose(), error))
205 |
206 | return error_data
207 |
208 | def calc_error_stats(error_data):
209 | error_stats = {}
210 | for method in error_data.keys():
211 | rmse = 100*np.linalg.norm(error_data[method][:, 1:4], axis=0) / (error_data[method].shape[0]**0.5)
212 |
213 | ate_samples = error_data[method][:, 1:4].flatten()
214 | ate = 100*np.linalg.norm(ate_samples) / (ate_samples.shape[0]**0.5)
215 |
216 | error_stats[method] = (rmse, ate)
217 | return error_stats
218 |
219 | def print_error_stats(error_stats):
220 | for method in error_stats.keys():
221 | rmse = error_stats[method][0]
222 | ate = error_stats[method][1]
223 | print('{:<40}: rmse {:0.3f} {:0.3f} {:0.3f} (cm) ate: {:0.3f} (cm)'.format(method, rmse[0], rmse[1], rmse[2], ate))
224 |
225 | def find_transform_time_sync_to_ground_truth(data, ground_truth='april'):
226 |     ground_truth_data = data[ground_truth]
227 |
228 |
229 |
230 | min_t = None
231 | max_t = None
232 | for method in data.keys():
233 | if method == ground_truth:
234 | continue
235 | data_times = data[method][:, 0]
236 | if min_t is None or data_times[0] > min_t:
237 | min_t = data_times[0]
238 | if max_t is None or data_times[-1] < max_t:
239 | max_t = data_times[-1]
240 |
241 | transformation = {}
242 | min_ate = {}
243 | for method in data.keys():
244 | if method == ground_truth:
245 | continue
246 |
247 | data_selected = select_data(data[method], min_t, max_t)
248 | data_interp = interp1d(data_selected[:, 0], data_selected[:, 1:4], axis=0, bounds_error=False)
249 |
250 | ground_truth_length = ground_truth_data[-1, 0] - ground_truth_data[0, 0]
251 | if (ground_truth_length < (max_t - min_t)):
252 | raise Exception('ground truth must be longer than data for now')
253 |
254 | # TODO handle ground truth sample rate correctly
255 | ground_truth_samples = int(np.floor((max_t - min_t) * 200.0))
256 |
257 | for i in range(0, ground_truth_data.shape[0] - ground_truth_samples):
258 | if ('record_000004' in recording
259 | and (method == 'ttc_wy_bias_2'
260 | or method =='rovio')
261 | and i < 2000): # Hack to fix incorrect correlations that cause insane ATE errors for these two cases
262 | continue
263 |
264 | ground_truth_selected = ground_truth_data[i:i+ground_truth_samples, :]
265 | ground_truth_times_shifted = (ground_truth_selected[:, 0] - ground_truth_selected[0, 0]) + min_t
266 | valid = (ground_truth_times_shifted >= data_selected[0, 0]) & (ground_truth_times_shifted <= data_selected[-1, 0])
267 | ground_truth_times_shifted = ground_truth_times_shifted[valid]
268 | ground_truth_selected = ground_truth_selected[valid]
269 |
270 | data_resampled = data_interp(ground_truth_times_shifted)
271 |
272 | try:
273 | s, R, t = align_umeyama(ground_truth_selected[:, 1:4], data_resampled, known_scale=True)
274 | assert s == 1
275 | except np.linalg.LinAlgError:
276 | #print('error, should not happen')
277 | continue
278 |
279 | transformed = ((s * R) @ data_resampled.transpose()).transpose() + t
280 |
281 | error = (transformed - ground_truth_selected[:, 1:4]).flatten()
282 | ate = 100*np.linalg.norm(error) / (error.shape[0]**0.5)
283 |
284 | if method not in min_ate.keys() or ate < min_ate[method]:
285 | min_ate[method] = ate
286 | transformation[method] = (s, R, t, i / 200.0 - min_t)
287 | #print(method, i, i / 200.0, min_ate[method])
288 |
289 | return transformation
290 |
291 | def transform_data_to_ground_truth(data, transformations, ground_truth='april'):
292 | data_transformed = {ground_truth: data[ground_truth]}
293 | for method in data.keys():
294 | if method == ground_truth:
295 | continue
296 |
297 | if method not in transformations:
298 | continue
299 |
300 | transformation = transformations[method]
301 |
302 | s = transformation[0]
303 | R = transformation[1]
304 | t = transformation[2]
305 | delta_t = transformation[3]
306 |
307 | data_times = data[method][:, 0] + delta_t
308 |
309 | transformed = ((s * R) @ data[method][:, 1:4].transpose()).transpose() + t
310 | data_transformed[method] = np.hstack((np.atleast_2d(data_times).transpose(), transformed))
311 | return data_transformed
312 |
313 | if __name__ == '__main__':
314 | parser = argparse.ArgumentParser()
315 | parser.add_argument('--dir', help='directory containing results files')
316 | parser.add_argument('--calc', action='store_true', help='Align trajectories with ground truth, save results to file')
317 | parser.add_argument('--latex', action='store_true', help='Process results file to latex')
318 | args = parser.parse_args()
319 |
320 |
321 | if args.calc:
322 | # Find all the recordings
323 | recordings = sorted(glob.glob(os.path.join(args.dir, 'record_*')))
324 |
325 | results = {}
326 |
327 | for recording in recordings:
328 | print('Processing: {}'.format(recording))
329 | # Find all the algorithms
330 | algorithms = sorted(glob.glob(os.path.join(recording, '*')))
331 |
332 | data = {}
333 | for algorithm in algorithms:
334 | method = os.path.basename(algorithm)
335 |
336 | #print(method)
337 |
338 | if method[:3] == 'ttc':
339 | if method[3:] == '':
340 | april_data = load_april(algorithm)
341 | if april_data is not None:
342 | data['april'] = april_data
343 | data[method] = load_ttc(algorithm)
344 | data['phi'] = load_phi(algorithm)
345 | elif method[:9] == 'vins_mono':
346 | if 'record_000004' not in recording: # Skip recording where VINS-Mono did not work
347 | data[method] = load_vins_mono(algorithm)
348 | elif method[:5] == 'rovio':
349 | data[method] = load_rovio(algorithm)
350 | elif method[:5] == 'vicon':
351 | data[method] = load_vicon(algorithm)
352 | elif method == 'visualization':
353 | pass
354 | else:
355 | raise Exception('unsupported method')
356 |
357 | transformation = find_transform_time_sync_to_ground_truth(data, ground_truth='vicon')
358 | transformed_data = transform_data_to_ground_truth(data, transformation, ground_truth='vicon')
359 |
360 | error_data = calc_error(transformed_data, ground_truth='vicon')
361 |
362 | #plot_all_naive(transformed_data)
363 | #plot_all_naive(error_data, abs=True)
364 | error_stats = calc_error_stats(error_data)
365 | print_error_stats(error_stats)
366 |
367 | for key in transformation.keys():
368 | print(transformation[key][3] - transformation[list(transformation.keys())[0]][3], key)
369 |
370 | recording_name = os.path.split(recording)[-1]
371 | results[recording_name] = {'transformed_data': transformed_data,
372 | 'error_data': error_data}
373 |
374 | results_save_name = os.path.join(args.dir, 'processed_results.pickle')
375 | with open(results_save_name, 'wb') as file:
376 | pickle.dump(results, file)
377 |
378 | results_save_name = os.path.join(args.dir, 'processed_results.mat')
379 | savemat(results_save_name, mdict=results)
380 |
381 | plt.show()
382 |
383 | if args.latex:
384 | results_save_name = os.path.join(args.dir, 'processed_results.pickle')
385 | results_data = pickle.load(open(results_save_name, 'rb'))
386 |
387 | methods_to_error_name = {
388 | 'AprilTag 3': 'april-vicon',
389 | 'VINS-Mono': 'vins_mono-vicon',
390 | 'ROVIO': 'rovio-vicon',
391 |
392 | '$\\Phi$-constraint (ours)': 'phi-vicon',
393 | '$\\tau$-constraint (ours)': 'ttc-vicon',
394 | #'$\\tau$-constraint (ours) (45 Hz LK output)': 'ttc_affine_skip_1-vicon',
395 | # '$\\tau$-constraint (ours) (30 Hz LK output)': 'ttc_affine_skip_2-vicon',
396 | # '$\\tau$-constraint (ours) (15 Hz LK output)': 'ttc_affine_skip_3-vicon',
397 |
398 | #'$\\tau$-constraint (ours) (0.5 deg/s $\\Omega_y$ bias)': 'ttc_wy_bias_0_5-vicon',
399 | # '$\\tau$-constraint (ours) (1.0 deg/s $\\Omega_y$ bias)': 'ttc_wy_bias_1-vicon',
400 | # '$\\tau$-constraint (ours) (2.0 deg/s $\\Omega_y$ bias)': 'ttc_wy_bias_2-vicon',
401 | #'$\\tau$-constraint (ours) (3.0 deg/s $\\Omega_y$ bias)': 'ttc_wy_bias_3-vicon',
402 | #'$\\tau$-constraint (ours) (4.0 deg/s $\\Omega_y$ bias)': 'ttc_wy_bias_4-vicon',
403 |
404 | #'$\\tau$-constraint (ours) (0.5 deg/s $\\Omega_z$ bias)': 'ttc_wz_bias_0_5-vicon',
405 | # '$\\tau$-constraint (ours) (1.0 deg/s $\\Omega_z$ bias)': 'ttc_wz_bias_1-vicon',
406 | # '$\\tau$-constraint (ours) (2.0 deg/s $\\Omega_z$ bias)': 'ttc_wz_bias_2-vicon',
407 | #'$\\tau$-constraint (ours) (3.0 deg/s $\\Omega_z$ bias)': 'ttc_wz_bias_3-vicon',
408 | #'$\\tau$-constraint (ours) (4.0 deg/s $\\Omega_z$ bias)': 'ttc_wz_bias_4-vicon',
409 | }
410 |
411 | latex_table_str = '\\begin{table*}\n'
412 | latex_table_str += ' \\centering\n'
413 | latex_table_str += ' \\begin{tabular}{@{}lcccccccccc@{}}\n'
414 | latex_table_str += ' \\toprule\n'
415 | latex_table_str += ' & \\textit{Seq. 1} & \\textit{Seq. 2} & \\textit{Seq. 3} & \\textit{Seq. 4} & \\textit{Seq. 5} & \\textit{Seq. 6} & \\textit{Seq. 7} & \\textit{Seq. 8} & \\textit{Seq. 9} & \\textit{Seq. 10}\\\\\n'
416 | latex_table_str += ' \\midrule\n'
417 |
418 | line_str = 'Sequence Duration (s)'
419 | line2_str = 'Distance Traveled (m)'
420 | for recording in results_data.keys():
421 | #if recording != 'record_000004':
422 | # continue
423 |
424 | error_data = results_data[recording]['error_data']
425 | dt = error_data['ttc-vicon'][-1, 0] - error_data['ttc-vicon'][0, 0]
426 | #dt2 = error_data['vins_mono-vicon'][-1, 0] - error_data['vins_mono-vicon'][0, 0]
427 | #print(dt, dt2)
428 | line_str += ' & {:1.2f}'.format(dt)
429 |
430 | transformed_data = results_data[recording]['transformed_data']
431 | path_length = calc_valid_path_length(transformed_data)
432 | line2_str += ' & {:1.2f}'.format(float(path_length))
433 |
434 | latex_table_str += line_str + '\\\\\n'
435 | latex_table_str += line2_str + '\\\\\n'
436 |
437 | latex_table_str += ' \\midrule\n'
438 | latex_table_str += ' Method & &&&& ATE (cm) &&&& \\\\\n'
439 | latex_table_str += ' \\midrule\n'
440 |
441 | for method in methods_to_error_name.keys():
442 | results_line = '{} &'.format(method)
443 | for recording in results_data.keys():
444 | #if recording != 'record_000004':
445 | # continue
446 |
447 |
448 | transformed_data = results_data[recording]['transformed_data']
449 | error_data = results_data[recording]['error_data']
450 |
451 | error_stats = calc_error_stats(error_data)
452 |
453 | if methods_to_error_name[method] in error_stats:
454 | rmse, ate = error_stats[methods_to_error_name[method]]
455 | results_line += ' {:.2f} &'.format(ate)
456 | else:
457 | results_line += ' - &'
458 |
459 | latex_table_str += results_line[:-1] + '\\\\\n'
460 |
461 | latex_table_str += ' \\bottomrule\n'
462 | latex_table_str += ' \\end{tabular}\n'
463 | latex_table_str += ' \\caption{All results in (cm).}\n'
464 | latex_table_str += ' \\label{tab:atecompare}\n'
465 | latex_table_str += '\\end{table*}'
466 | print(latex_table_str)
467 |
468 | error_data_total = {'ttc-vicon': [],
469 | 'phi-vicon': [],
470 | 'vins_mono-vicon': [],
471 | 'april-vicon': [],
472 | 'rovio-vicon': [],
473 | # 'ttc_affine_skip_2-vicon': [],
474 | # 'ttc_affine_skip_3-vicon': [],
475 | # 'ttc_wy_bias_1-vicon': [],
476 | # 'ttc_wy_bias_2-vicon': [],
477 | # 'ttc_wz_bias_1-vicon': [],
478 | # 'ttc_wz_bias_2-vicon': [],
479 | }
480 | for recording in results_data.keys():
481 | error_data = results_data[recording]['error_data']
482 | for method in error_data.keys():
483 | if recording == 'record_000004' and method == 'vins_mono-vicon': # Skip recording where VINS-Mono did not work
484 | continue
485 | error_data_total[method].append(error_data[method])
486 |
487 | for key in error_data_total.keys():
488 | error_data_total[key] = np.vstack(error_data_total[key])
489 |
490 | error_stats = calc_error_stats(error_data_total)
491 |
492 | for key in error_data_total.keys():
493 | rmse, ate = error_stats[key]
494 | print(key)
495 | print(ate)
496 |
--------------------------------------------------------------------------------
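Note: the ATE printed by calc_error_stats is the root mean square over all 3N scalar error
components (a per-axis RMS rather than an RMS of 3-D error norms), reported in centimeters. A
minimal sketch of the same quantity with a hypothetical error array:

    import numpy as np

    # Hypothetical per-sample position errors [m], one row per ground-truth timestamp
    errors = np.array([[0.01, 0.00, 0.00],
                       [0.00, 0.02, 0.00]])

    ate_cm = 100 * np.linalg.norm(errors.flatten()) / np.sqrt(errors.size)
    print(ate_cm)  # ~0.913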
/ttc_depth_from_folder.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_from_folder.py
4 | # Available under MIT license
5 | #
6 | # Run Phi and TTC distance estimation using recordings made with ttc_depth_realsense.py
7 | #
8 | # History:
9 | # 04-21-20 - Levi Burner - Adapted file from early prototype code
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import argparse
15 | import json
16 | import os
17 | os.environ['OPENBLAS_NUM_THREADS'] = '1' # Attempt to disable OpenBLAS multithreading, which makes the script slower
18 | import glob
19 | import pickle
20 |
21 | import time
22 |
23 | import cv2
24 | import numpy as np
25 | from scipy import interpolate
26 |
27 | from ttc_depth import TTCDepth
28 | from utilities import (latest_recording_dir,
29 | load_calibration_from_dir)
30 | from zmq_vector_publisher import ZMQVectorPublisher, ZMQVectorPublisherSaver
31 | from shutil import copyfile
32 |
33 | class RecordedIMUSource(object):
34 | def __init__(self, record_dir, wy_bias = 0.0, wz_bias = 0.0):
35 | self._wy_bias = wy_bias
36 | self._wz_bias = wz_bias
37 |
38 | sensor_data = pickle.load(open(os.path.join(record_dir, 'imu.pickle'), 'rb'))
39 | self._accel_samples = sensor_data['accel']
40 | self._gyro_samples = sensor_data['gyro']
41 |
42 | self._gyro_samples[:, 2] += self._wy_bias * np.pi / 180.0
43 | self._gyro_samples[:, 3] += self._wz_bias * np.pi / 180.0
44 |
45 | self._sample_index = 0
46 |
47 | # TODO resampling like this should occur live as well
48 | interpolater = interpolate.interp1d(self._accel_samples[:, 0],
49 | self._accel_samples[:, 1:4],
50 | axis=0,
51 | bounds_error=False,
52 | fill_value='extrapolate')
53 | accel_samples_interpolated = interpolater(self._gyro_samples[:, 0])
54 |
55 | self._imu_samples = np.hstack((np.atleast_2d(self._gyro_samples[:, 0]).transpose(),
56 | accel_samples_interpolated,
57 | self._gyro_samples[:, 1:4]))
58 |
59 | def earliest_timestamp(self):
60 | if self._sample_index >= len(self._imu_samples):
61 | return None
62 | return self._imu_samples[0, 0]
63 |
64 | def latest_timestamp(self):
65 | if self._sample_index >= len(self._imu_samples):
66 | return None
67 | return self._imu_samples[-1, 0]
68 |
69 | def next_sample(self):
70 | if self._sample_index >= len(self._imu_samples):
71 | return None
72 |
73 | sample = self._imu_samples[self._sample_index]
74 | self._sample_index += 1
75 | return sample
76 |
77 | def preprocess_image(frame):
78 | frame_gray = frame.astype(np.float32) * (1.0 / 255.0)
79 | return frame_gray
80 |
81 | class RecordedFrameSource(object):
82 | def __init__(self,
83 | record_dir,
84 | preload=False,
85 | preprocess=False,
86 | frame_skip=0):
87 | self._preload = preload
88 | self._preprocess = preprocess
89 | self._frame_skip = frame_skip
90 |
91 | self._frame_names_orig = sorted(list(glob.glob(os.path.join(record_dir, 'images/frame_*.npy'))))
92 |
93 | self._frame_names = self._frame_names_orig[::(frame_skip+1)]
94 |
95 | frame_metadata = pickle.load(open(os.path.join(record_dir, 'frame_metadata.pickle'), 'rb'))
96 | self._frame_ts = frame_metadata['ts'][::(frame_skip + 1)]
97 |
98 | self._sample_index = 0
99 |
100 | if self._preload:
101 | if self._preprocess:
102 | self._preloaded_frames = [preprocess_image(np.load(frame_name)) for frame_name in self._frame_names]
103 | else:
104 | self._preloaded_frames = [np.load(frame_name) for frame_name in self._frame_names]
105 |
106 | def earliest_timestamp(self):
107 | return self._frame_ts[0]
108 |
109 | def latest_timestamp(self):
110 | if self._sample_index >= len(self._frame_ts):
111 | return None
112 | return self._frame_ts[-1]
113 |
114 | def next_sample(self):
115 | if self._sample_index >= len(self._frame_ts):
116 | return None
117 |
118 | if not self._preload:
119 | frame = np.load(self._frame_names[self._sample_index])
120 | frame_gray = preprocess_image(frame)
121 | sample = (self._frame_ts[self._sample_index], frame_gray)
122 | else:
123 | if not self._preprocess:
124 | sample = (self._frame_ts[self._sample_index], preprocess_image(self._preloaded_frames[self._sample_index]))
125 | else:
126 | sample = (self._frame_ts[self._sample_index], self._preloaded_frames[self._sample_index])
127 |
128 | self._sample_index += 1
129 | return sample
130 |
131 | def free_sample(self):
132 | pass
133 |
134 | class RecordedTemplateSource(object):
135 | def __init__(self, record_dir):
136 | templates = pickle.load(open(os.path.join(record_dir, 'templates_live.pickle'), 'rb'))
137 | self._patches = templates['patches']
138 |
139 | # WIDTH = 848
140 | # HEIGHT = 480
141 | # patch_dim = 50
142 |
143 | # self._patch_params = {
144 | # 'patch_start_time': time.time() + 2.5,
145 | # 'patch_end_time': time.time() + 10000.0,
146 | # 'patch_coordinates': (int(WIDTH/2 - patch_dim), int(HEIGHT/2 - patch_dim), int(WIDTH/2 + patch_dim), int(HEIGHT/2 + patch_dim))
147 | # }
148 |
149 | self._patch_index = -1
150 |
151 | # def _create_patch(self, time):
152 | # self._patches.append((time,
153 | # time + 100000.0,
154 | # self._patch_params['patch_coordinates']))
155 |
156 | def current_patch_valid(self, time):
157 | # TODO allow creating patches on the fly
158 | # if True: #self._resettable:
159 | # key = cv2.pollKey()
160 | # #if key != -1:
161 | # # print('key {}'.format(key))
162 | # if key == 114: # r key
163 | # self._create_patch(time)
164 | # return False
165 | # elif key == 116: # t key
166 | # return 'reset observer'
167 | # elif key == 113: # q key
168 | # self._exit = True
169 |
170 | if self._patch_index < 0:
171 | raise Exception('Current patch valid called before patch selected')
172 |
173 | valid = True
174 |
175 | if time > self._patches[self._patch_index][1]:
176 | valid = False
177 |
178 | if self._patch_index + 1 < len(self._patches):
179 | if time > self._patches[self._patch_index + 1][0]:
180 | valid = False
181 |
182 | return valid
183 |
184 | def get_new_patch(self, time):
185 | next_patch_index = self._patch_index + 1
186 |
187 | if next_patch_index >= len(self._patches):
188 | return None
189 |
190 | if time >= self._patches[next_patch_index][0]:
191 | self._patch_index = next_patch_index
192 | return self._patches[next_patch_index][2]
193 |
194 | def make_results_dir(recording_name, ttc_append='', vicon=False):
195 | if not os.path.isdir('results'):
196 | os.mkdir('results')
197 |
198 | results_dir = os.path.join('results', recording_name)
199 | if not os.path.isdir(results_dir):
200 | os.mkdir(results_dir)
201 |
202 | results_dir_ttc = os.path.join(results_dir, 'ttc'+ttc_append)
203 | if not os.path.isdir(results_dir_ttc):
204 | os.mkdir(results_dir_ttc)
205 |
206 | visualization_dir = os.path.join(results_dir, 'visualization')
207 | if not os.path.isdir(visualization_dir):
208 | os.mkdir(visualization_dir)
209 |
210 | if vicon:
211 | results_dir_vicon = os.path.join(results_dir, 'vicon')
212 | if not os.path.isdir(results_dir_vicon):
213 | os.mkdir(results_dir_vicon)
214 |
215 |
216 | return results_dir_ttc, visualization_dir
217 |
218 | def vicon_file_name(record_dir, recording_name):
219 | record_number = int(recording_name[-6:])
220 |
221 | if record_number == 0:
222 | vicon_file_number_str = ''
223 | else:
224 | vicon_file_number_str = ' {}'.format(record_number)
225 |
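   |     # e.g. 'record_000000' -> '../vicon/d435i_ttc.csv',
   |     #      'record_000003' -> '../vicon/d435i_ttc 3.csv'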
226 | vicon_file_name = os.path.join(record_dir, '../vicon/d435i_ttc{}.csv'.format(vicon_file_number_str))
227 | return vicon_file_name
228 |
229 | def run_on_directory(record_dir, args, max_visualize_rate, save_visualization, settings_name=''):
230 | K, D, map1, map2, resolution = load_calibration_from_dir(record_dir)
231 |
232 | params_file_name = os.path.join(record_dir, 'params.json')
233 | if os.path.exists(params_file_name):
234 | params = json.load(open(params_file_name))
235 | else:
236 | params = {'trim_end': 0.0}
237 |
238 | if args.save or save_visualization:
239 | recording_name = os.path.split(record_dir)[-1]
240 |
241 | vicon_file = vicon_file_name(record_dir, recording_name)
242 | if os.path.exists(vicon_file):
243 | results_dir, visualization_dir = make_results_dir(recording_name, ttc_append=settings_name, vicon=True)
244 | vicon_file_dest = os.path.join('results', recording_name, 'vicon', 'results.csv')
245 | copyfile(vicon_file, vicon_file_dest)
246 | else:
247 | results_dir, visualization_dir = make_results_dir(recording_name, vicon=False)
248 |
249 | vector_pub = ZMQVectorPublisherSaver()
250 | else:
251 | vector_pub = ZMQVectorPublisher()
252 | visualization_dir = None
253 |
254 | if args.nopublish:
255 | vector_pub = None
256 |
257 | frame_source = RecordedFrameSource(record_dir,
258 | preload=args.preload,
259 | preprocess=args.preprocess,
260 | frame_skip=args.frame_skip)
261 | imu_source = RecordedIMUSource(record_dir,
262 | wy_bias=args.wy_bias,
263 | wz_bias=args.wz_bias)
264 | template_source = RecordedTemplateSource(record_dir)
265 |
266 | strided_patch_size = 4000
267 |
268 |     max_flow_time = 0.8 * (1.0/90.0) # Not critical here; recordings are processed offline, not in real time
269 |
270 | last_time = imu_source.latest_timestamp()
271 |
272 | ttc_depth = TTCDepth(frame_source, imu_source, template_source, K,
273 | visualize=args.visualize,
274 | wait_key=wait_key,
275 | max_flow_time=max_flow_time,
276 | max_visualize_rate=max_visualize_rate,
277 | save_visualization=save_visualization,
278 | save_visualization_dir=visualization_dir,
279 | plot_start_t=args.plot_start,
280 | plot_end_t=args.plot_end,
281 | strided_patch_size=strided_patch_size,
282 | april_ground_truth=args.april,
283 | april_resize_to=april_resize_to,
284 | max_april_rate=max_april_rate,
285 | max_delta=max_delta,
286 | vector_pub=vector_pub,
287 | print_timing=print_timing,
288 | ground_truth_source=ground_truth_source,
289 | affine_skip=args.affine_skip)
290 |
291 | print('Beginning')
292 | start_time = time.time()
293 |
294 | track_start_time = None
295 | track_start_frame_count = None
296 | track_end_time = None
297 | track_end_frame_count = None
298 | while True:
299 | ttc_depth.update()
300 | cv2.waitKey(wait_key)
301 |
302 | if track_start_time is None:
303 | if ttc_depth._affine_tracker is not None:
304 | track_start_time = time.time()
305 | track_start_frame_count = ttc_depth._frames_processed
306 |
307 | if frame_source.latest_timestamp() is None or imu_source.latest_timestamp() is None:
308 | track_end_time = time.time()
309 | track_end_frame_count = ttc_depth._frames_processed
310 | print('Out of data')
311 | break
312 |
313 | if ttc_depth._ttc_pose_observer_time_computed_to > last_time - params['trim_end']:
314 | track_end_time = time.time()
315 | track_end_frame_count = ttc_depth._frames_processed
316 | print('Early end')
317 | break
318 |
319 | end_time = time.time()
320 |
321 | tracked_time = track_end_time - track_start_time
322 | tracked_frames = track_end_frame_count - track_start_frame_count
323 |
324 | num_samples = tracked_frames
325 | frames_start_time = track_start_time
326 | frames_stop_time = track_end_time
327 | print('======================================================')
328 | print('Took {:.3f} seconds'.format(track_end_time-track_start_time))
329 | print('Processed {} frames {:.2f} ms per frame'.format(num_samples, 1000*(track_end_time-track_start_time)/num_samples))
330 | print('{:.2f} Hz'.format(num_samples / (frames_stop_time-frames_start_time)))
331 | print('======================================================')
332 |
333 | #print('{:.2f}x realtime'.format((frames_stop_time-frames_start_time)/ (end_time-start_time)))
334 |
335 | # num_samples = frame_source._sample_index
336 | # frames_start_time = frame_source._frame_ts.min()
337 | # frames_stop_time = frame_source._frame_ts.max()
338 | # print('Took {:.3f} seconds'.format(end_time-start_time))
339 | # print('Processed {} frames {:.2f} ms per frame'.format(num_samples, 1000*(end_time-start_time)/num_samples))
340 | # print('{:.2f} Hz'.format(num_samples / (end_time-start_time)))
341 | # print('{:.2f}x realtime'.format((frames_stop_time-frames_start_time)/ (end_time-start_time)))
342 |
343 | if args.save:
344 | file_name = os.path.join(results_dir, 'results.pickle')
345 | results_dict = vector_pub.get_data()
346 | with open(file_name, 'wb') as file:
347 | pickle.dump({'params': None,
348 | 'results': results_dict}, file)
349 |
350 | return num_samples, (frames_stop_time - frames_start_time)
351 |
352 | if __name__ == '__main__':
353 | parser = argparse.ArgumentParser()
354 | parser.add_argument('--dir', dest='dir', type=str, help='Directory to load data from. If not specified the latest in the default location is used')
355 | parser.add_argument('--all', dest='all', action='store_true', help='Run through all recordings in folder')
356 | parser.add_argument('--visualize', dest='visualize', action='store_true', help='Visualize')
357 | parser.add_argument('--wait', dest='wait', action='store_true', help='Wait for key press when visualizing')
358 | parser.add_argument('--april', dest='april', action='store_true', help='Use apriltag for ground truth')
359 | parser.add_argument('--preload', dest='preload', action='store_true', help='Read all images before processing for benchmarking')
360 | parser.add_argument('--preprocess', dest='preprocess', action='store_true', help='Preprocess all images when preloading')
361 |     parser.add_argument('--nopublish', dest='nopublish', action='store_true', help='Do not publish with ZMQ')
362 |     parser.add_argument('--bench', dest='bench', action='store_true', help='Enable preload, nopublish, and other benchmarking settings')
363 | parser.add_argument('--realtime', dest='realtime', action='store_true', help='Test ability to run in realtime with april feedback')
364 | parser.add_argument('--gt', dest='ground_truth_file', type=str, help='Load ground truth from file')
365 | parser.add_argument('--save', dest='save', action='store_true', help='Save results to results folder')
366 | parser.add_argument('--save_vis', dest='save_vis', action='store_true', help='Save visualization results at 30fps for video')
367 | parser.add_argument('--frame_skip', dest='frame_skip', type=int, default=0, help='camera frames to skip (decimation)')
368 | parser.add_argument('--affine_skip', dest='affine_skip', type=int, default=0, help='affine samples to skip (decimation)')
369 | parser.add_argument('--wy_bias', dest='wy_bias', type=float, default=0, help='Gyro bias y axis in deg per second')
370 | parser.add_argument('--wz_bias', dest='wz_bias', type=float, default=0, help='Gyro bias z axis in deg per second')
371 | parser.add_argument('--plot_start', dest='plot_start', type=float, default=0, help='Default visualization plot start time')
372 | parser.add_argument('--plot_end', dest='plot_end', type=float, default=60.0, help='Default visualization plot end time')
373 |
374 | args = parser.parse_args()
375 |
376 | if args.wait:
377 | wait_key = 0
378 | else:
379 | wait_key = 1
380 |
381 | if not args.dir:
382 | record_dir = latest_recording_dir()
383 | else:
384 | record_dir = args.dir
385 |
386 | if args.bench:
387 | args.preload = True
388 | #args.preprocess = True #Pre-converting to floating point is cheating
389 | args.nopublish = True
390 | num_runs = 1
391 | max_delta = 0.1
392 | print_timing = False # Can turn on for detailed info
393 | else:
394 | num_runs = 1
395 | max_delta = 0.01
396 | print_timing = False
397 |
398 | if args.realtime:
399 | args.preload = True
400 | args.april = True
401 | max_april_rate = 30.0
402 | max_delta = 0.1
403 | print_timing = False
404 | april_resize_to = None #(270,480)
405 | else:
406 | max_april_rate = 100.0
407 | april_resize_to=None
408 |
409 | if args.ground_truth_file:
410 | # TODO https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3
411 | ground_truth_source = pickle.load(open(args.ground_truth_file, 'rb'), encoding='latin1')['poses']
412 | else:
413 | ground_truth_source = None
414 |
415 | if args.save_vis:
416 | max_visualize_rate = 30.0
417 | save_visualization = True
418 | else:
419 | max_visualize_rate = 60.0
420 | save_visualization = False
421 |
422 | # TODO how to disable threading
423 | cv2.setNumThreads(1)
424 |
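   |     # Each SETTINGS key is a suffix appended to the results directory name (the
   |     # ttc_append argument of make_results_dir); its values override the matching
   |     # command-line arguments for that pass. The commented-out entries are
   |     # pre-made gyro-bias and frame-decimation sweeps.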
425 | if not args.bench:
426 | SETTINGS = {
427 | '': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 0, 'april': True},
428 | #'_affine_skip_1': {'affine_skip': 1, 'wy_bias': 0, 'wz_bias': 0},
429 | # '_affine_skip_2': {'affine_skip': 2, 'wy_bias': 0, 'wz_bias': 0, 'april': False},
430 | # '_affine_skip_3': {'affine_skip': 3, 'wy_bias': 0, 'wz_bias': 0, 'april': False},
431 | #'_wy_bias_0_5': {'affine_skip': 0, 'wy_bias': 0.5, 'wz_bias': 0},
432 | # '_wy_bias_1': {'affine_skip': 0, 'wy_bias': 1, 'wz_bias': 0, 'april': False},
433 | # '_wy_bias_2': {'affine_skip': 0, 'wy_bias': 2, 'wz_bias': 0, 'april': False},
434 | #'_wy_bias_3': {'affine_skip': 0, 'wy_bias': 3, 'wz_bias': 0},
435 | #'_wy_bias_4': {'affine_skip': 0, 'wy_bias': 4, 'wz_bias': 0},
436 | #'_wz_bias_0_5': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 0.5},
437 | # '_wz_bias_1': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 1, 'april': False},
438 | # '_wz_bias_2': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 2, 'april': False},
439 | #'_wz_bias_3': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 3},
440 | #'_wz_bias_4': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 4},
441 | #'_wz_bias_5': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 5},
442 | }
443 | else:
444 | SETTINGS = {
445 | '': {'affine_skip': 0, 'wy_bias': 0, 'wz_bias': 0, 'april': False},
446 | }
447 |
448 | if not args.all:
449 | for i in range(num_runs):
450 | run_on_directory(record_dir, args, max_visualize_rate, save_visualization)
451 | else:
452 | recording_dirs = sorted(glob.glob(os.path.join(record_dir, 'record_*')))
453 |
454 | total_track_time = 0
455 | total_frames = 0
456 |
457 | for directory in recording_dirs:
458 | #if directory != '../recordings_21_11_12/record_000004':
459 | # continue
460 |
461 | for key in SETTINGS.keys():
462 | print('Processing: {}'.format(directory))
463 | print('Settings', SETTINGS[key])
464 | args.affine_skip = SETTINGS[key]['affine_skip']
465 | args.wy_bias = SETTINGS[key]['wy_bias']
466 | args.wz_bias = SETTINGS[key]['wz_bias']
467 | args.april = SETTINGS[key]['april']
468 |
469 | for i in range(num_runs):
470 | track_frames, track_time = run_on_directory(directory, args, max_visualize_rate, save_visualization,
471 | settings_name=key)
472 |
473 | total_track_time += track_time
474 | total_frames += track_frames
475 |
476 |
477 | print('===================== Overall ========================')
478 | print('Took {:.3f} seconds'.format(total_track_time))
479 | print('Processed {} frames {:.2f} ms per frame'.format(total_frames, 1000*total_track_time/total_frames))
480 | print('{:.2f} Hz'.format(total_frames / total_track_time))
481 | print('======================================================')
482 |
--------------------------------------------------------------------------------
/ttc_depth_integrals_accel_bias.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_integrals_accel_bias.py
4 | # Available under MIT license
5 | #
6 | # Estimate distance using the Tau constraint in the presence of
7 | # a constant acceleration bias
8 | #
9 | # History:
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import numpy as np
15 | import numba as nb
16 | import time
17 |
18 | def cumulative_int(dt, x):
19 | return dt * np.cumsum(x)
20 |
21 | # Calculate Phi(t, t0) = exp(integral from t0 to t of zdot/z) = z(t)/z(t0)
22 | def Phi_t_t_0(dt, dot_z_over_z):
23 | int_dot_z_over_z = cumulative_int(dt, dot_z_over_z)
24 | phi = np.exp(int_dot_z_over_z)
25 | return phi
26 |
27 | def accel_z_z_star_t0_no_inv(times, dot_z_over_z, accel_z):
28 | # TODO all times are needed but they are assumed to be evenly spaced
29 | #mid0 = time.time()
30 | dt = times[1] - times[0]
31 |
32 | int_accel = cumulative_int(dt, accel_z)
33 | iint_accel = cumulative_int(dt, int_accel)
34 |
35 | phi = Phi_t_t_0(dt, dot_z_over_z)
36 |
37 | F_action = phi - (1 + ((times - times[0]) * dot_z_over_z[0]))
38 |
39 | times_squared = np.square(times-times[0])
40 | #total_time = (times[-1] - times[0])
41 | #int_times_squared_squared = (1./5.)*(total_time * total_time * total_time * total_time * total_time)
42 |
43 | a = dt * np.sum(np.square(F_action))
44 | b = dt * np.sum(times_squared*F_action)
45 | #c = (1./4.) * int_times_squared_squared
46 | c = (1./4.) * dt * np.sum(np.square(times_squared))
47 |
48 | d = -2.0 * dt * np.sum(iint_accel*F_action)
49 | e = - dt * np.sum(iint_accel*times_squared)
50 |
51 |     P = np.array(((a, b/2),
52 |                   (b/2, c)))
53 |     c_lin = np.array((d, e))  # linear-term vector; renamed so it does not shadow the scalar c above
54 | 
55 |     return P, c_lin
56 |
57 | def accel_z_z_star_t0(times, dot_z_over_z, accel_z):
58 | P, c = accel_z_z_star_t0_no_inv(times, dot_z_over_z, accel_z)
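   |     # The cost is the quadratic x^T P x + c^T x in x = (z(t0), accel bias),
   |     # so the minimizer solves P x = -c/2.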
59 |
60 | try:
61 | z_star = np.linalg.solve(P, -0.5*c)
62 | except np.linalg.LinAlgError:
63 |         z_star = (np.nan, np.nan)
64 |
65 | return z_star
66 |
67 | def accel_z_z_star_tf(times, dot_z_over_z, accel_z):
68 | #start = time.time()
69 | z_star = accel_z_z_star_t0(np.flip(times), np.flip(dot_z_over_z), np.flip(accel_z))
70 | #end = time.time()
71 | #print(end-start)
72 | return z_star
73 |
74 | def accel_x_z_star_t0_no_inv(times, dot_x_over_z, dot_z_over_z, accel_x):
75 | # TODO all times are needed but they are assumed to be evenly spaced
76 | #mid0 = time.time()
77 | dt = times[1] - times[0]
78 |
79 | int_accel = cumulative_int(dt, accel_x)
80 | iint_accel = cumulative_int(dt, int_accel)
81 |
82 | phi = Phi_t_t_0(dt, dot_z_over_z)
83 |
84 | F_action = cumulative_int(dt, dot_x_over_z*phi) - ((times-times[0])*dot_x_over_z[0])
85 |
86 | times_squared = np.square(times-times[0])
87 | # TODO using the closed form makes the results worse...
88 | #int_times_squared_squared = (1./5.)*((times[-1] - times[0])**5)
89 |
90 | a = dt * np.sum(np.square(F_action))
91 | b = dt * np.sum(times_squared*F_action)
92 | #c = (1./4.) * int_times_squared_squared
93 | c = (1./4.) * dt * np.sum(np.square(times_squared))
94 |
95 | d = -2.0 * dt * np.sum(iint_accel*F_action)
96 | e = - dt * np.sum(iint_accel*times_squared)
97 |
98 |     P = np.array(((a, b/2),
99 |                   (b/2, c)))
100 |     c_lin = np.array((d, e))  # linear-term vector; renamed so it does not shadow the scalar c above
101 | 
102 |     return P, c_lin
103 |
104 | def accel_x_z_star_t0(times, dot_x_over_z, dot_z_over_z, accel_x):
105 | P, c = accel_x_z_star_t0_no_inv(times, dot_x_over_z, dot_z_over_z, accel_x)
106 |
107 | try:
108 | z_star = np.linalg.solve(P, -0.5*c)
109 | except np.linalg.LinAlgError:
110 |         z_star = (np.nan, np.nan)
111 |
112 | return z_star
113 |
114 | def accel_x_z_star_tf(times, dot_x_over_z, dot_z_over_z, accel_x):
115 | #start = time.time()
116 | z_star = accel_x_z_star_t0(np.flip(times), np.flip(dot_x_over_z), np.flip(dot_z_over_z), np.flip(accel_x))
117 | #end = time.time()
118 | #print(end-start)
119 | return z_star
120 |
121 | # Setup numba
122 | # fastmath stays disabled: many intermediate values are extremely small and may be subnormal
123 | cumulative_int = nb.jit(nopython = True, cache = True, fastmath=False)(cumulative_int)
124 | Phi_t_t_0 = nb.jit(nopython = True, cache = True, fastmath=False)(Phi_t_t_0)
125 | accel_z_z_star_t0_no_inv = nb.jit(nopython = True, cache = True, fastmath=False)(accel_z_z_star_t0_no_inv)
126 | # accel_z_z_star_tf = nb.jit(nopython = True, cache = True, fastmath=False)(accel_z_z_star_tf)
127 | accel_x_z_star_t0_no_inv = nb.jit(nopython = True, cache = True, fastmath=False)(accel_x_z_star_t0_no_inv)
128 | # accel_x_z_star_tf = nb.jit(nopython = True, cache = True, fastmath=False)(accel_x_z_star_tf)
129 |
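130 | # Usage sketch (illustrative, not part of the original pipeline): a synthetic
131 | # oscillating approach with a known accelerometer bias, from which
132 | # accel_z_z_star_t0 should recover both z(t0) and the bias. Evenly spaced
133 | # samples are assumed, as required by the integrators above. Note that pure
134 | # constant acceleration is degenerate (z(t0) and the bias become inseparable),
135 | # so the test motion must have time-varying acceleration.
136 | if __name__ == '__main__':
137 |     times = np.linspace(0.0, 2.0, 2000)
138 |     z0, A, w, bias = 2.0, 0.3, 2.0 * np.pi, 0.05
139 |     z = z0 + A * np.sin(w * times)                  # oscillating depth, always positive
140 |     dot_z_over_z = A * w * np.cos(w * times) / z    # measured zdot/z (inverse TTC)
141 |     accel_meas = -A * w * w * np.sin(w * times) + bias  # accelerometer = true accel + bias
142 |     z_hat, bias_hat = accel_z_z_star_t0(times, dot_z_over_z, accel_meas)
143 |     print(z_hat, bias_hat)                          # expect approximately (2.0, 0.05)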
--------------------------------------------------------------------------------
/ttc_depth_plot_live.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_plot_live.py
4 | # Available under MIT license
5 | #
6 | # Plot states published by ttc_depth.py over a ZMQ socket
7 | #
8 | # History:
9 | # 08-30-21 - Levi Burner - Created File
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import struct
15 | import threading
16 |
17 | import numpy as np
18 | import zmq
19 |
20 | import matplotlib.pyplot as plt
21 | import matplotlib.animation as animation
22 |
23 | port = '5556'
24 | STOPPED = False
25 |
26 | sensors = {}
27 | sensors[b'R_fc_to_c'] = []
28 | sensors[b'p'] = []
29 | sensors[b'ttc_inv'] = []
30 | sensors[b'ttc_inv_gt'] = []
31 | sensors[b'pose_hat'] = []
32 | sensors[b'phi_pose_hat'] = []
33 | sensors[b'ground_truth_pose'] = []
34 | sensors[b'accel_meas_c'] = []
35 | sensors[b'gyro'] = []
36 | sensors[b'accel_z_hat'] = []
37 | sensors[b'phi_accel_z_hat'] = []
38 |
39 | def zmq_receive_thread(port):
40 | # Socket to talk to server
41 | context = zmq.Context()
42 | socket = context.socket(zmq.SUB)
43 |
44 | print('Collecting updates from server...')
45 | socket.connect ("tcp://localhost:{}".format(port))
46 |
47 | topicfilter = "ttc_depth".encode('ascii')
48 | socket.setsockopt(zmq.SUBSCRIBE, topicfilter)
49 |
50 | while not STOPPED:
51 | topic = socket.recv()
52 | base_name, sensor = topic.split(b'/')
53 |
54 | time_bytes = socket.recv()
55 |         t = struct.unpack('d', time_bytes)[0]  # unpack returns a 1-tuple
56 |
57 | md = socket.recv_json()
58 | msg = socket.recv()
59 | buf = memoryview(msg)
60 | x = np.frombuffer(buf, dtype=md['dtype']).reshape(md['shape'])
61 |
62 | try:
63 | sensors[sensor].append((t, x))
64 | except KeyError as e:
65 | print('Unrecognized sensor: {}'.format(sensor))
66 |
67 | # Plotting based on: https://learn.sparkfun.com/tutorials/graph-sensor-data-with-python-and-matplotlib/speeding-up-the-plot-animation
68 | def ttc_depth_plot_live_process():
69 | x_len = 300
70 | xs = list(range(0, x_len))
71 | fig = plt.figure()
72 |
73 | ROWS = 4
74 | COLS = 3
75 |
76 | Z_LIM = (-4.0, 0.5)
77 |
78 | F_LIM = (-4.0, 4.0)
79 |
80 | # Setup pose plots
81 | ly1 = [0] * x_len
82 | ly2 = [0] * x_len
83 | ly3 = [0] * x_len
84 | ly4 = [0] * x_len
85 | ly5 = [0] * x_len
86 | ly6 = [0] * x_len
87 |
88 |
89 | ax = fig.add_subplot(ROWS, COLS, 1)
90 | ax.set_ylim([-1, 1])
91 | lline1, = ax.plot(xs, ly1)
92 | lline4, = ax.plot(xs, ly4)
93 | llllllllllly1 = [0] * x_len
94 | lllllllllllline1, = ax.plot(xs, llllllllllly1)
95 | plt.ylabel('X (m)')
96 | plt.legend(['x_hat', 'x_gt', 'phi_x_hat'])
97 | plt.grid()
98 |
99 | ax = fig.add_subplot(ROWS, COLS, 2)
100 | ax.set_ylim([-1, 1])
101 | lline2, = ax.plot(xs, ly2)
102 | lline5, = ax.plot(xs, ly5)
103 | llllllllllly2 = [0] * x_len
104 | lllllllllllline2, = ax.plot(xs, llllllllllly2)
105 | plt.ylabel('Y (m)')
106 | plt.legend(['y_hat', 'y_gt', 'phi_y_hat'])
107 | plt.grid()
108 |
109 | ax = fig.add_subplot(ROWS, COLS, 3)
110 | ax.set_ylim(Z_LIM)
111 | lline3, = ax.plot(xs, ly3)
112 | lline6, = ax.plot(xs, ly6)
113 | llllllllllly3 = [0] * x_len
114 | lllllllllllline3, = ax.plot(xs, llllllllllly3)
115 | plt.ylabel('Z (m)')
116 | plt.legend(['z_hat', 'z_gt', 'phi_z_hat'])
117 | plt.grid()
118 |
119 | # Setup Orientation plot
120 | ax = fig.add_subplot(ROWS, COLS, 4, projection='3d')
121 | axis = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
122 |
123 | lines = []
124 | for a in axis:
125 | line_data = np.array([[0, 0, 0], [a[0], a[1], a[2]]]).transpose()
126 | line = ax.plot(line_data[0, :], line_data[1, :], line_data[2, :])[0]
127 | lines.append(line)
128 |
129 | ax.set_xlim3d([-1.0, 1.0])
130 | ax.set_xlabel('X')
131 | ax.set_ylim3d([-1.0, 1.0])
132 | ax.set_ylabel('Y')
133 | ax.set_zlim3d([-1.0, 1.0])
134 | ax.set_title('Integrated Orientation')
135 | plt.legend(['x', 'y', 'z'])
136 |
137 | ax = fig.add_subplot(ROWS, COLS, 5)
138 | ax.set_ylim([-10, 10])
139 | llly1 = [0] * x_len
140 | llly2 = [0] * x_len
141 | llly3 = [0] * x_len
142 | lllline1, = ax.plot(xs, llly1)
143 | lllline2, = ax.plot(xs, llly2)
144 | lllline3, = ax.plot(xs, llly3)
145 | plt.ylabel('accel (m/s^2)')
146 | plt.legend(['x','y','z'])
147 | plt.grid()
148 |
149 | ax = fig.add_subplot(ROWS, COLS, 6)
150 | ax.set_ylim([-np.pi, np.pi])
151 | lllly1 = [0] * x_len
152 | lllly2 = [0] * x_len
153 | lllly3 = [0] * x_len
154 | llllline1, = ax.plot(xs, lllly1)
155 | llllline2, = ax.plot(xs, lllly2)
156 | llllline3, = ax.plot(xs, lllly3)
157 | plt.ylabel('gyro (rad/s)')
158 | plt.legend(['x','y','z'])
159 | plt.grid()
160 |
161 |
162 | ax = fig.add_subplot(ROWS, COLS, 7)
163 | ax.set_ylim(F_LIM)
164 | llllly1 = [0] * x_len
165 | llllly2 = [0] * x_len
166 | lllllline1, = ax.plot(xs, llllly1)
167 | lllllline2, = ax.plot(xs, llllly2)
168 | plt.ylabel('depth scaled velocity 1/s')
169 | plt.legend(['dot x / z', 'dot x / z gt'])
170 | plt.grid()
171 |
172 |
173 | ax = fig.add_subplot(ROWS, COLS, 8)
174 | ax.set_ylim(F_LIM)
175 | lllllly1 = [0] * x_len
176 | lllllly2 = [0] * x_len
177 | llllllline1, = ax.plot(xs, lllllly1)
178 | llllllline2, = ax.plot(xs, lllllly2)
179 | plt.ylabel('depth scaled velocity 1/s')
180 | plt.legend(['dot y / z', 'dot y / z gt'])
181 | plt.grid()
182 |
183 | ax = fig.add_subplot(ROWS, COLS, 9)
184 | ax.set_ylim(F_LIM)
185 | llllllly1 = [0] * x_len
186 | llllllly2 = [0] * x_len
187 | lllllllline1, = ax.plot(xs, llllllly1)
188 | lllllllline2, = ax.plot(xs, llllllly2)
189 | plt.ylabel('depth scaled velocity 1/s')
190 | plt.legend(['dot z / z', 'dot z / z gt'])
191 | plt.grid()
192 |
193 | ax = fig.add_subplot(ROWS, COLS, 10)
194 | ax.set_ylim(Z_LIM)
195 | lllllllly1 = [0] * x_len
196 | llllllllly1 = [0] * x_len
197 | lllllllllly1 = [0] * x_len
198 | lllllllllllly1 = [0] * x_len
199 | llllllllllllly1 = [0] * x_len
200 | llllllllline1, = ax.plot(xs, lllllllly1)
201 | lllllllllline1, = ax.plot(xs, llllllllly1)
202 | llllllllllline1, = ax.plot(xs, lllllllllly1)
203 | llllllllllllline1, = ax.plot(xs, lllllllllllly1)
204 | lllllllllllllline1, = ax.plot(xs, llllllllllllly1)
205 | plt.ylabel('X (m)')
206 | plt.legend(['accel_x_gt', 'x_hat', 'x_gt', 'phi_x_hat', 'p_accel_x_gt'])
207 | plt.grid()
208 |
209 |
210 | ax = fig.add_subplot(ROWS, COLS, 11)
211 | ax.set_ylim(Z_LIM)
212 | lllllllly2 = [0] * x_len
213 | llllllllly2 = [0] * x_len
214 | lllllllllly2 = [0] * x_len
215 | lllllllllllly2 = [0] * x_len
216 | llllllllllllly2 = [0] * x_len
217 | llllllllline2, = ax.plot(xs, lllllllly2)
218 | lllllllllline2, = ax.plot(xs, llllllllly2)
219 | llllllllllline2, = ax.plot(xs, lllllllllly2)
220 | llllllllllllline2, = ax.plot(xs, lllllllllllly2)
221 | lllllllllllllline2, = ax.plot(xs, llllllllllllly2)
222 | plt.ylabel('Y (m)')
223 | plt.legend(['accel_y_gt', 'y_hat', 'y_gt', 'phi_y_hat', 'p_accel_y_gt'])
224 | plt.grid()
225 |
226 | ax = fig.add_subplot(ROWS, COLS, 12)
227 | ax.set_ylim(Z_LIM)
228 | lllllllly3 = [0] * x_len
229 | llllllllly3 = [0] * x_len
230 | lllllllllly3 = [0] * x_len
231 | lllllllllllly3 = [0] * x_len
232 | llllllllllllly3 = [0] * x_len
233 | llllllllline3, = ax.plot(xs, lllllllly3)
234 | lllllllllline3, = ax.plot(xs, llllllllly3)
235 | llllllllllline3, = ax.plot(xs, lllllllllly3)
236 | llllllllllllline3, = ax.plot(xs, lllllllllllly3)
237 | lllllllllllllline3, = ax.plot(xs, llllllllllllly3)
238 | plt.ylabel('Z (m)')
239 | plt.legend(['accel_z_gt', 'z_hat', 'z_gt', 'phi_z_hat', 'p_accel_z_gt'])
240 | plt.grid()
241 |
242 | # This function is called periodically from FuncAnimation
243 | def animate(i,
244 | sensors,
245 | ly1, ly2, ly3, ly4, ly5, ly6,
246 | lines,
247 | llly1, llly2, llly3,
248 | lllly1, lllly2, lllly3,
249 | llllly1, llllly2,
250 | lllllly1, lllllly2,
251 | llllllly1, llllllly2,
252 | lllllllly1, lllllllly2, lllllllly3,
253 | llllllllly1, llllllllly2, llllllllly3,
254 | lllllllllly1, lllllllllly2, lllllllllly3,
255 | llllllllllly1, llllllllllly2, llllllllllly3,
256 | lllllllllllly1, lllllllllllly2, lllllllllllly3,
257 | llllllllllllly1, llllllllllllly2, llllllllllllly3):
258 | # Plot linear acceleration
259 | if len(sensors[b'pose_hat']) > 0:
260 | ly1.append(sensors[b'pose_hat'][-1][1][0])
261 | ly2.append(sensors[b'pose_hat'][-1][1][1])
262 | ly3.append(sensors[b'pose_hat'][-1][1][2])
263 |
264 | ly1 = ly1[-x_len:]
265 | ly2 = ly2[-x_len:]
266 | ly3 = ly3[-x_len:]
267 |
268 | lline1.set_ydata(ly1)
269 | lline2.set_ydata(ly2)
270 | lline3.set_ydata(ly3)
271 |
272 | # On 4th row 3 plots
273 | llllllllly1.append(sensors[b'pose_hat'][-1][1][2])
274 | llllllllly2.append(sensors[b'pose_hat'][-1][1][2])
275 | llllllllly3.append(sensors[b'pose_hat'][-1][1][2])
276 |
277 | llllllllly1 = llllllllly1[-x_len:]
278 | llllllllly2 = llllllllly2[-x_len:]
279 | llllllllly3 = llllllllly3[-x_len:]
280 |
281 | lllllllllline1.set_ydata(llllllllly1)
282 | lllllllllline2.set_ydata(llllllllly2)
283 | lllllllllline3.set_ydata(llllllllly3)
284 |
285 | if len(sensors[b'phi_pose_hat']) > 0:
286 | llllllllllly1.append(sensors[b'phi_pose_hat'][-1][1][0])
287 | llllllllllly2.append(sensors[b'phi_pose_hat'][-1][1][1])
288 | llllllllllly3.append(sensors[b'phi_pose_hat'][-1][1][2])
289 |
290 | llllllllllly1 = llllllllllly1[-x_len:]
291 | llllllllllly2 = llllllllllly2[-x_len:]
292 | llllllllllly3 = llllllllllly3[-x_len:]
293 |
294 | lllllllllllline1.set_ydata(llllllllllly1)
295 | lllllllllllline2.set_ydata(llllllllllly2)
296 | lllllllllllline3.set_ydata(llllllllllly3)
297 |
298 | # On 4th row 3 plots
299 | lllllllllllly1.append(sensors[b'phi_pose_hat'][-1][1][2])
300 | lllllllllllly2.append(sensors[b'phi_pose_hat'][-1][1][2])
301 | lllllllllllly3.append(sensors[b'phi_pose_hat'][-1][1][2])
302 |
303 | lllllllllllly1 = lllllllllllly1[-x_len:]
304 | lllllllllllly2 = lllllllllllly2[-x_len:]
305 | lllllllllllly3 = lllllllllllly3[-x_len:]
306 |
307 | llllllllllllline1.set_ydata(lllllllllllly1)
308 | llllllllllllline2.set_ydata(lllllllllllly2)
309 | llllllllllllline3.set_ydata(lllllllllllly3)
310 |
311 | if len(sensors[b'ground_truth_pose']) > 0:
312 | ly4.append(sensors[b'ground_truth_pose'][-1][1][0])
313 | ly5.append(sensors[b'ground_truth_pose'][-1][1][1])
314 | ly6.append(sensors[b'ground_truth_pose'][-1][1][2])
315 |
316 | ly4 = ly4[-x_len:]
317 | ly5 = ly5[-x_len:]
318 | ly6 = ly6[-x_len:]
319 |
320 | lline4.set_ydata(ly4)
321 | lline5.set_ydata(ly5)
322 | lline6.set_ydata(ly6)
323 |
324 | # On 4th row 3 plots
325 | lllllllllly1.append(sensors[b'ground_truth_pose'][-1][1][2])
326 | lllllllllly2.append(sensors[b'ground_truth_pose'][-1][1][2])
327 | lllllllllly3.append(sensors[b'ground_truth_pose'][-1][1][2])
328 |
329 | lllllllllly1 = lllllllllly1[-x_len:]
330 | lllllllllly2 = lllllllllly2[-x_len:]
331 | lllllllllly3 = lllllllllly3[-x_len:]
332 |
333 | llllllllllline1.set_ydata(lllllllllly1)
334 | llllllllllline2.set_ydata(lllllllllly2)
335 | llllllllllline3.set_ydata(lllllllllly3)
336 |
337 | # Plot orientation axis
338 | if len(sensors[b'R_fc_to_c']) > 0:
339 | t, R_fc_to_c = sensors[b'R_fc_to_c'][-1]
340 | else:
341 | R_fc_to_c = np.eye(3)
342 |
343 | axis_rotated = R_fc_to_c @ axis
344 |
345 | for a, line in zip(axis_rotated.transpose(), lines):
346 | line_data = np.array([[0, 0, 0], [a[0], a[1], a[2]]]).transpose()
347 | line.set_data(line_data[0:2, :])
348 | line.set_3d_properties(line_data[2, :])
349 |
350 | # Plot acceleration
351 | if len(sensors[b'accel_meas_c']) > 0:
352 | llly1.append(sensors[b'accel_meas_c'][-1][1][0])
353 | llly2.append(sensors[b'accel_meas_c'][-1][1][1])
354 | llly3.append(sensors[b'accel_meas_c'][-1][1][2])
355 | llly1 = llly1[-x_len:]
356 | llly2 = llly2[-x_len:]
357 | llly3 = llly3[-x_len:]
358 |
359 | lllline1.set_ydata(llly1)
360 | lllline2.set_ydata(llly2)
361 | lllline3.set_ydata(llly3)
362 |
363 | # Plot gyro
364 | if len(sensors[b'gyro']) > 0:
365 | lllly1.append(sensors[b'gyro'][-1][1][0])
366 | lllly2.append(sensors[b'gyro'][-1][1][1])
367 | lllly3.append(sensors[b'gyro'][-1][1][2])
368 | lllly1 = lllly1[-x_len:]
369 | lllly2 = lllly2[-x_len:]
370 | lllly3 = lllly3[-x_len:]
371 |
372 | llllline1.set_ydata(lllly1)
373 | llllline2.set_ydata(lllly2)
374 | llllline3.set_ydata(lllly3)
375 |
376 |
377 | # Plot ttc_inv
378 | if len(sensors[b'ttc_inv']) > 0:
379 | llllly1.append(sensors[b'ttc_inv'][-1][1][0])
380 | llllly1 = llllly1[-x_len:]
381 |
382 | lllllly1.append(sensors[b'ttc_inv'][-1][1][1])
383 | lllllly1 = lllllly1[-x_len:]
384 |
385 | llllllly1.append(sensors[b'ttc_inv'][-1][1][2])
386 | llllllly1 = llllllly1[-x_len:]
387 |
388 | lllllline1.set_ydata(llllly1)
389 | llllllline1.set_ydata(lllllly1)
390 | lllllllline1.set_ydata(llllllly1)
391 |
392 | # Plot ttc_inv_gt
393 | if len(sensors[b'ttc_inv_gt']) > 0:
394 | llllly2.append(sensors[b'ttc_inv_gt'][-1][1][0])
395 | llllly2 = llllly2[-x_len:]
396 |
397 | lllllly2.append(sensors[b'ttc_inv_gt'][-1][1][1])
398 | lllllly2 = lllllly2[-x_len:]
399 |
400 | llllllly2.append(sensors[b'ttc_inv_gt'][-1][1][2])
401 | llllllly2 = llllllly2[-x_len:]
402 |
403 | lllllline2.set_ydata(llllly2)
404 | llllllline2.set_ydata(lllllly2)
405 | lllllllline2.set_ydata(llllllly2)
406 |
407 |
408 | # Plot accel_z_hat
409 | if len(sensors[b'accel_z_hat']) > 0:
410 | lllllllly1.append(sensors[b'accel_z_hat'][-1][1][0])
411 | lllllllly1 = lllllllly1[-x_len:]
412 |
413 | llllllllline1.set_ydata(lllllllly1)
414 |
415 | lllllllly2.append(sensors[b'accel_z_hat'][-1][1][1])
416 | lllllllly2 = lllllllly2[-x_len:]
417 |
418 | llllllllline2.set_ydata(lllllllly2)
419 |
420 | lllllllly3.append(sensors[b'accel_z_hat'][-1][1][2])
421 | lllllllly3 = lllllllly3[-x_len:]
422 |
423 | llllllllline3.set_ydata(lllllllly3)
424 |
425 | # Plot phi_accel_z_hat
426 | if len(sensors[b'phi_accel_z_hat']) > 0:
427 | llllllllllllly1.append(sensors[b'phi_accel_z_hat'][-1][1][0])
428 | llllllllllllly1 = llllllllllllly1[-x_len:]
429 |
430 | lllllllllllllline1.set_ydata(llllllllllllly1)
431 |
432 | llllllllllllly2.append(sensors[b'phi_accel_z_hat'][-1][1][1])
433 | llllllllllllly2 = llllllllllllly2[-x_len:]
434 |
435 | lllllllllllllline2.set_ydata(llllllllllllly2)
436 |
437 | llllllllllllly3.append(sensors[b'phi_accel_z_hat'][-1][1][2])
438 | llllllllllllly3 = llllllllllllly3[-x_len:]
439 |
440 | lllllllllllllline3.set_ydata(llllllllllllly3)
441 |
442 | return (lline1, lline2, lline3, lline4, lline5, lline6,
443 | lines[0], lines[1], lines[2],
444 | lllline1, lllline2, lllline3,
445 | llllline1, llllline2, llllline3,
446 | lllllline1, lllllline2,
447 | llllllline1, llllllline2,
448 | lllllllline1, lllllllline2,
449 | llllllllline1, llllllllline2, llllllllline3,
450 | lllllllllline1, lllllllllline2, lllllllllline3,
451 | llllllllllline1, llllllllllline2, llllllllllline3,
452 | lllllllllllline1, lllllllllllline2, lllllllllllline3,
453 | llllllllllllline1, llllllllllllline2, llllllllllllline3,
454 | lllllllllllllline1, lllllllllllllline2, lllllllllllllline3)
455 |
456 | # Set up plot to call animate() function periodically
457 | ani = animation.FuncAnimation(fig,
458 | animate,
459 | fargs=(sensors,
460 | ly1, ly2, ly3, ly4, ly5, ly6,
461 | lines,
462 | llly1, llly2, llly3,
463 | lllly1, lllly2, lllly3,
464 | llllly1, llllly2,
465 | lllllly1, lllllly2,
466 | llllllly1, llllllly2,
467 | lllllllly1, lllllllly2, lllllllly3,
468 | llllllllly1, llllllllly2, llllllllly3,
469 | lllllllllly1, lllllllllly2, lllllllllly3,
470 | llllllllllly1, llllllllllly2, llllllllllly3,
471 | lllllllllllly1, lllllllllllly2, lllllllllllly3,
472 | llllllllllllly1, llllllllllllly2, llllllllllllly3),
473 | interval=10,
474 | blit=True)
475 |
476 | plt.show()
477 |     print('ttc_depth_plot_live_process exiting')
478 |
479 | if __name__ == '__main__':
480 | zmq_thread = threading.Thread(target=zmq_receive_thread, args=(port,))
481 | zmq_thread.start()
482 |
483 | ttc_depth_plot_live_process()
484 |
485 | STOPPED = True
486 | zmq_thread.join()
487 |
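488 | # The send side must mirror the four recv() calls in zmq_receive_thread: topic,
489 | # packed timestamp, JSON metadata, and raw array bytes in one multipart message.
490 | # The repo's ZMQVectorPublisher is the real implementation; a minimal sketch:
491 | #
492 | #     def publish_vector(pub_socket, sensor, t, x):
493 | #         md = {'dtype': str(x.dtype), 'shape': x.shape}
494 | #         pub_socket.send(b'ttc_depth/' + sensor, zmq.SNDMORE)
495 | #         pub_socket.send(struct.pack('d', t), zmq.SNDMORE)
496 | #         pub_socket.send_json(md, zmq.SNDMORE)
497 | #         pub_socket.send(np.ascontiguousarray(x))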
--------------------------------------------------------------------------------
/ttc_depth_realsense.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_realsense.py
4 | # Available under MIT license
5 | #
6 | # Estimate distance in realtime with Phi, TTC, and AprilTag using a Realsense D435i camera
7 | # Supports saving the data to disk for later evaluation with ttc_depth_from_folder.py
8 | # and ttc_depth_calc_error.py
9 | #
10 | # History:
11 | # 07-22-21 - Levi Burner - Adapted file from ttc_depth_nx.py
12 | # 09-26-22 - Levi Burner - Open source release
13 | #
14 | ###############################################################################
15 |
16 | import argparse
17 | import json
18 | import glob
19 | from multiprocessing import (Process,
20 | Queue,
21 | resource_tracker,
22 | shared_memory,
23 | Value)
24 | import queue
25 | import os
26 | import pickle
27 | import time
28 | import traceback
29 |
30 | import cv2
31 | import numpy as np
32 |
33 | # If pyrealsense2 can be installed with
34 | #     pip install pyrealsense2
35 | # then the first import method works,
36 | # but if it was installed from source (as required on Ubuntu 22.04 and later)
37 | # the second import is needed
38 | try:
39 | import pyrealsense2 as rs
40 | rs.__version__
41 | except AttributeError as e:
42 | from pyrealsense2 import pyrealsense2 as rs
43 |
44 | from ttc_depth import TTCDepth
45 |
46 | from zmq_vector_publisher import ZMQVectorPublisher
47 |
48 | BASE_RECORD_PATH = '../recordings'
49 |
50 | def remove_shm_from_resource_tracker():
51 | """Monkey-patch multiprocessing.resource_tracker so SharedMemory won't be tracked
52 |
53 | More details at: https://bugs.python.org/issue38119
54 | """
55 |
56 | def fix_register(name, rtype):
57 | if rtype == "shared_memory":
58 | return
59 |         return resource_tracker._resource_tracker.register(name, rtype)
60 | resource_tracker.register = fix_register
61 |
62 | def fix_unregister(name, rtype):
63 | if rtype == "shared_memory":
64 | return
65 |         return resource_tracker._resource_tracker.unregister(name, rtype)
66 | resource_tracker.unregister = fix_unregister
67 |
68 | if "shared_memory" in resource_tracker._CLEANUP_FUNCS:
69 | del resource_tracker._CLEANUP_FUNCS["shared_memory"]
70 |
71 | class RealsenseIMUSource(object):
72 | def __init__(self, no_gyro, record_dir = None):
73 | self._no_gyro = no_gyro
74 | self._record_dir = record_dir
75 |
76 | self._queue = Queue()
77 | self._stop_process = Value('b', False)
78 | self._earliest_timestamp = Value('d', -1.0)
79 | self._earliest_timestamp_set = False
80 | self._latest_timestamp = Value('d', -1.0)
81 | self._backup = []
82 |
83 | self._last_accel_sample = np.array((0.0, 0.0, 0.0), dtype=np.float32)
84 | self._last_gyro_sample = np.array((0.0, 0.0, 0.0), dtype=np.float32)
85 |
86 | self._accel_list = []
87 | self._gyro_list = []
88 |
89 | def _add_sample(self, sample):
90 | self._latest_timestamp.value = sample[0]
91 | self._backup.append(sample)
92 |
93 | if not self._earliest_timestamp_set:
94 | self._earliest_timestamp.value = sample[0]
95 | self._earliest_timestamp_set = True
96 |
97 |         # Drain buffered samples into the multiprocessing queue (TODO: is the backup buffer needed?)
98 | while len(self._backup) > 0:
99 | try:
100 | self._queue.put_nowait(self._backup[0])
101 | self._backup.pop(0)
102 | except queue.Full:
103 | break
104 |
105 | def add_accel_sample(self, accel):
106 | ts = accel.timestamp / 1000.0
107 |
108 | data = accel.get_motion_data()
109 |
110 |         # The transform from the realsense accel frame to the camera frame is identity
111 | accel_c = np.array((data.x, data.y, data.z), dtype=np.float32)
112 |
113 | self._last_accel_sample = accel_c
114 | sample = np.concatenate(((ts,), accel_c, self._last_gyro_sample))
115 |
116 | self._accel_list.append(np.concatenate(((ts,), accel_c)).tolist())
117 |
118 | self._add_sample(sample)
119 |
120 | def add_gyro_sample(self, gyro):
121 | if not self._no_gyro:
122 | ts = gyro.timestamp / 1000.0
123 |
124 | data = gyro.get_motion_data()
125 | gyro = np.array((data.x, data.y, data.z), dtype=np.float32)
126 |
127 | self._last_gyro_sample = gyro
128 | sample = np.concatenate(((ts,), self._last_accel_sample, gyro))
129 |
130 | self._gyro_list.append(np.concatenate(((ts,), gyro)).tolist())
131 |
132 | self._add_sample(sample)
133 |
134 | def earliest_timestamp(self):
135 | return float(self._earliest_timestamp.value)
136 |
137 | def latest_timestamp(self):
138 | return float(self._latest_timestamp.value)
139 |
140 | def next_sample(self):
141 | try:
142 | sample = self._queue.get_nowait()
143 | except queue.Empty:
144 | sample = None
145 | return sample
146 |
147 | def signal_stop(self):
148 | self._stop_process.value = True
149 |
150 | def save_data(self):
151 | file_name = os.path.join(self._record_dir, 'imu.pickle')
152 |
153 | with open(file_name, 'wb') as file:
154 | pickle.dump({'accel': np.array(self._accel_list),
155 | 'gyro': np.array(self._gyro_list)}, file, protocol=2)
156 |
157 | def preprocess_image(frame, frame_gray):
158 | frame_gray[...] = frame.astype(np.float32) * (1.0 / 255.0)
159 | # = tmp # cv2.remap(tmp, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
160 |
161 | class RealsenseFrameSource(Process):
162 | def __init__(self, realsense_imu_source, record_dir=None):
163 | super(RealsenseFrameSource, self).__init__()
164 | self._queue = Queue()
165 | self._stop_process = Value('b', False)
166 | self._earliest_timestamp = Value('d', -1.0)
167 | self._earliest_timestamp_set = False
168 | self._latest_timestamp = Value('d', -1.0)
169 | self._width = WIDTH
170 | self._height = HEIGHT
171 | self._fps = FPS
172 | self._imager = 1
173 | self._num_frames = 0
174 |
175 | self._accel_hz = 250
176 | self._gyro_hz = 400
177 |
178 | self._template_buffer = np.zeros((self._height, self._width), dtype=np.float32)
179 |
180 | self._shared_memories = []
181 |
182 | self._realsense_imu_source = realsense_imu_source
183 | self._record_dir = record_dir
184 |
185 | self._frame_times = []
186 |
187 | cv2.setNumThreads(1)
188 |
189 | def run(self):
190 | remove_shm_from_resource_tracker()
191 |
192 | pipeline = rs.pipeline()
193 |
194 | config = rs.config()
195 | config.enable_stream(rs.stream.infrared, self._imager, self._width, self._height, rs.format.y8, self._fps)
196 | config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, self._accel_hz)
197 | config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, self._gyro_hz)
198 |
199 | queue = rs.frame_queue(200)
200 | self._pipeline_profile = pipeline.start(config, queue)
201 |
202 | # https://community.intel.com/t5/Items-with-no-label/How-to-enable-disable-emitter-through-python-wrapper/td-p/547900
203 | device = self._pipeline_profile.get_device()
204 |         depth_sensor = device.query_sensors()[0] # TODO do not hardcode the sensor index
205 | depth_sensor.set_option(rs.option.emitter_enabled, 0)
206 |
207 | try:
208 | while not self._stop_process.value:
209 | frame = queue.wait_for_frame()
210 | if frame:
211 | if frame.is_frameset():
212 | self._process_frameset(frame.as_frameset())
213 | elif frame.is_motion_frame():
214 | profile = frame.get_profile()
215 | if profile.stream_type() == rs.stream.accel:
216 | self._process_accel(frame.as_motion_frame())
217 | elif profile.stream_type() == rs.stream.gyro:
218 | self._process_gyro(frame.as_motion_frame())
219 | else:
220 | time.sleep(0.001)
221 | finally:
222 | pipeline.stop()
223 |
224 | if self._record_dir:
225 | self._realsense_imu_source.save_data()
226 |
227 | file_name = os.path.join(self._record_dir, 'frame_metadata.pickle')
228 | with open(file_name, 'wb') as file:
229 | pickle.dump({'ts': np.array(self._frame_times)}, file, protocol=2)
230 |
231 | def _process_frameset(self, frameset):
232 | ts = frameset.timestamp / 1000.0
233 | self._frame_times.append(ts)
234 |
235 | frame = np.asanyarray(frameset.get_infrared_frame().get_data())
236 |
237 | shm = shared_memory.SharedMemory(create=True, size=self._template_buffer.nbytes)
238 | frame_gray = np.ndarray(self._template_buffer.shape,
239 | dtype=self._template_buffer.dtype,
240 | buffer=shm.buf)
241 | preprocess_image(frame, frame_gray)
242 |
243 | self._queue.put((ts, shm))
244 | shm.close()
245 |
246 | if not self._earliest_timestamp_set:
247 | self._earliest_timestamp.value = ts
248 | self._earliest_timestamp_set = True
249 | self._latest_timestamp.value = ts
250 |
251 | if self._record_dir:
252 | image_dir = os.path.join(record_dir, 'images')
253 | frame_name = os.path.join(image_dir, 'frame_{:06d}.npy'.format(self._num_frames))
254 | with open(frame_name, 'wb') as f:
255 | np.save(f, frame)
256 |
257 | self._num_frames += 1
258 |
259 | def _process_accel(self, motion_frame):
260 | self._realsense_imu_source.add_accel_sample(motion_frame)
261 |
262 | def _process_gyro(self, motion_frame):
263 | self._realsense_imu_source.add_gyro_sample(motion_frame)
264 |
265 | def earliest_timestamp(self):
266 | return float(self._earliest_timestamp.value)
267 |
268 | def latest_timestamp(self):
269 | return float(self._latest_timestamp.value)
270 |
271 | def next_sample(self):
272 | try:
273 | (ts, shm) = self._queue.get_nowait()
274 | frame_gray = np.ndarray(self._template_buffer.shape,
275 | dtype=self._template_buffer.dtype,
276 | buffer=shm.buf)
277 | sample = (ts, frame_gray)
278 | self._shared_memories.append(shm)
279 | return sample
280 | except queue.Empty:
281 | sample = None
282 | return sample
283 |
284 | def free_sample(self):
285 | if len(self._shared_memories) > 0:
286 | shm = self._shared_memories.pop(0)
287 | shm.close()
288 | shm.unlink()
289 |
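   |     # SharedMemory lifecycle: the producer process creates one segment per frame,
   |     # writes the preprocessed image into it, queues (ts, shm), and close()s its own
   |     # handle; the consumer maps it in next_sample() and close()+unlink()s it in
   |     # free_sample(). remove_shm_from_resource_tracker() keeps the tracker from
   |     # reclaiming segments that are still in flight between the two processes.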
290 | def signal_stop(self):
291 | self._stop_process.value = True
292 |
293 | class RealsenseTemplateSource(object):
294 | def __init__(self, patch_dim=50, resettable=True, record_dir=None):
295 | self._resettable = resettable
296 | self._record_dir = record_dir
297 | self._patch_params = {
298 | 'patch_start_time': time.time() + 2.5,
299 | 'patch_end_time': time.time() + 10000.0,
300 | 'patch_coordinates': (int(WIDTH/2 - patch_dim), int(HEIGHT/2 - patch_dim), int(WIDTH/2 + patch_dim), int(HEIGHT/2 + patch_dim))
301 | }
302 | self._patches = []
303 | self._create_patch(self._patch_params['patch_start_time'])
304 |
305 | self._patch_index = -1
306 |
307 | self._exit = False
308 |
309 | self._force_new_patch = False
310 |
311 | def force_new_patch(self):
312 | self._force_new_patch = True
313 |
314 | def _create_patch(self, time):
315 | self._patches.append((time,
316 | time + 100000.0,
317 | self._patch_params['patch_coordinates']))
318 |
319 | def current_patch_valid(self, time):
320 | if True: #self._resettable:
321 | key = cv2.pollKey()
322 | #if key != -1:
323 | # print('key {}'.format(key))
324 | if key == 114 or self._force_new_patch: # r key
325 | self._create_patch(time)
326 | self._force_new_patch = False
327 | return False
328 | elif key == 116: # t key
329 | return 'reset observer'
330 | elif key == 113: # q key
331 | self._exit = True
332 |
333 | if self._patch_index < 0:
334 | raise Exception('Current patch valid called before patch selected')
335 | return time < self._patches[self._patch_index][1]
336 |
337 | def get_new_patch(self, time):
338 | next_patch_index = self._patch_index + 1
339 |
340 | if next_patch_index >= len(self._patches):
341 | return None
342 |
343 | if time >= self._patches[next_patch_index][0]:
344 | self._patch_index = next_patch_index
345 | return self._patches[next_patch_index][2]
346 |
347 | def save_data(self):
348 | file_name = os.path.join(self._record_dir, 'templates_live.pickle')
349 | with open(file_name, 'wb') as file:
350 | pickle.dump({'patches': self._patches}, file, protocol=2)
351 |
352 | # Hacky way to get the realsense intrinsics from the camera
353 | def get_realsense_intrinsics():
354 | pipeline = rs.pipeline()
355 |
356 | config = rs.config()
357 | config.enable_stream(rs.stream.infrared, 1, WIDTH, HEIGHT, rs.format.y8, FPS)
358 | config.enable_stream(rs.stream.accel, rs.format.motion_xyz32f, 250)
359 | config.enable_stream(rs.stream.gyro, rs.format.motion_xyz32f, 400)
360 |
361 | queue = rs.frame_queue(200)
362 | pipeline_profile = pipeline.start(config, queue)
363 | pipeline.stop()
364 |
365 | # TODO Shouldn't the imager be specified?
366 | profile = pipeline_profile.get_stream(rs.stream.infrared)
367 | intrinsics = profile.as_video_stream_profile().get_intrinsics()
368 | fx = intrinsics.fx
369 | fy = intrinsics.fy
370 | ppx = intrinsics.ppx
371 | ppy = intrinsics.ppy
372 |
373 | dist_coeffs = intrinsics.coeffs # TODO use these, they are all zero right now
374 |
375 | K = np.array(((fx, 0, ppx),
376 | (0, fy, ppy),
377 | (0, 0, 1)),
378 | dtype=np.float32)
379 | D = np.array((0.0, 0.0, 0.0, 0.0), dtype=np.float32)
380 | resolution = (WIDTH, HEIGHT)
381 |
382 | map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, resolution, cv2.CV_16SC2)
383 |
384 | accel_profile = pipeline_profile.get_stream(rs.stream.accel)
385 | gyro_profile = pipeline_profile.get_stream(rs.stream.gyro)
386 |
387 | accel_extrinsics = accel_profile.get_extrinsics_to(profile)
388 | gyro_extrinsics = gyro_profile.get_extrinsics_to(profile)
389 |
390 | print('accel extrinsics {}'.format(accel_extrinsics))
391 |     print('gyro extrinsics {}'.format(gyro_extrinsics))
392 |
393 | return K, D, map1, map2, resolution
394 |
395 | def make_record_dir():
396 | previous_directories = sorted(glob.glob(os.path.join(BASE_RECORD_PATH, 'record_*')))
397 | if len(previous_directories) == 0:
398 | next_recording_number = 0
399 | else:
400 | next_recording_number = int(previous_directories[-1][-6:]) + 1
401 |
402 | record_dir = os.path.join(BASE_RECORD_PATH, 'record_{:06d}'.format(next_recording_number))
403 | images_dir = os.path.join(record_dir, 'images')
404 |
405 | os.mkdir(record_dir)
406 | os.mkdir(images_dir)
407 | return record_dir
408 |
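   | # A recording directory produced here is what ttc_depth_from_folder.py consumes:
   | #     record_NNNNNN/
   | #         images/frame_000000.npy ...
   | #         frame_metadata.pickle, imu.pickle, intrinsics.pickle, templates_live.pickle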
409 |
410 | if __name__ == '__main__':
411 | parser = argparse.ArgumentParser()
412 | parser.add_argument('--visualize', dest='visualize', action='store_true', help='Visualize')
413 | parser.add_argument('--wait', dest='wait', action='store_true', help='Wait for key press when visualizing')
414 | parser.add_argument('--high', dest='high', action='store_true', help='Highest fps possible with limited fov')
415 | parser.add_argument('--nogyro', dest='no_gyro', action='store_true', help='Disable gyro stabilization')
416 | parser.add_argument('--april', dest='april', action='store_true', help='Use apriltag for ground truth')
417 | parser.add_argument('--record', dest='record', action='store_true', help='Record data and events')
418 |
419 | args = parser.parse_args()
420 |
421 | if args.wait:
422 | wait_key = 0
423 | else:
424 | wait_key = 1
425 |
426 | if args.high:
427 | # Highest fps super narrow vertical fov
428 | WIDTH = 848
429 | HEIGHT = 100
430 | FPS = 300
431 | patch_dim = HEIGHT / 4
432 | strided_patch_size = patch_dim**2
433 | max_flow_time = 0.5 * (1.0/FPS) # Works well enough
434 | else:
435 | # Highest fps with standard fov
436 | WIDTH = 848
437 | HEIGHT = 480
438 | FPS = 90
439 | patch_dim = 50
440 | strided_patch_size = 4000 # Track 400 pixels
441 | max_flow_time = 0.5 * (1.0/FPS) # Works well enough
442 |
443 |
444 | if args.record:
445 | record_dir = make_record_dir()
446 | else:
447 | record_dir = None
448 |
449 | K, D, map1, map2, resolution = get_realsense_intrinsics()
450 |
451 | if record_dir:
452 | file_name = os.path.join(record_dir, 'intrinsics.pickle')
453 | with open(file_name, 'wb') as file:
454 | pickle.dump({'K': K,
455 | 'D': D,
456 | 'resolution': resolution}, file, protocol=2)
457 |
458 | imu_source = RealsenseIMUSource(no_gyro = args.no_gyro, record_dir=record_dir)
459 | frame_source = RealsenseFrameSource(imu_source, record_dir=record_dir)
460 | template_source = RealsenseTemplateSource(patch_dim=patch_dim,
461 | resettable = not args.high,
462 | record_dir = record_dir)
463 |
464 | vector_pub = ZMQVectorPublisher()
465 |
466 | ttc_depth = TTCDepth(frame_source, imu_source, template_source, K,
467 | visualize=args.visualize,
468 | wait_key=wait_key,
469 | max_flow_time=max_flow_time,
470 | strided_patch_size=strided_patch_size,
471 | april_ground_truth=args.april,
472 | april_resize_to=None, #(270, 480),
473 | vector_pub = vector_pub)
474 |
475 | cv2.setNumThreads(1)
476 | try:
477 | frame_source.start()
478 |
479 | start_collection_time = time.time()
480 |
481 | while not template_source._exit:
482 | #print(time.time() - start_time)
483 | imu_start_compute_time = ttc_depth._imu_time_computed_to
484 | flow_start_compute_time = ttc_depth._flow_time_computed_to
485 | observer_start_compute_time = ttc_depth._ttc_pose_observer_time_computed_to
486 | start_time = time.time()
487 | ttc_depth.update()
488 | end_time = time.time()
489 | imu_end_compute_time = ttc_depth._imu_time_computed_to
490 | flow_end_compute_time = ttc_depth._flow_time_computed_to
491 | observer_end_compute_time = ttc_depth._ttc_pose_observer_time_computed_to
492 |
493 | if imu_start_compute_time is not None:
494 | real_delta = end_time-start_time
495 | imu_delta = imu_end_compute_time - imu_start_compute_time
496 | flow_delta = flow_end_compute_time - flow_start_compute_time
497 | observer_delta = observer_end_compute_time - observer_start_compute_time
498 |
499 | if imu_delta > 0:
500 |                     # TODO this compares system time with hardware timestamps, it doesn't make sense
501 | imu_lag = end_time - imu_end_compute_time
502 | flow_lag = end_time - flow_end_compute_time
503 | observer_lag = end_time - observer_end_compute_time
504 |
505 | if (observer_lag > 0.25):
506 | template_source.force_new_patch()
507 |
508 | print('real {:.03f} imu {:.03f} ratio {:.03f} imu lag {:.03f} flow lag {:.03f} obs lag {:.03f}'.format(
509 | real_delta, imu_delta, imu_delta/real_delta, imu_lag, flow_lag, observer_lag))
510 | #print('{:.02f}'.format(ttc_depth._ttc_list[-1][1]))
511 | #print('{:.01f}'.format(ttc_depth._z_hat_list[-1][1]))
512 |
513 | if time.time() - start_collection_time > 10000.0:
514 | print('Ran for 10000 seconds, exiting')
515 | frame_source.signal_stop()
516 |
517 |                 print('Joining frame source')
518 | frame_source.join()
519 | break
520 |
521 | if imu_start_compute_time is None or observer_delta < 0.1:
522 | update_delta = end_time - start_time
523 | # Rate limit
524 | sleep_delta = (1.0/1000.0) - update_delta
525 | if sleep_delta > 0:
526 | time.sleep(sleep_delta)
527 |
528 | except Exception as e:
529 | print('Exception caught')
530 | print(traceback.format_exc())
531 |
532 | finally:
533 | print('Attempting to clean up data sources')
534 | imu_source.signal_stop()
535 | frame_source.signal_stop()
536 | frame_source.join()
537 |
538 | if args.record:
539 | template_source.save_data()
540 |
541 |         print('Success, ignore any resource_tracker warnings')
542 |
--------------------------------------------------------------------------------
/ttc_depth_robomaster.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_depth_robomaster.py
4 | # Available under MIT license
5 | #
6 | # Run experiments with RoboMaster robot for TTCDist paper
7 | #
8 | # History:
9 | # 09-02-22 - Levi Burner - Adapted file from ttc_depth_realsense.py
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import argparse
15 | import json
16 | import glob
17 | from multiprocessing import (Process,
18 | Queue,
19 | resource_tracker,
20 | shared_memory,
21 | Value)
22 | import queue
23 | import os
24 | import pickle
25 | import time
26 | import traceback
27 |
28 | import cv2
29 | import numpy as np
30 |
31 | from ttc_depth import TTCDepth
32 |
33 | from zmq_vector_publisher import ZMQVectorPublisher
34 |
35 | import robomaster
36 | from robomaster import robot
37 |
38 | BASE_RECORD_PATH = '../robomaster_recordings'
39 |
40 | def remove_shm_from_resource_tracker():
41 | """Monkey-patch multiprocessing.resource_tracker so SharedMemory won't be tracked
42 |
43 | More details at: https://bugs.python.org/issue38119
44 | """
45 |
46 | def fix_register(name, rtype):
47 | if rtype == "shared_memory":
48 | return
49 |         return resource_tracker._resource_tracker.register(name, rtype)
50 | resource_tracker.register = fix_register
51 |
52 | def fix_unregister(name, rtype):
53 | if rtype == "shared_memory":
54 | return
55 |         return resource_tracker._resource_tracker.unregister(name, rtype)
56 | resource_tracker.unregister = fix_unregister
57 |
58 | if "shared_memory" in resource_tracker._CLEANUP_FUNCS:
59 | del resource_tracker._CLEANUP_FUNCS["shared_memory"]
60 |
61 | class RoboMasterIMUSource(object):
62 | def __init__(self, no_gyro, record_dir = None, b=1.0, efference=False):
63 | self._no_gyro = no_gyro
64 | self._record_dir = record_dir
65 |
66 | self._queue = Queue()
67 | self._pose_hat_queue = Queue()
68 | self._earliest_timestamp = Value('d', -1.0)
69 | self._earliest_timestamp_set = False
70 | self._latest_timestamp = Value('d', -1.0)
71 | self._exit_everything = Value('b', False)
72 | self._backup = []
73 |
74 | self._last_accel_sample = np.array((0.0, 0.0, 0.0), dtype=np.float32)
75 | self._last_gyro_sample = np.array((0.0, 0.0, 0.0), dtype=np.float32)
76 |
77 | self._accel_list = []
78 | self._gyro_list = []
79 | self._gyro_b = None
80 |
81 | self._u_list = []
82 | self._u_t_list = []
83 | self._int_u_list = []
84 | self._x_t_list = []
85 | self._x_hat_list = []
86 |
87 | self._E_dt = 1/50.0
88 | self._ts_hat = None
89 | self._time_filter_alpha = 0.01
90 | self._t0 = None
91 |
92 | self.int_u = np.zeros((3,))
93 | self.last_t = None
94 | self.last_pose = None
95 |
96 | self.first_pose_t = None
97 | self.stop = False
98 |
99 | if b is None:
100 | self._b = 1.0
101 | else:
102 | self._b = b
103 | self._efference = efference
104 |
105 | def add_pose_hat(self, t, pose_hat):
106 | self._pose_hat_queue.put((t, pose_hat))
107 |
108 | def get_latest_pose(self):
109 | sample = None
110 | while True:
111 | try:
112 | sample = self._pose_hat_queue.get_nowait()
113 | except queue.Empty:
114 | break
115 | return sample
116 |
117 | def add_sample(self, imu_sample):
118 | self._ts_hat = time.time()
119 | imu_sample = np.array(imu_sample)
120 | acc_x_r, acc_y_r, acc_z_r, gyro_x_r, gyro_y_r, gyro_z_r = imu_sample
121 |         sample = np.array((self._ts_hat, acc_y_r, acc_z_r, acc_x_r, gyro_y_r, gyro_z_r, gyro_x_r)) # reorder axes from the robot body frame into the camera frame
122 | 
123 |         sample[1:4] *= 9.81 # convert accelerometer samples from g to m/s^2
124 |
125 | if self._gyro_b is not None:
126 | sample[4:7] -= self._gyro_b
127 |
128 | if self._no_gyro:
129 | sample[4:7] = 0
130 |
131 | self._accel_list.append(np.concatenate(((self._ts_hat,), sample[1:4])))
132 | self._gyro_list.append(np.concatenate(((self._ts_hat,), sample[4:7])))
133 |
134 |         if len(self._gyro_list) < 100: # wait until there are enough samples to estimate the gyro bias
135 |             return
136 |         elif len(self._gyro_list) == 100:
137 |             gyro_arr = np.array(self._gyro_list)
138 |             self._gyro_b = np.mean(gyro_arr[:, 1:4], axis=0) # bias estimate assumes the robot is stationary at startup
139 |
140 | if self._t0 is None:
141 | self._t0 = time.time()
142 |
143 | t_abs = time.time()
144 | t = t_abs - self._t0
145 |
146 | if self.last_t is None:
147 | self.last_t = t - (1.0 / 50.0)
148 |
149 | dt = t - self.last_t
150 | self.last_t = t
151 |
152 |         l_over_d_w = (0.2 / 1.0) # amplitude scale of the reference trajectory
153 | 
154 |         w_t = np.pi # angular frequency of the sinusoidal reference (rad/s)
155 |         x_t = np.array([[0.0, 0.0, -1.0*l_over_d_w / w_t * np.cos(w_t*t) - 5.0*l_over_d_w],
156 |                         [0.0, 0.0, 1.0*l_over_d_w * np.sin(w_t*t)]]) # desired [position; velocity]
157 |         u_t = np.array( [0.0, 0.0, w_t*1.0*l_over_d_w * np.cos(w_t*t)]) # feedforward acceleration
158 |
159 | try:
160 | pose = self.get_latest_pose()
161 |
162 | if pose is None and self.last_pose is None:
163 | u = u_t
164 | elif pose is None and self.last_pose is not None:
165 | pose = (self.last_pose_t, self.last_pose)
166 |
167 | if pose is not None:
168 | pose_t, pose = pose
169 |
170 | if self.first_pose_t is None:
171 | self.first_pose_t = pose_t
172 |
173 | if self.last_pose is None:
174 | self.last_pose = pose
175 | dpose = (pose - self.last_pose) / dt
176 | self.last_pose = pose
177 | self.last_pose_t = pose_t
178 | hat_x = np.vstack((pose, dpose))
179 | self._x_hat_list.append(np.concatenate(((t_abs,), hat_x.flatten())))
180 |
181 |                 K = np.array([[2/1.0, 2/1.0]]) # proportional and derivative feedback gains
182 | 
183 |                 if pose_t - self.first_pose_t > 5.0: # after 5 s of pose feedback, hold a constant setpoint
184 |                     x_t = np.array([[0.0, 0.0, -2.5*l_over_d_w],
185 |                                     [0.0, 0.0, 0.0]])
186 |                     u_t = np.array([0.0, 0.0, 0.0])
187 |                     e = x_t - hat_x
188 |                     if False: # disabled: originally stop when np.abs(e[0, 2]) < l_over_d_w / 100.0 and np.abs(e[1, 2]) < l_over_d_w / 100.0
189 | # self._ep_gripper.close(power=50)
190 | self.stop = True
191 | self.t_stop = time.time()
192 |
193 | u_e = np.array([0.0, 0.0, 0.0])
194 | else:
195 | u_e = (K @ e).flatten()
196 |
197 | if self.stop:
198 | if time.time() - self.t_stop > 2.0:
199 | self._exit_everything.value = True
200 |
201 | u = u_t + u_e
202 | else:
203 | u_e = (K @ (x_t - hat_x)).flatten()
204 | u = u_t + u_e
205 |
206 | self.int_u += dt*u
207 | self._u_list.append(np.concatenate(((t_abs,), u)))
208 | self._int_u_list.append(np.concatenate(((t_abs,), self.int_u)))
209 | except Exception as e:
210 | print(traceback.format_exc())
211 |
212 | self._x_t_list.append(np.concatenate(((t_abs,), x_t.flatten())))
213 | self._u_t_list.append(np.concatenate(((t_abs,), u_t)))
214 |
215 | try:
216 | if not self.stop:
217 |                 self._ep_chassis.drive_speed(x=self._b*self.int_u[2], y=self._b*self.int_u[0], timeout=0.5) # the integrated acceleration command is sent as a velocity setpoint
218 |                 a_efference = np.array([u[0], 0.0, u[2]]) # efference copy of the commanded acceleration
219 | else:
220 | self._ep_chassis.drive_speed(x=0, y=0, timeout=0.5)
221 | a_efference = np.array([0, 0.0, 0])
222 | except Exception as e:
223 | print(traceback.format_exc())
224 | # print(sample[1:4], a_efference)
225 |
226 | # Overwrite IMU measurements with efference copy
227 | if self._efference:
228 | sample[1:4] = a_efference
229 |
230 |
231 | self._latest_timestamp.value = sample[0]
232 | self._backup.append(sample)
233 |
234 | if not self._earliest_timestamp_set:
235 | self._earliest_timestamp.value = sample[0]
236 | self._earliest_timestamp_set = True
237 |
238 |         # Forward any backlogged samples to the multiprocess queue (TODO: is the backup buffer needed?)
239 | while len(self._backup) > 0:
240 | try:
241 | self._queue.put_nowait(self._backup[0])
242 | self._backup.pop(0)
243 | except queue.Full:
244 | break
245 |
246 | def earliest_timestamp(self):
247 | return float(self._earliest_timestamp.value)
248 |
249 | def latest_timestamp(self):
250 | return float(self._latest_timestamp.value)
251 |
252 | def next_sample(self):
253 | try:
254 | sample = self._queue.get_nowait()
255 | except queue.Empty:
256 | sample = None
257 | return sample
258 |
259 | def save_data(self):
260 | file_name = os.path.join(self._record_dir, 'imu.pickle')
261 |
262 | with open(file_name, 'wb') as file:
263 | pickle.dump({'accel': np.array(self._accel_list),
264 | 'gyro': np.array(self._gyro_list),
265 | 'u': np.array(self._u_list),
266 | 'u_t': np.array(self._u_t_list),
267 | 'int_u': np.array(self._int_u_list),
268 | 'x_t': np.array(self._x_t_list),
269 | 'x_hat': np.array(self._x_hat_list),
270 | }, file, protocol=2)
271 |
272 |
273 | def preprocess_image(frame, frame_gray):
274 | frame_gray_uint8 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
275 | tmp = frame_gray_uint8.astype(np.float32) * (1.0 / 255.0)
276 |     frame_gray[...] = cv2.remap(tmp, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) # map1/map2 are module-level undistortion maps built in __main__
277 |
278 | class RoboMasterFrameSource(Process):
279 | def __init__(self, imu_source, record_dir=None):
280 | super(RoboMasterFrameSource, self).__init__()
281 | self._queue = Queue()
282 | self._stop_process = Value('b', False)
283 | self._earliest_timestamp = Value('d', -1.0)
284 | self._earliest_timestamp_set = False
285 | self._latest_timestamp = Value('d', -1.0)
286 | self._width = WIDTH
287 | self._height = HEIGHT
288 | self._fps = FPS
289 | self._num_frames = 0
290 |
291 | self._template_buffer = np.zeros((self._height, self._width), dtype=np.float32)
292 |
293 | self._shared_memories = []
294 |
295 | self._imu_source = imu_source
296 | self._record_dir = record_dir
297 |
298 | self._frame_times = []
299 |
300 | self._E_dt = 1/FPS
301 | self._ts_hat = None
302 | self._time_filter_alpha = 0.01
303 |
304 | # self._last_receive_ts = time.time()
305 |
306 | cv2.setNumThreads(1)
307 |
308 | def run(self):
309 | remove_shm_from_resource_tracker()
310 |
311 | ep_robot = robot.Robot()
312 | ep_robot.initialize(conn_type="sta")
313 |
314 | # ep_robot.gripper.open(power=50)
315 | ep_robot.robotic_arm.moveto(x=200, y=40).wait_for_completed()
316 |
317 |
318 | self._imu_source._ep_chassis = ep_robot.chassis
319 | self._imu_source._ep_gripper = ep_robot.gripper
320 |
321 | ep_robot.chassis.sub_imu(freq=50, callback=self._imu_source.add_sample)
322 |
323 | ep_camera = ep_robot.camera
324 | ep_camera.start_video_stream(display=False)
325 |
326 | try:
327 | while not self._stop_process.value:
328 | frame = ep_camera.read_cv2_image(strategy='pipeline')
329 | receive_ts = time.time()
330 |
331 | # delta = receive_ts - self._last_receive_ts
332 | # print(1.0 / delta)
333 | # self._last_receive_ts = receive_ts
334 |
335 | # if self._ts_hat is None:
336 | # self._ts_hat = receive_ts
337 | # else:
338 | # self._ts_hat += self._E_dt
339 | # self._ts_hat = (1-self._time_filter_alpha) * self._ts_hat + self._time_filter_alpha * receive_ts
340 |             self._ts_hat = receive_ts - 0.1 # TODO: hard-coded ~100 ms stream latency compensation; this should be measured rather than assumed
341 |
342 | self._process_frame(self._ts_hat, frame)
343 | except KeyboardInterrupt:
344 |             print('RoboMasterFrameSource KeyboardInterrupt caught')
345 | finally:
346 | ep_robot.chassis.drive_speed(x=0, y=0, z=0, timeout=5)
347 | ep_camera.stop_video_stream()
348 | print('sleeping for robot to stop')
349 | time.sleep(2)
350 | ep_robot.close()
351 |
352 | if self._record_dir:
353 | print('RoboMasterFrameSource saving')
354 | file_name = os.path.join(self._record_dir, 'frame_metadata.pickle')
355 | with open(file_name, 'wb') as file:
356 | pickle.dump({'ts': np.array(self._frame_times)}, file, protocol=2)
357 | self._imu_source.save_data()
358 |
359 | def _process_frame(self, ts, frame):
360 | self._frame_times.append(ts)
361 |
362 | shm = shared_memory.SharedMemory(create=True, size=self._template_buffer.nbytes)
363 | frame_gray = np.ndarray(self._template_buffer.shape,
364 | dtype=self._template_buffer.dtype,
365 | buffer=shm.buf)
366 | preprocess_image(frame, frame_gray)
367 |
368 | self._queue.put((ts, shm))
369 | shm.close()
370 |
371 | if not self._earliest_timestamp_set:
372 | self._earliest_timestamp.value = ts
373 | self._earliest_timestamp_set = True
374 | self._latest_timestamp.value = ts
375 |
376 |         if self._record_dir:
377 |             image_dir = os.path.join(self._record_dir, 'images')
378 | frame_name = os.path.join(image_dir, 'frame_{:06d}.npy'.format(self._num_frames))
379 | with open(frame_name, 'wb') as f:
380 | np.save(f, frame)
381 |
382 | self._num_frames += 1
383 |
384 | def earliest_timestamp(self):
385 | return float(self._earliest_timestamp.value)
386 |
387 | def latest_timestamp(self):
388 | return float(self._latest_timestamp.value)
389 |
390 | def next_sample(self):
391 | try:
392 | (ts, shm) = self._queue.get_nowait()
393 | frame_gray = np.ndarray(self._template_buffer.shape,
394 | dtype=self._template_buffer.dtype,
395 | buffer=shm.buf)
396 | sample = (ts, frame_gray)
397 | self._shared_memories.append(shm)
398 | return sample
399 | except queue.Empty:
400 | sample = None
401 | return sample
402 |
403 | def free_sample(self):
404 | if len(self._shared_memories) > 0:
405 | shm = self._shared_memories.pop(0)
406 | shm.close()
407 | shm.unlink()
408 |
409 | def signal_stop(self):
410 | self._stop_process.value = True
411 |
412 | class RobomasterTemplateSource(object):
413 | def __init__(self, patch_dim=50, resettable=True, record_dir=None):
414 | self._resettable = resettable
415 | self._record_dir = record_dir
416 | self._patch_params = {
417 | 'patch_start_time': time.time() + 2.5,
418 | 'patch_end_time': time.time() + 10000.0,
419 | 'patch_coordinates': (int(5.0*WIDTH/8 - patch_dim),
420 | int(2.0*HEIGHT/4 - patch_dim),
421 | int(5.0*WIDTH/8 + patch_dim),
422 | int(2.0*HEIGHT/4 + patch_dim))
423 | }
424 | self._patches = []
425 | self._create_patch(self._patch_params['patch_start_time'])
426 |
427 | self._patch_index = -1
428 |
429 | self._exit = False
430 |
431 | self._force_new_patch = False
432 |
433 | def force_new_patch(self):
434 | self._force_new_patch = True
435 |
436 | def _create_patch(self, time):
437 | self._patches.append((time,
438 | time + 100000.0,
439 | self._patch_params['patch_coordinates']))
440 |
441 | def current_patch_valid(self, time):
442 |         if True: # key handling is always enabled; self._resettable is currently ignored
443 |             key = cv2.pollKey()
444 |             # (debug) if key != -1:
445 |             #     print('key {}'.format(key))
446 |             if key == ord('r') or self._force_new_patch: # 'r' selects a new patch
447 |                 self._create_patch(time)
448 |                 self._force_new_patch = False
449 |                 return False
450 |             elif key == ord('t'): # 't' resets the observer
451 |                 return 'reset observer'
452 |             elif key == ord('q'): # 'q' exits
453 |                 self._exit = True
454 |
455 | if self._patch_index < 0:
456 | raise Exception('Current patch valid called before patch selected')
457 | return time < self._patches[self._patch_index][1]
458 |
459 | def get_new_patch(self, time):
460 | next_patch_index = self._patch_index + 1
461 |
462 | if next_patch_index >= len(self._patches):
463 | return None
464 |
465 | if time >= self._patches[next_patch_index][0]:
466 | self._patch_index = next_patch_index
467 | return self._patches[next_patch_index][2]
468 |
469 | def save_data(self):
470 | file_name = os.path.join(self._record_dir, 'templates_live.pickle')
471 | with open(file_name, 'wb') as file:
472 | pickle.dump({'patches': self._patches}, file, protocol=2)
473 |
474 | def make_record_dir():
475 | previous_directories = sorted(glob.glob(os.path.join(BASE_RECORD_PATH, 'record_*')))
476 | if len(previous_directories) == 0:
477 | next_recording_number = 0
478 | else:
479 | next_recording_number = int(previous_directories[-1][-6:]) + 1
480 |
481 | record_dir = os.path.join(BASE_RECORD_PATH, 'record_{:06d}'.format(next_recording_number))
482 | images_dir = os.path.join(record_dir, 'images')
483 |
484 |     os.makedirs(record_dir, exist_ok=True) # also creates BASE_RECORD_PATH if it does not exist
485 |     os.mkdir(images_dir)
486 | return record_dir
487 |
488 | if __name__ == '__main__':
489 | parser = argparse.ArgumentParser()
490 | parser.add_argument('--visualize', dest='visualize', action='store_true', help='Visualize')
491 | parser.add_argument('--wait', dest='wait', action='store_true', help='Wait for key press when visualizing')
492 | parser.add_argument('--nogyro', dest='no_gyro', action='store_true', help='Disable gyro stabilization')
493 | parser.add_argument('--april', dest='april', action='store_true', help='Use apriltag for ground truth')
494 | parser.add_argument('--record', dest='record', action='store_true', help='Record data and events')
495 | parser.add_argument('--b', type=float, help='scalar gain b to apply to control effort')
496 | parser.add_argument('--efference', dest='efference', action='store_true', help='Report efference copy instead of IMU')
497 |
498 | args = parser.parse_args()
499 |
500 | if args.wait:
501 | wait_key = 0
502 | else:
503 | wait_key = 1
504 |
505 | WIDTH = 1280
506 | HEIGHT = 720
507 | FPS = 30
508 | patch_dim = 50
509 | strided_patch_size = 4000 # Track 4000 pixels
510 | max_flow_time = 0.5 * (1.0/FPS) # Works well enough
511 |
512 | if args.record:
513 | record_dir = make_record_dir()
514 | else:
515 | record_dir = None
516 |
517 | # From running calibrate_calculate.py
518 | K = np.array([[631.05345607, 0., 646.8600196 ],
519 | [ 0., 633.5803277, 357.86951071],
520 | [ 0., 0., 1. ]])
521 | D = np.array([0.1726052, 0.43400192, -0.43320789, 0.04646433])
522 |
523 | resolution = (WIDTH, HEIGHT)
524 | map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, resolution, cv2.CV_16SC2)
525 |
526 | if record_dir:
527 | file_name = os.path.join(record_dir, 'intrinsics.pickle')
528 | with open(file_name, 'wb') as file:
529 | pickle.dump({'K': K,
530 | 'D': D,
531 | 'resolution': resolution}, file, protocol=2)
532 |
533 |
534 | imu_source = RoboMasterIMUSource(no_gyro = args.no_gyro, record_dir=record_dir,
535 | b=args.b, efference=args.efference)
536 | phi_pose_subscriber = imu_source
537 |
538 | frame_source = RoboMasterFrameSource(imu_source, record_dir=record_dir)
539 | template_source = RobomasterTemplateSource(patch_dim=patch_dim,
540 | resettable = True,
541 | record_dir = record_dir)
542 |
543 | vector_pub = ZMQVectorPublisher()
544 |
545 | ttc_depth = TTCDepth(frame_source, imu_source, template_source, K,
546 | visualize=args.visualize,
547 | wait_key=wait_key,
548 | max_flow_time=max_flow_time,
549 | strided_patch_size=strided_patch_size,
550 | april_ground_truth=args.april,
551 | april_resize_to=None, #(270, 480),
552 | vector_pub = vector_pub,
553 | phi_pose_subscriber=phi_pose_subscriber,
554 | phi_accel_power_thresh=0.001)
555 |
556 | cv2.setNumThreads(1)
557 | try:
558 | frame_source.start()
559 |
560 | start_collection_time = time.time()
561 |
562 | while not template_source._exit and not imu_source._exit_everything.value:
563 | #print(time.time() - start_time)
564 | imu_start_compute_time = ttc_depth._imu_time_computed_to
565 | flow_start_compute_time = ttc_depth._flow_time_computed_to
566 | observer_start_compute_time = ttc_depth._ttc_pose_observer_time_computed_to
567 | start_time = time.time()
568 | ttc_depth.update()
569 | end_time = time.time()
570 | imu_end_compute_time = ttc_depth._imu_time_computed_to
571 | flow_end_compute_time = ttc_depth._flow_time_computed_to
572 | observer_end_compute_time = ttc_depth._ttc_pose_observer_time_computed_to
573 |
574 | if imu_start_compute_time is not None:
575 | real_delta = end_time-start_time
576 | imu_delta = imu_end_compute_time - imu_start_compute_time
577 | flow_delta = flow_end_compute_time - flow_start_compute_time
578 | observer_delta = observer_end_compute_time - observer_start_compute_time
579 |
580 | if imu_delta > 0:
581 |                 # TODO this compares system time with hardware timestamps, it doesn't make sense
582 | imu_lag = end_time - imu_end_compute_time
583 | flow_lag = end_time - flow_end_compute_time
584 | observer_lag = end_time - observer_end_compute_time
585 |
586 |                     if observer_lag > 0.25: # observer has fallen behind; force a new patch so it can catch up
587 |                         template_source.force_new_patch()
588 |
589 | # print('real {:.03f} imu {:.03f} ratio {:.03f} imu lag {:.03f} flow lag {:.03f} obs lag {:.03f}'.format(
590 | # real_delta, imu_delta, imu_delta/real_delta, imu_lag, flow_lag, observer_lag))
591 | #print('{:.02f}'.format(ttc_depth._ttc_list[-1][1]))
592 | # print('{:.01f}'.format(ttc_depth._z_hat_list[-1][1]))
593 |
594 | if time.time() - start_collection_time > 10000.0:
595 | print('Ran for 10000 seconds, exiting')
596 | frame_source.signal_stop()
597 |
598 |             print('Join frame source')
599 | frame_source.join()
600 | break
601 |
602 | if imu_start_compute_time is None or observer_delta < 0.1:
603 | update_delta = end_time - start_time
604 | # Rate limit
605 | sleep_delta = (1.0/1000.0) - update_delta
606 | if sleep_delta > 0:
607 | time.sleep(sleep_delta)
608 |
609 | except KeyboardInterrupt:
610 | print('Main process KeyboardInterrupt caught')
611 |
612 | finally:
613 | print('Attempting to clean up data sources')
614 | frame_source.signal_stop()
615 | frame_source.join()
616 |
617 | if args.record:
618 | template_source.save_data()
619 |
620 |     print('Success, ignore any resource_tracker warnings')
621 |
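622 | 
623 | # Standalone sketch (not part of the original script) of the shared-memory frame
624 | # hand-off used by RoboMasterFrameSource._process_frame() and next_sample(): the
625 | # producer copies a frame into a SharedMemory block and queues (ts, shm); the
626 | # consumer wraps the same buffer in an ndarray without copying. Dimensions and
627 | # values here are illustrative assumptions; in the real pipeline the producer
628 | # close()s its mapping after the put and the consumer unlink()s when done.
629 | def _shared_memory_handoff_demo(width=1280, height=720):
630 |     template = np.zeros((height, width), dtype=np.float32)
631 |     q = Queue()
632 | 
633 |     # producer side
634 |     shm = shared_memory.SharedMemory(create=True, size=template.nbytes)
635 |     frame = np.ndarray(template.shape, dtype=template.dtype, buffer=shm.buf)
636 |     frame[...] = 0.5 # stand-in for preprocess_image()
637 |     q.put((time.time(), shm))
638 | 
639 |     # consumer side (here the same process; normally the main process)
640 |     ts, shm_rx = q.get()
641 |     frame_rx = np.ndarray(template.shape, dtype=template.dtype, buffer=shm_rx.buf)
642 |     print(ts, frame_rx.mean())
643 |     shm_rx.close()
644 |     shm_rx.unlink()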
--------------------------------------------------------------------------------
/ttc_pose_observer.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: ttc_pose_observer.py
4 | # Available under MIT license
5 | #
6 | # Position observer from phi and acceleration
7 | #
8 | # History:
9 | # 04-26-20 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import time
15 | import numpy as np
16 | from ttc_depth_integrals_accel_bias import accel_z_z_star_tf, accel_x_z_star_tf
17 |
18 | def feedback_good(accel_power, accel_z, max_z):
19 |     return (not np.isnan(accel_z) and accel_power > 2.0 and accel_z < max_z)
20 |
21 | class TTCPoseObserver(object):
22 | def __init__(self,
23 | z_0=None,
24 | z_0_dot=0.0,
25 | dt=0.005,
26 | max_z=-0.1,
27 | z_hat_gain=2.0,
28 | seconds_to_keep = 1.0):
29 | self._dt = dt
30 |         self._z_0 = z_0
31 | self._z_hat = self._z_0
32 | self._max_z = max_z
33 | self._min_initial_z = -7.5
34 |
35 | self._seconds_to_keep = seconds_to_keep
36 | self._num_samples_to_keep = int(self._seconds_to_keep / self._dt)
37 |
38 | self._t_list = []
39 | self._scaled_velocities_list = []
40 | self._a_fc_list = []
41 |
42 |         self._A = np.array(((0, 1), (0, 0))) # double-integrator dynamics on the state [z, z_dot]
43 |         self._B = np.array((0, 1)) # acceleration input drives the velocity state
44 |         self._L = np.array(((2, 0), (0, 20))) # observer feedback gains
45 |
46 | def reset_ic(self):
47 | self._z_hat = self._z_0
48 |
49 | def update(self, t, scaled_velocities, a_fc):
50 | self._t_list.append(t)
51 | self._scaled_velocities_list.append(scaled_velocities.tolist())
52 | self._a_fc_list.append(a_fc.tolist())
53 |
54 | accel_z_bias = None
55 | if len(self._t_list) == self._num_samples_to_keep:
56 | self._t_list = self._t_list[1:]
57 | self._scaled_velocities_list = self._scaled_velocities_list[1:]
58 | self._a_fc_list = self._a_fc_list[1:]
59 |
60 |             # convert the sliding-window history to arrays; this block requires a
61 |             # full window (see the matching else below)
62 |             t_hist = np.array(self._t_list)
63 | 
64 |             s_v = np.array(self._scaled_velocities_list)
65 | 
66 |             s_v_x = s_v[:, 0]
67 |             s_v_y = s_v[:, 1]
68 |             s_v_z = s_v[:, 2]
69 | 
70 |             a_fc = np.array(self._a_fc_list)
71 |             a_x = a_fc[:, 0]
72 |             a_y = a_fc[:, 1]
73 |             a_z = a_fc[:, 2]
74 | 
75 |             (accel_x_z_tf, accel_x_bias) = accel_x_z_star_tf(t_hist, s_v_x, s_v_z, a_x)
76 |             if not np.isnan(accel_x_z_tf):
77 |                 a_x_power = np.linalg.norm(a_x-accel_x_bias) / np.sqrt(self._num_samples_to_keep)
78 |             else:
79 |                 a_x_power = 0.0
80 |                 print('x singular')
81 | 
82 |             (accel_y_z_tf, accel_y_bias) = accel_x_z_star_tf(t_hist, s_v_y, s_v_z, a_y)
83 |             if not np.isnan(accel_y_z_tf):
84 |                 a_y_power = np.linalg.norm(a_y-accel_y_bias) / np.sqrt(self._num_samples_to_keep)
85 |             else:
86 |                 a_y_power = 0.0
87 |                 print('y singular')
88 | 
89 |             (accel_z_z_tf, accel_z_bias) = accel_z_z_star_tf(t_hist, s_v_z, a_z)
90 |             if not np.isnan(accel_z_z_tf):
91 |                 a_z_power = np.linalg.norm(a_z-accel_z_bias) / np.sqrt(self._num_samples_to_keep)
92 |             else:
93 |                 a_z_power = 0.0
94 |                 print('z singular')
94 | else:
95 | return None
96 |
97 | accel_z_hat = 0.0
98 | accel_z_dot_hat = 0.0
99 | num_feedback = 0
100 | if feedback_good(a_x_power, accel_x_z_tf, self._max_z):
101 | accel_z_hat += accel_x_z_tf
102 | accel_z_dot_hat += scaled_velocities[2] * accel_x_z_tf
103 | num_feedback += 1
104 | else:
105 | accel_x_z_tf = 0.0
106 |
107 | if feedback_good(a_y_power, accel_y_z_tf, self._max_z):
108 | accel_z_hat += accel_y_z_tf
109 | accel_z_dot_hat += scaled_velocities[2] * accel_y_z_tf
110 | num_feedback += 1
111 | else:
112 | accel_y_z_tf = 0.0
113 |
114 | if feedback_good(a_z_power, accel_z_z_tf, self._max_z):
115 | accel_z_hat += accel_z_z_tf
116 | accel_z_dot_hat += scaled_velocities[2] * accel_z_z_tf
117 | num_feedback += 1
118 | else:
119 | accel_z_z_tf = 0.0
120 |
121 | if num_feedback > 0 and accel_z_bias is not None and self._z_hat is not None:
122 | accel_z_hat /= num_feedback
123 | accel_z_dot_hat /= num_feedback
124 |
125 | e = np.array((accel_z_hat, accel_z_dot_hat)) - self._z_hat
126 |
127 | z_hat_dot = self._A @ self._z_hat + self._B * (a_z[-1]-accel_z_bias) + self._L @ e
128 |
129 | self._z_hat = self._z_hat + z_hat_dot * self._dt
130 | elif num_feedback > 0 and self._z_hat is None:
131 | accel_z_hat /= num_feedback
132 | accel_z_dot_hat /= num_feedback
133 |
134 | # _min_initial_z is to filter an insane spike at the beginning of one sequence
135 | if accel_z_hat > self._min_initial_z:
136 | self._z_hat = np.array((accel_z_hat, accel_z_dot_hat))
137 | else:
138 | return None
139 |
140 | elif self._z_hat is not None:
141 | s_v_z_hat_dot = scaled_velocities[2] * self._z_hat[0]
142 | self._z_hat = np.array((self._z_hat[0] + s_v_z_hat_dot * self._dt,
143 | s_v_z_hat_dot))
144 | else:
145 | return None
146 |
147 | if self._z_hat[0] > self._max_z:
148 | self._z_hat[0] = self._max_z
149 |
150 | return self._z_hat[0], accel_x_z_tf, accel_y_z_tf, accel_z_z_tf
151 |
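152 | 
153 | # Illustrative sketch (not part of the original file): the update above integrates
154 | # a Luenberger observer on the state z = [z, z_dot] with acceleration input u,
155 | #     z_hat_dot = A @ z_hat + B * u + L @ (z_meas - z_hat)
156 | # The toy below runs that form with the same A, B, and L on a fabricated
157 | # measurement stream; the trajectory and signals are illustrative assumptions.
158 | def _observer_demo(dt=0.005, steps=2000):
159 |     A = np.array(((0, 1), (0, 0)))
160 |     B = np.array((0, 1))
161 |     L = np.array(((2, 0), (0, 20)))
162 |     z_hat = np.array((0.0, 0.0))
163 |     for k in range(steps):
164 |         t = k * dt
165 |         z_meas = np.array((-1.0 + 0.1*np.sin(t), 0.1*np.cos(t))) # fabricated [z, z_dot]
166 |         u = -0.1*np.sin(t) # the corresponding acceleration
167 |         z_hat = z_hat + (A @ z_hat + B * u + L @ (z_meas - z_hat)) * dt
168 |     return z_hat # converges toward z_meas as the loop runs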
--------------------------------------------------------------------------------
/utilities.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: utilities.py
4 | # Available under MIT license
5 | #
6 | # Utility functions used by multiple files
7 | #
8 | # History:
9 | # 04-23-20 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import glob
15 | import os
16 | import pickle
17 |
18 | import cv2
19 | import numpy as np
20 |
21 | def latest_recording_dir():
22 | datasets = sorted(glob.glob('../recordings/record_*/'))
23 | if len(datasets) == 0:
24 | raise Exception('No data directories in default location and no directory specified')
25 | else:
26 | dataset_dir = datasets[-1]
27 | return dataset_dir
28 |
29 | def load_calibration_from_dir(record_dir):
30 |     with open(os.path.join(record_dir, 'intrinsics.pickle'), 'rb') as file:
31 |         calibration = pickle.load(file)
32 |     resolution = calibration['resolution']
33 |     K = calibration['K']
34 |     D = calibration['D']
35 | 
36 |     map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, resolution, cv2.CV_16SC2)
37 | 
38 |     return K, D, map1, map2, resolution
39 | 
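40 | if __name__ == '__main__':
41 |     # Minimal usage sketch: load the intrinsics saved alongside the most recent
42 |     # recording and report them. Assumes a recording exists under ../recordings,
43 |     # as produced by the ttc_depth_* scripts when run with --record.
44 |     K, D, map1, map2, resolution = load_calibration_from_dir(latest_recording_dir())
45 |     print('K =\n{}'.format(K))
46 |     print('D = {}'.format(D))
47 |     print('resolution = {}'.format(resolution))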
--------------------------------------------------------------------------------
/zmq_vector_publisher.py:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | #
3 | # File: zmq_vector_publisher.py
4 | # Available under MIT license
5 | #
6 | # An object that publishes timestamped floating point vectors over a zmq socket
7 | #
8 | # History:
9 | # 09-01-21 - Levi Burner - Created file
10 | # 09-26-22 - Levi Burner - Open source release
11 | #
12 | ###############################################################################
13 |
14 | import zmq
15 | import struct
16 |
17 | class ZMQVectorPublisher(object):
18 | def __init__(self, port=5556, base_topic='ttc_depth'):
19 | self._port = port
20 | self._base_topic = base_topic
21 | self._context = zmq.Context()
22 | self._socket = self._context.socket(zmq.PUB)
23 | self._socket.bind('tcp://*:{}'.format(self._port))
24 |
25 |     def publish(self, topic, t, x, flags=0, copy=True, track=False):
26 |         full_topic = self._base_topic + '/' + topic
27 |         self._socket.send(full_topic.encode('ascii'), flags|zmq.SNDMORE) # frame 1: topic string
28 | 
29 |         self._socket.send(struct.pack('d', float(t)), flags|zmq.SNDMORE) # frame 2: timestamp as a packed double
30 | 
31 |         md = dict(
32 |             dtype = str(x.dtype),
33 |             shape = x.shape,
34 |         )
35 | 
36 |         self._socket.send_json(md, flags|zmq.SNDMORE) # frame 3: array metadata as JSON
37 |         self._socket.send(x, flags, copy=copy, track=track) # frame 4: raw array buffer
38 |
39 |
40 | class ZMQVectorPublisherSaver(ZMQVectorPublisher):
41 | def __init__(self, port=5556, base_topic='ttc_depth'):
42 | super().__init__(port=port, base_topic=base_topic)
43 | self._save_dict = {}
44 |
45 | def publish(self, topic, t, x, flags=0, copy=True, track=False):
46 | if topic not in self._save_dict:
47 | self._save_dict[topic] = []
48 |
49 | self._save_dict[topic].append((t, x))
50 |
51 | super().publish(topic, t, x, flags=flags, copy=copy, track=track)
52 |
53 | def get_data(self):
54 | return self._save_dict
55 |
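56 | 
57 | # Minimal receiving-side sketch (not part of the original file): rebuild each
58 | # (topic, t, x) message published above. The port and topic prefix mirror the
59 | # defaults of ZMQVectorPublisher; the host and loop structure are illustrative
60 | # assumptions.
61 | def example_subscriber(host='localhost', port=5556, base_topic='ttc_depth'):
62 |     import json
63 |     import numpy as np
64 |     context = zmq.Context()
65 |     socket = context.socket(zmq.SUB)
66 |     socket.connect('tcp://{}:{}'.format(host, port))
67 |     socket.setsockopt_string(zmq.SUBSCRIBE, base_topic) # prefix match on frame 1
68 |     while True:
69 |         topic, t_raw, md_raw, buf = socket.recv_multipart() # the four frames sent by publish()
70 |         t = struct.unpack('d', t_raw)[0]
71 |         md = json.loads(md_raw)
72 |         x = np.frombuffer(buf, dtype=md['dtype']).reshape(md['shape'])
73 |         print(topic.decode('ascii'), t, x.shape)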
--------------------------------------------------------------------------------