├── LICENSE
├── README.md
├── assets
│   └── click-calib.png
├── calibrations
│   ├── optimized
│   │   ├── 00164_FV.json
│   │   ├── 00165_MVL.json
│   │   ├── 00166_MVR.json
│   │   └── 00167_RV.json
│   └── original
│       ├── 00164_FV.json
│       ├── 00165_MVL.json
│       ├── 00166_MVR.json
│       └── 00167_RV.json
├── images
│   └── fisheye
│       ├── 00164_FV.png
│       ├── 00165_MVL.png
│       ├── 00166_MVR.png
│       └── 00167_RV.png
├── requirements.txt
└── source
    ├── click_points.py
    ├── eval.py
    ├── generate_bev_img.py
    ├── initialize_extrins_calib.py
    ├── optimize.py
    ├── projection.py
    └── utils.py
/LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner.
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Click-Calib: A Robust Extrinsic Calibration Method for Surround-View Systems 2 | 3 | This is the official code repository for our paper Click-Calib. The example data (images & calibrations) are from the public dataset [WoodScape](https://github.com/valeoai/WoodScape). 
4 | 
5 | [[Paper](https://arxiv.org/abs/2501.01557)] [[Video](https://www.youtube.com/watch?v=p4pmqPD5JJU)]
6 | 
7 | ### Overview
8 | ![Click-Calib](assets/click-calib.png)
9 | 
10 | ## Python environment setup
11 | 
12 | Click-Calib requires Python 3.7 or later. Run the following command in your terminal to install the required packages:
13 | 
14 | `pip install -r requirements.txt`
15 | 
16 | ## Usage Guide
17 | 
18 | ### Step 1: Initialize extrinsic calibration
19 | 
20 | To ensure convergence of the optimization, an initial guess of the Surround-View System (SVS) extrinsic calibration needs to
21 | be provided. In this demo we use the original calibration from WoodScape as the initial values for simplicity. Alternatively,
22 | you can use the script initialize_extrins_calib.py to obtain initial values by manually adjusting each camera's pose. (A short sketch for loading the calibration JSON files appears after the optimized calibrations below.)
23 | 
24 | ### Step 2: Select keypoints
25 | 
26 | Use the click_points.py script to click keypoints in each pair of adjacent camera images. Ensure both images have an equal
27 | number of selected keypoints. After you finish clicking, simply close the GUI window; the selected keypoints will then be
28 | printed out. To achieve a good calibration, at least 10 points should be selected for each pair of adjacent cameras. If you
29 | prefer not to select points yourself, you can skip this step and use our pre-selected keypoints provided in optimize.py.
30 | 
31 | ### Step 3: Optimize
32 | 
33 | Copy and paste the keypoints from click_points.py into optimize.py, then run optimize.py. The optimization should
34 | take about 5 to 30 seconds. If it takes much longer or results in a large Mean Distance Error (MDE), the optimization
35 | has failed to converge. In that case, check your initial extrinsics and other settings (e.g., the number of selected keypoints).
36 | 
37 | ### (Optional) Step 4: Generate BEV images
38 | 
39 | For qualitative evaluation, use generate_bev_img.py to create BEV images from SVS images. It overlays the pixels
40 | reprojected from each camera, so better calibration yields better alignment, while poor calibration produces a more
41 | pronounced "ghosting" effect.
42 | 
43 | ### (Optional) Step 5: Metric calculation
44 | 
45 | For quantitative evaluation, use eval.py to compute the MDE metric on your test frames. The MDE is the mean ground-plane distance between matched keypoints projected from adjacent cameras.
46 | 
47 | ### Acknowledgements
48 | 
49 | The implementation of Click-Calib is based on [WoodScape](https://github.com/valeoai/WoodScape), and we extend our
50 | gratitude to all its contributors.
51 | 
52 | ## Citation
53 | If you find Click-Calib useful in your research or applications, please consider giving us a star 🌟 and citing:
54 | 
55 | **Click-Calib: A Robust Extrinsic Calibration Method for Surround-View Systems**.
56 | [Lihao Wang](https://scholar.google.com/citations?user=u2NnHowAAAAJ&hl=en). In arXiv:2501.01557.
57 | 58 | Bibtex: 59 | ``` 60 | @inproceedings{lwang2025clickcalib, 61 | title={Click-Calib: A Robust Extrinsic Calibration Method for Surround-View Systems}, 62 | author={Lihao Wang}, 63 | booktitle={arXiv:2501.01557}, 64 | year={2025} 65 | } 66 | ``` -------------------------------------------------------------------------------- /assets/click-calib.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LihaoWang1991/click_calib/20be84e8a413f04fe42ac1c8c69560b39a0c8a47/assets/click-calib.png -------------------------------------------------------------------------------- /calibrations/optimized/00164_FV.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.6201308174516292, 5 | -0.5937307509342868, 6 | 0.39001004283353635, 7 | -0.41242829258731867 8 | ], 9 | "translation": [ 10 | 3.6819384234436225, 11 | 0.0008236789826057496, 12 | 0.68133 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 3.942, 18 | "cy_offset": -3.093, 19 | "height": 966.0, 20 | "k1": 339.749, 21 | "k2": -31.988, 22 | "k3": 48.275, 23 | "k4": -7.201, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | } 28 | } -------------------------------------------------------------------------------- /calibrations/optimized/00165_MVL.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.9784285343011225, 5 | 0.2637768040624223, 6 | -0.00484101685033018, 7 | -0.1388865347828484 8 | ], 9 | "translation": [ 10 | 1.883877634519682, 11 | 0.9374885909901753, 12 | 0.9549 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 1.829, 18 | "cy_offset": -0.49, 19 | "height": 966.0, 20 | "k1": 335.497, 21 | "k2": -11.41, 22 | "k3": 22.009, 23 | "k4": 2.539, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | } 28 | } -------------------------------------------------------------------------------- /calibrations/optimized/00166_MVR.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.23598267590341626, 5 | 1.0537965866083725, 6 | -0.12486099597242313, 7 | -0.018195002265676996 8 | ], 9 | "translation": [ 10 | 1.842974892202017, 11 | -0.8684761281618796, 12 | 0.9753099999999999 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 3.468, 18 | "cy_offset": -1.705, 19 | "height": 966.0, 20 | "k1": 337.657, 21 | "k2": -16.126, 22 | "k3": 27.487, 23 | "k4": 0.888, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | } 28 | } -------------------------------------------------------------------------------- /calibrations/optimized/00167_RV.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.6622127409872829, 5 | 0.6840713908382783, 6 | -0.3314765422972093, 7 | -0.3397133622735843 8 | ], 9 | "translation": [ 10 | -0.8390160802244699, 11 | -0.015338822270666503, 12 | 0.90934 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 6.67, 18 | "cy_offset": -3.291, 19 | "height": 966.0, 20 | "k1": 339.039, 21 | "k2": -29.815, 22 | "k3": 46.483, 23 | "k4": -6.655, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | } 28 | } 
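Note: the calibration files above (and the original ones below) share one schema: an "extrinsic" block holding a quaternion and a translation in meters, and an "intrinsic" block holding the image size, principal-point offsets, and the fourth-order radial polynomial coefficients k1-k4 ("model": "radial_poly"). The following is a minimal loading sketch, independent of the repo's own read_calib helper in utils.py (not shown in this dump); it assumes scipy's scalar-last (x, y, z, w) quaternion convention, which is consistent with the SciRot.as_quat() calls in initialize_extrins_calib.py:

```python
# Minimal sketch for loading one calibration JSON (run from the repo root).
# Assumption: quaternions are scalar-last (x, y, z, w), matching the scipy
# Rotation calls used elsewhere in this repo.
import json

import numpy as np
from scipy.spatial.transform import Rotation


def load_calib(path):
    with open(path) as f:
        calib = json.load(f)
    extr, intr = calib["extrinsic"], calib["intrinsic"]
    quat = np.asarray(extr["quaternion"])  # (x, y, z, w); optimized ones are not unit-norm
    t = np.asarray(extr["translation"])    # camera position in meters
    R = Rotation.from_quat(quat).as_matrix()  # scipy normalizes the quaternion internally
    ks = [intr[f"k{i}"] for i in range(1, int(intr["poly_order"]) + 1)]
    return R, t, ks


R, t, ks = load_calib("calibrations/optimized/00164_FV.json")
print("translation:", t, "radial poly coefficients:", ks)
```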
-------------------------------------------------------------------------------- /calibrations/original/00164_FV.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.592188269837962, 5 | -0.584690916322556, 6 | 0.39504292969920435, 7 | -0.3890895387065559 8 | ], 9 | "translation": [ 10 | 3.7484, 11 | 0.0, 12 | 0.68133 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 3.942, 18 | "cy_offset": -3.093, 19 | "height": 966.0, 20 | "k1": 339.749, 21 | "k2": -31.988, 22 | "k3": 48.275, 23 | "k4": -7.201, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | }, 28 | "name": "FV" 29 | } -------------------------------------------------------------------------------- /calibrations/original/00165_MVL.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.9597923475490652, 5 | 0.24268656748912834, 6 | -0.004160525068239742, 7 | -0.14101265750901937 8 | ], 9 | "translation": [ 10 | 1.9445999999999999, 11 | 0.9572999999999999, 12 | 0.9549 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 1.829, 18 | "cy_offset": -0.49, 19 | "height": 966.0, 20 | "k1": 335.497, 21 | "k2": -11.41, 22 | "k3": 22.009, 23 | "k4": 2.539, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | }, 28 | "name": "MVL" 29 | } -------------------------------------------------------------------------------- /calibrations/original/00166_MVR.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.23466017209146606, 5 | 0.9622497661995224, 6 | -0.13751608301948323, 7 | -0.00996584135669057 8 | ], 9 | "translation": [ 10 | 1.9445999999999999, 11 | -0.9572999999999999, 12 | 0.9753099999999999 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 3.468, 18 | "cy_offset": -1.705, 19 | "height": 966.0, 20 | "k1": 337.657, 21 | "k2": -16.126, 22 | "k3": 27.487, 23 | "k4": 0.888, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | }, 28 | "name": "MVR" 29 | } -------------------------------------------------------------------------------- /calibrations/original/00167_RV.json: -------------------------------------------------------------------------------- 1 | { 2 | "extrinsic": { 3 | "quaternion": [ 4 | 0.636314098922813, 5 | 0.6310055318192165, 6 | -0.30810493642236725, 7 | -0.3193864970185268 8 | ], 9 | "translation": [ 10 | -1.0678, 11 | 0.0545, 12 | 0.90934 13 | ] 14 | }, 15 | "intrinsic": { 16 | "aspect_ratio": 1.0, 17 | "cx_offset": 6.67, 18 | "cy_offset": -3.291, 19 | "height": 966.0, 20 | "k1": 339.039, 21 | "k2": -29.815, 22 | "k3": 46.483, 23 | "k4": -6.655, 24 | "model": "radial_poly", 25 | "poly_order": 4, 26 | "width": 1280.0 27 | }, 28 | "name": "RV" 29 | } -------------------------------------------------------------------------------- /images/fisheye/00164_FV.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LihaoWang1991/click_calib/20be84e8a413f04fe42ac1c8c69560b39a0c8a47/images/fisheye/00164_FV.png -------------------------------------------------------------------------------- /images/fisheye/00165_MVL.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LihaoWang1991/click_calib/20be84e8a413f04fe42ac1c8c69560b39a0c8a47/images/fisheye/00165_MVL.png -------------------------------------------------------------------------------- /images/fisheye/00166_MVR.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LihaoWang1991/click_calib/20be84e8a413f04fe42ac1c8c69560b39a0c8a47/images/fisheye/00166_MVR.png -------------------------------------------------------------------------------- /images/fisheye/00167_RV.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LihaoWang1991/click_calib/20be84e8a413f04fe42ac1c8c69560b39a0c8a47/images/fisheye/00167_RV.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.21.5 2 | opencv-python==4.6.0.66 3 | matplotlib==3.5.0 4 | scipy==1.7.3 5 | -------------------------------------------------------------------------------- /source/click_points.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Valeo Brain Division and contributors 2 | # 3 | # Author: Lihao Wang 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 
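#
# Note: when you close the figure window, this script prints the clicked
# points for both images, e.g. (showing the pre-selected FV/MVL keypoints
# from optimize.py):
#   Points in cam 1: [(186, 585), (194, 591), ...]
#   Points in cam 2: [(1048, 539), (1047, 555), ...]
# These lists are what you copy into the corresponding "front"/"left"/
# "right"/"rear" arrays of the pts_img_* dictionaries in optimize.py.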
22 | 23 | import matplotlib.pyplot as plt 24 | import matplotlib.image as mpimg 25 | 26 | def zoom(event): 27 | ax = event.inaxes 28 | if ax is None: 29 | return 30 | xdata, ydata = event.xdata, event.ydata 31 | x, y = ax.get_xlim(), ax.get_ylim() 32 | if event.button == 'up': # Zoom in on scroll up 33 | ax.set_xlim(xdata - (xdata - x[0]) / 1.1, xdata + (x[1] - xdata) / 1.1) 34 | ax.set_ylim(ydata - (ydata - y[0]) / 1.1, ydata + (y[1] - ydata) / 1.1) 35 | elif event.button == 'down': # Zoom out on scroll down 36 | ax.set_xlim(xdata - (xdata - x[0]) * 1.1, xdata + (x[1] - xdata) * 1.1) 37 | ax.set_ylim(ydata - (ydata - y[0]) * 1.1, ydata + (y[1] - ydata) * 1.1) 38 | ax.figure.canvas.draw() 39 | 40 | def onclick(event): # Record a clicked point and annotate it with its index 41 | if event.inaxes == ax1: 42 | x, y = event.xdata, event.ydata 43 | pts_1.append((int(x), int(y))) 44 | pt_1_idx = len(pts_1) 45 | ax1.plot(x, y, 'ro', markersize=3) 46 | ax1.annotate(f"{pt_1_idx}", (x, y), color=(0.70, 1, 0.40), fontsize=6) 47 | elif event.inaxes == ax2: 48 | x, y = event.xdata, event.ydata 49 | pts_2.append((int(x), int(y))) 50 | pt_2_idx = len(pts_2) 51 | ax2.plot(x, y, 'ro', markersize=3) 52 | ax2.annotate(f"{pt_2_idx}", (x, y), color=(0.70, 1, 0.40), fontsize=6) 53 | fig.canvas.draw() 54 | 55 | if __name__ == '__main__': 56 | pts_1 = [] 57 | pts_2 = [] 58 | img_1_path = "../images/fisheye/00164_FV.png" 59 | img_2_path = "../images/fisheye/00165_MVL.png" 60 | img1 = mpimg.imread(img_1_path) 61 | img2 = mpimg.imread(img_2_path) 62 | 63 | fig, (ax1, ax2) = plt.subplots(1, 2) 64 | ax1.imshow(img1) 65 | ax1.set_title('Cam_1', fontsize=10) 66 | ax1.axis('off') 67 | ax2.imshow(img2) 68 | ax2.set_title('Cam_2', fontsize=10) 69 | ax2.axis('off') 70 | 71 | fig.text(0.5, 0.94, 'Click-Calib', fontsize=12, fontweight='bold', ha='center') 72 | fig.suptitle('Click to select keypoints in both cameras. Scroll to zoom.\n' 73 | 'Keypoints with the same index in the two images should correspond to the same world point.\n' 74 | 'The number of selected keypoints must be the same for both cameras.', fontsize=10, y=0.9, linespacing=2) 75 | 76 | cid = fig.canvas.mpl_connect('button_press_event', onclick) 77 | fig.canvas.mpl_connect('scroll_event', zoom) 78 | plt.show() 79 | 80 | assert len(pts_1) == len(pts_2), "The number of points in the two cameras must be the same!" 81 | print(f"Points in cam 1: {pts_1}") 82 | print(f"Points in cam 2: {pts_2}") 83 | 84 | 85 | -------------------------------------------------------------------------------- /source/eval.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Valeo Brain Division and contributors 2 | # 3 | # Author: Lihao Wang 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | import numpy as np 24 | from utils import quat_to_mat, init_fisheye_cam, read_calib 25 | 26 | def calc_mean_dist_error(calib, 27 | cam_front, 28 | cam_left, 29 | cam_right, 30 | cam_rear, 31 | pos_z_front, 32 | pos_z_left, 33 | pos_z_right, 34 | pos_z_rear, 35 | pts_img_front_left, 36 | pts_img_front_right, 37 | pts_img_rear_left, 38 | pts_img_rear_right): 39 | t_front = [calib[0], calib[1], pos_z_front] 40 | R_front = quat_to_mat(calib[2:6]) 41 | cam_front.update_extr(t_front, R_front) 42 | 43 | t_left = [calib[6], calib[7], pos_z_left] 44 | R_left = quat_to_mat(calib[8:12]) 45 | cam_left.update_extr(t_left, R_left) 46 | 47 | t_right = [calib[12], calib[13], pos_z_right] 48 | R_right = quat_to_mat(calib[14:18]) 49 | cam_right.update_extr(t_right, R_right) 50 | 51 | t_rear = [calib[18], calib[19], pos_z_rear] 52 | R_rear = quat_to_mat(calib[20:24]) 53 | cam_rear.update_extr(t_rear, R_rear) 54 | 55 | distance = 0 56 | pts_img_front_fl = pts_img_front_left["front"] 57 | pts_img_left_fl = pts_img_front_left["left"] 58 | pts_img_front_fr = pts_img_front_right["front"] 59 | pts_img_right_fr = pts_img_front_right["right"] 60 | pts_img_rear_rl = pts_img_rear_left["rear"] 61 | pts_img_left_rl = pts_img_rear_left["left"] 62 | pts_img_rear_rr = pts_img_rear_right["rear"] 63 | pts_img_right_rr = pts_img_rear_right["right"] 64 | 65 | assert len(pts_img_front_fl) == len(pts_img_left_fl) 66 | assert len(pts_img_front_fr) == len(pts_img_right_fr) 67 | assert len(pts_img_rear_rl) == len(pts_img_left_rl) 68 | assert len(pts_img_rear_rr) == len(pts_img_right_rr) 69 | 70 | num_pts = len(pts_img_front_fl) + len(pts_img_front_fr) + len(pts_img_rear_rl) + len(pts_img_rear_rr) 71 | 72 | if len(pts_img_front_fl) > 0: 73 | pts_world_front_fl = cam_front.project_2d_to_3d_ground(pts_img_front_fl) 74 | pts_world_left_fl = cam_left.project_2d_to_3d_ground(pts_img_left_fl) 75 | distance += np.linalg.norm(pts_world_front_fl - pts_world_left_fl, axis=1).sum() 76 | 77 | if len(pts_img_front_fr) > 0: 78 | pts_world_front_fr = cam_front.project_2d_to_3d_ground(pts_img_front_fr) 79 | pts_world_right_fr = cam_right.project_2d_to_3d_ground(pts_img_right_fr) 80 | distance += np.linalg.norm(pts_world_front_fr - pts_world_right_fr, axis=1).sum() 81 | 82 | if len(pts_img_rear_rl) > 0: 83 | pts_world_rear_rl = cam_rear.project_2d_to_3d_ground(pts_img_rear_rl) 84 | pts_world_left_rl = cam_left.project_2d_to_3d_ground(pts_img_left_rl) 85 | distance += np.linalg.norm(pts_world_rear_rl - pts_world_left_rl, axis=1).sum() 86 | 87 | if len(pts_img_rear_rr) > 0: 88 | pts_world_rear_rr = cam_rear.project_2d_to_3d_ground(pts_img_rear_rr) 89 | pts_world_right_rr = cam_right.project_2d_to_3d_ground(pts_img_right_rr) 90 | distance += np.linalg.norm(pts_world_rear_rr - pts_world_right_rr, axis=1).sum() 91 | 92 | mean_dist_error = distance / num_pts 93 | return mean_dist_error 94 | 95 | 96 | if __name__ == '__main__': 97 | calib_f_front = "../calibrations/optimized/00164_FV.json" 98 | calib_f_left = "../calibrations/optimized/00165_MVL.json" 99 | calib_f_right = "../calibrations/optimized/00166_MVR.json" 100 | calib_f_rear = "../calibrations/optimized/00167_RV.json" 101 | 102 | pts_img_front_left = { 103 | "front": np.array( 104 | [(186, 585), (194, 591), 
(325, 493), (333, 495), (418, 444), (463, 417), (502, 402), (547, 384), 105 | (210, 469), (211, 458), (226, 454), (428, 403), (546, 369)]), 106 | "left": np.array( 107 | [(1048, 539), (1047, 555), (1092, 591), (1091, 607), (1119, 639), (1135, 651), (1146, 677), (1162, 704), 108 | (1057, 309), (1063, 297), (1074, 317), (1159, 550), (1187, 660)])} 109 | 110 | pts_img_front_right = { 111 | "front": np.array( 112 | [(939, 475), (856, 433), (865, 432), (815, 412), (978, 499), (1175, 559), (1137, 534), (1121, 551), 113 | (1130, 549), (1126, 619)]), 114 | "right": np.array( 115 | [(158, 583), (138, 618), (137, 606), (124, 639), (174, 566), (246, 330), (221, 356), (212, 422), 116 | (216, 400), (222, 538)])} 117 | 118 | pts_img_rear_left = { 119 | "rear": np.array( 120 | [(788, 350), (810, 370), (818, 369), (858, 410), (866, 409), (825, 360), (921, 469), (931, 467), 121 | (1019, 582), (1028, 576), (1061, 476), (1114, 513), (1158, 546)]), 122 | "left": np.array( 123 | [(240, 212), (247, 217), (252, 210), (263, 219), (270, 212), (267, 194), (285, 218), (290, 210), 124 | (317, 222), (325, 213), (452, 109), (512, 97), (571, 89)])} 125 | 126 | pts_img_rear_right = { 127 | "rear": np.array( 128 | [(325, 454), (338, 453), (389, 399), (446, 361), (456, 360), (420, 396), (449, 372), (487, 339), 129 | (504, 324), (512, 324), (598, 280), (555, 300)]), 130 | "right": np.array( 131 | [(967, 197), (980, 208), (995, 198), (1019, 202), (1027, 211), (1019, 220), (1030, 220), (1043, 212), 132 | (1047, 207), (1054, 214), (1105, 222), (1078, 216)])} 133 | 134 | intr_front, quat_front, t_front = read_calib(calib_f_front) 135 | intr_left, quat_left, t_left = read_calib(calib_f_left) 136 | intr_right, quat_right, t_right = read_calib(calib_f_right) 137 | intr_rear, quat_rear, t_rear = read_calib(calib_f_rear) 138 | 139 | pos_x_front, pos_y_front, pos_z_front = t_front 140 | pos_x_left, pos_y_left, pos_z_left = t_left 141 | pos_x_right, pos_y_right, pos_z_right = t_right 142 | pos_x_rear, pos_y_rear, pos_z_rear = t_rear 143 | 144 | cam_front = init_fisheye_cam(intr_front, quat_front, t_front) 145 | cam_left = init_fisheye_cam(intr_left, quat_left, t_left) 146 | cam_right = init_fisheye_cam(intr_right, quat_right, t_right) 147 | cam_rear = init_fisheye_cam(intr_rear, quat_rear, t_rear) 148 | 149 | calib = np.array([pos_x_front, 150 | pos_y_front, 151 | *quat_front, 152 | pos_x_left, 153 | pos_y_left, 154 | *quat_left, 155 | pos_x_right, 156 | pos_y_right, 157 | *quat_right, 158 | pos_x_rear, 159 | pos_y_rear, 160 | *quat_rear]) 161 | mean_dist_error = calc_mean_dist_error(calib, 162 | cam_front, 163 | cam_left, 164 | cam_right, 165 | cam_rear, 166 | pos_z_front, 167 | pos_z_left, 168 | pos_z_right, 169 | pos_z_rear, 170 | pts_img_front_left, 171 | pts_img_front_right, 172 | pts_img_rear_left, 173 | pts_img_rear_right) 174 | 175 | print("Mean distance error:", mean_dist_error) -------------------------------------------------------------------------------- /source/generate_bev_img.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Valeo Brain Division and contributors 2 | # 3 | # Author: Lihao Wang 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to 
permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | import numpy as np 24 | import cv2 25 | from projection import Camera, create_bev_projection_maps, read_cam_from_json, bev_points_world_to_img 26 | from matplotlib import pyplot as plt 27 | 28 | def generate_bev_one_cam(source_cam: Camera, source_img: np.ndarray, bev_range: int, bev_size: int): 29 | map1, map2 = create_bev_projection_maps(source_cam, bev_range, bev_size) 30 | bev_image = cv2.remap(source_img, map1, map2, cv2.INTER_CUBIC) 31 | return bev_image 32 | 33 | def generate_bev_all_cams(cam_front, cam_left, cam_right, cam_rear, img_front, img_left, img_right, img_rear, 34 | overlay_opt='all', bev_range=25, bev_size=640): 35 | assert overlay_opt in ['fr', 'lr', 'all'] 36 | 37 | bev_img_front = generate_bev_one_cam(cam_front, img_front, bev_range, bev_size) 38 | bev_img_left = generate_bev_one_cam(cam_left, img_left, bev_range, bev_size) 39 | bev_img_right = generate_bev_one_cam(cam_right, img_right, bev_range, bev_size) 40 | bev_img_rear = generate_bev_one_cam(cam_rear, img_rear, bev_range, bev_size) 41 | 42 | xy_world_front = cam_front.get_translation()[:2] 43 | xy_world_left = cam_left.get_translation()[:2] 44 | xy_world_right = cam_right.get_translation()[:2] 45 | xy_world_rear = cam_rear.get_translation()[:2] 46 | 47 | u_front_bev, v_front_bev = bev_points_world_to_img(bev_range, bev_size, xy_world_front) 48 | u_left_bev, v_left_bev = bev_points_world_to_img(bev_range, bev_size, xy_world_left) 49 | u_right_bev, v_right_bev = bev_points_world_to_img(bev_range, bev_size, xy_world_right) 50 | u_rear_bev, v_rear_bev = bev_points_world_to_img(bev_range, bev_size, xy_world_rear) 51 | 52 | bev_img_all = np.zeros(bev_img_front.shape).astype(np.uint8) 53 | if overlay_opt == 'lr': 54 | bev_img_all[0:v_front_bev, :] = bev_img_front[0:v_front_bev, :] 55 | bev_img_all[v_rear_bev:bev_size, :] = bev_img_rear[v_rear_bev:bev_size, :] 56 | bev_img_all[:, 0:u_left_bev] = bev_img_left[:, 0:u_left_bev] 57 | bev_img_all[:, u_right_bev:bev_size] = bev_img_right[:, u_right_bev:bev_size] 58 | elif overlay_opt == 'fr': 59 | bev_img_all[:, 0:u_left_bev] = bev_img_left[:, 0:u_left_bev] 60 | bev_img_all[:, u_right_bev:bev_size] = bev_img_right[:, u_right_bev:bev_size] 61 | bev_img_all[0:v_front_bev, :] = bev_img_front[0:v_front_bev, :] 62 | bev_img_all[v_rear_bev:bev_size, :] = bev_img_rear[v_rear_bev:bev_size, :] 63 | else: 64 | bev_img_all = (bev_img_front.astype(np.float32) + bev_img_left.astype(np.float32) + 65 | bev_img_right.astype(np.float32) + bev_img_rear.astype(np.float32)) / 4 66 | bev_img_all = ((bev_img_all / bev_img_all.max()) * 255).astype(np.uint8) 67 | 68 | return bev_img_all 69 | 70 | if __name__ == '__main__': 71 | bev_range = 25 72 | bev_size = 960 73 | overlay_opt = "all" # Which images to take for overlaying zones, 
available options: fr: front & rear, lr: left & right, all: all 4 cams 74 | calib_f_front = "../calibrations/optimized/00164_FV.json" 75 | calib_f_left = "../calibrations/optimized/00165_MVL.json" 76 | calib_f_right = "../calibrations/optimized/00166_MVR.json" 77 | calib_f_rear = "../calibrations/optimized/00167_RV.json" 78 | fisheye_img_front = cv2.imread("../images/fisheye/00164_FV.png") 79 | fisheye_img_left = cv2.imread("../images/fisheye/00165_MVL.png") 80 | fisheye_img_right = cv2.imread("../images/fisheye/00166_MVR.png") 81 | fisheye_img_rear = cv2.imread("../images/fisheye/00167_RV.png") 82 | cam_front = read_cam_from_json(calib_f_front) 83 | cam_left = read_cam_from_json(calib_f_left) 84 | cam_right = read_cam_from_json(calib_f_right) 85 | cam_rear = read_cam_from_json(calib_f_rear) 86 | bev_img_all = generate_bev_all_cams(cam_front, cam_left, cam_right, cam_rear, fisheye_img_front, fisheye_img_left, 87 | fisheye_img_right, fisheye_img_rear, overlay_opt, bev_range, bev_size) 88 | plt.imshow(cv2.cvtColor(bev_img_all, cv2.COLOR_BGR2RGB)) 89 | plt.show() 90 | 91 | 92 | -------------------------------------------------------------------------------- /source/initialize_extrins_calib.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Valeo Brain Division and contributors 2 | # 3 | # Author: Lihao Wang 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | import matplotlib.pyplot as plt 24 | from matplotlib.widgets import TextBox, RadioButtons, Button 25 | import os 26 | from scipy.spatial.transform import Rotation as SciRot 27 | import cv2 28 | from utils import init_fisheye_cam, read_calib, write_calib 29 | from generate_bev_img import generate_bev_all_cams 30 | 31 | 32 | if __name__ == '__main__': 33 | """ 34 | How to use: 35 | Adjust the extrinsic parameters, especially rotation angles, for each camera to create a reasonably good BEV image. 36 | Use the left-right / front-rear toggle button to switch between BEV images overlaid from different cameras. 37 | Once finished, click the 'Export to files' button to save the calibrations to files. Then you can use the saved calibrations 38 | as the initial files for optimize.py.
39 | """ 40 | ini_calib_dir = "../calibrations/fisheye/creteil_learning_car_manual_calib_ini" 41 | # Original calibrations are used only to get intrinsics and camera heights 42 | calib_ori_f_front = "../calibrations/original/00164_FV.json" 43 | calib_ori_f_left = "../calibrations/original/00165_MVL.json" 44 | calib_ori_f_right = "../calibrations/original/00166_MVR.json" 45 | calib_ori_f_rear = "../calibrations/original/00167_RV.json" 46 | img_front = cv2.imread("../images/fisheye/00164_FV.png") 47 | img_left = cv2.imread("../images/fisheye/00165_MVL.png") 48 | img_right = cv2.imread("../images/fisheye/00166_MVR.png") 49 | img_rear = cv2.imread("../images/fisheye/00167_RV.png") 50 | calib_export_dir = "../calibrations/initial" 51 | overlay_opt = 'lr' 52 | 53 | if not os.path.exists(calib_export_dir): 54 | os.makedirs(calib_export_dir) 55 | 56 | intr_front, _, t_front = read_calib(calib_ori_f_front) 57 | intr_left, _, t_left = read_calib(calib_ori_f_left) 58 | intr_right, _, t_right = read_calib(calib_ori_f_right) 59 | intr_rear, _, t_rear = read_calib(calib_ori_f_rear) 60 | 61 | # Replace translation xy by their nominal values 62 | pos_x_front, pos_y_front = 3.7, 0 63 | pos_x_left, pos_y_left = 2, 1 64 | pos_x_right, pos_y_right = 2, -1 65 | pos_x_rear, pos_y_rear = -1, 0 66 | pos_z_front = t_front[2] 67 | pos_z_left = t_left[2] 68 | pos_z_right = t_right[2] 69 | pos_z_rear = t_rear[2] 70 | t_front = [pos_x_front, pos_y_front, pos_z_front] 71 | t_left = [pos_x_left, pos_y_left, pos_z_left] 72 | t_right = [pos_x_right, pos_y_right, pos_z_right] 73 | t_rear = [pos_x_rear, pos_y_rear, pos_z_rear] 74 | 75 | # Replace Euler angles by their typical values 76 | rot_z1_front, rot_x_front, rot_z2_front = 180, 90, 90 77 | quat_front = SciRot.from_euler('zxz', [rot_z1_front, rot_x_front, rot_z2_front], degrees=True).as_quat() 78 | rot_z1_left, rot_x_left, rot_z2_left = 180, 180, -180 79 | quat_left = SciRot.from_euler('zxz', [rot_z1_left, rot_x_left, rot_z2_left], degrees=True).as_quat() 80 | rot_z1_right, rot_x_right, rot_z2_right = -180, 180, 0 81 | quat_right = SciRot.from_euler('zxz', [rot_z1_right, rot_x_right, rot_z2_right], degrees=True).as_quat() 82 | rot_z1_rear, rot_x_rear, rot_z2_rear = 180, 90, -90 83 | quat_rear = SciRot.from_euler('zxz', [rot_z1_rear, rot_x_rear, rot_z2_rear], degrees=True).as_quat() 84 | 85 | cam_front = init_fisheye_cam(intr_front, quat_front, t_front) 86 | cam_left = init_fisheye_cam(intr_left, quat_left, t_left) 87 | cam_right = init_fisheye_cam(intr_right, quat_right, t_right) 88 | cam_rear = init_fisheye_cam(intr_rear, quat_rear, t_rear) 89 | 90 | fig, ax = plt.subplots() 91 | plt.subplots_adjust(left=0.5, right=0.9, top=0.8, bottom=0.2) 92 | ax.axis('off') 93 | topview = generate_bev_all_cams(cam_front, cam_left, cam_right, cam_rear, img_front, img_left, img_right, img_rear, overlay_opt) 94 | im = ax.imshow(cv2.cvtColor(topview, cv2.COLOR_BGR2RGB)) 95 | 96 | def update_calib(val): 97 | overlay_opt = 'lr' if menu_topview_opt.value_selected == 'left-right' else 'fr' 98 | pos_x_0 = float(text_pos_x_0.text) 99 | pos_y_0 = float(text_pos_y_0.text) 100 | rot_z1_0 = float(text_rot_z1_0.text) 101 | rot_x_0 = float(text_rot_x_0.text) 102 | rot_z2_0 = float(text_rot_z2_0.text) 103 | pos_x_1 = float(text_pos_x_1.text) 104 | pos_y_1 = float(text_pos_y_1.text) 105 | rot_z1_1 = float(text_rot_z1_1.text) 106 | rot_x_1 = float(text_rot_x_1.text) 107 | rot_z2_1 = float(text_rot_z2_1.text) 108 | pos_x_2 = float(text_pos_x_2.text) 109 | pos_y_2 = float(text_pos_y_2.text) 110 | 
rot_z1_2 = float(text_rot_z1_2.text) 111 | rot_x_2 = float(text_rot_x_2.text) 112 | rot_z2_2 = float(text_rot_z2_2.text) 113 | pos_x_3 = float(text_pos_x_3.text) 114 | pos_y_3 = float(text_pos_y_3.text) 115 | rot_z1_3 = float(text_rot_z1_3.text) 116 | rot_x_3 = float(text_rot_x_3.text) 117 | rot_z2_3 = float(text_rot_z2_3.text) 118 | t_front[0] = pos_x_0 119 | t_front[1] = pos_y_0 120 | R_front = SciRot.from_euler('zxz', [rot_z1_0, rot_x_0, rot_z2_0], degrees=True).as_matrix() 121 | cam_front.update_extr(t_front, R_front) 122 | t_left[0] = pos_x_1 123 | t_left[1] = pos_y_1 124 | R_left = SciRot.from_euler('zxz', [rot_z1_1, rot_x_1, rot_z2_1], degrees=True).as_matrix() 125 | cam_left.update_extr(t_left, R_left) 126 | t_right[0] = pos_x_2 127 | t_right[1] = pos_y_2 128 | R_right = SciRot.from_euler('zxz', [rot_z1_2, rot_x_2, rot_z2_2], degrees=True).as_matrix() 129 | cam_right.update_extr(t_right, R_right) 130 | t_rear[0] = pos_x_3 131 | t_rear[1] = pos_y_3 132 | R_rear = SciRot.from_euler('zxz', [rot_z1_3, rot_x_3, rot_z2_3], degrees=True).as_matrix() 133 | cam_rear.update_extr(t_rear, R_rear) 134 | topview = generate_bev_all_cams(cam_front, cam_left, cam_right, cam_rear, img_front, img_left, img_right, img_rear, overlay_opt) 135 | im.set_data(cv2.cvtColor(topview, cv2.COLOR_BGR2RGB)) 136 | fig.canvas.draw_idle() 137 | 138 | def export_calib(event): 139 | quat_front = SciRot.from_matrix(cam_front.get_rotation()).as_quat().tolist() 140 | quat_left = SciRot.from_matrix(cam_left.get_rotation()).as_quat().tolist() 141 | quat_right = SciRot.from_matrix(cam_right.get_rotation()).as_quat().tolist() 142 | quat_rear = SciRot.from_matrix(cam_rear.get_rotation()).as_quat().tolist() 143 | write_calib(intr_front, quat_front, t_front, os.path.join(calib_export_dir, "00164_FV.json")) 144 | write_calib(intr_left, quat_left, t_left, os.path.join(calib_export_dir, "00165_MVL.json")) 145 | write_calib(intr_right, quat_right, t_right, os.path.join(calib_export_dir, "00166_MVR.json")) 146 | write_calib(intr_rear, quat_rear, t_rear, os.path.join(calib_export_dir, "00167_RV.json")) 147 | 148 | box_topview_opt = plt.axes([0.10, 0.7, 0.1, 0.1], facecolor='linen') 149 | menu_topview_opt = RadioButtons(box_topview_opt, ('left-right', 'front-rear')) 150 | box_export_calib = plt.axes([0.10, 0.2, 0.08, 0.04], facecolor='linen') 151 | button_export_calib = Button(box_export_calib, 'Export to files') 152 | 153 | box_pos_x_0 = plt.axes([0.10, 0.6, 0.05, 0.03], facecolor='linen') 154 | text_pos_x_0 = TextBox(box_pos_x_0, 'Front: pos_x', initial=f"{pos_x_front:.2f}") 155 | box_pos_y_0 = plt.axes([0.19, 0.6, 0.05, 0.03], facecolor='linen') 156 | text_pos_y_0 = TextBox(box_pos_y_0, 'pos_y', initial=f"{pos_y_front:.2f}") 157 | box_rot_z1_0 = plt.axes([0.28, 0.6, 0.05, 0.03], facecolor='linen') 158 | text_rot_z1_0 = TextBox(box_rot_z1_0, 'rot_z1', initial=f"{rot_z1_front:.1f}") 159 | box_rot_x_0 = plt.axes([0.37, 0.6, 0.05, 0.03], facecolor='linen') 160 | text_rot_x_0 = TextBox(box_rot_x_0, 'rot_x', initial=f"{rot_x_front:.1f}") 161 | box_rot_z2_0 = plt.axes([0.46, 0.6, 0.05, 0.03], facecolor='linen') 162 | text_rot_z2_0 = TextBox(box_rot_z2_0, 'rot_z2', initial=f"{rot_z2_front:.1f}") 163 | 164 | box_pos_x_1 = plt.axes([0.10, 0.5, 0.05, 0.03], facecolor='linen') 165 | text_pos_x_1 = TextBox(box_pos_x_1, 'Left: pos_x', initial=f"{pos_x_left:.2f}") 166 | box_pos_y_1 = plt.axes([0.19, 0.5, 0.05, 0.03], facecolor='linen') 167 | text_pos_y_1 = TextBox(box_pos_y_1, 'pos_y', initial=f"{pos_y_left:.2f}") 168 | box_rot_z1_1 = 
plt.axes([0.28, 0.5, 0.05, 0.03], facecolor='linen') 169 | text_rot_z1_1 = TextBox(box_rot_z1_1, 'rot_z1', initial=f"{rot_z1_left:.1f}") 170 | box_rot_x_1 = plt.axes([0.37, 0.5, 0.05, 0.03], facecolor='linen') 171 | text_rot_x_1 = TextBox(box_rot_x_1, 'rot_x', initial=f"{rot_x_left:.1f}") 172 | box_rot_z2_1 = plt.axes([0.46, 0.5, 0.05, 0.03], facecolor='linen') 173 | text_rot_z2_1 = TextBox(box_rot_z2_1, 'rot_z2', initial=f"{rot_z2_left:.1f}") 174 | 175 | box_pos_x_2 = plt.axes([0.10, 0.4, 0.05, 0.03], facecolor='linen') 176 | text_pos_x_2 = TextBox(box_pos_x_2, 'Right: pos_x', initial=f"{pos_x_right:.2f}") 177 | box_pos_y_2 = plt.axes([0.19, 0.4, 0.05, 0.03], facecolor='linen') 178 | text_pos_y_2 = TextBox(box_pos_y_2, 'pos_y', initial=f"{pos_y_right:.2f}") 179 | box_rot_z1_2 = plt.axes([0.28, 0.4, 0.05, 0.03], facecolor='linen') 180 | text_rot_z1_2 = TextBox(box_rot_z1_2, 'rot_z1', initial=f"{rot_z1_right:.1f}") 181 | box_rot_x_2 = plt.axes([0.37, 0.4, 0.05, 0.03], facecolor='linen') 182 | text_rot_x_2 = TextBox(box_rot_x_2, 'rot_x', initial=f"{rot_x_right:.1f}") 183 | box_rot_z2_2 = plt.axes([0.46, 0.4, 0.05, 0.03], facecolor='linen') 184 | text_rot_z2_2 = TextBox(box_rot_z2_2, 'rot_z2', initial=f"{rot_z2_right:.1f}") 185 | 186 | box_pos_x_3 = plt.axes([0.10, 0.3, 0.05, 0.03], facecolor='linen') 187 | text_pos_x_3 = TextBox(box_pos_x_3, 'Rear: pos_x', initial=f"{pos_x_rear:.2f}") 188 | box_pos_y_3 = plt.axes([0.19, 0.3, 0.05, 0.03], facecolor='linen') 189 | text_pos_y_3 = TextBox(box_pos_y_3, 'pos_y', initial=f"{pos_y_rear:.2f}") 190 | box_rot_z1_3 = plt.axes([0.28, 0.3, 0.05, 0.03], facecolor='linen') 191 | text_rot_z1_3 = TextBox(box_rot_z1_3, 'rot_z1', initial=f"{rot_z1_rear:.1f}") 192 | box_rot_x_3 = plt.axes([0.37, 0.3, 0.05, 0.03], facecolor='linen') 193 | text_rot_x_3 = TextBox(box_rot_x_3, 'rot_x', initial=f"{rot_x_rear:.1f}") 194 | box_rot_z2_3 = plt.axes([0.46, 0.3, 0.05, 0.03], facecolor='linen') 195 | text_rot_z2_3 = TextBox(box_rot_z2_3, 'rot_z2', initial=f"{rot_z2_rear:.1f}") 196 | 197 | menu_topview_opt.on_clicked(update_calib) 198 | button_export_calib.on_clicked(export_calib) 199 | 200 | text_pos_x_0.on_submit(update_calib) 201 | text_pos_y_0.on_submit(update_calib) 202 | text_rot_z1_0.on_submit(update_calib) 203 | text_rot_x_0.on_submit(update_calib) 204 | text_rot_z2_0.on_submit(update_calib) 205 | 206 | text_pos_x_1.on_submit(update_calib) 207 | text_pos_y_1.on_submit(update_calib) 208 | text_rot_z1_1.on_submit(update_calib) 209 | text_rot_x_1.on_submit(update_calib) 210 | text_rot_z2_1.on_submit(update_calib) 211 | 212 | text_pos_x_2.on_submit(update_calib) 213 | text_pos_y_2.on_submit(update_calib) 214 | text_rot_z1_2.on_submit(update_calib) 215 | text_rot_x_2.on_submit(update_calib) 216 | text_rot_z2_2.on_submit(update_calib) 217 | 218 | text_pos_x_3.on_submit(update_calib) 219 | text_pos_y_3.on_submit(update_calib) 220 | text_rot_z1_3.on_submit(update_calib) 221 | text_rot_x_3.on_submit(update_calib) 222 | text_rot_z2_3.on_submit(update_calib) 223 | 224 | plt.show() 225 | -------------------------------------------------------------------------------- /source/optimize.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Valeo Brain Division and contributors 2 | # 3 | # Author: Lihao Wang 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including 
without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import os
import numpy as np
from scipy.optimize import minimize
from utils import quat_to_mat, init_fisheye_cam, read_calib, write_calib

def optimizer(calib,
              cam_front,
              cam_left,
              cam_right,
              cam_rear,
              pos_z_front,
              pos_z_left,
              pos_z_right,
              pos_z_rear,
              pts_img_front_left,
              pts_img_front_right,
              pts_img_rear_left,
              pts_img_rear_right):
    """
    Objective for scipy.optimize.minimize: unpacks the 24-element parameter
    vector (per camera: pos_x, pos_y and a scalar-last quaternion; the camera
    heights pos_z stay fixed), updates the four camera extrinsics, and returns
    the mean distance error (in meters) between the ground projections of
    corresponding clicked points in the overlapping camera pairs.
    """
    t_front = [calib[0], calib[1], pos_z_front]
    R_front = quat_to_mat(calib[2:6])
    cam_front.update_extr(t_front, R_front)

    t_left = [calib[6], calib[7], pos_z_left]
    R_left = quat_to_mat(calib[8:12])
    cam_left.update_extr(t_left, R_left)

    t_right = [calib[12], calib[13], pos_z_right]
    R_right = quat_to_mat(calib[14:18])
    cam_right.update_extr(t_right, R_right)

    t_rear = [calib[18], calib[19], pos_z_rear]
    R_rear = quat_to_mat(calib[20:24])
    cam_rear.update_extr(t_rear, R_rear)

    distance = 0
    pts_img_front_fl = pts_img_front_left["front"]
    pts_img_left_fl = pts_img_front_left["left"]
    pts_img_front_fr = pts_img_front_right["front"]
    pts_img_right_fr = pts_img_front_right["right"]
    pts_img_rear_rl = pts_img_rear_left["rear"]
    pts_img_left_rl = pts_img_rear_left["left"]
    pts_img_rear_rr = pts_img_rear_right["rear"]
    pts_img_right_rr = pts_img_rear_right["right"]

    assert len(pts_img_front_fl) == len(pts_img_left_fl) and len(pts_img_front_fl) > 0
    assert len(pts_img_front_fr) == len(pts_img_right_fr) and len(pts_img_front_fr) > 0
    assert len(pts_img_rear_rl) == len(pts_img_left_rl) and len(pts_img_rear_rl) > 0
    assert len(pts_img_rear_rr) == len(pts_img_right_rr) and len(pts_img_rear_rr) > 0

    num_pts = len(pts_img_front_fl) + len(pts_img_front_fr) + len(pts_img_rear_rl) + len(pts_img_rear_rr)

    pts_world_front_fl = cam_front.project_2d_to_3d_ground(pts_img_front_fl)
    pts_world_left_fl = cam_left.project_2d_to_3d_ground(pts_img_left_fl)
    distance += np.linalg.norm(pts_world_front_fl - pts_world_left_fl, axis=1).sum()

    pts_world_front_fr = cam_front.project_2d_to_3d_ground(pts_img_front_fr)
    pts_world_right_fr = cam_right.project_2d_to_3d_ground(pts_img_right_fr)
    distance += np.linalg.norm(pts_world_front_fr - pts_world_right_fr, axis=1).sum()

    pts_world_rear_rl = cam_rear.project_2d_to_3d_ground(pts_img_rear_rl)
    pts_world_left_rl = cam_left.project_2d_to_3d_ground(pts_img_left_rl)
    distance += np.linalg.norm(pts_world_rear_rl - pts_world_left_rl, axis=1).sum()

    pts_world_rear_rr = cam_rear.project_2d_to_3d_ground(pts_img_rear_rr)
    pts_world_right_rr = cam_right.project_2d_to_3d_ground(pts_img_right_rr)
    distance += np.linalg.norm(pts_world_rear_rr - pts_world_right_rr, axis=1).sum()

    mde = distance / num_pts
    print(f"Mean distance error: {mde}")

    return mde


if __name__ == '__main__':
    calib_f_front = "../calibrations/original/00164_FV.json"
    calib_f_left = "../calibrations/original/00165_MVL.json"
    calib_f_right = "../calibrations/original/00166_MVR.json"
    calib_f_rear = "../calibrations/original/00167_RV.json"
    calib_save_root = "../calibrations/optimized"

    # Put your clicked keypoints here
    pts_img_front_left = {
        "front": np.array([(186, 585), (194, 591), (325, 493), (333, 495), (418, 444), (463, 417), (502, 402), (547, 384),
                           (210, 469), (211, 458), (226, 454), (428, 403), (546, 369)]),
        "left": np.array([(1048, 539), (1047, 555), (1092, 591), (1091, 607), (1119, 639), (1135, 651), (1146, 677), (1162, 704),
                          (1057, 309), (1063, 297), (1074, 317), (1159, 550), (1187, 660)])}

    pts_img_front_right = {
        "front": np.array([(939, 475), (856, 433), (865, 432), (815, 412), (978, 499), (1175, 559), (1137, 534), (1121, 551),
                           (1130, 549), (1126, 619)]),
        "right": np.array([(158, 583), (138, 618), (137, 606), (124, 639), (174, 566), (246, 330), (221, 356), (212, 422),
                           (216, 400), (222, 538)])}

    pts_img_rear_left = {
        "rear": np.array([(788, 350), (810, 370), (818, 369), (858, 410), (866, 409), (825, 360), (921, 469), (931, 467),
                          (1019, 582), (1028, 576), (1061, 476), (1114, 513), (1158, 546)]),
        "left": np.array([(240, 212), (247, 217), (252, 210), (263, 219), (270, 212), (267, 194), (285, 218), (290, 210),
                          (317, 222), (325, 213), (452, 109), (512, 97), (571, 89)])}

    pts_img_rear_right = {
        "rear": np.array([(325, 454), (338, 453), (389, 399), (446, 361), (456, 360), (420, 396), (449, 372), (487, 339),
                          (504, 324), (512, 324), (598, 280), (555, 300)]),
        "right": np.array([(967, 197), (980, 208), (995, 198), (1019, 202), (1027, 211), (1019, 220), (1030, 220), (1043, 212),
                           (1047, 207), (1054, 214), (1105, 222), (1078, 216)])}

    intr_front, quat_front, t_front = read_calib(calib_f_front)
    intr_left, quat_left, t_left = read_calib(calib_f_left)
    intr_right, quat_right, t_right = read_calib(calib_f_right)
    intr_rear, quat_rear, t_rear = read_calib(calib_f_rear)

    pos_x_front, pos_y_front, pos_z_front = t_front
    pos_x_left, pos_y_left, pos_z_left = t_left
    pos_x_right, pos_y_right, pos_z_right = t_right
    pos_x_rear, pos_y_rear, pos_z_rear = t_rear

    cam_front = init_fisheye_cam(intr_front, quat_front, t_front)
    cam_left = init_fisheye_cam(intr_left, quat_left, t_left)
    cam_right = init_fisheye_cam(intr_right, quat_right, t_right)
    cam_rear = init_fisheye_cam(intr_rear, quat_rear, t_rear)

    calib_ini = np.array([pos_x_front,
                          pos_y_front,
                          *quat_front,
                          pos_x_left,
                          pos_y_left,
                          *quat_left,
                          pos_x_right,
                          pos_y_right,
                          *quat_right,
                          pos_x_rear,
                          pos_y_rear,
                          *quat_rear])
    func_optimize = lambda calib: optimizer(calib,
                                            cam_front,
                                            cam_left,
                                            cam_right,
                                            cam_rear,
                                            pos_z_front,
                                            pos_z_left,
                                            pos_z_right,
                                            pos_z_rear,
                                            pts_img_front_left,
                                            pts_img_front_right,
                                            pts_img_rear_left,
                                            pts_img_rear_right)

    res_multi_cam = minimize(func_optimize, calib_ini, method='BFGS')
    final_calib = res_multi_cam.x.tolist()
    print("Optimized mean distance error:", res_multi_cam.fun)
    # Save to files
    final_t_front = final_calib[0:2] + [pos_z_front]
    final_quat_front = final_calib[2:6]
    final_t_left = final_calib[6:8] + [pos_z_left]
    final_quat_left = final_calib[8:12]
    final_t_right = final_calib[12:14] + [pos_z_right]
    final_quat_right = final_calib[14:18]
    final_t_rear = final_calib[18:20] + [pos_z_rear]
    final_quat_rear = final_calib[20:24]
    write_calib(intr_front, final_quat_front, final_t_front, os.path.join(calib_save_root, "00164_FV.json"))
    write_calib(intr_left, final_quat_left, final_t_left, os.path.join(calib_save_root, "00165_MVL.json"))
    write_calib(intr_right, final_quat_right, final_t_right, os.path.join(calib_save_root, "00166_MVR.json"))
    write_calib(intr_rear, final_quat_rear, final_t_rear, os.path.join(calib_save_root, "00167_RV.json"))
--------------------------------------------------------------------------------
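
A note on the parameter layout in optimize.py: calib packs six values per camera, [pos_x, pos_y, qx, qy, qz, qw], in the order front, left, right, rear (slices 0:6, 6:12, 12:18, 18:24). The quaternions follow SciPy's scalar-last convention, and the camera heights pos_z are deliberately held at their initial values: only x, y and orientation are optimized. A minimal sanity check of the optimized result might look like the sketch below (illustrative, not a script shipped in this repo; it reuses the first clicked front/left pair from optimize.py). After optimization, the two ground projections of the same physical point should nearly coincide.

import numpy as np
from utils import read_calib, init_fisheye_cam

# Load the optimized front and left calibrations and rebuild the cameras
intr_f, quat_f, t_f = read_calib("../calibrations/optimized/00164_FV.json")
intr_l, quat_l, t_l = read_calib("../calibrations/optimized/00165_MVL.json")
cam_front = init_fisheye_cam(intr_f, quat_f, t_f)
cam_left = init_fisheye_cam(intr_l, quat_l, t_l)

# The same physical ground point, clicked in both fisheye images
pt_front = np.array([(186, 585)])
pt_left = np.array([(1048, 539)])

# Project both clicks onto the ground plane and measure the residual (meters)
g_front = cam_front.project_2d_to_3d_ground(pt_front)
g_left = cam_left.project_2d_to_3d_ground(pt_left)
print("residual [m]:", np.linalg.norm(g_front - g_left))
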
/source/projection.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# Copyright 2021 Valeo Schalter und Sensoren GmbH and contributors
#
# Author: Christian Witt
# Modified 2024 by Lihao Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

import json

import numpy as np
import cv2
from scipy.spatial.transform import Rotation as SciRot


def ensure_point_list(points, dim, concatenate=True, crop=True):
    if isinstance(points, list):
        points = np.array(points)
    assert isinstance(points, np.ndarray)
    assert points.ndim == 2

    if crop:
        for test_dim in range(4, dim, -1):
            if points.shape[1] == test_dim:
                new_shape = test_dim - 1
                assert np.array_equal(points[:, new_shape], np.ones(points.shape[0]))
                points = points[:, 0:new_shape]

    if concatenate and points.shape[1] == (dim - 1):
        points = np.concatenate((np.array(points), np.ones((points.shape[0], 1))), axis=1)

    if points.shape[1] != dim:
        raise AssertionError('points.shape[1] == dim failed ({} != {})'.format(points.shape[1], dim))
    return points


class Projection(object):
    def project_3d_to_2d(self, cam_points: np.ndarray, invalid_value=np.nan):
        raise NotImplementedError()

    def project_2d_to_3d(self, lens_points: np.ndarray, norm: np.ndarray):
        raise NotImplementedError()


class RadialPolyCamProjection(Projection):
    def __init__(self, distortion_params: list):
        self.coefficients = np.asarray(distortion_params)
        self.power = np.array([np.arange(start=1, stop=self.coefficients.size + 1)]).T

    def project_3d_to_2d(self, cam_points, invalid_value=np.nan):
        camera_points = ensure_point_list(cam_points, dim=3)
        chi = np.sqrt(camera_points.T[0] * camera_points.T[0] + camera_points.T[1] * camera_points.T[1])
        theta = np.pi / 2.0 - np.arctan2(camera_points.T[2], chi)
        rho = self._theta_to_rho(theta)
        lens_points = np.divide(rho, chi, where=(chi != 0))[:, np.newaxis] * camera_points[:, 0:2]

        # points at the camera origin ((0, 0, 0) in camera coordinates) cannot
        # be projected; mark them with invalid_value. Note this indexes
        # camera_points, the validated array, since the raw cam_points input
        # may still be a plain list.
        lens_points[(chi == 0) & (camera_points[:, 2] == 0)] = invalid_value
        return lens_points

    def project_2d_to_3d(self, lens_points: np.ndarray, norms: np.ndarray):
        lens_points = ensure_point_list(lens_points, dim=2)
        norms = ensure_point_list(norms, dim=1).reshape(norms.size)

        rhos = np.linalg.norm(lens_points, axis=1)
        thetas = self._rho_to_theta(rhos)
        chis = norms * np.sin(thetas)
        zs = norms * np.cos(thetas)
        xy = np.divide(chis, rhos, where=(rhos != 0))[:, np.newaxis] * lens_points
        xyz = np.hstack((xy, zs[:, np.newaxis]))
        return xyz

    def _theta_to_rho(self, theta):
        return np.dot(self.coefficients, np.power(np.array([theta]), self.power))

    def _rho_to_theta(self, rho):
        coeff = list(reversed(self.coefficients))
        results = np.zeros_like(rho)
        for i, _r in enumerate(rho):
            theta = np.roots([*coeff, -_r])
            theta = np.real(theta[theta.imag == 0])
            theta = theta[np.where(np.abs(theta) < np.pi)]
            theta = np.min(theta) if theta.size > 0 else 0
            results[i] = theta
        return results
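
# For intuition: RadialPolyCamProjection models the fisheye lens by mapping
# the angle of incidence theta (measured from the optical axis) to an
# image-plane radius rho through a polynomial with no constant term. With the
# four coefficients k1..k4 stored in the calibration files this reads
#
#     rho(theta) = k1*theta + k2*theta**2 + k3*theta**3 + k4*theta**4
#
# _theta_to_rho above evaluates this polynomial directly; _rho_to_theta
# inverts it numerically with np.roots, keeping the smallest real root
# satisfying |theta| < pi.
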
class Camera(object):
    def __init__(self, lens: Projection, translation, rotation, size, principle_point,
                 aspect_ratio: float = 1.0):
        self.lens = lens
        pose = np.eye(4)
        pose[0:3, 3] = translation
        pose[0:3, 0:3] = rotation
        self._pose = np.asarray(pose, dtype=float)
        self._inv_pose = np.linalg.inv(self._pose)
        self._size = np.array([size[0], size[1]], dtype=int)
        self._principle_point = 0.5 * self._size + np.array([principle_point[0], principle_point[1]], dtype=float) - 0.5
        self._aspect_ratio = np.array([1, aspect_ratio], dtype=float)

    size = property(lambda self: self._size)
    width = property(lambda self: self._size[0])
    height = property(lambda self: self._size[1])
    cx = property(lambda self: self._principle_point[0])
    cy = property(lambda self: self._principle_point[1])
    cx_offset = property(lambda self: self._principle_point[0] - 0.5 * self._size[0] + 0.5)
    cy_offset = property(lambda self: self._principle_point[1] - 0.5 * self._size[1] + 0.5)
    aspect_ratio = property(lambda self: self._aspect_ratio[1])

    rotation = property(lambda self: self._pose[0:3, 0:3])
    translation = property(lambda self: self._pose[0:3, 3])

    def update_extr(self, translation, rotation):
        self._pose[0:3, 3] = translation
        self._pose[0:3, 0:3] = rotation
        self._inv_pose = np.linalg.inv(self._pose)

    def project_3d_to_2d(self, world_points: np.ndarray, do_clip=False, invalid_value=np.nan):
        world_points = ensure_point_list(world_points, dim=4)

        camera_points = world_points @ self._inv_pose.T
        lens_points = self.lens.project_3d_to_2d(camera_points[:, 0:3], invalid_value=invalid_value)
        screen_points = (lens_points * self._aspect_ratio) + self._principle_point
        return self._apply_clip(screen_points, screen_points) if do_clip else screen_points

    def project_2d_to_3d(self, screen_points: np.ndarray, norm: np.ndarray, do_clip=False):
        screen_points = ensure_point_list(screen_points, dim=2, concatenate=False, crop=False)
        norm = ensure_point_list(norm[:, np.newaxis], dim=1, concatenate=False, crop=False)
        lens_points = (screen_points - self._principle_point) / self._aspect_ratio
        lens_points = self._apply_clip(lens_points, screen_points) if do_clip else lens_points

        camera_points = self.lens.project_2d_to_3d(lens_points, norm)

        camera_points = ensure_point_list(camera_points, dim=4)
        world_points = camera_points @ self._pose.T
        return world_points[:, 0:3]

    def project_2d_to_3d_ground(self, screen_points: np.ndarray, do_clip=False):
        # Back-project each pixel to the world point one unit along its viewing
        # ray, then stretch the ray from the camera center until it meets the
        # ground plane z = 0: scale = (0 - cam_z) / (point_z - cam_z).
        world_points = self.project_2d_to_3d(screen_points, norm=np.array([1]), do_clip=do_clip)
        world_points_from_cam = world_points - self.translation
        z_ground_from_cam = -self.translation[2]
        zs_from_cam = world_points_from_cam[:, [2]]
        scale = z_ground_from_cam / zs_from_cam
        ground_points = world_points_from_cam * scale + self.translation
        return ground_points

    def get_translation(self):
        return self._pose[0:3, 3]

    def get_rotation(self):
        return self._pose[0:3, 0:3]

    def _apply_clip(self, points, clip_source) -> np.ndarray:
        if self._size[0] == 0 or self._size[1] == 0:
            raise RuntimeError('clipping without a size is not possible')
        mask = (clip_source[:, 0] < 0) | (clip_source[:, 0] >= self._size[0]) | \
               (clip_source[:, 1] < 0) | (clip_source[:, 1] >= self._size[1])

        points[mask] = np.nan
        return points
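
# Usage sketch (illustrative, not part of the original file): round-tripping
# a point through a Camera built from one of the shipped calibration files.
#
#     cam = read_cam_from_json("../calibrations/original/00164_FV.json")
#     px = cam.project_3d_to_2d(np.array([[5.0, 0.0, 0.0]]))  # world -> pixel
#     ray = cam.project_2d_to_3d(px, norm=np.array([1]))      # pixel -> point 1 unit along the ray
#     ground = cam.project_2d_to_3d_ground(px)                # pixel -> ground plane (z = 0)
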
def create_img_projection_maps(source_cam: Camera, destination_cam: Camera):
    """
    Generates maps for cv2.remap to remap from one camera to another
    """
    u_map = np.zeros((destination_cam.height, destination_cam.width, 1), dtype=np.float32)
    v_map = np.zeros((destination_cam.height, destination_cam.width, 1), dtype=np.float32)

    destination_points_b = np.arange(destination_cam.height)

    for u_px in range(destination_cam.width):
        destination_points_a = np.ones(destination_cam.height) * u_px
        destination_points = np.vstack((destination_points_a, destination_points_b)).T

        source_points = source_cam.project_3d_to_2d(
            destination_cam.project_2d_to_3d(destination_points, norm=np.array([1])))

        u_map.T[0][u_px] = source_points.T[0]
        v_map.T[0][u_px] = source_points.T[1]

    map1, map2 = cv2.convertMaps(u_map, v_map, dstmap1type=cv2.CV_16SC2, nninterpolation=False)
    return map1, map2


def create_bev_projection_maps(source_cam: Camera, bev_range: int, bev_size: int):
    """
    Generates maps for cv2.remap to remap from one camera to a bird's-eye-view (BEV) image.

    :param source_cam: source fisheye camera
    :param bev_range: BEV range in meters
    :param bev_size: BEV image size in pixels
    """
    u_map = np.zeros((bev_size, bev_size, 1), dtype=np.float32)
    v_map = np.zeros((bev_size, bev_size, 1), dtype=np.float32)
    scale_pxl_to_meter = bev_range / bev_size

    bev_points_v = np.arange(bev_size)
    bev_points_world_z = np.zeros(bev_size)

    for u_px in range(bev_size):
        bev_points_u = np.ones(bev_size) * u_px
        bev_points_world_x = bev_range / 2 - bev_points_v * scale_pxl_to_meter
        bev_points_world_y = bev_range / 2 - bev_points_u * scale_pxl_to_meter
        bev_points_world = np.column_stack((bev_points_world_x, bev_points_world_y, bev_points_world_z))
        source_points = source_cam.project_3d_to_2d(bev_points_world)
        u_map.T[0][u_px] = source_points.T[0]
        v_map.T[0][u_px] = source_points.T[1]

    map1, map2 = cv2.convertMaps(u_map, v_map, dstmap1type=cv2.CV_16SC2, nninterpolation=False)
    return map1, map2
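
# BEV image convention shared by create_bev_projection_maps above and
# bev_points_world_to_img below: the world origin sits at the image center,
# increasing world x maps to decreasing v (towards the top edge), increasing
# world y maps to decreasing u (towards the left edge), and one pixel covers
# bev_range / bev_size meters.
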
def bev_points_world_to_img(bev_range: float, bev_size: int, bev_points_world: np.ndarray):
    """
    Converts world ground-point coordinates (in meters) to bird's-eye-view (BEV) image coordinates (in pixels).

    :param bev_range: BEV range in meters
    :param bev_size: BEV image size in pixels
    :param bev_points_world: world ground-point coordinates in meters, with
        x, y(, z) stacked along the first axis, i.e. shape (3,) for a single
        point or (3, N) for several
    """
    scale_meter_to_pxl = bev_size / bev_range
    bev_points_img_u = (bev_range / 2 - bev_points_world[1]) * scale_meter_to_pxl
    bev_points_img_v = (bev_range / 2 - bev_points_world[0]) * scale_meter_to_pxl
    return np.stack((bev_points_img_u, bev_points_img_v)).astype(np.int32)


def read_cam_from_json(path):
    """
    Generates a Camera object from a json file
    """
    with open(path) as f:
        config = json.load(f)

    intrinsic = config['intrinsic']
    coefficients = [intrinsic['k1'], intrinsic['k2'], intrinsic['k3'], intrinsic['k4']]

    cam = Camera(
        rotation=SciRot.from_quat(config['extrinsic']['quaternion']).as_matrix(),
        translation=config['extrinsic']['translation'],
        lens=RadialPolyCamProjection(coefficients),
        size=(intrinsic['width'], intrinsic['height']),
        principle_point=(intrinsic['cx_offset'], intrinsic['cy_offset']),
        aspect_ratio=intrinsic['aspect_ratio']
    )

    return cam
--------------------------------------------------------------------------------
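
The map pair returned by create_bev_projection_maps plugs directly into cv2.remap. A minimal sketch (illustrative; bev_range and bev_size here are assumed example values, not repo defaults, and see also generate_bev_img.py in this repo):

import cv2
from projection import read_cam_from_json, create_bev_projection_maps

# Rebuild the front camera from its optimized calibration
cam_front = read_cam_from_json("../calibrations/optimized/00164_FV.json")
img_front = cv2.imread("../images/fisheye/00164_FV.png")

# Render a 20 m x 20 m ground patch as a 1000 x 1000 px top-down image
map1, map2 = create_bev_projection_maps(cam_front, bev_range=20, bev_size=1000)
bev_front = cv2.remap(img_front, map1, map2, cv2.INTER_LINEAR)
cv2.imwrite("bev_front.png", bev_front)
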
/source/utils.py:
--------------------------------------------------------------------------------
# Copyright 2024 Valeo Brain Division and contributors
#
# Author: Lihao Wang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from scipy.spatial.transform import Rotation as SciRot
import json
from projection import Camera, RadialPolyCamProjection

def quat_to_mat(quat):
    return SciRot.from_quat(quat).as_matrix()


def init_fisheye_cam(intr, quat, t):
    coef = [intr['k1'], intr['k2'], intr['k3'], intr['k4']]
    cam = Camera(
        rotation=quat_to_mat(quat),
        translation=t,
        lens=RadialPolyCamProjection(coef),
        size=(intr['width'], intr['height']),
        principle_point=(intr['cx_offset'], intr['cy_offset']),
        aspect_ratio=intr['aspect_ratio']
    )
    return cam


def read_calib(path):
    with open(path) as f:
        calib = json.load(f)
    intr = calib['intrinsic']
    quat = calib['extrinsic']['quaternion']
    t = calib['extrinsic']['translation']
    return intr, quat, t


def write_calib(intr, quat, t, save_path):
    calib = {}
    calib["extrinsic"] = {"quaternion": quat, "translation": t}
    calib["intrinsic"] = intr
    with open(save_path, "w") as f:
        json.dump(calib, f, indent=4)
--------------------------------------------------------------------------------
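
For reference, the calibration JSON layout that read_calib, write_calib and read_cam_from_json all agree on looks as follows. The field values below are illustrative placeholders, not the shipped 00164_FV.json; the quaternion is scalar-last (x, y, z, w), so [0, 0, 0, 1] is the identity rotation.

{
    "extrinsic": {
        "quaternion": [0.0, 0.0, 0.0, 1.0],
        "translation": [3.7, 0.0, 0.7]
    },
    "intrinsic": {
        "k1": 340.0,
        "k2": -30.0,
        "k3": 45.0,
        "k4": -5.0,
        "width": 1280,
        "height": 966,
        "cx_offset": 0.5,
        "cy_offset": -0.8,
        "aspect_ratio": 1.0
    }
}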