├── .gitignore
├── LICENSE
├── README.md
├── cadc_dataset_route_stats.csv
├── convert_novatel_to_pose.py
├── download_cadcd.py
├── images
│   ├── 2019_02_27_0027_lidar_frame_90.png
│   ├── 2019_02_27_0027_vehicle_path.png
│   ├── 2019_02_27_0033_lidar_bev_frame_12_cropped.png
│   └── 2019_02_27_0033_tracklets_frame_12.png
├── lidar_utils.py
├── load_calibration.py
├── load_novatel_data.py
├── other
│   ├── README.md
│   └── filter_pointcloud.py
├── run_demo_2d_tracklets.py
├── run_demo_lidar.py
├── run_demo_lidar_bev.py
├── run_demo_tracklets.py
└── run_demo_vehicle_path.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
### C++ ###
# Compiled Object files
*.slo
*.lo
*.o
*.obj

# Precompiled Headers
*.gch
*.pch

# Compiled Dynamic libraries
*.so
*.dylib
*.dll

# Fortran module files
*.mod

# Compiled Static libraries
*.lai
*.la
*.a
*.lib

# Executables
*.exe
*.out
*.app

### CMake ###
CMakeCache.txt
CMakeFiles
CMakeScripts
Makefile
cmake_install.cmake
install_manifest.txt

## Folders and custom rules
build
lib
bin

# Text editor temporary files
*~
*.autosave
*.swp

## ROS ignore stuff
CATKIN_IGNORE

## MATLAB stuff
*.mat

## Test Results

# Profiler Results
callgrind*

# CLion project files
.idea/

# VS Code project files
.vscode/

# git conflicts file
*.orig

# ignore log files
*.log

# ignore cmake build folder
cmake-build-*/

# python pyc files
*.pyc

# ctags file
.tags

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Dataset Terms of Use
Scale AI, Inc. and The University of Waterloo and their affiliates (hereinafter
“Licensors”) strive to enhance public access to and use of data that they
collect, annotate, and publish. The data are organized in datasets (the
“Datasets”) listed at http://cadcd.uwaterloo.ca/ (the “Website”). The Datasets
are collections of data, managed by Licensors and provided in a number of
machine-readable formats. Licensors provide any individual or entity
(hereinafter “You” or “Your”) with access to the Datasets free of charge subject
to the terms of this agreement (hereinafter “Dataset Terms”). Use of any data
derived from the Datasets, which may appear in any format such as tables,
charts, devkit, documentation, or code, is also subject to these Dataset Terms.
By downloading any Datasets or using any Datasets, You are agreeing to be bound
by the Dataset Terms. If You are downloading any Datasets or using any Datasets
for an organization, You are agreeing to these Dataset Terms on behalf of that
organization. If You do not have the right to agree to these Dataset Terms, do
not download or use the Datasets.

Licenses
Unless specifically labeled otherwise, these Datasets are provided to You under
a Creative Commons Attribution-NonCommercial 4.0 International Public License
(“CC BY-NC 4.0”), with the additional terms included in these Dataset Terms. The
CC BY-NC 4.0 may be accessed at
https://creativecommons.org/licenses/by-nc/4.0/legalcode.
When You download or use the Datasets from the Website or elsewhere, You are
agreeing to comply with the terms of CC BY-NC 4.0. Where these Dataset Terms
conflict with the terms of CC BY-NC 4.0, these Dataset Terms will control.

Privacy
This Dataset is provided for the purpose of research and education in autonomous
driving and other smart city technologies. Licensors prohibit You from using
the Datasets in any manner to identify or invade the privacy of any person whose
personally identifiable information or personal data may have been incidentally
collected in the creation of this Dataset, even when such use is otherwise
legal. An individual with any privacy concerns, including a request to remove
their personally identifiable information or personal data from the Dataset, may
contact us by sending an e-mail to matthew.pitropov@uwaterloo.ca and
kczarnec@gsd.uwaterloo.ca.

No Publicity Rights
You may not use the name, any trademark, official mark, official emblem, or logo
of either Licensor, or any of either Licensor’s other means of promotion or
publicity, without the applicable Licensor’s prior written consent, nor in any
event to represent or imply an association or affiliation with a Licensor,
except as required to comply with the attribution requirements of the CC BY-NC
4.0 license.

Termination
Licensors may terminate Your access to all or any part of the Datasets or the
Website at any time, with or without cause, with or without notice, effective
immediately. All provisions of the Dataset Terms which by their nature should
survive termination will survive termination, including, without limitation,
warranty disclaimers, indemnity, and limitations of liability.

Indemnification
You will indemnify and hold Licensors harmless from and against any and all
claims, loss, cost, expense, liability, or damage, including, without
limitation, all reasonable attorneys’ fees and court costs, arising from (i)
Your use or misuse of the Website or the Datasets; (ii) Your access to the
Website; (iii) Your violation of the Dataset Terms; or (iv) infringement by You,
or any third party using Your account, of any intellectual property or other
right of any person or entity. Such losses, costs, expenses, damages, or
liabilities will include, without limitation, all actual, general, special,
indirect, incidental, and consequential damages.

Dispute Resolution
These Dataset Terms will be governed by and interpreted in accordance with the
laws of California (excluding the conflict of laws rules thereof). All disputes
under these Dataset Terms will be resolved in the applicable state or federal
courts of San Francisco, California. You consent to the jurisdiction of such
courts and waive any jurisdictional or venue defenses otherwise available.

Miscellaneous
You agree that it is Your responsibility to comply with all applicable laws with
respect to Your use and publication of the Datasets or derivatives thereof,
including any applicable privacy, data protection, security, and export control
laws. These Dataset Terms constitute the entire agreement between You and
Licensors with respect to the subject matter of these Dataset Terms and
supersede any prior or contemporaneous agreements, whether written or oral.
If a court of competent jurisdiction finds any term of these Dataset Terms to be
unenforceable, the unenforceable term will be modified to reflect the parties’
intention and only to the extent necessary to make the term enforceable. The
remaining provisions of these Dataset Terms will remain in effect. You may not
assign these Dataset Terms without the prior written consent of the Licensors.
The Licensors may assign, transfer, or delegate any of their rights and
obligations under these Dataset Terms without consent. The parties are
independent contractors. No failure or delay by either party in exercising a
right under these Dataset Terms will constitute a waiver of that right. A
waiver of a default is not a waiver of any subsequent default. These Dataset
Terms may be amended by the Licensors from time to time at their discretion. If
an update affects Your use of the Dataset, Licensors will notify You before the
updated terms are effective for Your use.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# cadc_devkit
A devkit for the Canadian Adverse Driving Conditions (CADC) dataset.

## download_cadcd.py
This script downloads either the raw or the labeled data into the given folder
(set the `labeled` flag at the top of the script). Usage:
`python download_cadcd.py <dataset_path>`.

This dataset is licensed under the [Creative Commons Attribution-NonCommercial 4.0 International License](http://creativecommons.org/licenses/by-nc/4.0/).

## Transform naming convention
The scripts in this repository, as well as the calibration files for the dataset, follow this naming convention for transforms:

```
T_FRAME1_FRAME2
```

This denotes the transform that takes points from FRAME2 to FRAME1.
For example, to project lidar points onto an image given the transforms `T_IMG_CAM` and `T_CAM_LIDAR`, you would write:

```
T_IMG_LIDAR = T_IMG_CAM * T_CAM_LIDAR
```

A runnable version of this example is sketched in the "Transform example" section below.

## run_demo_vehicle_path.py
This script loads all GPS messages in a drive, converts them to an ENU frame with the origin at the first message, and plots each message as an axis frame.

![Vehicle path demo](images/2019_02_27_0027_vehicle_path.png)

## run_demo_tracklets.py
This script loads a camera image and the corresponding 3D annotation file in a drive, loads the calibration data, then creates and projects each cuboid within the frame onto the camera image.

![Tracklets demo](images/2019_02_27_0033_tracklets_frame_12.png)

## run_demo_lidar.py
This script loads a camera image and the corresponding lidar file in a drive, loads the calibration data, then projects each lidar point onto the camera image. Point colour is scaled by depth.

![Lidar projection demo](images/2019_02_27_0027_lidar_frame_90.png)

## run_demo_lidar_bev.py
This script loads lidar data and the corresponding 3D annotation file in a drive, then creates a bird's eye view of the lidar point cloud with the cuboid boxes overlaid. Script created by [asvath](https://github.com/asvath) and also located [here](https://github.com/asvath/cadcd/blob/master/birds_eye_view.py).

![Lidar BEV demo](images/2019_02_27_0033_lidar_bev_frame_12_cropped.png)
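
## Transform example

A minimal sketch of the naming convention above, for illustration only (it is not one of the devkit scripts). It assumes a calibration folder laid out as the demos expect; the calibration path, camera index, and lidar point are placeholders. The loading and composition steps mirror `run_demo_tracklets.py`.

```python
import numpy as np
import load_calibration

calib_path = '/path/to/cadcd/2019_02_27/calib'  # placeholder path
cam = '0'
calib = load_calibration.load_calibration(calib_path)

# T_IMG_CAM: 3x4 projection from the camera frame to the image frame
T_IMG_CAM = np.eye(4)
T_IMG_CAM[0:3, 0:3] = np.array(calib['CAM0' + cam]['camera_matrix']['data']).reshape(-1, 3)
T_IMG_CAM = T_IMG_CAM[0:3, 0:4]  # remove the last row

# The extrinsics file stores T_LIDAR_CAM0x, so invert it to get T_CAM_LIDAR
T_CAM_LIDAR = np.linalg.inv(np.array(calib['extrinsics']['T_LIDAR_CAM0' + cam]))

# T_IMG_LIDAR = T_IMG_CAM * T_CAM_LIDAR: lidar frame -> image frame
T_IMG_LIDAR = np.matmul(T_IMG_CAM, T_CAM_LIDAR)

p_lidar = np.array([10.0, 2.0, -1.0, 1.0])  # homogeneous point in the lidar frame (placeholder)
p_img = np.matmul(T_IMG_LIDAR, p_lidar)
u, v = p_img[0] / p_img[2], p_img[1] / p_img[2]  # pixel coordinates after dividing by depth
```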

## OpenPCDet data loader
[OpenPCDet](https://github.com/open-mmlab/OpenPCDet) is an open source project with multiple architectures implemented for lidar-based 3D object detection and support for several datasets. A CADC dataset loader has been implemented on the forked [cadc support branch](https://github.com/mpitropov/OpenPCDet/tree/cadc_support), with the [Getting started](https://github.com/mpitropov/OpenPCDet/blob/cadc_support/docs/GETTING_STARTED.md#cadc-dataset) document updated for the CADC dataset. Any issues with the data loader should be opened within this repository. Currently there is only one difficulty level for test results.

## Additional scripts
Please view [asvath's](https://github.com/asvath) [cadcd](https://github.com/asvath/cadcd) repository.

## Citation
```
@article{pitropov2021canadian,
  title={Canadian adverse driving conditions dataset},
  author={Pitropov, Matthew and Garcia, Danson Evan and Rebello, Jason and Smart, Michael and Wang, Carlos and Czarnecki, Krzysztof and Waslander, Steven},
  journal={The International Journal of Robotics Research},
  volume={40},
  number={4-5},
  pages={681--690},
  year={2021},
  publisher={SAGE Publications Sage UK: London, England}
}
```

--------------------------------------------------------------------------------
/cadc_dataset_route_stats.csv:
--------------------------------------------------------------------------------
Date,Number,Frame Count,Dataset Type,Raw Km,Labeled Km,Raw Zip,Labeled Zip,Car,Truck,Bus,Bicycle,Horse_and_Buggy,Pedestrian,Pedestrian_With_Object,Animals,Garbage_Containers_on_Wheels,Traffic_Guidance_Objects,Snow points removed,Road snow cover,Cam 00 lens snow cover
2018_03_06,1,100,TRAIN,0.59,0.21,8.10,1.40,191,25,0,0,0,4,0,0,0,20,26,None,None
2018_03_06,2,100,TRAIN,0.42,0.36,8.50,1.40,60,4,1,0,0,2,0,0,2,0,340,None,None
2018_03_06,5,100,TRAIN,0.21,0.00,8.10,1.30,31,3,3,0,0,8,0,0,3,0,396,None,Partial
2018_03_06,6,100,TRAIN,0.73,0.35,8.30,1.40,32,3,3,0,0,0,0,0,0,4,500,None,Partial
2018_03_06,8,100,TRAIN,0.08,0.09,8.40,1.40,149,4,4,0,0,2,0,0,0,0,265,None,Partial
2018_03_06,9,100,TRAIN,0.45,0.32,8.50,1.40,143,8,1,0,0,10,0,0,0,0,235,None,Partial
2018_03_06,10,100,TRAIN,0.51,0.31,8.60,1.40,131,1,1,0,0,3,0,0,0,2,75,None,Partial
2018_03_06,12,100,TRAIN,0.66,0.32,8.60,1.40,75,2,1,0,0,1,0,0,2,0,245,None,None
2018_03_06,13,50,TRAIN,0.79,0.21,8.50,0.71,112,8,0,0,0,0,0,0,3,0,124,None,None
2018_03_06,15,50,TRAIN,0.76,0.17,8.20,0.68,21,0,1,0,0,0,0,0,0,0,269,None,None
2018_03_06,16,100,TRAIN,0.53,0.21,8.20,1.30,34,1,0,0,0,0,0,0,0,0,278,None,None
2018_03_06,18,100,TRAIN,0.10,0.00,8.50,1.40,83,1,1,0,0,5,0,0,0,1,137,None,None
2018_03_07,1,50,TRAIN,0.69,0.11,8.00,0.69,166,6,0,0,0,0,0,0,0,24,557,None,Partial
2018_03_07,2,100,TRAIN,0.51,0.20,8.20,1.40,341,16,0,0,0,2,0,0,1,8,472,None,Partial
2018_03_07,4,100,TRAIN,0.35,0.13,8.20,1.30,142,0,4,0,0,53,0,0,0,0,42,None,None
2018_03_07,5,100,TRAIN,0.25,0.12,8.50,1.40,26,1,2,10,0,115,0,0,15,0,100,None,Partial
2018_03_07,6,100,TRAIN,0.11,0.06,8.10,1.40,240,5,4,0,0,25,0,0,0,0,42,None,Partial
2018_03_07,7,100,TRAIN,0.37,0.07,8.20,1.40,245,12,3,0,0,30,0,0,0,0,182,None,Partial
2019_02_27,2,71,TRAIN,0.14,0.14,6.00,0.98,117,17,1,0,0,0,0,0,0,0,204,Covered,None
2019_02_27,3,100,TRAIN,0.00,0.00,5.30,1.40,74,3,2,0,0,3,0,0,0,0,401,Covered,None
2019_02_27,4,100,TRAIN,0.14,0.10,5.30,1.40,74,3,2,0,0,3,0,0,0,0,236,Covered,None
2019_02_27,5,99,TRAIN,0.08,0.08,4.30,1.30,97,7,3,0,0,4,0,0,0,0,163,Covered,None
2019_02_27,6,100,TRAIN,0.55,0.31,7.90,1.40,229,31,0,0,0,1,0,0,0,0,590,Covered,None
2019_02_27,8,96,TRAIN,0.24,0.24,5.40,1.30,30,6,0,0,0,0,0,0,0,13,496,Covered,None
2019_02_27,9,100,TRAIN,0.38,0.27,5.90,1.20,15,0,0,0,0,0,0,0,0,0,803,Covered,None
2019_02_27,10,91,TRAIN,0.21,0.21,4.10,1.20,27,1,0,0,0,0,0,0,0,0,587,Covered,None
2019_02_27,11,67,TRAIN,0.05,0.06,3.00,0.90,289,1,0,0,0,22,0,0,2,0,971,Covered,None
2019_02_27,13,100,TRAIN,0.01,0.01,5.30,1.40,66,5,0,0,0,40,0,0,0,0,527,Covered,None
2019_02_27,15,89,TRAIN,0.03,0.03,4.10,1.20,18,0,2,1,0,73,0,0,3,0,310,Covered,None
2019_02_27,16,100,TRAIN,0.21,0.11,8.50,1.40,5,2,1,3,0,72,0,0,0,4,404,Covered,None
2019_02_27,18,100,TRAIN,0.18,0.18,5.00,1.40,401,5,1,0,0,155,0,0,4,0,428,Covered,None
2019_02_27,19,95,TRAIN,0.00,0.00,4.50,1.40,65,0,1,0,0,80,0,0,0,0,685,Covered,None
2019_02_27,20,98,TRAIN,0.20,0.21,4.60,1.40,484,9,4,0,0,121,0,0,0,0,808,Covered,None
2019_02_27,22,100,TRAIN,0.31,0.20,7.20,1.40,99,3,4,0,0,34,1,0,0,0,243,Covered,None
2019_02_27,24,100,TRAIN,0.42,0.24,8.50,1.40,157,6,0,0,0,44,0,0,0,0,361,Covered,None
2019_02_27,25,100,TRAIN,0.12,0.12,8.40,1.40,77,13,1,0,0,5,2,0,5,0,328,Covered,None
2019_02_27,27,100,TRAIN,0.07,0.07,8.50,1.50,79,5,0,0,0,7,0,0,0,0,323,Covered,None
2019_02_27,28,100,TRAIN,0.47,0.27,8.60,1.40,212,8,0,0,0,5,1,0,2,2,194,Covered,None
2019_02_27,30,100,TRAIN,0.16,0.07,8.30,1.40,182,10,1,0,0,12,0,0,6,0,573,Covered,None
2019_02_27,31,100,TRAIN,0.52,0.27,8.30,1.40,155,14,2,0,0,5,0,0,6,1,369,Covered,None
2019_02_27,33,100,TRAIN,0.13,0.00,8.30,1.40,61,11,0,0,0,6,0,0,1,0,583,Covered,None
2019_02_27,34,73,TRAIN,0.13,0.13,7.30,1.00,111,4,0,0,0,11,0,0,1,0,322,Covered,None
2019_02_27,35,100,TRAIN,0.35,0.30,6.20,1.40,95,3,1,0,0,14,0,0,31,3,183,Covered,None
2019_02_27,37,58,TRAIN,0.00,0.00,2.80,0.87,27,0,1,0,0,2,0,0,0,0,1176,Covered,None
2019_02_27,39,58,TRAIN,0.16,0.17,2.70,0.80,288,1,0,0,0,0,0,0,2,0,541,Covered,None
2019_02_27,40,100,TRAIN,0.48,0.22,8.20,1.40,292,6,0,0,0,21,0,0,0,0,543,Covered,None
2019_02_27,41,100,TRAIN,0.18,0.00,8.20,1.40,29,0,1,0,0,3,0,0,0,0,1066,Covered,None
2019_02_27,43,100,TRAIN,0.36,0.20,8.40,1.40,58,20,0,0,0,3,0,0,0,0,1375,Covered,None
2019_02_27,44,100,TRAIN,0.19,0.19,4.70,1.40,23,1,1,0,0,11,0,0,2,0,492,Covered,None
2019_02_27,45,88,TRAIN,0.23,0.24,4.00,1.20,23,1,0,0,0,10,0,0,0,0,409,Covered,None
2019_02_27,46,100,TRAIN,0.32,0.24,6.50,1.40,52,1,0,0,0,4,0,0,7,0,884,Covered,None
2019_02_27,47,95,TRAIN,0.42,0.42,4.40,1.30,18,3,0,0,0,0,0,0,0,0,477,Covered,None
2019_02_27,49,100,TRAIN,0.59,0.35,7.60,1.30,19,1,0,0,0,0,0,0,3,0,937,Covered,None
2019_02_27,50,73,TRAIN,0.24,0.24,3.30,0.97,76,5,0,0,0,4,0,0,4,0,850,Covered,None
2019_02_27,51,100,TRAIN,0.19,0.06,7.50,1.30,61,3,1,0,0,3,0,0,0,0,1105,Covered,None
2019_02_27,54,100,TRAIN,0.19,0.14,6.30,1.40,126,6,2,0,0,2,0,0,3,0,739,Covered,None
2019_02_27,55,100,TRAIN,0.53,0.30,8.30,1.40,109,8,1,0,0,8,1,0,17,0,698,Covered,None
2019_02_27,56,100,TRAIN,0.51,0.28,8.50,1.40,269,2,3,0,0,9,1,0,11,0,863,Covered,None
2019_02_27,58,100,TRAIN,0.21,0.17,5.60,1.40,78,6,2,0,0,10,0,0,0,0,318,Covered,None
2019_02_27,59,100,TRAIN,0.24,0.24,8.30,1.40,185,1,0,0,0,48,0,0,3,0,411,Covered,None
2019_02_27,60,100,TRAIN,0.07,0.06,4.80,1.40,96,8,2,0,0,29,0,0,2,0,491,Covered,None
2019_02_27,61,100,TRAIN,0.51,0.34,7.40,1.40,183,11,1,2,0,19,1,0,3,0,457,Covered,None
2019_02_27,63,100,TRAIN,0.47,0.37,5.70,1.30,156,7,1,0,0,5,0,0,2,1,416,Covered,None
2019_02_27,65,100,TRAIN,0.35,0.24,8.10,1.30,261,14,4,0,0,2,0,0,0,0,853,Covered,None
2019_02_27,66,75,TRAIN,0.24,0.22,4.60,0.98,183,25,0,0,0,5,0,0,2,0,617,Covered,None
2019_02_27,68,100,TRAIN,0.22,0.02,7.90,1.30,27,6,1,0,0,0,0,0,0,0,1327,Covered,None
2019_02_27,70,50,TRAIN,0.69,0.13,8.00,0.65,44,9,0,0,0,0,0,0,0,0,1443,Covered,None
2019_02_27,72,97,TRAIN,0.27,0.28,4.40,1.30,58,13,0,0,0,7,2,0,4,0,173,Covered,None
2019_02_27,73,100,TRAIN,0.41,0.19,8.50,1.40,48,11,0,0,0,20,2,0,2,0,170,Covered,None
2019_02_27,75,100,TRAIN,0.03,0.03,5.60,1.20,29,12,0,0,0,0,0,0,0,0,269,Covered,None
2019_02_27,76,100,TRAIN,0.41,0.22,6.10,1.30,30,6,0,0,0,0,0,0,0,0,336,Covered,None
2019_02_27,78,100,TRAIN,0.60,0.52,4.90,1.20,13,5,1,0,0,0,0,0,0,0,932,Covered,None
2019_02_27,79,84,TRAIN,0.18,0.19,4.00,1.20,44,2,0,0,0,5,2,1,3,0,280,Covered,None
2019_02_27,80,96,TRAIN,0.36,0.36,4.30,1.30,44,4,0,0,1,1,0,0,0,0,628,Covered,None
2019_02_27,82,97,TRAIN,0.11,0.11,4.30,1.30,68,6,1,0,0,1,0,0,6,0,424,Covered,None

--------------------------------------------------------------------------------
/convert_novatel_to_pose.py:
--------------------------------------------------------------------------------

# Convert LL to UTM
import utm
import numpy as np
import math

# Converts GPS data to poses in the ENU frame
def convert_novatel_to_pose(novatel):
    poses = []
    FIRST_RUN = True
    origin = []

    for gps_msg in novatel:
        # utm_data[0] = East (m), utm_data[1] = North (m)
        utm_data = utm.from_latlon(float(gps_msg[0]), float(gps_msg[1]))
        # Ellipsoidal height = MSL (orthometric) + undulation
        ellipsoidal_height = float(gps_msg[2]) + float(gps_msg[3])

        roll = np.deg2rad(float(gps_msg[7]))
        pitch = np.deg2rad(float(gps_msg[8]))

        # Azimuth: north at 0 degrees, east at 90 degrees, south at 180 degrees and west at 270 degrees
        azimuth = float(gps_msg[9])
        # ENU yaw: north at 0 deg, west at 90 deg, south at 180 deg, east at 270 deg
        yaw = np.deg2rad(-1.0 * azimuth)

        c_phi = math.cos(roll)
        s_phi = math.sin(roll)
        c_theta = math.cos(pitch)
        s_theta = math.sin(pitch)
        c_psi = math.cos(yaw)
        s_psi = math.sin(yaw)

        if FIRST_RUN:
            origin = [utm_data[0], utm_data[1], ellipsoidal_height]
            FIRST_RUN = False

        # This is the T_locallevel_body transform where ENU is the local level frame
        # and the IMU is the body frame
        # https://www.novatel.com/assets/Documents/Bulletins/apn037.pdf
        poses.append(np.matrix([
            [c_psi * c_phi - s_psi * s_theta * s_phi, -s_psi * c_theta, c_psi * s_phi + s_psi * s_theta * c_phi, utm_data[0] - origin[0]],
            [s_psi * c_phi + c_psi * s_theta * s_phi, c_psi * c_theta, s_psi * s_phi - c_psi * s_theta * c_phi, utm_data[1] - origin[1]],
            [-c_theta * s_phi, s_theta, c_theta * c_phi, ellipsoidal_height - origin[2]],
            [0.0, 0.0, 0.0, 1.0]]))

    return poses

--------------------------------------------------------------------------------
/download_cadcd.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import os, sys, wget, zipfile
from pathlib import Path

cadcd = {
    '2018_03_06': [
        '0001','0002','0005','0006','0008','0009','0010',
        '0012','0013','0015','0016','0018'
    ],
    '2018_03_07': [
        '0001','0002','0004','0005','0006','0007'
    ],
    '2019_02_27': [
        '0002','0003','0004','0005','0006','0008','0009','0010',
        '0011','0013','0015','0016','0018','0019','0020',
        '0022','0024','0025','0027','0028','0030',
        '0031','0033','0034','0035','0037','0039','0040',
        '0041','0043','0044','0045','0046','0047','0049','0050',
        '0051','0054','0055','0056','0058','0059',
        '0060','0061','0063','0065','0066','0068','0070',
        '0072','0073','0075','0076','0078','0079',
        '0080','0082'
    ]
}

dataset_path = ''

if len(sys.argv) == 2:
    dataset_path = sys.argv[1]
    os.chdir(dataset_path)
else:
    print('Please enter the path to store the dataset.')
    exit()

# Set to False to download the raw (unlabeled) data instead
labeled = True
if labeled:
    print('Downloading labeled data')
else:
    print('Downloading raw data')

def main():
    root_dir = Path(dataset_path + '/cadcd')
    root_dir.mkdir(parents=True, exist_ok=True)
    for date in cadcd:
        print(date)
        date_path = dataset_path + '/cadcd/' + date
        date_dir = Path(date_path)
        date_dir.mkdir(parents=True, exist_ok=True)

        # Download the calibration for this date
        os.chdir(date_path)
        calib_url = 'http://wiselab.uwaterloo.ca/cadcd_data/' + date + '/calib.zip'
        calib_filename = wget.download(calib_url)
        # Extract files
        calib_zip = zipfile.ZipFile(calib_filename)
        calib_zip.extractall()
        calib_zip.close()
        # Delete the zip file
        os.remove(calib_filename)

        for drive in cadcd[date]:
            print(drive)
            drive_path = dataset_path + '/cadcd/' + date + '/' + drive
            drive_dir = Path(drive_path)
            drive_dir.mkdir(parents=True, exist_ok=True)

            # Download the drive (the 3D annotations are a separate file)
            os.chdir(drive_path)
            base_url = 'http://wiselab.uwaterloo.ca/cadcd_data/' + date + '/' + drive
            if labeled:
                data_url = base_url + '/labeled.zip'
                ann_3d_url = base_url + '/3d_ann.json'
                ann_3d_filename = wget.download(ann_3d_url)
            else:
                data_url = base_url + '/raw.zip'
            data_filename = wget.download(data_url)

            # Extract files
            data_zip = zipfile.ZipFile(data_filename)
            data_zip.extractall()
            data_zip.close()

            # Delete the zip file
            os.remove(data_filename)

main()

--------------------------------------------------------------------------------
/images/2019_02_27_0027_lidar_frame_90.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mpitropov/cadc_devkit/2c28daeda893289a548dd9ba39f31f370055210f/images/2019_02_27_0027_lidar_frame_90.png

--------------------------------------------------------------------------------
/images/2019_02_27_0027_vehicle_path.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mpitropov/cadc_devkit/2c28daeda893289a548dd9ba39f31f370055210f/images/2019_02_27_0027_vehicle_path.png

--------------------------------------------------------------------------------
/images/2019_02_27_0033_lidar_bev_frame_12_cropped.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mpitropov/cadc_devkit/2c28daeda893289a548dd9ba39f31f370055210f/images/2019_02_27_0033_lidar_bev_frame_12_cropped.png

--------------------------------------------------------------------------------
/images/2019_02_27_0033_tracklets_frame_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mpitropov/cadc_devkit/2c28daeda893289a548dd9ba39f31f370055210f/images/2019_02_27_0033_tracklets_frame_12.png

--------------------------------------------------------------------------------
/lidar_utils.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import numpy as np
import cv2

class lidar_utils:
    def __init__(self, T_CAM_LIDAR):
        self.T_CAM_LIDAR = T_CAM_LIDAR
        print("init lidar utils")

    def project_points(self, img, lidar_path, T_IMG_CAM, T_CAM_LIDAR, dist_coeffs, DISTORTED):
        self.T_CAM_LIDAR = T_CAM_LIDAR

        scan_data = np.fromfile(lidar_path, dtype=np.float32)

        # 2D array where each row contains a point [x, y, z, intensity]
        lidar = scan_data.reshape((-1, 4))

        # Get height and width of the image
        h, w = img.shape[:2]

        projected_points = []

        [rows, cols] = lidar.shape

        for i in range(rows):
            # Transform each homogeneous lidar point into the camera frame
            p = np.array([0.0, 0.0, 0.0, 1.0])
            p[0:3] = lidar[i, 0:3]
            projected_p = np.matmul(self.T_CAM_LIDAR, p.transpose())
            if projected_p[2] < 2:  # arbitrary depth cut-off in front of the camera
                continue
            projected_points.append([projected_p[0], projected_p[1], projected_p[2]])

        # Send the [x, y, z] points and the transform
        projected_points_np = np.array(projected_points)
        image_points = self.project(projected_points_np, T_IMG_CAM, dist_coeffs, DISTORTED)

        radius = 0

        [rows, cols] = projected_points_np.shape

        NUM_COLOURS = 7
        rainbow = [
            [0, 0, 255],    # Red
            [0, 127, 255],  # Orange
            [0, 255, 255],  # Yellow
            [0, 255, 0],    # Green
            [255, 0, 0],    # Blue
            [130, 0, 75],   # Indigo
            [211, 0, 148]   # Violet
        ]

        for i in range(rows):
            # Colour is scaled by depth, up to a maximum of 70 m
            colour = int(NUM_COLOURS * (projected_points_np[i][2] / 70))
            x = int(image_points[i][0])
            y = int(image_points[i][1])
            if x < 0 or x > w - 1 or y < 0 or y > h - 1:
                continue
            if colour > NUM_COLOURS - 1:
                continue

            cv2.circle(img, (x, y), radius, rainbow[colour], thickness=2, lineType=8, shift=0)

        return img

    def project(self, p_in, T_IMG_CAM, dist_coeffs, DISTORTED):
        p_out = []
        [rows, cols] = p_in.shape

        for i in range(rows):
            point = np.array([0.0, 0.0, 0.0, 1.0])
            point[0:3] = p_in[i]
            if DISTORTED:
                # Use OpenCV to apply the distortion model while projecting
                rvec = tvec = np.zeros(3)
                image_points, jac = cv2.projectPoints(np.array([point[0:3]]), rvec, tvec, T_IMG_CAM[0:3, 0:3], dist_coeffs)
                p_out.append([image_points[0, 0, 0], image_points[0, 0, 1]])
            else:
                # Pinhole projection: apply T_IMG_CAM, then divide by depth
                curr = np.matmul(T_IMG_CAM, point.transpose()).transpose()
                done = [curr[0] / curr[2], curr[1] / curr[2]]
                p_out.append(done)

        return p_out

--------------------------------------------------------------------------------
/load_calibration.py:
--------------------------------------------------------------------------------

import yaml
import numpy as np

def load_calibration(calib_path):
    calib = {}

    # Get calibrations
    calib['extrinsics'] = yaml.load(open(calib_path + '/extrinsics.yaml'), yaml.SafeLoader)
    calib['CAM00'] = yaml.load(open(calib_path + '/00.yaml'), yaml.SafeLoader)
    calib['CAM01'] = yaml.load(open(calib_path + '/01.yaml'), yaml.SafeLoader)
    calib['CAM02'] = yaml.load(open(calib_path + '/02.yaml'), yaml.SafeLoader)
    calib['CAM03'] = yaml.load(open(calib_path + '/03.yaml'), yaml.SafeLoader)
    calib['CAM04'] = yaml.load(open(calib_path + '/04.yaml'), yaml.SafeLoader)
    calib['CAM05'] = yaml.load(open(calib_path + '/05.yaml'), yaml.SafeLoader)
    calib['CAM06'] = yaml.load(open(calib_path + '/06.yaml'), yaml.SafeLoader)
    calib['CAM07'] = yaml.load(open(calib_path + '/07.yaml'), yaml.SafeLoader)

    return calib

--------------------------------------------------------------------------------
/load_novatel_data.py:
--------------------------------------------------------------------------------

import os

def load_novatel_data(novatel_path):
    files = os.listdir(novatel_path)
    novatel = []

    for file in sorted(files):
        with open(novatel_path + file) as fp:
            novatel.append(fp.readline().split(' '))

    return novatel

--------------------------------------------------------------------------------
/other/README.md:
--------------------------------------------------------------------------------
# Other

## filter_pointcloud.py
This script loads the first point cloud in a sequence, crops it to the area around Autonomoose, and then runs DROR on it. The points removed are considered to be snow.

--------------------------------------------------------------------------------
/other/filter_pointcloud.py:
--------------------------------------------------------------------------------

# Implementation of DROR (Dynamic Radius Outlier Removal) in python
# https://github.com/nickcharron/lidar_snow_removal/blob/master/src/DROR.cpp
# Slightly modified due to radiusSearch not being implemented in python-pcl

import numpy as np
import pcl
import pcl.pcl_visualization
import sys, math
# seq = sys.argv[1] #'0006'
# print(seq)

def dror_filter(input_cloud):
    radius_multiplier_ = 3
    azimuth_angle_ = 0.16  # 0.04
    min_neighbors_ = 3
    k_neighbors_ = min_neighbors_ + 1
    min_search_radius_ = 0.04

    filtered_cloud_list = []

    # Initialize the kd search tree
    kd_tree = input_cloud.make_kdtree_flann()

    # Go over all the points, check which don't have enough neighbours,
    # and perform the filtering
    for p_id in range(input_cloud.size):
        x_i = input_cloud[p_id][0]
        y_i = input_cloud[p_id][1]
        range_i = math.sqrt(pow(x_i, 2) + pow(y_i, 2))
        # The search radius grows with range: multiplier * azimuth resolution (rad) * range
        search_radius_dynamic = \
            radius_multiplier_ * azimuth_angle_ * math.pi / 180 * range_i

        if search_radius_dynamic < min_search_radius_:
            search_radius_dynamic = min_search_radius_

        [ind, sqdist] = kd_tree.nearest_k_search_for_point(input_cloud, p_id, k_neighbors_)

        # Count all neighbours within the dynamic search radius
        neighbors = -1  # Start at -1 since a point will always be its own neighbour
        for val in sqdist:
            if math.sqrt(val) < search_radius_dynamic:
                neighbors += 1

        # This point is not snow, add it to the filtered cloud
        if neighbors >= min_neighbors_:
            filtered_cloud_list.append(input_cloud[p_id])

    return pcl.PointCloud(np.array(filtered_cloud_list, dtype=np.float32))

def crop_cloud(input_cloud):
    clipper = input_cloud.make_cropbox()
    clipper.set_Translation(0, 0, 0)  # tx, ty, tz
    clipper.set_Rotation(0, 0, 0)  # rx, ry, rz
    min_vals = [-4, -4, -3, 0]  # x, y, z, s
    max_vals = [4, 4, 10, 0]  # x, y, z, s
    clipper.set_MinMax(min_vals[0], min_vals[1], min_vals[2], min_vals[3], \
                       max_vals[0], max_vals[1], max_vals[2], max_vals[3])
    return clipper.filter()

def print_snow_points(frame):
    seq = format(frame, '04')

    annotations_file = BASE + seq + "/3d_ann.json"
    export_annotations_file = BASE + seq + "/3d_ann_p.json"
    path_type = "processed"

    # Load the lidar msg for this frame
    lidar_path = BASE + seq + "/" + path_type + "/lidar_points/data/" + format(0, '010') + ".bin"
    scan_data = np.fromfile(lidar_path, dtype=np.float32)
    # 2D array where each row contains a point [x, y, z, intensity]
    lidar = scan_data.reshape((-1, 4))
    # Convert the lidar 2D array to a pcl cloud
    point_cloud = pcl.PointCloud()
    point_cloud.from_array(lidar[:, 0:3])
    # Crop the point cloud to around Autonomoose
    cropped_cloud = crop_cloud(point_cloud)
    # Run DROR
    filtered_cloud = dror_filter(cropped_cloud)
    # Print the number of snow points
    number_snow_points = cropped_cloud.size - filtered_cloud.size
    print(number_snow_points)

    visual = pcl.pcl_visualization.CloudViewing()

    # PointXYZ
    visual.ShowMonochromeCloud(point_cloud, b'cloud')
    # visual.ShowMonochromeCloud(cropped_cloud, b'cloud')
    # visual.ShowMonochromeCloud(filtered_cloud, b'cloud')

    v = True
    while v:
        v = not(visual.WasStopped())

BASE = '/media/matthew/WAVELAB_2TB/winter/data/'
# BASE = '/media/matthew/MOOSE-4TB/2019_02_27/'
# BASE = '/media/matthew/MOOSE-4TB/2018_03_06/data/'
# BASE = '/media/matthew/MOOSE-4TB/2018_03_07/data/'

LOOP = False

# for frame in range(21, 83):
print_snow_points(68)
# Low 74
# Medium 81
# High 80
# High example 68

--------------------------------------------------------------------------------
/run_demo_2d_tracklets.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import json
import numpy as np
import cv2
import load_calibration

frame = 0
cam = '0'
seq = '0069'
DISTORTED = False
MOVE_FORWARD = True
base_path = "/media/matthew/WAVELAB_2TB/winter"
calib_path = "/media/matthew/WAVELAB_2TB/winter/calib/"

if DISTORTED:
    path_type = 'raw'
else:
    path_type = 'processed'

img_path = base_path + "/data/" + seq + "/" + path_type + "/image_0" + cam + "/data/" + format(frame, '010') + ".png"
annotations_file = base_path + "/data/" + seq + '/2d_annotations.json'

# Load the calibration dictionary
calib = load_calibration.load_calibration(calib_path)

# Projection matrix from camera to image frame
T_IMG_CAM = np.eye(4)
T_IMG_CAM[0:3, 0:3] = np.array(calib['CAM0' + cam]['camera_matrix']['data']).reshape(-1, 3)
T_IMG_CAM = T_IMG_CAM[0:3, 0:4]  # remove last row

dist_coeffs = np.array(calib['CAM0' + cam]['distortion_coefficients']['data'])

# Load the 2D annotations
annotations_data = None
with open(annotations_file) as f:
    annotations_data = json.load(f)

img = cv2.imread(img_path)
img_h, img_w = img.shape[:2]

# Add each box to the image
for camera_response in annotations_data[frame]['camera_responses']:
    if camera_response['camera_used'] != int(cam):
        continue

    for annotation in camera_response['annotations']:
        left = int(annotation['left'])
        top = int(annotation['top'])
        width = int(annotation['width'])
        height = int(annotation['height'])

        if DISTORTED:
            cv2.rectangle(img, (left, top), (left + width, top + height), (0, 255, 0), thickness=3)
        else:
            # The 2D annotations are in raw (distorted) image coordinates,
            # so undistort the box corners before drawing on the processed image
            pts_uv = np.array([[[left, top]], [[left + width, top + height]]], dtype=np.float32)
            new_pts = cv2.undistortPoints(pts_uv, T_IMG_CAM[0:3, 0:3], dist_coeffs, P=T_IMG_CAM[0:3, 0:3])
            cv2.rectangle(img, (int(new_pts[0][0][0]), int(new_pts[0][0][1])),
                          (int(new_pts[1][0][0]), int(new_pts[1][0][1])), (0, 255, 0), thickness=3)

cv2.imshow('image', img)
cv2.waitKey(10000)

--------------------------------------------------------------------------------
/run_demo_lidar.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import numpy as np
import cv2
import load_calibration
from lidar_utils import lidar_utils

frame = 90
cam = '0'
seq = '0027'
DISTORTED = False
MOVE_FORWARD = True
BASE = "/media/matthew/WAVELAB_2TB/winter/"

if DISTORTED:
    path_type = 'raw'
else:
    path_type = 'processed'

lidar_path = BASE + "data/" + seq + "/" + path_type + "/lidar_points/data/" + format(frame, '010') + ".bin"
calib_path = BASE + "calib/"
img_path = BASE + "data/" + seq + "/" + path_type + "/image_0" + cam + "/data/" + format(frame, '010') + ".png"

# Load the calibration dictionary
calib = load_calibration.load_calibration(calib_path)

# Projection matrix from camera to image frame
T_IMG_CAM = np.eye(4)
T_IMG_CAM[0:3, 0:3] = np.array(calib['CAM0' + cam]['camera_matrix']['data']).reshape(-1, 3)
T_IMG_CAM = T_IMG_CAM[0:3, 0:4]  # remove last row

# The extrinsics store T_LIDAR_CAM, so invert to get T_CAM_LIDAR
T_CAM_LIDAR = np.linalg.inv(np.array(calib['extrinsics']['T_LIDAR_CAM0' + cam]))
dist_coeffs = np.array(calib['CAM0' + cam]['distortion_coefficients']['data'])

lidar_utils_obj = lidar_utils(T_CAM_LIDAR)

while True:
    print(frame)
    # Read the image
    img = cv2.imread(img_path)

    # Project the lidar points onto the image
    img = lidar_utils_obj.project_points(img, lidar_path, T_IMG_CAM, T_CAM_LIDAR, dist_coeffs, DISTORTED)
    # cv2.imwrite("test.png", img)

    cv2.imshow('image', img)
    cv2.waitKey(1000)

    if MOVE_FORWARD:
        frame += 1
        lidar_path = BASE + "data/" + seq + "/" + path_type + "/lidar_points/data/" + format(frame, '010') + ".bin"
        img_path = BASE + "data/" + seq + "/" + path_type + "/image_0" + cam + "/data/" + format(frame, '010') + ".png"

--------------------------------------------------------------------------------
/run_demo_lidar_bev.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.transform import Rotation as R
import json
import matplotlib.patches as patches


frame = 12
cam = '0'
seq = '0033'
DISTORTED = False
MOVE_FORWARD = True
DISPLAY_LIDAR = False
DISPLAY_CUBOID_CENTER = False
MIN_CUBOID_DIST = 40.0

BASE = '/media/matthew/WAVELAB_2TB/winter/data/'
# BASE = '/media/matthew/MOOSE-4TB/2019_02_27/'
# BASE = '/media/matthew/MOOSE-4TB/2018_03_06/data/'
# BASE = '/media/matthew/MOOSE-4TB/2018_03_07/data/'

if DISTORTED:
    path_type = 'raw'
else:
    path_type = 'processed'

lidar_path = BASE + seq + "/" + path_type + "/lidar_points/data/" + format(frame, '010') + ".bin"
calib_path = "/media/matthew/WAVELAB_2TB/winter/calib/"
img_path = BASE + seq + "/" + path_type + "/image_0" + cam + "/data/" + format(frame, '010') + ".png"
annotations_path = BASE + seq + "/3d_ann.json"

def bev(s1, s2, f1, f2, frame, lidar_path, annotations_path):
    '''
    :param s1: meters to the left of the car (e.g. 15)
    :param s2: meters to the right of the car
    :param f1: meters behind the car
    :param f2: meters in front of the car
    :param frame: the frame number
    :return:
    '''

    # Limit the viewing range
    side_range = [-s1, s2]  # s1 m to the left, s2 m to the right of the car
    fwd_range = [-f1, f2]  # f1 m behind, f2 m in front of the car

    scan_data = np.fromfile(lidar_path, dtype=np.float32)  # np.fromfile reads the binary file
    # scan_data is a single row of all the lidar values;
    # convert it to a 2D array where each row contains a point [x, y, z, intensity]
    lidar = scan_data.reshape((-1, 4))

    lidar_x = lidar[:, 0]
    lidar_y = lidar[:, 1]
    lidar_z = lidar[:, 2]

    lidar_x_trunc = []
    lidar_y_trunc = []
    lidar_z_trunc = []

    # Keep only the lidar points inside the viewing range
    for i in range(len(lidar_x)):
        if lidar_x[i] > fwd_range[0] and lidar_x[i] < fwd_range[1]:
            if lidar_y[i] > side_range[0] and lidar_y[i] < side_range[1]:
                lidar_x_trunc.append(lidar_x[i])
                lidar_y_trunc.append(lidar_y[i])
                lidar_z_trunc.append(lidar_z[i])

    # To use for the plot
    x_img = [i * -1 for i in lidar_y_trunc]  # in the image plot, the negative lidar y axis is the img x axis
    y_img = lidar_x_trunc  # the lidar x axis is the img y axis
    pixel_values = lidar_z_trunc

    # Shift values such that (0, 0) is the minimum
    x_img = [i - side_range[0] for i in x_img]
    y_img = [i - fwd_range[0] for i in y_img]

    '''
    tracklets
    '''

    # Load the 3D annotations
    annotations_data = None
    with open(annotations_path) as f:
        annotations_data = json.load(f)

    # Add each cuboid to the image.
    # Rotations in 3 dimensions can be represented by a sequence of 3 rotations around a sequence of
    # axes. In theory, any three axes spanning the 3D Euclidean space are enough. In practice, the
    # axes of rotation are chosen to be the basis vectors. The three rotations can either be in a
    # global frame of reference (extrinsic) or in a body-centred frame of reference (intrinsic),
    # which is attached to, and moves with, the object under rotation.

    # PLOT THE IMAGE
    cmap = "jet"  # Colour map to use
    dpi = 100  # Image resolution
    x_max = side_range[1] - side_range[0]
    y_max = fwd_range[1] - fwd_range[0]
    fig, ax = plt.subplots(figsize=(2000/dpi, 2000/dpi), dpi=dpi)

    # The coordinates in the tracklet json are lidar coords
    x_trunc = []
    y_trunc = []
    x_1 = []
    x_2 = []
    x_3 = []
    x_4 = []
    y_1 = []
    y_2 = []
    y_3 = []
    y_4 = []

    for cuboid in annotations_data[frame]['cuboids']:
        T_Lidar_Cuboid = np.eye(4)  # identity matrix
        T_Lidar_Cuboid[0:3, 0:3] = R.from_euler('z', cuboid['yaw'],
            degrees=False).as_matrix()  # rotate about the z axis by the cuboid yaw
        T_Lidar_Cuboid[0][3] = cuboid['position']['x']  # center of the tracklet, from cuboid to lidar
        T_Lidar_Cuboid[1][3] = cuboid['position']['y']
        T_Lidar_Cuboid[2][3] = cuboid['position']['z']

        # Make sure the cuboid is within the range we want to see
        if cuboid['position']['x'] > fwd_range[0] and cuboid['position']['x'] < fwd_range[1]:
            if cuboid['position']['y'] > side_range[0] and cuboid['position']['y'] < side_range[1]:
                x_trunc.append(cuboid['position']['x'])
                y_trunc.append(cuboid['position']['y'])

                width = cuboid['dimensions']['x']
                length = cuboid['dimensions']['y']
                height = cuboid['dimensions']['z']
                radius = 3

                # The top view of the tracklet in the "cuboid frame".
                # The cuboid frame is a cuboid with origin (0, 0, 0):
                # we make a cuboid that has the dimensions of the tracklet but is located at the origin.
                front_right_top = np.array(
                    [[1, 0, 0, length / 2], [0, 1, 0, width / 2], [0, 0, 1, height / 2], [0, 0, 0, 1]])

                front_left_top = np.array(
                    [[1, 0, 0, length / 2], [0, 1, 0, -width / 2], [0, 0, 1, height / 2], [0, 0, 0, 1]])

                back_right_top = np.array(
                    [[1, 0, 0, -length / 2], [0, 1, 0, width / 2], [0, 0, 1, height / 2], [0, 0, 0, 1]])

                back_left_top = np.array(
                    [[1, 0, 0, -length / 2], [0, 1, 0, -width / 2], [0, 0, 1, height / 2], [0, 0, 0, 1]])

                # Project to lidar
                f_r_t = np.matmul(T_Lidar_Cuboid, front_right_top)
                f_l_t = np.matmul(T_Lidar_Cuboid, front_left_top)
                b_r_t = np.matmul(T_Lidar_Cuboid, back_right_top)
                b_l_t = np.matmul(T_Lidar_Cuboid, back_left_top)

                x_1.append(f_r_t[0][3])
                y_1.append(f_r_t[1][3])

                x_2.append(f_l_t[0][3])
                y_2.append(f_l_t[1][3])

                x_3.append(b_r_t[0][3])
                y_3.append(b_r_t[1][3])

                x_4.append(b_l_t[0][3])
                y_4.append(b_l_t[1][3])

    # To use for the plot
    x_img_tracklets = [i * -1 for i in y_trunc]  # in the image to plot, the negative lidar y axis is the img x axis
    y_img_tracklets = x_trunc  # the lidar x axis is the img y axis

    x_img_1 = [i * -1 for i in y_1]
    y_img_1 = x_1

    x_img_2 = [i * -1 for i in y_2]
    y_img_2 = x_2

    x_img_3 = [i * -1 for i in y_3]
    y_img_3 = x_3

    x_img_4 = [i * -1 for i in y_4]
    y_img_4 = x_4

    # Shift values such that (0, 0) is the minimum
    x_img_tracklets = [i - side_range[0] for i in x_img_tracklets]
    y_img_tracklets = [i - fwd_range[0] for i in y_img_tracklets]

    x_img_1 = [i - side_range[0] for i in x_img_1]
    y_img_1 = [i - fwd_range[0] for i in y_img_1]

    x_img_2 = [i - side_range[0] for i in x_img_2]
    y_img_2 = [i - fwd_range[0] for i in y_img_2]

    x_img_3 = [i - side_range[0] for i in x_img_3]
    y_img_3 = [i - fwd_range[0] for i in y_img_3]

    x_img_4 = [i - side_range[0] for i in x_img_4]
    y_img_4 = [i - fwd_range[0] for i in y_img_4]

    # Plot the tracklets
    for i in range(len(x_img_1)):
        poly = np.array([[x_img_1[i], y_img_1[i]], [x_img_2[i], y_img_2[i]], [x_img_4[i], y_img_4[i]], [x_img_3[i], y_img_3[i]]])
        polys = patches.Polygon(poly, closed=True, fill=False, edgecolor='r', linewidth=1)
        ax.add_patch(polys)

    # ax.scatter(x_img_tracklets, y_img_tracklets, marker='o', color='red', linewidths=1)  # center of the tracklets
    ax.scatter(x_img, y_img, s=1, c=pixel_values, alpha=1.0, cmap=cmap)
    ax.set_facecolor((0, 0, 0))  # background is black
    ax.axis('scaled')  # {equal, scaled}
    ax.xaxis.set_visible(False)  # Do not draw axis tick marks
    ax.yaxis.set_visible(False)  # Do not draw axis tick marks
    plt.xlim([0, x_max])
    plt.ylim([0, y_max])
    fig.savefig("/home/matthew/Desktop/bev_" + str(frame) + ".png", dpi=dpi, bbox_inches='tight', pad_inches=0.0)

bev(50, 50, 50, 50, frame, lidar_path, annotations_path)

--------------------------------------------------------------------------------
/run_demo_tracklets.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import json
import numpy as np
import cv2
import load_calibration
from scipy.spatial.transform import Rotation as R

frame = 26
cam = '0'
seq = '0010'
DISTORTED = False
MOVE_FORWARD = False
# BASE = "/media/matthew/WAVELAB_2TB/winter/data/"
BASE = "/media/matthew/MOOSE-4TB/2019_02_27/"
CALIB_BASE = "/media/matthew/WAVELAB_2TB/winter/"

if DISTORTED:
    path_type = 'raw'
else:
    path_type = 'processed'

lidar_path = BASE + seq + "/" + path_type + "/lidar_points/data/" + format(frame, '010') + ".bin"
calib_path = CALIB_BASE + "calib/"
img_path = BASE + seq + "/" + path_type + "/image_0" + cam + "/data/" + format(frame, '010') + ".png"

annotations_file = BASE + seq + "/3d_ann.json"

# Load the 3D annotations
annotations_data = None
with open(annotations_file) as f:
    annotations_data = json.load(f)

calib = load_calibration.load_calibration(calib_path)

# Projection matrix from camera to image frame
T_IMG_CAM = np.eye(4)
T_IMG_CAM[0:3, 0:3] = np.array(calib['CAM0' + cam]['camera_matrix']['data']).reshape(-1, 3)
T_IMG_CAM = T_IMG_CAM[0:3, 0:4]  # remove last row

# The extrinsics store T_LIDAR_CAM, so invert to get T_CAM_LIDAR
T_CAM_LIDAR = np.linalg.inv(np.array(calib['extrinsics']['T_LIDAR_CAM0' + cam]))

T_IMG_LIDAR = np.matmul(T_IMG_CAM, T_CAM_LIDAR)

img = cv2.imread(img_path)
img_h, img_w = img.shape[:2]

# Add each cuboid to the image
for cuboid in annotations_data[frame]['cuboids']:
    T_Lidar_Cuboid = np.eye(4)
    T_Lidar_Cuboid[0:3, 0:3] = R.from_euler('z', cuboid['yaw'], degrees=False).as_matrix()
    T_Lidar_Cuboid[0][3] = cuboid['position']['x']
    T_Lidar_Cuboid[1][3] = cuboid['position']['y']
    T_Lidar_Cuboid[2][3] = cuboid['position']['z']

    # T_Lidar_Cuboid[0][3] = -T_Lidar_Cuboid[0][3]

    # if (cuboid['label'] != 'Truck'):
    #     continue
    # if (cuboid['attributes']['truck_type'] != 'Semi_Truck'):
    #     continue

    width = cuboid['dimensions']['x']
    length = cuboid['dimensions']['y']
    height = cuboid['dimensions']['z']
    radius = 3

    # Draw a circle in the middle of the cuboid
    tmp = np.matmul(T_CAM_LIDAR, T_Lidar_Cuboid)
    if tmp[2][3] < 0:  # Behind the camera
        continue
    test = np.matmul(T_IMG_CAM, tmp)
    x = int(test[0][3] / test[2][3])
    y = int(test[1][3] / test[2][3])
    cv2.circle(img, (x, y), radius, [0, 0, 255], thickness=2, lineType=8, shift=0)

    # Poses of the eight corners in the cuboid frame
    front_right_bottom = np.array([[1, 0, 0, length/2], [0, 1, 0, -width/2], [0, 0, 1, -height/2], [0, 0, 0, 1]])
    front_right_top = np.array([[1, 0, 0, length/2], [0, 1, 0, -width/2], [0, 0, 1, height/2], [0, 0, 0, 1]])
    front_left_bottom = np.array([[1, 0, 0, length/2], [0, 1, 0, width/2], [0, 0, 1, -height/2], [0, 0, 0, 1]])
    front_left_top = np.array([[1, 0, 0, length/2], [0, 1, 0, width/2], [0, 0, 1, height/2], [0, 0, 0, 1]])

    back_right_bottom = np.array([[1, 0, 0, -length/2], [0, 1, 0, -width/2], [0, 0, 1, -height/2], [0, 0, 0, 1]])
    back_right_top = np.array([[1, 0, 0, -length/2], [0, 1, 0, -width/2], [0, 0, 1, height/2], [0, 0, 0, 1]])
    back_left_bottom = np.array([[1, 0, 0, -length/2], [0, 1, 0, width/2], [0, 0, 1, -height/2], [0, 0, 0, 1]])
    back_left_top = np.array([[1, 0, 0, -length/2], [0, 1, 0, width/2], [0, 0, 1, height/2], [0, 0, 0, 1]])

    # Project each corner to the image, skipping cuboids with a corner behind the camera
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, front_right_bottom))
    if tmp[2][3] < 0:
        continue
    f_r_b = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, front_right_top))
    if tmp[2][3] < 0:
        continue
    f_r_t = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, front_left_bottom))
    if tmp[2][3] < 0:
        continue
    f_l_b = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, front_left_top))
    if tmp[2][3] < 0:
        continue
    f_l_t = np.matmul(T_IMG_CAM, tmp)

    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, back_right_bottom))
    if tmp[2][3] < 0:
        continue
    b_r_b = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, back_right_top))
    if tmp[2][3] < 0:
        continue
    b_r_t = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, back_left_bottom))
    if tmp[2][3] < 0:
        continue
    b_l_b = np.matmul(T_IMG_CAM, tmp)
    tmp = np.matmul(T_CAM_LIDAR, np.matmul(T_Lidar_Cuboid, back_left_top))
    if tmp[2][3] < 0:
        continue
    b_l_t = np.matmul(T_IMG_CAM, tmp)

    # Remove z by dividing through by depth, and make sure each
    # projected corner lands inside the image bounds
    f_r_b_coord = (int(f_r_b[0][3] / f_r_b[2][3]), int(f_r_b[1][3] / f_r_b[2][3]))
    f_r_t_coord = (int(f_r_t[0][3] / f_r_t[2][3]), int(f_r_t[1][3] / f_r_t[2][3]))
    f_l_b_coord = (int(f_l_b[0][3] / f_l_b[2][3]), int(f_l_b[1][3] / f_l_b[2][3]))
    f_l_t_coord = (int(f_l_t[0][3] / f_l_t[2][3]), int(f_l_t[1][3] / f_l_t[2][3]))
    if f_r_b_coord[0] < 0 or f_r_b_coord[0] > img_w or f_r_b_coord[1] < 0 or f_r_b_coord[1] > img_h:
        continue
    if f_r_t_coord[0] < 0 or f_r_t_coord[0] > img_w or f_r_t_coord[1] < 0 or f_r_t_coord[1] > img_h:
        continue
    if f_l_b_coord[0] < 0 or f_l_b_coord[0] > img_w or f_l_b_coord[1] < 0 or f_l_b_coord[1] > img_h:
        continue
    if f_l_t_coord[0] < 0 or f_l_t_coord[0] > img_w or f_l_t_coord[1] < 0 or f_l_t_coord[1] > img_h:
        continue

    b_r_b_coord = (int(b_r_b[0][3] / b_r_b[2][3]), int(b_r_b[1][3] / b_r_b[2][3]))
    b_r_t_coord = (int(b_r_t[0][3] / b_r_t[2][3]), int(b_r_t[1][3] / b_r_t[2][3]))
    b_l_b_coord = (int(b_l_b[0][3] / b_l_b[2][3]), int(b_l_b[1][3] / b_l_b[2][3]))
    b_l_t_coord = (int(b_l_t[0][3] / b_l_t[2][3]), int(b_l_t[1][3] / b_l_t[2][3]))
    if b_r_b_coord[0] < 0 or b_r_b_coord[0] > img_w or b_r_b_coord[1] < 0 or b_r_b_coord[1] > img_h:
        continue
    if b_r_t_coord[0] < 0 or b_r_t_coord[0] > img_w or b_r_t_coord[1] < 0 or b_r_t_coord[1] > img_h:
        continue
    if b_l_b_coord[0] < 0 or b_l_b_coord[0] > img_w or b_l_b_coord[1] < 0 or b_l_b_coord[1] > img_h:
        continue
    if b_l_t_coord[0] < 0 or b_l_t_coord[0] > img_w or b_l_t_coord[1] < 0 or b_l_t_coord[1] > img_h:
        continue

    # Draw the 12 edges
    # Front
    cv2.line(img, f_r_b_coord, f_r_t_coord, [0, 0, 255], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_r_b_coord, f_l_b_coord, [0, 0, 255], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_l_b_coord, f_l_t_coord, [0, 0, 255], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_l_t_coord, f_r_t_coord, [0, 0, 255], thickness=2, lineType=8, shift=0)
    # Back
    cv2.line(img, b_r_b_coord, b_r_t_coord, [0, 0, 100], thickness=2, lineType=8, shift=0)
    cv2.line(img, b_r_b_coord, b_l_b_coord, [0, 0, 100], thickness=2, lineType=8, shift=0)
    cv2.line(img, b_l_b_coord, b_l_t_coord, [0, 0, 100], thickness=2, lineType=8, shift=0)
    cv2.line(img, b_l_t_coord, b_r_t_coord, [0, 0, 100], thickness=2, lineType=8, shift=0)
    # Connect front to back
    cv2.line(img, f_r_b_coord, b_r_b_coord, [0, 0, 150], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_r_t_coord, b_r_t_coord, [0, 0, 150], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_l_b_coord, b_l_b_coord, [0, 0, 150], thickness=2, lineType=8, shift=0)
    cv2.line(img, f_l_t_coord, b_l_t_coord, [0, 0, 150], thickness=2, lineType=8, shift=0)

    print(cuboid)
    print(f_r_b_coord)
    print(f_r_t_coord)
    print(f_l_b_coord)
    print(f_l_t_coord)

    print(b_r_b_coord)
    print(b_r_t_coord)
    print(b_l_b_coord)
    print(b_l_t_coord)

    # break

cv2.imshow('image', img)
# cv2.imwrite("test.png", img)
cv2.waitKey(10000)

--------------------------------------------------------------------------------
/run_demo_vehicle_path.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt

import load_novatel_data, convert_novatel_to_pose

novatel_path = '/media/matthew/WAVELAB_2TB/winter/data/0027/processed/novatel/data/'

novatel = load_novatel_data.load_novatel_data(novatel_path)
poses = convert_novatel_to_pose.convert_novatel_to_pose(novatel)

mpl.rcParams['legend.fontsize'] = 10

fig = plt.figure()
# fig.gca(projection='3d') was removed in newer matplotlib releases
ax = fig.add_subplot(projection='3d')

ax.set_title('Vehicle path')
ax.set_xlabel('East (m)')
ax.set_ylabel('North (m)')
ax.set_zlabel('Up (m)')

# Each column of A is a homogeneous endpoint of one of the three unit axes
length = 1
A = np.matrix([[0, 0, 0, 1],
               [length, 0, 0, 1],
               [0, 0, 0, 1],
               [0, length, 0, 1],
               [0, 0, 0, 1],
               [0, 0, length, 1]]).transpose()

for pose in poses:
    B = np.matmul(pose, A)
    ax.plot([B[0, 0], B[0, 1]], [B[1, 0], B[1, 1]], [B[2, 0], B[2, 1]], 'r-')  # x: red
    ax.plot([B[0, 2], B[0, 3]], [B[1, 2], B[1, 3]], [B[2, 2], B[2, 3]], 'g-')  # y: green
    ax.plot([B[0, 4], B[0, 5]], [B[1, 4], B[1, 5]], [B[2, 4], B[2, 5]], 'b-')  # z: blue

# Equal axis doesn't seem to work, so set an arbitrary limit on the z axis
ax.set_zlim3d(-10, 10)

plt.show()

--------------------------------------------------------------------------------
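
Supplementary sketch (editorial, not a file in the repository): the rotation
block assembled in convert_novatel_to_pose.py should match the intrinsic z-x-y
Euler composition described in the NovAtel application note it cites (APN-037).
The following standalone check, with placeholder angle values, verifies this
using the same scipy Rotation class the demo scripts already import:

import numpy as np
from scipy.spatial.transform import Rotation as R

# Angles as convert_novatel_to_pose.py derives them from a NovAtel message
roll, pitch = np.deg2rad(2.0), np.deg2rad(-1.0)  # placeholder values
azimuth = 135.0                                  # degrees, clockwise from north
yaw = np.deg2rad(-1.0 * azimuth)                 # ENU yaw is counter-clockwise

# Intrinsic z-x-y composition: Rz(yaw) @ Rx(pitch) @ Ry(roll)
rot_scipy = R.from_euler('ZXY', [yaw, pitch, roll]).as_matrix()

# Rotation block copied from convert_novatel_to_pose.py
c_phi, s_phi = np.cos(roll), np.sin(roll)
c_theta, s_theta = np.cos(pitch), np.sin(pitch)
c_psi, s_psi = np.cos(yaw), np.sin(yaw)
rot_devkit = np.array([
    [c_psi * c_phi - s_psi * s_theta * s_phi, -s_psi * c_theta, c_psi * s_phi + s_psi * s_theta * c_phi],
    [s_psi * c_phi + c_psi * s_theta * s_phi, c_psi * c_theta, s_psi * s_phi - c_psi * s_theta * c_phi],
    [-c_theta * s_phi, s_theta, c_theta * c_phi]])

assert np.allclose(rot_scipy, rot_devkit)  # the two constructions agree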