├── .github
└── workflows
│ ├── pypi.yml
│ └── python.yml
├── .gitignore
├── LICENSE
├── README.md
└── python
├── LICENSE
├── README.md
├── pyproject.toml
└── vbr_devkit
├── __init__.py
├── datasets
├── __init__.py
├── convert_bag.py
├── kitti.py
└── ros.py
├── download
├── __init__.py
└── download_data.py
└── tools
├── __init__.py
├── console.py
├── image.py
├── imu.py
├── point_cloud2.py
└── run.py
/.github/workflows/pypi.yml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI.org
2 | on:
3 | release:
4 | types: [published]
5 | push:
6 | branches: ["main"]
7 | pull_request:
8 | branches: ["main"]
9 |
10 | jobs:
11 | build_sdist:
12 | name: Build source distribution
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | - name: Build sdist
17 | run: pipx run build --sdist ${{github.workspace}}/python/
18 | - name: Move sdist to dist
19 | run: mkdir -p dist && mv ${{github.workspace}}/python/dist/*.tar.gz dist/
20 |
21 | - uses: actions/upload-artifact@v3
22 | with:
23 | path: dist/*.tar.gz
24 |
25 | pypi:
26 | if: github.event_name == 'release'
27 | needs: build_sdist
28 | runs-on: ubuntu-latest
29 | steps:
30 | - uses: actions/download-artifact@v3
31 | with:
32 | name: artifact
33 | path: dist
34 |
35 | - uses: pypa/gh-action-pypi-publish@release/v1
36 | with:
37 | password: ${{ secrets.PYPI_API_TOKEN }}
38 |
39 |
40 |
41 |
--------------------------------------------------------------------------------
/.github/workflows/python.yml:
--------------------------------------------------------------------------------
1 | name: Python API
2 | on:
3 | push:
4 | branches: ["main"]
5 | pull_request:
6 | branches: ["main"]
7 |
8 | jobs:
9 | python_package:
10 | runs-on: ${{ matrix.os }}
11 | strategy:
12 | matrix:
13 | os: [ubuntu-20.04, ubuntu-22.04]
14 | steps:
15 | - uses: actions/checkout@v3
16 | - name: Set up Python3
17 | uses: actions/setup-python@v3
18 | - name: Install dependencies
19 | run: |
20 | python -m pip install --upgrade pip
21 | - name: Build pip package
22 | run: |
23 | python -m pip install --verbose ./python/
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Prerequisites
2 | *.d
3 |
4 | # Compiled Object files
5 | *.slo
6 | *.lo
7 | *.o
8 | *.obj
9 |
10 | # Precompiled Headers
11 | *.gch
12 | *.pch
13 |
14 | # Compiled Dynamic libraries
15 | *.so
16 | *.dylib
17 | *.dll
18 |
19 | # Fortran module files
20 | *.mod
21 | *.smod
22 |
23 | # Compiled Static libraries
24 | *.lai
25 | *.la
26 | *.a
27 | *.lib
28 |
29 | # Executables
30 | *.exe
31 | *.out
32 | *.app
33 |
34 | __pycache__
35 | cmake-build-debug
36 |
37 | .idea
38 | python/dist
39 |
40 |
41 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2024, Robotics Vision and Perception Group
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
VBR Development Kit
3 |
7 |

8 |

9 |

10 |
11 |
12 |

13 |
14 | This kit contains utilities to download the VBR dataset
15 |
16 | # Install
17 |
18 | ```shell
19 | pip install vbr-devkit
20 | ```
21 |
22 | You can install autocompletion for our package by typing:
23 |
24 | ```shell
25 | vbr --install-completion
26 | ```
27 |
28 | you might need to restart the shell for the autocompletion to take effect.
29 |
30 | # Usage
31 | ## Download sequences
32 |
33 | You can list the available sequences you can download by typing:
34 |
35 | ```shell
36 | vbr list
37 | ```
38 | You should see something similar to this
39 | 
40 |
41 | After choosing your sequence, you can type
42 |
43 | ```shell
44 | vbr download <sequence_name> <output_dir>
45 | ```
46 |
47 | For instance, we could save `campus_train0` as follows:
48 |
49 | ```shell
50 | vbr download campus_train0 ~/data/
51 | ```
52 | **N.B.** The script will actually save the sequence at `<output_dir>/vbr_slam/<environment>/<sequence_name>/`. Moreover, by calling the previous command, we expect the following directory:
53 | ```
54 | data
55 | - vbr_slam
56 | - campus
57 | - campus_train0
58 | - vbr_calib.yaml
59 | - campus_train0_gt.txt
60 | - campus_train0_00.bag
61 | - campus_train0_01.bag
62 | - campus_train0_02.bag
63 | - campus_train0_03.bag
64 | - campus_train0_04.bag
65 | ```
66 |
67 | ## Convert format
68 |
69 | The sequences are provided in ROS1 format. We offer a convenient tool to change representation if you prefer working on a different format.
70 | You can see the supported formats by typing:
71 |
72 | ```shell
73 | vbr convert --help
74 | ```
75 |
76 | To convert a bag or a sequence of bags, type:
77 | ```shell
78 | vbr convert <output_format> <input_bag_or_directory> <output_dir>
79 | ```
80 |
81 | for instance, we could convert the `campus_train0` sequence to `kitti` format as follows:
82 |
83 | ```shell
84 | vbr convert kitti ~/data/vbr_slam/campus/campus_train0/campus_train0_00.bag ~/data/campus_train0_00_kitti/
85 | ```
86 |
87 | We can expect the following result:
88 |
89 | ```
90 | data
91 | - campus_train0_00_kitti
92 | - camera_left
93 | - timestamps.txt
94 | - data
95 | - 0000000000.png
96 | - 0000000001.png
97 | - ...
98 | - camera_right
99 | - timestamps.txt
100 | - data
101 | - 0000000000.png
102 | - 0000000001.png
103 | - ...
104 | - ouster_points
105 | - timestamps.txt
106 | - data
107 | - .dtype.pkl
108 | - 0000000000.bin
109 | - 0000000001.bin
110 | - ...
111 | - ...
112 | ```
113 |
114 | **N.B.** In KITTI format, point clouds are embedded in binary files that can be opened using `Numpy` and `pickle` as follows:
115 |
116 | ```python
117 | import numpy as np
118 | import pickle
119 |
120 | with open("campus_train0_00_kitti/ouster_points/data/.dtype.pkl", "rb") as f:
121 | cdtype = pickle.load(f)
122 |
123 | cloud_numpy = np.fromfile("/campus_train0_00_kitti/ouster_points/data/0000000000.bin", dtype=cdtype)
124 | ```
125 |
126 |
--------------------------------------------------------------------------------
/python/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2024, Robotics Vision and Perception Group
4 |
5 | Redistribution and use in source and binary forms, with or without
6 | modification, are permitted provided that the following conditions are met:
7 |
8 | 1. Redistributions of source code must retain the above copyright notice, this
9 | list of conditions and the following disclaimer.
10 |
11 | 2. Redistributions in binary form must reproduce the above copyright notice,
12 | this list of conditions and the following disclaimer in the documentation
13 | and/or other materials provided with the distribution.
14 |
15 | 3. Neither the name of the copyright holder nor the names of its
16 | contributors may be used to endorse or promote products derived from
17 | this software without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 |
--------------------------------------------------------------------------------
/python/README.md:
--------------------------------------------------------------------------------
1 |
2 |
VBR Development Kit
3 |

4 |

5 |

6 |
7 |
8 |

9 |
10 | This kit contains utilities to work on the VBR SLAM dataset
11 |
12 | # Install
13 |
14 | ```shell
15 | pip install vbr-devkit
16 | ```
17 |
18 | You can install autocompletion for our package by typing:
19 |
20 | ```shell
21 | vbr --install-completion
22 | ```
23 |
24 | you might need to restart the shell for the autocompletion to take effect.
25 |
26 | # Usage
27 | ## Download sequences
28 |
29 | You can list the available sequences you can download by typing:
30 |
31 | ```shell
32 | vbr list
33 | ```
34 | You should see something similar to this
35 | 
36 |
37 | After choosing your sequence, you can type
38 |
39 | ```shell
40 | vbr download <sequence_name> <output_dir>
41 | ```
42 |
43 | For instance, we could save `campus_train0` as follows:
44 |
45 | ```shell
46 | vbr download campus_train0 ~/data/
47 | ```
48 | **N.B.** The script will actually save the sequence at `<output_dir>/vbr_slam/<environment>/<sequence_name>/`. Moreover, by calling the previous command, we expect the following directory:
49 | ```
50 | data
51 | - vbr_slam
52 | - campus
53 | - campus_train0
54 | - vbr_calib.yaml
55 | - campus_train0_gt.txt
56 | - campus_train0_00.bag
57 | - campus_train0_01.bag
58 | - campus_train0_02.bag
59 | - campus_train0_03.bag
60 | - campus_train0_04.bag
61 | ```
62 |
63 | ## Convert format
64 |
65 | The sequences are provided in ROS1 format. We offer a convenient tool to change representation if you prefer working on a different format.
66 | You can see the supported formats by typing:
67 |
68 | ```shell
69 | vbr convert --help
70 | ```
71 |
72 | To convert a bag or a sequence of bags, type:
73 | ```shell
74 | vbr convert <output_format> <input_bag_or_directory> <output_dir>
75 | ```
76 |
77 | for instance, we could convert the `campus_train0` sequence to `kitti` format as follows:
78 |
79 | ```shell
80 | vbr convert kitti ~/data/vbr_slam/campus/campus_train0/campus_train0_00.bag ~/data/campus_train0_00_kitti/
81 | ```
82 |
83 | We can expect the following result:
84 |
85 | ```
86 | data
87 | - campus_train0_00_kitti
88 | - camera_left
89 | - timestamps.txt
90 | - data
91 | - 0000000000.png
92 | - 0000000001.png
93 | - ...
94 | - camera_right
95 | - timestamps.txt
96 | - data
97 | - 0000000000.png
98 | - 0000000001.png
99 | - ...
100 | - ouster_points
101 | - timestamps.txt
102 | - data
103 | - .dtype.pkl
104 | - 0000000000.bin
105 | - 0000000001.bin
106 | - ...
107 | - ...
108 | ```
109 |
110 | **N.B.** In KITTI format, point clouds are embedded in binary files that can be opened using `Numpy` and `pickle` as follows:
111 |
112 | ```python
113 | import numpy as np
114 | import pickle
115 |
116 | with open("campus_train0_00_kitti/ouster_points/data/.dtype.pkl", "rb") as f:
117 | cdtype = pickle.load(f)
118 |
119 | cloud_numpy = np.fromfile("/campus_train0_00_kitti/ouster_points/data/0000000000.bin", dtype=cdtype)
120 | ```
121 |
122 |
--------------------------------------------------------------------------------
/python/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "vbr-devkit"
7 | version = "0.1.0"
8 | description = "Development kit for VBR SLAM dataset"
9 | readme = "README.md"
10 | authors = [
11 | { name = "Emanuele Giacomini", email = "giacomini@diag.uniroma1.it" },
12 | { name = "Leonardo Brizi", email = "brizi@diag.uniroma1.it" },
13 | ]
14 | requires-python = ">=3.8"
15 |
16 | classifiers = [
17 | "Programming Language :: Python :: 3",
18 | "License :: OSI Approved :: BSD License",
19 | "Operating System :: Unix",
20 | "Intended Audience :: Developers",
21 | "Intended Audience :: Science/Research"
22 | ]
23 |
24 | dependencies = [
25 | "natsort",
26 | "numpy",
27 | "rich",
28 | "typer[all]>=0.10.0",
29 | "rosbags",
30 | "rosbags.image"
31 | ]
32 |
33 | [project.scripts]
34 | vbr = "vbr_devkit.tools.run:app"
35 |
36 | [project.urls]
37 | Homepage = "https://github.com/rvp-group/vbr-devkit"
38 | Issues = "https://github.com/rvp-group/vbr-devkit/issues"
39 |
--------------------------------------------------------------------------------
/python/vbr_devkit/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.10.0"
2 |
--------------------------------------------------------------------------------
/python/vbr_devkit/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | from .ros import RosReader
2 | from .kitti import KittiWriter
--------------------------------------------------------------------------------
/python/vbr_devkit/datasets/convert_bag.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from pathlib import Path
4 |
5 | from vbr_devkit.datasets import KittiWriter, RosReader
6 | import typer
7 | from enum import Enum
8 | from rich.progress import track
9 | from rosbags import convert
10 |
11 |
class OutputDataInterface(str, Enum):
    """Supported output formats for bag conversion (CLI choices for `vbr convert`)."""
    # NOTE: no trailing commas here. A trailing comma turns the assigned value
    # into a tuple, which only worked by accident of Enum's constructor-argument
    # unpacking for str-mixin enums and is a latent bug / readability trap.
    kitti = "kitti"
    ros2 = "ros2"
    # Can insert additional conversion formats
16 |
17 |
# Maps each output format to its writer class. `ros2` is intentionally absent:
# ROS1 -> ROS2 conversion is delegated to rosbags' converter inside `main`.
OutputDataInterface_lut = {
    OutputDataInterface.kitti: KittiWriter
}
21 |
def main(to: OutputDataInterface, input_dir: Path, output_dir: Path) -> None:
    """Convert a rosbag (or a directory of rosbags) to the requested format.

    :param to: target format, one of OutputDataInterface
    :param input_dir: a single ``.bag`` file or a directory containing ``.bag`` files
    :param output_dir: destination root; output goes under one folder per bag stem
    """
    if to == OutputDataInterface.ros2:
        # ROS1 -> ROS2: delegate to rosbags' own converter, one bag at a time.
        if not input_dir.is_dir():
            print("Processing...")
            convert.convert(input_dir, output_dir / input_dir.stem)
        else:
            for item in track(list(input_dir.iterdir()), description="Processing..."):
                if item.suffix == '.bag':
                    convert.convert(item, output_dir / item.stem)
    else:
        # All other formats: stream messages out of the bag(s) and hand each
        # (timestamp, topic, message) triple to the matching writer.
        with RosReader(input_dir) as reader:
            with OutputDataInterface_lut[to](output_dir) as writer:
                for timestamp, topic, message in track(reader, description="Processing..."):
                    writer.publish(timestamp, topic, message)


if __name__ == "__main__":
    typer.run(main)
40 |
--------------------------------------------------------------------------------
/python/vbr_devkit/datasets/kitti.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import json
3 | from pathlib import Path
4 | from typing import Union
5 | import numpy as np
6 | import cv2
7 | from datetime import datetime
8 |
9 | from vbr_devkit.tools import PointCloudXf, Image, Imu
10 |
# Column header for the per-topic imu.txt CSV written by KittiTopicHandler._save_imu.
# Layout matches the 37 values written per row:
#   3 linear acceleration + 3 angular velocity + 4 orientation quaternion (w,x,y,z)
#   + three 9-element row-major covariance matrices (acc, gyr, rot).
# BUGFIX: the previous header had duplicated/mislabeled covariance columns
# (e.g. "cov_gyr_xx,..,xz" twice, "cov_rot_zx" twice, missing rows).
IMU_CSV_HEADER = ("acc_x,acc_y,acc_z,"
                  "gyr_x,gyr_y,gyr_z,"
                  "quat_w,quat_x,quat_y,quat_z,"
                  "cov_acc_xx,cov_acc_xy,cov_acc_xz,"
                  "cov_acc_yx,cov_acc_yy,cov_acc_yz,"
                  "cov_acc_zx,cov_acc_zy,cov_acc_zz,"
                  "cov_gyr_xx,cov_gyr_xy,cov_gyr_xz,"
                  "cov_gyr_yx,cov_gyr_yy,cov_gyr_yz,"
                  "cov_gyr_zx,cov_gyr_zy,cov_gyr_zz,"
                  "cov_rot_xx,cov_rot_xy,cov_rot_xz,"
                  "cov_rot_yx,cov_rot_yy,cov_rot_yz,"
                  "cov_rot_zx,cov_rot_zy,cov_rot_zz\n")
23 |
24 |
class KittiTopicHandler:
    """Writes all messages of a single ROS topic into a KITTI-style folder.

    Layout: ``<data_dir>/.metadata.json`` (topic name, message type, counters),
    ``<data_dir>/timestamps.txt`` and ``<data_dir>/data/<index>.<ext>``
    (or a single ``data/imu.txt`` for IMU topics).
    """

    def __init__(self, data_dir: Path, topic: str, format_fn=None):
        self.metadata_f = data_dir / ".metadata.json"
        self.timestamps_f = data_dir / "timestamps.txt"
        self.data_f = data_dir / "data"

        self.metadata = {}
        self.timestamps = []

        # Callable mapping a message index to its zero-padded file stem.
        self.format_fn = format_fn

        # Lazily-opened handle for data/imu.txt (IMU topics only).
        self.imu_dest = None

        # Dispatch table: message class name -> save routine.
        self.save_fn = {
            PointCloudXf.__name__: self._save_cloud,
            Image.__name__: self._save_image,
            Imu.__name__: self._save_imu
        }

        if self.metadata_f.exists():
            # Resuming on an existing folder: load metadata and make sure it
            # refers to the same topic we are being bound to.
            with self.metadata_f.open("r") as f:
                self.metadata = json.load(f)

            if topic:
                assert topic == self.metadata["topic"]
        else:
            self.metadata["topic"] = topic
            self.metadata["num_messages"] = 0
        # BUGFIX: create the data directory in both branches; previously it was
        # only created for fresh folders, so resuming with a deleted/missing
        # "data" subfolder would fail on the first write.
        self.data_f.mkdir(exist_ok=True, parents=True)

    def push_back(self, data: Union[PointCloudXf, Image, Imu], timestamp, *args, **kwargs):
        """Append one message: dispatch to the type-specific save routine and
        record its timestamp. Raises if the message type changes mid-stream."""
        if "msg_type" not in self.metadata:
            self.metadata["msg_type"] = data.__class__.__name__

        if self.metadata["msg_type"] != data.__class__.__name__:
            raise RuntimeError(
                f"TopicHandler is bound to {self.metadata['msg_type']}. Cannot handle data of type {type(data)}")

        self.save_fn[self.metadata["msg_type"]](data, timestamp, *args, **kwargs)
        self.timestamps.append(timestamp)
        self.metadata["num_messages"] += 1

    def _save_cloud(self, data: PointCloudXf, timestamp, *args, **kwargs):
        """Write one point cloud as data/<index>.bin (structured or x,y,z,intensity)."""
        dest_path = self.data_f / Path(self.format_fn(self.metadata["num_messages"]) + ".bin")
        # Save fields to metadata to recover it later.
        # We assume fields to remain constant through data of this topic.
        if "fields" not in self.metadata.keys():
            self.metadata["fields"] = [
                f.__dict__ for f in data.fields
            ]
            # Dump the structured numpy dtype so readers can reinterpret the
            # raw .bin files (acknowledged workaround; see README).
            import pickle
            with open(self.data_f / ".dtype.pkl", "wb") as f:
                pickle.dump(data.points.dtype, f)

        if "pcloud_kitti_format" in kwargs:
            if kwargs.get("pcloud_kitti_format"):
                # KITTI convention: float32 Nx4 (x, y, z, intensity) flat binary.
                clip_points = np.stack([data.points["x"], data.points["y"], data.points["z"], data.points["intensity"]],
                                       axis=1)
                clip_points.astype(np.float32).tofile(dest_path)
                return

        data.points.tofile(dest_path)

    def _save_image(self, data: Image, timestamp: float, *args, **kwargs):
        """Write one image as data/<index>.png, optionally debayering first."""
        dest_path = self.data_f / Path(self.format_fn(self.metadata["num_messages"]) + ".png")

        if "rgb_convert" in kwargs:
            if kwargs.get("rgb_convert"):
                # Assumes an RG Bayer pattern from the sensor -- see cv2 flag.
                data.image = cv2.cvtColor(data.image, cv2.COLOR_BAYER_RG2RGB)
                data.encoding = "rgb8"

        if not "encoding" in self.metadata.keys():
            self.metadata["encoding"] = data.encoding

        cv2.imwrite(str(dest_path), data.image)

    def _save_imu(self, data: Imu, timestamp: float, *args, **kwargs):
        """Append one IMU sample as a CSV row of data/imu.txt (lazily opened)."""
        if not self.imu_dest:
            self.imu_dest = (self.data_f / "imu.txt").open("w")
            self.imu_dest.write(IMU_CSV_HEADER)

        imu_line = np.concatenate((data.linear_acceleration, data.angular_velocity, data.orientation,
                                   data.linear_acceleration_covariance, data.angular_velocity_covariance,
                                   data.orientation_covariance), axis=0)
        self.imu_dest.write(",".join(map(str, imu_line.tolist())) + "\n")

    def topic(self) -> str:
        """Return the ROS topic this handler is bound to."""
        return self.metadata["topic"]

    def close(self):
        """Flush metadata, timestamps and any open IMU file to disk."""
        # BUGFIX: close the lazily-opened IMU file; previously it was never
        # closed, risking loss of buffered rows at interpreter exit.
        if self.imu_dest:
            self.imu_dest.close()
            self.imu_dest = None
        with self.metadata_f.open("w") as f:
            json.dump(self.metadata, f)
        with self.timestamps_f.open("w") as f:
            f.writelines([
                str(np.datetime64(t, "ns")) + "\n" for t in self.timestamps])
122 |
123 |
class KittiWriter:
    """Context-managed writer that fans incoming messages out to one
    KittiTopicHandler per topic, rooted at ``data_dir``."""

    def __init__(self, data_dir: Path, rgb_convert: bool = True, pcloud_kitti_format: bool = True, *args, **kwargs):
        data_dir.mkdir(parents=True, exist_ok=True)
        self.destination_dir = data_dir
        self.data_handles = {}
        self.rgb_convert = rgb_convert
        self.pcloud_kitti_format = pcloud_kitti_format

    def __enter__(self):
        return self

    def publish(self, timestamp, topic: str, message: Union[PointCloudXf, Image, Imu]):
        """Route one message to its topic's handler, creating the handler
        (and its destination folder) on first sight of the topic."""
        handler = self.data_handles.get(topic)
        if handler is None:
            # Derive the folder name from the topic: drop the leading slash and
            # the /image_raw or /data suffixes, then flatten the remaining path.
            folder = topic[1:].replace("/image_raw", "").replace("/data", "").replace("/", "_")
            handler = KittiTopicHandler(self.destination_dir / Path(folder), topic,
                                        lambda idx: f"{idx:010d}")
            self.data_handles[topic] = handler

        handler.push_back(message, timestamp, rgb_convert=self.rgb_convert,
                          pcloud_kitti_format=self.pcloud_kitti_format)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Flush every per-topic handler on context exit.
        for handler in self.data_handles.values():
            handler.close()
151 |
--------------------------------------------------------------------------------
/python/vbr_devkit/datasets/ros.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from pathlib import Path
4 | from typing import Sequence, List, Tuple, Union, Any
5 | import natsort
6 |
7 | from vbr_devkit.tools import convert_msg_to_datum
8 | from vbr_devkit.tools import PointCloudXf, Image, Imu
9 |
10 |
class RosReader:
    # Sequential reader over one ROS1 bag or a directory of bags, yielding
    # (timestamp, topic, converted-datum) triples via __getitem__.

    def __init__(self, data_dir: Union[Path, Sequence[Path]], topics: List[str] = None, *args,
                 **kwargs):
        """
        :param data_dir: Directory containing rosbags or path to a rosbag file
        :param topics: Topics to read. If None, all topics will be read
        :param args:
        :param kwargs:
        """
        try:
            from rosbags.highlevel import AnyReader
        except ModuleNotFoundError:
            print("rosbags library not installed, run 'pip install -U rosbags'")
            sys.exit(-1)

        if data_dir.is_file():
            # Single bag file.
            #self.sequence_id = os.path.basename(data_dir).split(".")[0]
            self.bag = AnyReader([data_dir])
        else:
            # Directory of bags: natural sort so e.g. seq_2 precedes seq_10.
            #self.sequence_id = os.path.basename(data_dir[0]).split(".")[0]
            self.bag = AnyReader(natsort.natsorted([bag for bag in list(data_dir.glob("*.bag"))]))
            print("Reading multiple .bag files in directory:")
            print("\n".join([path.name for path in self.bag.paths]))

        self.bag.open()
        connections = self.bag.connections

        if topics:
            # Restrict iteration to the requested topics only.
            print("Reading the following topics")
            print("\n".join(topics))
            connections = [x for x in self.bag.connections if x.topic in topics]
        # Single shared message iterator; see the note on __getitem__ below.
        self.msgs = self.bag.messages(connections=connections)

    # def __del__(self):
    #     if hasattr(self, "bag"):
    #         self.bag.close()

    def __len__(self):
        # NOTE(review): message_count presumably counts all messages in the
        # bag(s), not only those of the filtered topics -- confirm if a topic
        # filter is used together with progress bars.
        return self.bag.message_count

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the underlying reader when leaving the `with` block.
        if hasattr(self, "bag"):
            self.bag.close()

    def __getitem__(self, item) -> Tuple[int, str, Union[PointCloudXf, Image, Imu, Any]]:
        # NOTE(review): `item` is ignored -- every call simply advances the
        # shared message iterator, so this supports exactly one sequential
        # pass (as done by rich's track over the reader); random access or a
        # second pass would not work.
        connection, timestamp, rawdata = next(self.msgs)
        msg = self.bag.deserialize(rawdata, connection.msgtype)
        msg = convert_msg_to_datum(msg, connection.msgtype)
        return timestamp, connection.topic, msg
63 |
--------------------------------------------------------------------------------
/python/vbr_devkit/download/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rvp-group/vbr-devkit/462ce90cc632a8a5ded5ee6d4bfa5dbfc1682788/python/vbr_devkit/download/__init__.py
--------------------------------------------------------------------------------
/python/vbr_devkit/download/download_data.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import ftplib
3 | from rich.progress import Progress, SpinnerColumn, TextColumn
4 | from rich.panel import Panel
5 | from vbr_devkit.tools.console import console
6 |
# FTP host serving the VBR dataset (anonymous login).
DATASET_LINK = "151.100.59.119"
FTP_USER = "anonymous"

# Sequence names available for download (shown by `vbr list`); the prefix
# before the first underscore is the environment folder on the server.
vbr_downloads = [
    "campus_test0",
    "campus_test1",
    "campus_train0",
    "campus_train1",
    "ciampino_test0",
    "ciampino_test1",
    "ciampino_train0",
    "ciampino_train1",
    "colosseo_test0",
    "colosseo_train0",
    "diag_test0",
    "diag_train0",
    "pincio_test0",
    "pincio_train0",
    "spagna_test0",
    "spagna_train0",
]
28 |
def download_seq_fld(seq: str, output_dir: Path) -> None:
    """Download every file of sequence ``seq`` over FTP into
    ``<output_dir>/vbr_slam/<environment>/<seq>/``.

    :param seq: sequence name, e.g. "campus_train0" (see ``vbr_downloads``)
    :param output_dir: local root directory for the download
    """
    def human_readable_size(size, decimal_places=2):
        # Render a byte count with binary prefixes (KiB, MiB, ...).
        for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']:
            if size < 1024.0 or unit == 'PiB':
                break
            size /= 1024.0
        return f"{size:.{decimal_places}f} {unit}"

    console.rule(f"[bold green] Downloading {seq}")

    # Establish FTP connection
    console.log(f"Connecting to {DATASET_LINK}")
    ftp = ftplib.FTP(DATASET_LINK)
    ftp.login(FTP_USER, "")
    console.log(":white_check_mark: Connection established")
    with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            transient=True) as progress:
        progress.add_task("Gathering files", total=None)
        db_path = "vbr_slam/" + seq.split("_")[0] + "/" + seq
        ftp.cwd(db_path)

        try:
            available_files = ftp.nlst()
            available_files = [(file, ftp.size(file)) for file in available_files]
        except ftplib.error_perm as resp:
            if str(resp) == "550 No files found":
                # BUGFIX: previously this only logged and fell through, then
                # crashed with NameError on the unbound `available_files`.
                console.log("[bold red] Invalid input sequence")
                ftp.quit()
                return
            raise

    # Sort based on size so small files (calib, gt) arrive first.
    available_files = sorted(available_files, key=lambda x: x[1])

    console.print(Panel(
        "\n".join(f"{f[0]}\t{human_readable_size(f[1])}" for f in available_files), title="Downloading files"
    ))

    available_files = [x[0] for x in available_files]
    # Downloading routine
    local_path = output_dir / "vbr_slam" / seq.split("_")[0] / seq
    local_path.mkdir(exist_ok=True, parents=True)
    with Progress() as progress:
        for f in available_files:
            local_fname = local_path / f
            task = progress.add_task(f"Downloading {f}", total=ftp.size(f))
            # BUGFIX: `with` guarantees the local file is closed even if the
            # transfer raises (previously fout leaked on error).
            with open(local_fname, "wb") as fout:
                def write_cb(data):
                    progress.update(task, advance=len(data))
                    fout.write(data)

                ftp.retrbinary("RETR " + f, write_cb)
    ftp.quit()
    console.print(":tada: Completed")
86 |
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/__init__.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Union
2 |
3 | from rosbags.typesys.types import (
4 | sensor_msgs__msg__Imu,
5 | sensor_msgs__msg__Image,
6 | sensor_msgs__msg__PointCloud2
7 | )
8 | from vbr_devkit.tools.image import Image
9 | from vbr_devkit.tools.imu import Imu
10 | from vbr_devkit.tools.point_cloud2 import PointCloudXf
11 |
# LUT from ROS message type string to the converter that produces the
# corresponding devkit datum class.
conversion_dict = {
    "sensor_msgs/msg/PointCloud2": PointCloudXf.from_ros,
    "sensor_msgs/msg/Image": Image.from_ros,
    "sensor_msgs/msg/Imu": Imu.from_ros
}


def convert_msg_to_datum(
        msg: Any,
        msg_type: str) -> Union[PointCloudXf, Image, Imu]:
    """Convert a deserialized ROS message to the matching devkit datum.

    :param msg: deserialized ROS message instance
    :param msg_type: ROS type string, e.g. "sensor_msgs/msg/Imu"
    :raises KeyError: if msg_type has no registered converter
    """
    return conversion_dict[msg_type](msg)
23 |
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/console.py:
--------------------------------------------------------------------------------
1 | from rich.console import Console
2 |
# Shared Rich console instance used across the devkit for styled output.
console = Console()
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/image.py:
--------------------------------------------------------------------------------
1 | try:
2 | from rosbags.typesys.types import sensor_msgs__msg__Image
3 | except ImportError as e:
4 | raise ImportError('rosbags library not installed, run "pip install -U rosbags"') from e
5 |
6 | try:
7 | from rosbags.image import message_to_cvimage
8 | except ImportError as e:
9 | raise ImportError('rosbags-image library not installed, run "pip install -U rosbags-image"') from e
10 |
11 |
class Image:
    """Thin container for an image decoded from a sensor_msgs/Image message."""

    def __init__(self):
        # OpenCV-style ndarray once populated; None until then.
        self.image = None
        # Original ROS encoding string (e.g. "rgb8"), copied from the message.
        self.encoding = None

    @staticmethod
    def from_ros(msg: sensor_msgs__msg__Image) -> "Image":
        """Build an Image from a deserialized ROS message."""
        out = Image()
        out.image = message_to_cvimage(msg)
        out.encoding = msg.encoding
        return out
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/imu.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | try:
4 | from rosbags.typesys.types import sensor_msgs__msg__Imu
5 | except ImportError as e:
6 | raise ImportError('rosbags library not installed, run "pip install -U rosbags"') from e
7 |
8 |
class Imu:
    """Container mirroring sensor_msgs/Imu: acceleration, angular velocity,
    orientation quaternion in (w, x, y, z) order, plus the three covariances."""

    def __init__(self):
        self.linear_acceleration = np.zeros((3,))
        self.angular_velocity = np.zeros((3,))
        # Identity quaternion, (w, x, y, z) order.
        self.orientation = np.float32([1.0, 0.0, 0.0, 0.0])
        self.linear_acceleration_covariance = np.zeros((9, ))
        self.angular_velocity_covariance = np.zeros((9, ))
        self.orientation_covariance = np.zeros((9, ))

    @staticmethod
    def from_ros(msg: sensor_msgs__msg__Imu) -> "Imu":
        """Build an Imu datum from a deserialized ROS message."""
        out = Imu()
        out.linear_acceleration = np.float32(
            [msg.linear_acceleration.x, msg.linear_acceleration.y, msg.linear_acceleration.z])
        out.angular_velocity = np.float32(
            [msg.angular_velocity.x, msg.angular_velocity.y, msg.angular_velocity.z])
        # ROS stores (x, y, z, w); reorder to our (w, x, y, z) convention.
        out.orientation = np.float32(
            [msg.orientation.w, msg.orientation.x, msg.orientation.y, msg.orientation.z])

        # Covariances are forwarded as-is (row-major, length-9 sequences).
        out.linear_acceleration_covariance = msg.linear_acceleration_covariance
        out.angular_velocity_covariance = msg.angular_velocity_covariance
        out.orientation_covariance = msg.orientation_covariance

        return out
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/point_cloud2.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Iterable, List, Optional
3 |
4 | import numpy as np
5 |
6 | try:
7 | from rosbags.typesys.types import sensor_msgs__msg__PointCloud2 as PointCloud2
8 | from rosbags.typesys.types import sensor_msgs__msg__PointField as PointField
9 | except ImportError as e:
10 | raise ImportError('rosbags library not installed, run "pip install -U rosbags"') from e
11 |
# Mapping from sensor_msgs PointField datatype codes to numpy dtypes.
_DATATYPES = {}
_DATATYPES[PointField.INT8] = np.dtype(np.int8)
_DATATYPES[PointField.UINT8] = np.dtype(np.uint8)
_DATATYPES[PointField.INT16] = np.dtype(np.int16)
_DATATYPES[PointField.UINT16] = np.dtype(np.uint16)
_DATATYPES[PointField.INT32] = np.dtype(np.int32)
_DATATYPES[PointField.UINT32] = np.dtype(np.uint32)
_DATATYPES[PointField.FLOAT32] = np.dtype(np.float32)
_DATATYPES[PointField.FLOAT64] = np.dtype(np.float64)

# Prefix used to name point-cloud fields that arrive without a name.
DUMMY_FIELD_PREFIX = "unnamed_field"
23 |
24 |
class PointCloudXf:
    """Structured point cloud: PointField descriptors plus a structured numpy
    array holding one record per point."""

    def __init__(self):
        self.fields: List[PointField] = []
        self.points: np.ndarray[None, np.dtype[np.float32]] = np.zeros(0)

    @staticmethod
    def from_ros(msg: PointCloud2):
        """Build a PointCloudXf from a deserialized PointCloud2 message,
        keeping every field the message declares."""
        out = PointCloudXf()
        out.fields = msg.fields
        out.points = read_points(msg, [f.name for f in msg.fields])
        return out
39 |
40 |
def read_points(
    cloud: PointCloud2,
    field_names: Optional[List[str]] = None,
    uvs: Optional[Iterable] = None,
    reshape_organized_cloud: bool = False,
) -> np.ndarray:
    """
    Read points from a sensor_msgs.PointCloud2 message.
    :param cloud: The point cloud to read from sensor_msgs.PointCloud2.
    :param field_names: The names of fields to read. If None, read all fields.
                        (Type: Iterable, Default: None)
    :param uvs: If specified, then only return the points at the given
        coordinates. (Type: Iterable, Default: None)
    :param reshape_organized_cloud: Returns the array as an 2D organized point cloud if set.
    :return: Structured NumPy array containing all points.
    """
    # Cast the raw byte buffer to a structured array, one record per point.
    points = np.ndarray(
        shape=(cloud.width * cloud.height,),
        dtype=dtype_from_fields(cloud.fields, point_step=cloud.point_step),
        buffer=cloud.data,
    )

    # Keep only the requested fields
    if field_names is not None:
        assert all(
            field_name in points.dtype.names for field_name in field_names
        ), "Requested field is not in the fields of the PointCloud!"
        # Mask fields
        points = points[list(field_names)]

    # Swap array if byte order does not match
    if bool(sys.byteorder != "little") != bool(cloud.is_bigendian):
        points = points.byteswap(inplace=True)

    # Select points indexed by the uvs field
    if uvs is not None:
        # Don't convert to numpy array if it is already one
        if not isinstance(uvs, np.ndarray):
            uvs = np.fromiter(uvs, int)
        # Index requested points
        points = points[uvs]

    # Cast into 2d array if cloud is 'organized'.
    # PointCloud2 data is row-major with `height` rows of `width` points each
    # (row_step = point_step * width), so the organized view is
    # (height, width). The previous (width, height) order garbled every
    # non-square organized cloud.
    if reshape_organized_cloud and cloud.height > 1:
        points = points.reshape(cloud.height, cloud.width)

    return points
89 |
90 |
def dtype_from_fields(fields: Iterable[PointField], point_step: Optional[int] = None) -> np.dtype:
    """
    Convert an Iterable of sensor_msgs.msg.PointField messages to a np.dtype.
    :param fields: The point cloud fields.
                   (Type: iterable of sensor_msgs.msg.PointField)
    :param point_step: Point step size in bytes. Calculated from the given fields by default.
                       (Type: optional of integer)
    :returns: NumPy datatype
    """
    # Accumulate name/offset/format triples for every (sub)field.
    names: List[str] = []
    offsets: List[int] = []
    formats: List[str] = []
    for field_idx, field in enumerate(fields):
        # Resolve the field's NumPy datatype from its ROS datatype code.
        np_dtype = _DATATYPES[field.datatype]
        # Anonymous fields get a generated placeholder name.
        base_name = field.name if field.name != "" else f"{DUMMY_FIELD_PREFIX}_{field_idx}"
        assert field.count > 0, "Can't process fields with count = 0."
        # A field with count > 1 expands into one subfield per element,
        # suffixed "_<k>" for k in [0, count).
        for element in range(field.count):
            entry_name = f"{base_name}_{element}" if field.count > 1 else base_name
            assert entry_name not in names, "Duplicate field names are not allowed!"
            names.append(entry_name)
            # Each subfield sits one element-size further into the record.
            offsets.append(field.offset + element * np_dtype.itemsize)
            formats.append(np_dtype.str)

    # Assemble the structured dtype; an explicit itemsize pads the record
    # out to the message's point_step when one is given.
    dtype_spec = {"names": names, "formats": formats, "offsets": offsets}
    if point_step is not None:
        dtype_spec["itemsize"] = point_step
    return np.dtype(dtype_spec)
132 |
--------------------------------------------------------------------------------
/python/vbr_devkit/tools/run.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | import typer
4 | from pathlib import Path
5 | from rich.console import Group
6 | from rich.panel import Panel
7 | from rich.progress import track
8 | from typing import Sequence
9 | from typing_extensions import Annotated
10 | from vbr_devkit.datasets import RosReader
11 | from vbr_devkit.download.download_data import vbr_downloads, download_seq_fld
12 | from vbr_devkit.datasets.convert_bag import OutputDataInterface, OutputDataInterface_lut
13 | from vbr_devkit.tools.console import console
14 | from rosbags import convert as rosconvert
15 |
16 | app = typer.Typer()
17 |
18 |
@app.command("list",
             help="List all available VBR sequences")
def list_sequences() -> None:
    """Print the meta-group names and every train/test sequence as rich panels."""
    train_sequences = [seq for seq in vbr_downloads if "train" in seq]
    test_sequences = [seq for seq in vbr_downloads if "test" in seq]
    panels = Group(
        Panel("\n".join(["all", "train", "test"]), title="Meta"),
        Panel("\n".join(train_sequences), title="Train"),
        Panel("\n".join(test_sequences), title="Test"),
    )
    console.print(panels)
28 |
29 |
def complete_sequence(incomplete: str) -> Sequence[str]:
    """Shell-completion callback: all sequence names starting with `incomplete`.

    Returns a list so the implementation matches the declared Sequence[str]
    return type (the original used `yield`, making it a generator — which is
    not a Sequence). Typer accepts any iterable here, so callers are unaffected.
    """
    return [seq for seq in ["all", "train", "test"] + vbr_downloads
            if seq.startswith(incomplete)]
34 |
35 |
@app.command(help="Download one or more VBR sequences. Type 'vbr list' to see the available sequences.")
def download(sequence: Annotated[
    str, typer.Argument(help="Name of the sequence to download", show_default=False, autocompletion=complete_sequence)],
             output_dir: Annotated[
                 Path, typer.Argument(help="Output directory. The sequence will be stored in a sub-folder",
                                      show_default=False)]) -> None:
    """Resolve `sequence` (meta name or single sequence) and download it into `output_dir`."""
    # First resolve the request into a concrete list of sequences, then fetch
    # them one by one in catalog order.
    if sequence == "all":
        console.print(":boom: Downloading all sequences")
        console.print("[yellow] It will take a while")
        targets = list(vbr_downloads)
    elif sequence in ("train", "test"):
        console.print(f":woman_student: Downloading {sequence} sequences")
        console.print("[yellow] It will take a while")
        targets = [seq for seq in vbr_downloads if sequence in seq]
    elif sequence in vbr_downloads:
        targets = [sequence]
    else:
        console.log(
            f":thinking_face: Error {sequence} is not a valid sequence. Type 'vbr list' to see available sequences.")
        sys.exit(-1)
    for seq in targets:
        download_seq_fld(seq, output_dir)
58 |
59 |
@app.command(help="Convert a sequence from ROS1 to other known formats")
def convert(to: Annotated[OutputDataInterface, typer.Argument(help="Desired data format", show_default=False)],
            input_dir: Annotated[
                Path, typer.Argument(help="Input bag or directory containing multiple bags", show_default=False)],
            output_dir: Annotated[
                Path, typer.Argument(help="Output directory in which the data will be stored", show_default=False)],
            rgb_conversion: Annotated[
                bool, typer.Option(
                    help="Enable BayerRG8->RGB conversion during conversion in KITTI format."
                         " Disable this flag to reduce the memory footprint of the converted folder.",
                    show_default=True)] = True,
            reduce_pcloud: Annotated[
                bool, typer.Option(
                    help="Preserve only channels during PointCloud conversion in KITTI format. "
                         "Allows compatibility with KITTI readers but removes extra LiDAR channels",
                    show_default=True)] = True) -> None:
    """Convert one ROS1 bag (or a directory of bags) to the requested format.

    For ROS2 the rosbags converter is invoked directly; for any other format
    the input is replayed through RosReader and republished into the writer
    selected from OutputDataInterface_lut.
    """
    console.print(f"Converting {input_dir} to {to} format at {output_dir}")
    if to == OutputDataInterface.ros2:
        # Use console for status output, consistent with the rest of this
        # module (the original called the bare builtin print here).
        console.print("Processing...")
        rosconvert.convert(
            # Convert bags in name order; the glob needs no intermediate list
            # comprehension — sorted() consumes the iterator directly.
            sorted(input_dir.glob("*.bag")) if input_dir.is_dir() else [input_dir],
            output_dir / input_dir.stem,
            dst_version=None,
            compress=None,
            compress_mode="file",
            default_typestore=None,
            typestore=None,
            exclude_topics=[],
            include_topics=[],
            exclude_msgtypes=[],
            include_msgtypes=[]
        )
    else:
        with RosReader(input_dir) as reader:
            with OutputDataInterface_lut[to](output_dir, rgb_convert=rgb_conversion,
                                             pcloud_kitti_format=reduce_pcloud) as writer:
                for timestamp, topic, message in track(reader, description="Processing..."):
                    writer.publish(timestamp, topic, message)
    console.print(":tada: Completed")
99 |
100 |
# Allow running this module directly (e.g. `python run.py`) in addition to
# the installed console-script entry point.
if __name__ == "__main__":
    app()
103 |
--------------------------------------------------------------------------------