├── .dockerignore ├── .github └── workflows │ └── docker-image.yml ├── .gitignore ├── .vscode └── settings.json ├── Dockerfile ├── LICENSE ├── Makefile ├── Readme.md ├── build_docker.sh ├── c_ws └── src │ ├── .gitkeep │ ├── inference_model │ └── lts_filter │ │ ├── CMakeLists.txt │ │ ├── launch │ │ └── filter.launch │ │ ├── model │ │ ├── .gitkeep │ │ └── best_model.pth │ │ ├── package.xml │ │ └── scripts │ │ ├── loader.py │ │ ├── stability_filter.py │ │ └── transformer.py │ ├── mapmos │ ├── CMakeLists.txt │ ├── launch │ │ └── mapmos.launch │ ├── package.xml │ └── scripts │ │ ├── mapmos.py │ │ ├── mapmos_node.py │ │ └── minkunet.py │ ├── mos4d │ ├── CMakeLists.txt │ ├── checkpoints │ │ └── .gitkeep │ ├── launch │ │ └── mos4d.launch │ ├── package.xml │ └── scripts │ │ ├── mos4d.py │ │ └── mos4d_node.py │ ├── scans_pub │ ├── CMakeLists.txt │ ├── launch │ │ └── pub_scans.launch │ ├── package.xml │ └── scripts │ │ ├── pub_scans.py │ │ └── raw_scans.py │ └── sps_filter │ ├── CMakeLists.txt │ ├── launch │ ├── mask.launch │ └── sps.launch │ ├── package.xml │ └── scripts │ ├── mask.py │ ├── sps_node.py │ └── sps_node_cvm.py ├── config ├── config.yaml └── rviz │ ├── debug.rviz │ ├── hdl.rviz │ ├── hdl_nclt.rviz │ ├── hdl_rise.rviz │ └── mulran.rviz ├── docker-compose.yml ├── exp_pipeline └── loc_exp_general.bash ├── pyproject.toml ├── scripts ├── predict.py └── train.py ├── setup.py └── src └── sps ├── __init__.py ├── datasets ├── __init__.py ├── augmentation.py ├── blt_dataset.py └── util.py └── models ├── MinkowskiEngine ├── __init__.py ├── customminkunet.py ├── minkunet.py └── resnet.py ├── __init__.py └── models.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI 2 | 3 | on: 4 | push: 5 | 
branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | 9 | jobs: 10 | 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v3 17 | - name: Build the Docker image 18 | run: docker build . --file Dockerfile --tag my-image-name:$(date +%s) 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Project related 2 | pretrained/ 3 | predictions/ 4 | logs/ 5 | 6 | # Byte-compiled / optimized / DLL files 7 | __pycache__/ 8 | *.py[cod] 9 | *$py.class 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | wheels/ 28 | pip-wheel-metadata/ 29 | share/python-wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .nox/ 49 | .coverage 50 | .coverage.* 51 | .cache 52 | nosetests.xml 53 | coverage.xml 54 | *.cover 55 | *.py,cover 56 | .hypothesis/ 57 | .pytest_cache/ 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | local_settings.py 66 | db.sqlite3 67 | db.sqlite3-journal 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | .python-version 91 | 92 | # pipenv 93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 96 | # install all needed dependencies. 97 | #Pipfile.lock 98 | 99 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 100 | __pypackages__/ 101 | 102 | # Celery stuff 103 | celerybeat-schedule 104 | celerybeat.pid 105 | 106 | # SageMath parsed files 107 | *.sage.py 108 | 109 | # Environments 110 | .env 111 | .venv 112 | env/ 113 | venv/ 114 | ENV/ 115 | env.bak/ 116 | venv.bak/ 117 | 118 | # Spyder project settings 119 | .spyderproject 120 | .spyproject 121 | 122 | # Rope project settings 123 | .ropeproject 124 | 125 | # mkdocs documentation 126 | /site 127 | 128 | # mypy 129 | .mypy_cache/ 130 | .dmypy.json 131 | dmypy.json 132 | 133 | # Pyre type checker 134 | .pyre/ 135 | 136 | Dataset/Bacchus/* 137 | data/* 138 | .neptune/* 139 | c_ws/devel/* 140 | c_ws/.catkin_tools/* 141 | 142 | 143 | tb_logs/* 144 | playgournd.py 145 | lightning_logs/* 146 | scripts/lightning_logs/* 147 | *.swp 148 | *.ckpt 149 | kiss_odom_ws/* 150 | KNN_CUDA/* 151 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "cctype": "cpp", 4 | "clocale": "cpp", 5 | "cmath": "cpp", 6 | "csetjmp": "cpp", 7 | "csignal": "cpp", 8 | "cstdarg": "cpp", 9 | "cstddef": "cpp", 10 | "cstdio": "cpp", 11 | "cstdlib": "cpp", 12 | "cstring": "cpp", 13 | "ctime": "cpp", 14 | "cwchar": "cpp", 15 | "cwctype": "cpp", 16 | "array": "cpp", 17 | "atomic": "cpp", 18 | "strstream": "cpp", 19 | "bit": "cpp", 20 | "*.tcc": "cpp", 21 | "bitset": "cpp", 22 | "chrono": "cpp", 23 | "codecvt": "cpp", 24 | "complex": "cpp", 25 | "condition_variable": "cpp", 26 | "cstdint": "cpp", 27 | "deque": "cpp", 28 | "forward_list": "cpp", 29 | "list": "cpp", 30 | "map": "cpp", 31 | "set": "cpp", 32 | "unordered_map": "cpp", 33 | "unordered_set": "cpp", 34 | "vector": "cpp", 35 | "exception": "cpp", 36 | "algorithm": "cpp", 37 | "functional": "cpp", 38 | "iterator": "cpp", 39 | "memory": "cpp", 40 | "memory_resource": "cpp", 41 | "numeric": "cpp", 42 | 
"optional": "cpp", 43 | "random": "cpp", 44 | "ratio": "cpp", 45 | "regex": "cpp", 46 | "string": "cpp", 47 | "string_view": "cpp", 48 | "system_error": "cpp", 49 | "tuple": "cpp", 50 | "type_traits": "cpp", 51 | "utility": "cpp", 52 | "fstream": "cpp", 53 | "future": "cpp", 54 | "initializer_list": "cpp", 55 | "iomanip": "cpp", 56 | "iosfwd": "cpp", 57 | "iostream": "cpp", 58 | "istream": "cpp", 59 | "limits": "cpp", 60 | "mutex": "cpp", 61 | "new": "cpp", 62 | "ostream": "cpp", 63 | "scoped_allocator": "cpp", 64 | "shared_mutex": "cpp", 65 | "sstream": "cpp", 66 | "stdexcept": "cpp", 67 | "streambuf": "cpp", 68 | "thread": "cpp", 69 | "cfenv": "cpp", 70 | "cinttypes": "cpp", 71 | "typeindex": "cpp", 72 | "typeinfo": "cpp", 73 | "valarray": "cpp", 74 | "variant": "cpp" 75 | }, 76 | "python.analysis.extraPaths": [ 77 | "./c_ws/src/mapmos/scripts" 78 | ] 79 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cudagl:11.3.0-devel-ubuntu20.04 2 | 3 | ############################################## 4 | # You should modify this to match your GPU compute capability 5 | ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 6.2 7.0 7.2 7.5 8.0 8.6" 6 | 7 | ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" 8 | 9 | # For faster build, use more jobs. 
10 | ENV MAX_JOBS=6 11 | 12 | ENV ROS_DISTRO noetic 13 | 14 | ENV PROJECT=/sps 15 | 16 | ENV DATA=$PROJECT/data 17 | 18 | ############################################## 19 | # Minimal ubuntu setup 20 | RUN apt update --fix-missing && apt install -y locales lsb-release && apt clean 21 | 22 | ARG DEBIAN_FRONTEND=noninteractive 23 | RUN dpkg-reconfigure locales 24 | 25 | ############################################## 26 | # Setup torch, cuda for the model and other dependencies 27 | RUN apt install -y python3-pip && \ 28 | pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 torchaudio==0.12.0 \ 29 | --extra-index-url https://download.pytorch.org/whl/cu113 30 | 31 | # Update the package repository and install dependencies 32 | RUN apt update && apt install -y --no-install-recommends \ 33 | git curl ninja-build cmake libopenblas-dev xauth openssh-server \ 34 | && apt clean && rm -rf /var/lib/apt/lists/* 35 | 36 | ############################################## 37 | # Install MinkowskiEngine 38 | RUN git clone --recursive "https://github.com/NVIDIA/MinkowskiEngine" \ 39 | && cd MinkowskiEngine \ 40 | && python3 setup.py install --force_cuda --blas=openblas 41 | 42 | ############################################## 43 | # Install ROS 44 | # [ROS] a. Setup your sources.list 45 | RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' 46 | 47 | # [ROS] b. Set up the keys 48 | RUN curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | apt-key add - 49 | 50 | # [ROS] c. 
Installation 51 | RUN apt update && apt install -y --no-install-recommends ros-${ROS_DISTRO}-ros-base \ 52 | && echo "source /opt/ros/${ROS_DISTRO}/setup.bash" >> ~/.bashrc 53 | 54 | # Install catkin tools and other packages 55 | RUN apt update && apt install -y --no-install-recommends nano build-essential \ 56 | ros-${ROS_DISTRO}-catkin python3-catkin-tools ros-${ROS_DISTRO}-ros-numpy \ 57 | libomp-dev libboost-all-dev ros-${ROS_DISTRO}-pcl-ros \ 58 | ros-${ROS_DISTRO}-tf2 ros-${ROS_DISTRO}-tf2-ros ros-${ROS_DISTRO}-tf2-geometry-msgs \ 59 | ros-${ROS_DISTRO}-eigen-conversions ros-${ROS_DISTRO}-tf-conversions python3 \ 60 | && apt clean && rm -rf /var/lib/apt/lists/* 61 | 62 | #Set numpy version to 1.20.1 as higher version cause issues in ros-numpy package 63 | RUN pip3 install tensorboard install --upgrade numpy==1.20.1 install scipy 64 | 65 | ############################################## 66 | # Install project related dependencies 67 | RUN mkdir -p $PROJECT/logs && mkdir -p $DATA 68 | 69 | WORKDIR $PROJECT 70 | COPY . $PROJECT 71 | RUN python3 setup.py develop 72 | # \ 73 | # && cd c_ws \ 74 | # && . 
/opt/ros/${ROS_DISTRO}/setup.sh \ 75 | # && catkin build 76 | 77 | # Copy the 'build' and 'devel' folders into the image 78 | # COPY c_ws/build $PROJECT/c_ws/build 79 | # COPY c_ws/devel $PROJECT/c_ws/devel 80 | 81 | RUN rm -rf $PROJECT 82 | 83 | ############################################## 84 | # Add user to share files between container and host system 85 | ARG USER_ID 86 | ARG GROUP_ID 87 | 88 | RUN addgroup --gid 1000 user \ 89 | && adduser --disabled-password --gecos '' --uid 1000 --gid 1000 user \ 90 | && chown -R user:user /sps -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Ibrahim Hroob, Benedikt Mersch, Cyrill Stachniss 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | export USER_ID:=$(shell id -u) 2 | export GROUP_ID:=$(shell id -g) 3 | 4 | build: 5 | @echo Build docker image... 6 | @docker-compose build project 7 | 8 | test: check-env 9 | @echo NVIDIA and CUDA setup 10 | @docker-compose run project nvidia-smi 11 | @echo Pytorch CUDA setup installed? 12 | @docker-compose run project python3 -c "import torch; print(torch.cuda.is_available())" 13 | @echo MinkowskiEngine installed? 14 | @docker-compose run project python3 -c "import MinkowskiEngine as ME; print(ME.__version__)" 15 | 16 | run: check-env 17 | @docker-compose run project 18 | 19 | clean: 20 | @echo Removing docker image... 21 | @docker-compose rm project 22 | 23 | 24 | check-env: 25 | ifndef DATA 26 | $(error Please specify where your data is located, export DATA=) 27 | endif 28 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 |
2 |

Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization

3 | 4 |

5 | 6 |

7 | 8 |

9 | Our method segments stable and unstable points in 3D LiDAR scans exploiting the discrepancy of scan voxels and overlapping map voxels (highlighted as submap voxels). We showcase two LiDAR scans captured during separate localization sessions within an outdoor vineyard. The scan on the left depicts the vineyard state in April, while the scan on the right reveals environmental changes in plant growth in June 10 |

11 | 12 |
13 | Click here for qualitative results! 14 | 15 | [![ScanSPS](https://github.com/ibrahimhroob/SPS/assets/47870260/0f93743f-170c-4ca3-be15-77623f45720c)](https://github.com/ibrahimhroob/SPS/assets/47870260/0f93743f-170c-4ca3-be15-77623f45720c) 16 | 17 | 18 | Our stable points segmentation prediction for three datasets. The stable points are depicted in black, while the unstable points are represented in red. 19 | 20 |
21 | 22 |
23 | 24 | 25 | ### Building the Docker image 26 | We provide a ```Dockerfile``` and a ```docker-compose.yaml``` to run all docker commands. 27 | 28 | **IMPORTANT** To have GPU access during the build stage, make ```nvidia``` the default runtime in ```/etc/docker/daemon.json```: 29 | 30 | ```yaml 31 | { 32 | "runtimes": { 33 | "nvidia": { 34 | "path": "/usr/bin/nvidia-container-runtime", 35 | "runtimeArgs": [] 36 | } 37 | }, 38 | "default-runtime": "nvidia" 39 | } 40 | ``` 41 | Save the file and run ```sudo systemctl restart docker``` to restart docker. 42 | 43 | 44 | To build the image, simply type the following in the terminal: 45 | ```bash 46 | bash build_docker.sh 47 | ``` 48 | 49 | Once the build process finishes, initiate the Docker container in detached mode using Docker Compose from the project directory: 50 | ```bash 51 | docker-compose up -d # or [docker compose up -d] for older versions 52 | ``` 53 | 54 | ## Usage Instructions 55 | 56 | ### Training 57 | 58 | To train the model with the parameters specified in `config/config.yaml`, follow these steps: 59 | 60 | 1. Export the path to the dataset (This step may be necessary before initiating the container): 61 | ```bash 62 | export DATA=path/to/dataset 63 | ``` 64 | 65 | 2. Initiate training by executing the following command from within the container: 66 | ```bash 67 | python scripts/train.py 68 | ``` 69 | 70 | ### Segmentation Metrics 71 | 72 | To evaluate the segmentation metrics for a specific sequence: 73 | 74 | ```bash 75 | python scripts/predict.py -seq 76 | ``` 77 | 78 | This command will generate reports for the following metrics: 79 | - uIoU (unstable points IoU) 80 | - Precision 81 | - Recall 82 | - F1 score 83 | 84 | ### Localization 85 | Install and build the following packages in your `catkin_ws`: 86 | ```bash 87 | cd
/src 88 | git clone https://github.com/koide3/ndt_omp 89 | git clone https://github.com/SMRT-AIST/fast_gicp --recursive 90 | git clone https://github.com/koide3/hdl_global_localization 91 | git clone --branch SPS https://github.com/ibrahimhroob/hdl_localization.git 92 | cd .. 93 | catkin build 94 | source devel/setup.bash 95 | ``` 96 | Then, the localization experiment can be run using a single command: 97 | ```bash 98 | bash exp_pipeline/loc_exp_general.bash 99 | ``` 100 | 101 | In order to calculate the localization metrics please install [evo library](https://github.com/MichaelGrupp/evo) 102 | 103 | 104 | ### Data 105 | You can download the post-processed and labelled [BLT dataset](https://www.ipb.uni-bonn.de/html/projects/hroob2024ral/BLT.zip) and the parking lot of [NCLT dataset](https://www.ipb.uni-bonn.de/html/projects/hroob2024ral/NCLT.zip) from the proveded links. 106 | 107 | The [weights](https://www.ipb.uni-bonn.de/html/projects/hroob2024ral/420_601.ckpt) of our pre-trained model can be downloaded as well. 108 | 109 | Here the general structure of the dataset: 110 | ``` 111 | DATASET/ 112 | ├── maps 113 | │   ├── base_map.asc 114 | │   ├── base_map.asc.npy 115 | │   └── base_map.pcd 116 | └── sequence 117 | ├── SEQ 118 | │   ├── map_transform 119 | │   ├── poses 120 | | | ├── 0.txt 121 | | | └── ... 122 | │   └── scans 123 | | ├── 0.npy 124 | | └── ... 125 | | 126 | └── ... 127 | ``` 128 | 129 | ## Publication 130 | If you use our code in your academic work, please cite the corresponding [paper](https://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/hroob2024ral.pdf): 131 | 132 | ```bibtex 133 | @article{hroob2024ral, 134 | author = {I. Hroob* and B. Mersch* and C. Stachniss and M. 
Hanheide}, 135 | title = {{Generalizable Stable Points Segmentation for 3D LiDAR Scan-to-Map Long-Term Localization}}, 136 | journal = {IEEE Robotics and Automation Letters (RA-L)}, 137 | volume = {9}, 138 | number = {4}, 139 | pages = {3546-3553}, 140 | year = {2024}, 141 | doi = {10.1109/LRA.2024.3368236}, 142 | } 143 | ``` 144 | 145 | ## Acknowledgments 146 | This implementation is inspired by [4DMOS](https://github.com/PRBonn/4DMOS). 147 | 148 | 149 | ## License 150 | This project is free software made available under the MIT License. For details see the LICENSE file. 151 | -------------------------------------------------------------------------------- /build_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | image_name=sps 4 | 5 | docker build --build-arg UID=$(id -u) --build-arg GID=$(id -g) -t ${image_name} $(dirname "$0")/ 6 | 7 | -------------------------------------------------------------------------------- /c_ws/src/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibrahimhroob/SPS/49b8204bb0a1c643aea6f134b4c8d96644d85ed0/c_ws/src/.gitkeep -------------------------------------------------------------------------------- /c_ws/src/inference_model/lts_filter/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.2) 2 | project(lts_filter) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | roscpp 12 | rospy 13 | sensor_msgs 14 | std_msgs 15 | ) 16 | 17 | ## System dependencies are found with CMake's conventions 18 | # find_package(Boost REQUIRED COMPONENTS system) 19 
| 20 | 21 | ################################### 22 | ## catkin specific configuration ## 23 | ################################### 24 | ## The catkin_package macro generates cmake config files for your package 25 | ## Declare things to be passed to dependent projects 26 | ## INCLUDE_DIRS: uncomment this if your package contains header files 27 | ## LIBRARIES: libraries you create in this project that dependent projects also need 28 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 29 | ## DEPENDS: system dependencies of this project that dependent projects also need 30 | catkin_package( 31 | # INCLUDE_DIRS include 32 | # LIBRARIES lts_filter 33 | # CATKIN_DEPENDS roscpp rospy sensor_msgs std_msgs 34 | # DEPENDS system_lib 35 | ) 36 | 37 | ########### 38 | ## Build ## 39 | ########### 40 | 41 | ## Specify additional locations of header files 42 | ## Your package locations should be listed before other locations 43 | include_directories( 44 | # include 45 | ${catkin_INCLUDE_DIRS} 46 | ) 47 | 48 | -------------------------------------------------------------------------------- /c_ws/src/inference_model/lts_filter/launch/filter.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /c_ws/src/inference_model/lts_filter/model/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibrahimhroob/SPS/49b8204bb0a1c643aea6f134b4c8d96644d85ed0/c_ws/src/inference_model/lts_filter/model/.gitkeep -------------------------------------------------------------------------------- /c_ws/src/inference_model/lts_filter/model/best_model.pth: -------------------------------------------------------------------------------- 
#!/usr/bin/env python3

import numpy as np
from typing import List, Tuple
from torch.utils.data import Dataset


class Loader(Dataset):
    """Project a raw LiDAR point cloud into a (beams x slices) range image
    and serve it as fixed-width azimuth windows.

    Each item is one vertical window of the projected frame flattened to
    per-point rows: points of shape (num_beams * window_size, 3) and the
    matching per-point stability labels (num_beams * window_size,).
    """

    def __init__(self, data, lidar: str = 'vlp-16') -> None:
        """
        Args:
            data: (N, 4) array of [x, y, z, stability_label] points.
            lidar: sensor model, 'vlp-16' or 'hdl-32'; selects the vertical
                FOV / beam count used for the spherical projection.
        """
        super().__init__()

        assert lidar in {'vlp-16', 'hdl-32'}, 'lidar type should be \'vlp-16\' or \'hdl-32\''

        # Per-sensor projection parameters: number of vertical beams, the
        # vertical field of view in degrees, and how many image columns are
        # grouped into a single inference window.
        lidar_params = {
            'vlp-16': {
                'num_beams': 16,
                'fov_up': 16.8,
                'fov_down': -16.8,
                'window_size': 128,
            },
            'hdl-32': {
                'num_beams': 32,
                'fov_up': 30,
                'fov_down': -10,
                'window_size': 64,
            },
        }
        lidar_param = lidar_params[lidar]

        self.num_slices = 1024  # azimuth resolution (columns) of the image
        self.window_size = lidar_param['window_size']
        self.num_windows = self.num_slices // self.window_size

        self.frame = self.lidar_to_image(data, lidar_param, self.num_slices)

    def lidar_to_image(self, data: np.ndarray, lidar_param: dict, num_slices: int) -> np.ndarray:
        """Spherically project points into a (num_beams, num_slices, 4) image.

        Channels are x, y, z, label. Duplicate points and points labelled -1
        are discarded. Points whose elevation falls outside the sensor's
        vertical FOV are dropped; previously they produced row indices
        >= num_beams and raised an IndexError.
        """
        data = np.unique(data, axis=0)   # drop exact duplicate points
        data = data[data[:, 3] != -1]    # drop points with invalid labels

        x, y, z, s = data[:, 0], data[:, 1], data[:, 2], data[:, 3]
        theta = np.arctan2(z, np.sqrt(x**2 + y**2)) * 180 / np.pi  # elevation (deg)
        phi = np.arctan2(y, x) * 180 / np.pi                       # azimuth (deg)

        fov_total = lidar_param['fov_up'] - lidar_param['fov_down']
        theta_res = fov_total / (lidar_param['num_beams'] - 1)
        phi_res = 360 / num_slices

        theta_idx = np.floor((theta - lidar_param['fov_down']) / theta_res).astype(np.int32)
        phi_idx = np.floor(phi / phi_res).astype(np.int32)

        # BUGFIX: keep only rows inside the vertical FOV. Noise or
        # out-of-FOV returns used to index past the image and crash.
        valid = (theta_idx >= 0) & (theta_idx < lidar_param['num_beams'])
        theta_idx, phi_idx = theta_idx[valid], phi_idx[valid]
        x, y, z, s = x[valid], y[valid], z[valid], s[valid]

        num_channels = 4
        projected_data = np.zeros((lidar_param['num_beams'], num_slices, num_channels), dtype=np.float32)

        # Negative phi_idx (azimuth in (-180, 0)) deliberately relies on
        # NumPy's negative indexing to wrap to the right-hand columns.
        projected_data[theta_idx, phi_idx, 0] = x
        projected_data[theta_idx, phi_idx, 1] = y
        projected_data[theta_idx, phi_idx, 2] = z
        projected_data[theta_idx, phi_idx, 3] = s

        return projected_data

    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        """Return (points, labels) for azimuth window *idx*."""
        w_s = idx * self.window_size
        w_e = w_s + self.window_size

        # Take a vertical stripe of columns and flatten it to point rows.
        frame = self.frame[:, w_s:w_e, :].reshape(-1, self.frame.shape[-1])

        points = frame[:, :3]
        labels = frame[:, 3]

        return points, labels

    def __len__(self) -> int:
        """Number of windows the frame is split into."""
        return self.num_windows
# Additionally, some operations on a GPU are implemented stochastic for efficiency
# We want to ensure that all operations are deterministic on GPU (if used) for reproducibility
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False

class Stability():
    """ROS node that scores every LiDAR point with a stability value using a
    point-transformer regression model ('transformer' module, SPCTReg) and
    republishes only the points whose predicted score is at or below the
    dynamic threshold (epsilon_1).
    """

    def __init__(self):
        rospy.init_node('pointcloud_stability_inference')

        # Topics and thresholds are configurable via private ROS params.
        raw_cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points")
        filtered_cloud_topic = rospy.get_param('~filtered_cloud', "/cloud_filtered")
        epsilon_0 = rospy.get_param('~epsilon_0', 0.0)
        epsilon_1 = rospy.get_param('~epsilon_1', 0.84)
        self.lidar = rospy.get_param('~lidar', 'hdl-32')

        rospy.Subscriber(raw_cloud_topic, PointCloud2, self.callback)

        # Initialize the publishers (filtered cloud + debug metrics)
        self.pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=10)
        self.loss_pub = rospy.Publisher('debug/model_loss', Float32, queue_size=0)
        self.r2_pub = rospy.Publisher('debug/model_r2', Float32, queue_size=0)

        rospy.loginfo('raw_cloud: %s', raw_cloud_topic)
        rospy.loginfo('filtered_cloud: %s', filtered_cloud_topic)
        rospy.loginfo('Bottom threshold: %f', epsilon_0)
        rospy.loginfo('Upper threshold: %f', epsilon_1)
        rospy.loginfo('Lidar type: %s', self.lidar)

        self.threshold_ground = epsilon_0
        self.threshold_dynamic = epsilon_1
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = self.load_model(self.device)

        ''' Define loss and r2score for model evaluation '''
        self.loss = torch.nn.MSELoss() #.to(self.device)
        self.r2score = R2Score() #.to(self.device)

        rospy.spin()

    def callback(self, pointcloud_msg):
        """Convert an incoming PointCloud2 to an (N, 4) float32 array,
        run inference, and publish the filtered cloud."""
        pc = ros_numpy.numpify(pointcloud_msg)
        height = pc.shape[0]
        # Unorganized clouds are 1-D; treat them as a single row.
        try:
            width = pc.shape[1]
        except IndexError:
            width = 1
        data = np.zeros((height * width, 4), dtype=np.float32)
        data[:, 0] = np.resize(pc['x'], height * width)
        data[:, 1] = np.resize(pc['y'], height * width)
        data[:, 2] = np.resize(pc['z'], height * width)
        data[:, 3] = np.resize(pc['intensity'], height * width)

        # Infer the stability labels and drop high-score (unstable) points.
        data = self.infer(data)

        filtered_cloud = self.to_rosmsg(data, pointcloud_msg.header)

        self.pub.publish(filtered_cloud)

    def to_rosmsg(self, data, header):
        """Pack an (N, 4) [x, y, z, score] array into a PointCloud2.

        The score is exported in the 'intensity' field.
        """
        filtered_cloud = PointCloud2()
        filtered_cloud.header = header

        # Define the fields for the filtered point cloud
        filtered_fields = [PointField('x', 0, PointField.FLOAT32, 1),
                           PointField('y', 4, PointField.FLOAT32, 1),
                           PointField('z', 8, PointField.FLOAT32, 1),
                           PointField('intensity', 12, PointField.FLOAT32, 1)]

        filtered_cloud.fields = filtered_fields
        filtered_cloud.is_bigendian = False
        filtered_cloud.point_step = 16  # 4 float32 fields per point
        filtered_cloud.row_step = filtered_cloud.point_step * len(data)
        filtered_cloud.is_dense = True
        filtered_cloud.width = len(data)
        filtered_cloud.height = 1

        # BUGFIX/perf: serialize all points in one vectorized call instead
        # of the quadratic per-point `bytes +=` loop. A contiguous float32
        # (N, 4) buffer is byte-identical to struct.pack('ffff', ...) rows.
        filtered_cloud.data = np.ascontiguousarray(data[:, :4], dtype=np.float32).tobytes()

        return filtered_cloud

    def load_model(self, device):
        """Import the SPCTReg architecture from the package's model folder
        and restore the 'best_model.pth' checkpoint in eval mode."""
        current_pth = os.path.dirname(os.path.realpath(__file__))
        parent_pth = os.path.dirname(current_pth)
        rospy.loginfo('rosnode pth: %s', parent_pth)
        model_path = os.path.join(parent_pth, 'model')
        # The architecture file lives next to the checkpoint, so make it
        # importable by name.
        sys.path.append(model_path)

        '''HYPER PARAMETER'''
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"

        MODEL = importlib.import_module('transformer')

        model = MODEL.SPCTReg()
        model.to(device)

        checkpoint = torch.load(os.path.join(model_path, 'best_model.pth'))
        model.load_state_dict(checkpoint['model_state_dict'])
        model = model.eval()

        rospy.loginfo("Model loaded successfully!")

        return model

    def infer(self, pointcloud):
        """Predict per-point stability scores for one scan and return the
        (M, 4) [x, y, z, score] array of points kept as stable."""
        start_time = time.time()
        FRAME_DATASET = Loader(pointcloud, self.lidar)

        # Batch every azimuth window at once: points (B, N, 3), labels (B, N).
        # torch.stack over a list replaces the former O(B^2) incremental
        # vstack loop.
        windows = [FRAME_DATASET[i] for i in range(len(FRAME_DATASET))]
        points = torch.stack([torch.from_numpy(p) for p, _ in windows])
        labels = torch.stack([torch.from_numpy(l) for _, l in windows])

        points = points.float().to(self.device)
        points = points.transpose(2, 1)
        # BUGFIX: inference only — do not build an autograd graph per scan.
        with torch.no_grad():
            labels_pred = self.model(points)
        labels_pred = labels_pred.permute(0, 2, 1).cpu()

        # ''' Calculate loss and r2 against the reference labels '''
        loss = self.loss(labels_pred.view(-1), labels.view(-1))
        r2 = self.r2score(labels_pred.view(-1), labels.view(-1))
        self.loss_pub.publish(loss)
        self.r2_pub.publish(r2)

        points = points.permute(0, 2, 1).cpu().numpy().reshape((-1, 3))
        labels_pred = labels_pred.numpy().reshape((-1, ))

        data = np.column_stack((points, labels_pred))

        ### -> mIoU start
        # Binarize predictions/labels at the dynamic threshold for metrics.
        pred = np.where(labels_pred < self.threshold_dynamic, 0, 1)
        gt = np.where(labels.cpu().view(-1) < self.threshold_dynamic, 0, 1)

        precision, recall, f1, accuracy, dIoU = util.calculate_metrics(gt, pred)

        log_message = (
            f"dIoU: {dIoU:.3f} "
            f"accuracy: {accuracy:.3f} "
            f"precision: {precision:.3f} "
            f"recall: {recall:.3f} "
            f"f1: {f1:.3f} "
        )
        rospy.loginfo(log_message)
        ### <- mIoU ends

        original_len = len(data)

        # Keep only points predicted stable (score <= dynamic threshold).
        data = data[(data[:, 3] <= self.threshold_dynamic)]

        filtered_len = len(data)

        end_time = time.time()
        elapsed_time = end_time - start_time

        rospy.loginfo("T: {:.4f} sec [{:.2f} Hz], L: {:.4f}, R2: {:.4f}, N: {:d}, n: {:d}".format(elapsed_time, 1/elapsed_time, loss, r2, original_len, filtered_len))

        return data


if __name__ == '__main__':
    stability_node = Stability()
35 | """ 36 | def __init__(self, channels): 37 | super(OA, self).__init__() 38 | 39 | self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False) 40 | self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False) 41 | self.q_conv.weight = self.k_conv.weight 42 | self.v_conv = nn.Conv1d(channels, channels, 1) 43 | 44 | self.trans_conv = nn.Conv1d(channels, channels, 1) 45 | self.after_norm = nn.BatchNorm1d(channels) 46 | 47 | self.act = nn.ReLU(inplace=True) 48 | self.softmax = nn.Softmax(dim=-1) 49 | 50 | def forward(self, x): 51 | """ 52 | Input: 53 | x: [B, de, N] 54 | 55 | Output: 56 | x: [B, de, N] 57 | """ 58 | x_q = self.q_conv(x).permute(0, 2, 1) 59 | x_k = self.k_conv(x) 60 | x_v = self.v_conv(x) 61 | 62 | energy = torch.bmm(x_q, x_k) 63 | attention = self.softmax(energy) 64 | attention = attention / (1e-9 + attention.sum(dim=1, keepdims=True)) 65 | 66 | x_r = torch.bmm(x_v, attention) 67 | x_r = self.act(self.after_norm(self.trans_conv(x - x_r))) 68 | x = x + x_r 69 | 70 | return x 71 | 72 | class SPCTReg(nn.Module): 73 | def __init__(self): 74 | super(SPCTReg, self).__init__() 75 | 76 | self.embedding = Embedding(3, 128) 77 | 78 | self.sa1 = OA(128) 79 | self.sa2 = OA(128) 80 | self.sa3 = OA(128) 81 | self.sa4 = OA(128) 82 | 83 | self.linear1 = nn.Sequential( 84 | nn.Conv1d(512, 2048, kernel_size=1, bias=False), 85 | nn.BatchNorm1d(2048), 86 | nn.LeakyReLU(negative_slope=0.2, inplace=True) 87 | ) 88 | 89 | self.linear2 = nn.Sequential( 90 | nn.Conv1d(2048 * 3, 512, 1), 91 | nn.BatchNorm1d(512), 92 | nn.SiLU(inplace=True), 93 | nn.Dropout(0.2) 94 | ) 95 | 96 | self.linear3 = nn.Sequential( 97 | nn.Conv1d(512, 256, 1), 98 | nn.BatchNorm1d(256), 99 | nn.SiLU(inplace=True), 100 | nn.Dropout(0.2) 101 | ) 102 | 103 | self.convs = nn.Conv1d(256, 1, 1) 104 | 105 | self.sigmoid = nn.Sigmoid() 106 | 107 | 108 | def forward(self, x): 109 | x = self.embedding(x) 110 | 111 | x1 = self.sa1(x) 112 | x2 = self.sa2(x1) 113 | x3 = self.sa3(x2) 114 | x4 = self.sa4(x3) 
115 | x = torch.cat([x1, x2, x3, x4], dim=1) 116 | 117 | x = self.linear1(x) 118 | 119 | batch_size, _, N = x.size() 120 | 121 | # x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1) 122 | x_max = torch.max(x, dim=-1)[0] 123 | x_mean = torch.mean(x, dim=-1) 124 | 125 | x_max_feature = x_max.unsqueeze(-1).repeat(1, 1, N) 126 | x_mean_feature = x_mean.unsqueeze(-1).repeat(1, 1, N) 127 | 128 | x = torch.cat([x, x_max_feature, x_mean_feature], dim=1) # 2048 * 3 129 | 130 | x = self.linear2(x) 131 | x = self.linear3(x) 132 | 133 | x = self.convs(x) 134 | 135 | x = self.sigmoid(x) 136 | 137 | return x 138 | 139 | -------------------------------------------------------------------------------- /c_ws/src/mapmos/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.2) 2 | project(mapmos) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | rospy 12 | rospy 13 | sensor_msgs 14 | std_msgs 15 | ) 16 | 17 | ## System dependencies are found with CMake's conventions 18 | # find_package(Boost REQUIRED COMPONENTS system) 19 | 20 | 21 | ## Uncomment this if the package has a setup.py. 
This macro ensures 22 | ## modules and global scripts declared therein get installed 23 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 24 | # catkin_python_setup() 25 | 26 | ################################################ 27 | ## Declare ROS messages, services and actions ## 28 | ################################################ 29 | 30 | ## To declare and build messages, services or actions from within this 31 | ## package, follow these steps: 32 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 33 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 34 | ## * In the file package.xml: 35 | ## * add a build_depend tag for "message_generation" 36 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 37 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 38 | ## but can be declared for certainty nonetheless: 39 | ## * add a exec_depend tag for "message_runtime" 40 | ## * In this file (CMakeLists.txt): 41 | ## * add "message_generation" and every package in MSG_DEP_SET to 42 | ## find_package(catkin REQUIRED COMPONENTS ...) 43 | ## * add "message_runtime" and every package in MSG_DEP_SET to 44 | ## catkin_package(CATKIN_DEPENDS ...) 45 | ## * uncomment the add_*_files sections below as needed 46 | ## and list every .msg/.srv/.action file to be processed 47 | ## * uncomment the generate_messages entry below 48 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
49 | 50 | ## Generate messages in the 'msg' folder 51 | # add_message_files( 52 | # FILES 53 | # Message1.msg 54 | # Message2.msg 55 | # ) 56 | 57 | ## Generate services in the 'srv' folder 58 | # add_service_files( 59 | # FILES 60 | # Service1.srv 61 | # Service2.srv 62 | # ) 63 | 64 | ## Generate actions in the 'action' folder 65 | # add_action_files( 66 | # FILES 67 | # Action1.action 68 | # Action2.action 69 | # ) 70 | 71 | ## Generate added messages and services with any dependencies listed here 72 | # generate_messages( 73 | # DEPENDENCIES 74 | # std_msgs # Or other packages containing msgs 75 | # ) 76 | 77 | ################################################ 78 | ## Declare ROS dynamic reconfigure parameters ## 79 | ################################################ 80 | 81 | ## To declare and build dynamic reconfigure parameters within this 82 | ## package, follow these steps: 83 | ## * In the file package.xml: 84 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 85 | ## * In this file (CMakeLists.txt): 86 | ## * add "dynamic_reconfigure" to 87 | ## find_package(catkin REQUIRED COMPONENTS ...) 
88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 89 | ## and list every .cfg file to be processed 90 | 91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 92 | # generate_dynamic_reconfigure_options( 93 | # cfg/DynReconf1.cfg 94 | # cfg/DynReconf2.cfg 95 | # ) 96 | 97 | ################################### 98 | ## catkin specific configuration ## 99 | ################################### 100 | ## The catkin_package macro generates cmake config files for your package 101 | ## Declare things to be passed to dependent projects 102 | ## INCLUDE_DIRS: uncomment this if your package contains header files 103 | ## LIBRARIES: libraries you create in this project that dependent projects also need 104 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 105 | ## DEPENDS: system dependencies of this project that dependent projects also need 106 | catkin_package( 107 | # INCLUDE_DIRS include 108 | # LIBRARIES mapmos 109 | # CATKIN_DEPENDS rospy 110 | # DEPENDS system_lib 111 | ) 112 | 113 | ########### 114 | ## Build ## 115 | ########### 116 | 117 | ## Specify additional locations of header files 118 | ## Your package locations should be listed before other locations 119 | include_directories( 120 | # include 121 | ${catkin_INCLUDE_DIRS} 122 | ) 123 | 124 | ## Declare a C++ library 125 | # add_library(${PROJECT_NAME} 126 | # src/${PROJECT_NAME}/mapmos.cpp 127 | # ) 128 | 129 | ## Add cmake target dependencies of the library 130 | ## as an example, code may need to be generated before libraries 131 | ## either from message generation or dynamic reconfigure 132 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 133 | 134 | ## Declare a C++ executable 135 | ## With catkin_make all packages are built within a single CMake context 136 | ## The recommended prefix ensures that target names across packages don't collide 137 | # add_executable(${PROJECT_NAME}_node 
src/mapmos_node.cpp) 138 | 139 | ## Rename C++ executable without prefix 140 | ## The above recommended prefix causes long target names, the following renames the 141 | ## target back to the shorter version for ease of user use 142 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 143 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 144 | 145 | ## Add cmake target dependencies of the executable 146 | ## same as for the library above 147 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 148 | 149 | ## Specify libraries to link a library or executable target against 150 | # target_link_libraries(${PROJECT_NAME}_node 151 | # ${catkin_LIBRARIES} 152 | # ) 153 | 154 | ############# 155 | ## Install ## 156 | ############# 157 | 158 | # all install targets should use catkin DESTINATION variables 159 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 160 | 161 | ## Mark executable scripts (Python etc.) 
for installation 162 | ## in contrast to setup.py, you can choose the destination 163 | # catkin_install_python(PROGRAMS 164 | # scripts/my_python_script 165 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 166 | # ) 167 | 168 | ## Mark executables for installation 169 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 170 | # install(TARGETS ${PROJECT_NAME}_node 171 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 172 | # ) 173 | 174 | ## Mark libraries for installation 175 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 176 | # install(TARGETS ${PROJECT_NAME} 177 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 178 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 179 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 180 | # ) 181 | 182 | ## Mark cpp header files for installation 183 | # install(DIRECTORY include/${PROJECT_NAME}/ 184 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 185 | # FILES_MATCHING PATTERN "*.h" 186 | # PATTERN ".svn" EXCLUDE 187 | # ) 188 | 189 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
190 | # install(FILES 191 | # # myfile1 192 | # # myfile2 193 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 194 | # ) 195 | 196 | ############# 197 | ## Testing ## 198 | ############# 199 | 200 | ## Add gtest based cpp test target and link libraries 201 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_mapmos.cpp) 202 | # if(TARGET ${PROJECT_NAME}-test) 203 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 204 | # endif() 205 | 206 | ## Add folders to be run by python nosetests 207 | # catkin_add_nosetests(test) 208 | -------------------------------------------------------------------------------- /c_ws/src/mapmos/launch/mapmos.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /c_ws/src/mapmos/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mapmos 4 | 0.0.0 5 | The mapmos package 6 | 7 | root 8 | 9 | 10 | 11 | 12 | MIT 13 | 14 | catkin 15 | roscpp 16 | rospy 17 | sensor_msgs 18 | std_msgs 19 | roscpp 20 | rospy 21 | sensor_msgs 22 | std_msgs 23 | roscpp 24 | rospy 25 | sensor_msgs 26 | std_msgs 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /c_ws/src/mapmos/scripts/mapmos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # MIT License 4 | # 5 | # Copyright (c) 2023 Benedikt Mersch, Tiziano Guadagnino, Ignacio Vizzo, Cyrill Stachniss 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a copy 8 | # of this software and associated documentation files (the "Software"), to deal 9 | # in the Software without restriction, including without limitation the rights 10 | # to use, copy, modify, merge, publish, distribute, 
import torch
import copy
import MinkowskiEngine as ME
from pytorch_lightning import LightningModule

from minkunet import CustomMinkUNet14

class MapMOSNet(LightningModule):
    """Moving-object segmentation over a sparse 4D (x, y, z, t) grid.

    Wraps a 4D MinkowskiEngine UNet; scan and local-map points are fused
    into one sparse tensor, distinguished by their time coordinate.
    """

    def __init__(self, voxel_size: float):
        super().__init__()
        # Spatial quantization step used when voxelizing coordinates.
        self.voxel_size = voxel_size
        # One input feature (normalized recency), one logit per point.
        self.MinkUNet = CustomMinkUNet14(in_channels=1, out_channels=1, D=4)

    def predict(self, scan_input, map_input, scan_indices, map_indices):
        """Run the network jointly on a scan and its local map.

        Args:
            scan_input: [N_s, 3] scan coordinates.
            map_input: [N_m, 3] map coordinates.
            scan_indices, map_indices: per-point index features, column vectors.

        Returns:
            (logits_scan, logits_map): one logit per scan / map point.
        """
        def with_batch_and_time(points, batch_idx, time_idx):
            # Wrap xyz as [batch_idx, x, y, z, t].
            col = torch.ones(len(points), 1).type_as(points)
            return torch.hstack([batch_idx * col, points, time_idx * col])

        # Scan points are tagged t == 0, map points t == -1.
        scan_5d = with_batch_and_time(scan_input, 0, 0)
        map_5d = with_batch_and_time(map_input, 0, -1)

        coordinates = torch.vstack([scan_5d.reshape(-1, 5), map_5d.reshape(-1, 5)])
        indices = torch.vstack([scan_indices.reshape(-1, 1), map_indices.reshape(-1, 1)])

        logits = self.forward(coordinates, indices)

        # Split the joint prediction back out via the time column.
        is_scan = coordinates[:, 4] == 0.0
        logits_scan = logits[is_scan]
        logits_map = logits[~is_scan]

        torch.cuda.empty_cache()
        return logits_scan, logits_map

    def forward(self, coordinates: torch.Tensor, indices: torch.Tensor):
        """Voxelize and run the sparse UNet; returns one logit per input row."""
        # Scale only the three spatial axes; batch and time stay untouched.
        quantization = torch.Tensor(
            [1.0, self.voxel_size, self.voxel_size, self.voxel_size, 1.0]
        ).type_as(coordinates)
        coordinates = torch.div(coordinates, quantization)

        # Feature = normalized recency in [1, 2]; constant 1.0 when all
        # indices are equal (normalization would divide by zero).
        i_max = torch.max(indices)
        i_min = torch.min(indices)
        if i_min == i_max:
            features = 1.0 * torch.ones_like(indices)
        else:
            features = 1 + (i_max - indices) / (i_max - i_min)

        tensor_field = ME.TensorField(
            features=features.reshape(-1, 1), coordinates=coordinates.reshape(-1, 5)
        )

        predicted = self.MinkUNet(tensor_field.sparse())
        # Map voxel-level predictions back onto the original points.
        out = predicted.slice(tensor_field)
        return out.features.reshape(-1)

    def to_label(self, logits):
        """Threshold logits at 0 into hard {0.0, 1.0} labels (1 = moving)."""
        labels = copy.deepcopy(logits)
        moving = logits > 0
        labels[moving] = 1.0
        labels[~moving] = 0.0
        return labels
rospy.get_param('~filtered_cloud', "/cloud_filtered") 22 | weights_pth = rospy.get_param('~model_weights_pth', "/sps/c_ws/src/mapmos/checkpoints/mapmos.ckpt") 23 | 24 | # Subscribe to ROS topics 25 | odom_sub = message_filters.Subscriber(predicted_pose_topic, Odometry) 26 | scan_sub = message_filters.Subscriber(raw_cloud_topic, PointCloud2) 27 | 28 | ts = message_filters.TimeSynchronizer([odom_sub, scan_sub], 10) 29 | ts.registerCallback(self.callback) 30 | 31 | # Initialize the publisher 32 | self.scan_pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=10) 33 | self.mapmos_pub = rospy.Publisher('/debug/raw_cloud_tr', PointCloud2, queue_size=10) 34 | self.mapmos_map_pub = rospy.Publisher('/debug/mos_map', PointCloud2, queue_size=10) 35 | 36 | # Load the model 37 | self.model = self.load_model(weights_pth) 38 | 39 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 40 | 41 | self.pc_map = np.load('/sps/data/maps/base_map.asc.npy') 42 | 43 | rospy.spin() 44 | 45 | 46 | def load_model(self, path, voxel_size=0.1): 47 | # Load model 48 | state_dict = torch.load(path)["state_dict"].items() 49 | state_dict = { 50 | k.replace("mos.MinkUNet.", ""): v 51 | for k, v in torch.load(path)["state_dict"].items() 52 | } 53 | state_dict = {k: v for k, v in state_dict.items() if "MOSLoss" not in k} 54 | model = MapMOSNet(voxel_size) 55 | model.MinkUNet.load_state_dict(state_dict) 56 | model = model.cuda() 57 | model.eval() 58 | model.freeze() 59 | rospy.loginfo("Model loaded successfully!") 60 | return model 61 | 62 | 63 | def select_points_within_radius(self, coordinates, center, radius=30): 64 | # Calculate the Euclidean distance from each point to the center 65 | distances = np.sqrt(np.sum((coordinates - center) ** 2, axis=1)) 66 | # Select the indexes of points within the radius 67 | indexes = np.where(distances <= radius)[0] 68 | return indexes 69 | 70 | def callback(self, odom_msg, scan_msg): 71 | start_time = time.time() 72 | 73 | scan 
= util.to_numpy(scan_msg) 74 | scan_msg_header = scan_msg.header 75 | 76 | odom_msg_stamp = odom_msg.header.stamp.to_sec() 77 | transformation_matrix = util.to_tr_matrix(odom_msg) 78 | 79 | origin = transformation_matrix[:3, 3] 80 | pc_map = self.pc_map[self.select_points_within_radius(self.pc_map[:,:3], origin)] 81 | 82 | ''' Make sure the time stamp for odom and scan are the same!''' 83 | df = odom_msg_stamp - scan_msg_header.stamp.to_sec() 84 | if df != 0: 85 | rospy.logwarn(f"Odom and scan time stamp is out of sync! time difference is {df} sec.") 86 | 87 | ''' Transform the scan ''' 88 | scan_tr = util.transform_point_cloud(scan[:,:3], transformation_matrix) 89 | 90 | scan_points = torch.tensor(scan_tr[:,:3], dtype=torch.float32).reshape(-1, 3).to(self.device) 91 | map_points = torch.tensor(pc_map[:,:3], dtype=torch.float32).reshape(-1, 3).to(self.device) 92 | 93 | scan_indices = torch.ones(len(scan_points), 1).type_as(scan_points).to(self.device) 94 | map_indices = torch.zeros(len(map_points), 1).type_as(map_points).to(self.device) 95 | 96 | logits_scan, logits_map = self.model.predict(scan_points, map_points, scan_indices, map_indices) 97 | 98 | scan_lablels = self.model.to_label(logits_scan) 99 | 100 | scan_lablels = scan_lablels.cpu().data.numpy() 101 | 102 | scan = np.hstack((scan[:,:3], scan_lablels.reshape(-1,1))) 103 | filtered_scan = scan[(scan_lablels == 0)] 104 | 105 | self.scan_pub.publish(util.to_rosmsg(filtered_scan, scan_msg_header)) 106 | self.mapmos_pub.publish(util.to_rosmsg(np.hstack((scan_tr[:,:3], scan_lablels.reshape(-1,1))), scan_msg_header, self.odom_frame)) 107 | self.mapmos_map_pub.publish(util.to_rosmsg(pc_map, scan_msg_header, self.odom_frame)) 108 | 109 | 110 | end_time = time.time() 111 | elapsed_time = end_time - start_time 112 | 113 | hz = lambda t: 1 / t if t else 0 114 | rospy.loginfo(f"T: {elapsed_time:.3f} [{hz(elapsed_time):.2f} Hz], map {len(pc_map):d}, scan {len(scan):d}, filtered {len(filtered_scan):d}") 115 | 116 | 117 | 
if __name__ == "__main__": 118 | MapMos() 119 | -------------------------------------------------------------------------------- /c_ws/src/mapmos/scripts/minkunet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) Chris Choy (chrischoy@ai.stanford.edu). 4 | # Copyright (c) 2023 Benedikt Mersch, Tiziano Guadagnino, Ignacio Vizzo, Cyrill Stachniss 5 | # 6 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 7 | # this software and associated documentation files (the "Software"), to deal in 8 | # the Software without restriction, including without limitation the rights to 9 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 10 | # of the Software, and to permit persons to whom the Software is furnished to do 11 | # so, subject to the following conditions: 12 | # 13 | # The above copyright notice and this permission notice shall be included in all 14 | # copies or substantial portions of the Software. 15 | # 16 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | # SOFTWARE. 23 | # 24 | # Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural 25 | # Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part 26 | # of the code. 
import torch.nn as nn
import MinkowskiEngine as ME


class BasicBlock(nn.Module):
    """Residual block of two Minkowski convolutions with batch norm.

    The identity branch is optionally projected by `downsample` when the
    channel counts differ.
    """

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        dilation=1,
        downsample=None,
        bn_momentum=0.1,
        dimension=-1,
    ):
        super(BasicBlock, self).__init__()
        assert dimension > 0

        self.conv1 = ME.MinkowskiConvolution(
            inplanes, planes, kernel_size=3, stride=stride,
            dilation=dilation, dimension=dimension,
        )
        self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.conv2 = ME.MinkowskiConvolution(
            planes, planes, kernel_size=3, stride=1,
            dilation=dilation, dimension=dimension,
        )
        self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        """conv-bn-relu, conv-bn, add identity, relu."""
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.norm1(self.conv1(x)))
        out = self.norm2(self.conv2(out))

        out += identity
        return self.relu(out)


class CustomMinkUNet14(nn.Module):
    """Slim MinkUNet14 variant over D-dimensional sparse tensors.

    Kernels are shaped via `m_space_n_time` so the (assumed) last axis —
    time, for D=4 — keeps kernel/stride 1 while the spatial axes are
    convolved and strided.
    """

    BLOCK = BasicBlock
    DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
    LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
    PLANES = (8, 16, 32, 64, 64, 32, 16, 8)
    INIT_DIM = 8
    OUT_TENSOR_STRIDE = 1

    def m_space_n_time(self, m, n):
        """Kernel/stride spec: `m` on the three spatial axes, `n` on the last."""
        return [m, m, m, n]

    def __init__(self, in_channels, out_channels, D=4):
        nn.Module.__init__(self)
        self.D = D
        assert self.BLOCK is not None

        self.network_initialization(in_channels, out_channels, D)
        self.weight_initialization()

    def network_initialization(self, in_channels, out_channels, D):
        """Build encoder and decoder.

        NOTE: module creation order (and the running `self.inplanes`
        mutation through `_make_layer`) is preserved exactly so seeded
        initialization and channel bookkeeping stay identical.
        """
        self.inplanes = self.INIT_DIM
        # Shared spec for every stride-2 spatial down/up-sampling conv.
        k2s2 = dict(
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )

        # ---- Stem: 5x5x5 spatial kernel, stride 1 ----
        self.conv0p1s1 = ME.MinkowskiConvolution(
            in_channels, self.inplanes,
            kernel_size=self.m_space_n_time(5, 1), dimension=D,
        )
        self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)

        # ---- Encoder: four stride-2 stages, each with a residual block ----
        self.conv1p1s2 = ME.MinkowskiConvolution(self.inplanes, self.inplanes, **k2s2)
        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0])

        self.conv2p2s2 = ME.MinkowskiConvolution(self.inplanes, self.inplanes, **k2s2)
        self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1])

        self.conv3p4s2 = ME.MinkowskiConvolution(self.inplanes, self.inplanes, **k2s2)
        self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2])

        self.conv4p8s2 = ME.MinkowskiConvolution(self.inplanes, self.inplanes, **k2s2)
        self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3])

        # ---- Decoder: transposed convs with encoder skip connections ----
        self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(self.inplanes, self.PLANES[4], **k2s2)
        self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
        self.inplanes = self.PLANES[4] + self.PLANES[2]
        self.block5 = self._make_layer(self.BLOCK, self.PLANES[4], self.LAYERS[4])

        self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(self.inplanes, self.PLANES[5], **k2s2)
        self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
        self.inplanes = self.PLANES[5] + self.PLANES[1]
        self.block6 = self._make_layer(self.BLOCK, self.PLANES[5], self.LAYERS[5])

        self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(self.inplanes, self.PLANES[6], **k2s2)
        self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
        self.inplanes = self.PLANES[6] + self.PLANES[0]
        self.block7 = self._make_layer(self.BLOCK, self.PLANES[6], self.LAYERS[6])

        self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(self.inplanes, self.PLANES[7], **k2s2)
        self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
        self.inplanes = self.PLANES[7] + self.INIT_DIM
        self.block8 = self._make_layer(self.BLOCK, self.PLANES[7], self.LAYERS[7])

        # 1x1 head producing the final per-voxel channels.
        self.final = ME.MinkowskiConvolution(
            self.PLANES[7], out_channels, kernel_size=1, bias=True, dimension=D,
        )
        self.relu = ME.MinkowskiReLU(inplace=True)

    def weight_initialization(self):
        """Kaiming init for convs; unit-gain, zero-bias batch norms."""
        for m in self.modules():
            if isinstance(m, ME.MinkowskiConvolution):
                ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")

            if isinstance(m, ME.MinkowskiBatchNorm):
                nn.init.constant_(m.bn.weight, 1)
                nn.init.constant_(m.bn.bias, 0)

    def _make_layer(self, block, planes, blocks):
        """Stack `blocks` residual blocks, projecting channels on the first."""
        downsample = None
        if self.inplanes != planes:
            downsample = nn.Sequential(
                ME.MinkowskiConvolution(
                    self.inplanes, planes, kernel_size=1, stride=1,
                    dimension=self.D,
                ),
                ME.MinkowskiBatchNorm(planes),
            )
        layers = [
            block(
                self.inplanes, planes, stride=1, dilation=1,
                downsample=downsample, dimension=self.D,
            )
        ]
        self.inplanes = planes
        layers.extend(
            block(self.inplanes, planes, stride=1, dilation=1, dimension=self.D)
            for _ in range(1, blocks)
        )
        return nn.Sequential(*layers)

    def forward(self, x):
        # ---- Encoder ----
        stem = self.relu(self.bn0(self.conv0p1s1(x)))

        down = self.relu(self.bn1(self.conv1p1s2(stem)))
        skip1 = self.block1(down)               # tensor_stride = 2

        down = self.relu(self.bn2(self.conv2p2s2(skip1)))
        skip2 = self.block2(down)               # tensor_stride = 4

        down = self.relu(self.bn3(self.conv3p4s2(skip2)))
        skip3 = self.block3(down)               # tensor_stride = 8

        down = self.relu(self.bn4(self.conv4p8s2(skip3)))
        bottom = self.block4(down)              # tensor_stride = 16

        # ---- Decoder with skip connections ----
        up = self.relu(self.bntr4(self.convtr4p16s2(bottom)))
        up = self.block5(ME.cat(up, skip3))     # tensor_stride = 8

        up = self.relu(self.bntr5(self.convtr5p8s2(up)))
        up = self.block6(ME.cat(up, skip2))     # tensor_stride = 4

        up = self.relu(self.bntr6(self.convtr6p4s2(up)))
        up = self.block7(ME.cat(up, skip1))     # tensor_stride = 2

        up = self.relu(self.bntr7(self.convtr7p2s2(up)))
        up = self.block8(ME.cat(up, stem))      # tensor_stride = 1

        return self.final(up)
-------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.2) 2 | project(mos4d) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | rospy 12 | rospy 13 | sensor_msgs 14 | std_msgs 15 | ) 16 | 17 | ## System dependencies are found with CMake's conventions 18 | # find_package(Boost REQUIRED COMPONENTS system) 19 | 20 | 21 | ## Uncomment this if the package has a setup.py. This macro ensures 22 | ## modules and global scripts declared therein get installed 23 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 24 | # catkin_python_setup() 25 | 26 | ################################################ 27 | ## Declare ROS messages, services and actions ## 28 | ################################################ 29 | 30 | ## To declare and build messages, services or actions from within this 31 | ## package, follow these steps: 32 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 33 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 34 | ## * In the file package.xml: 35 | ## * add a build_depend tag for "message_generation" 36 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 37 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 38 | ## but can be declared for certainty nonetheless: 39 | ## * add a exec_depend tag for "message_runtime" 40 | ## * In this file (CMakeLists.txt): 41 | ## * add "message_generation" and every package in MSG_DEP_SET to 42 | ## find_package(catkin REQUIRED COMPONENTS ...) 43 | ## * add "message_runtime" and every package in MSG_DEP_SET to 44 | ## catkin_package(CATKIN_DEPENDS ...) 
45 | ## * uncomment the add_*_files sections below as needed 46 | ## and list every .msg/.srv/.action file to be processed 47 | ## * uncomment the generate_messages entry below 48 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 49 | 50 | ## Generate messages in the 'msg' folder 51 | # add_message_files( 52 | # FILES 53 | # Message1.msg 54 | # Message2.msg 55 | # ) 56 | 57 | ## Generate services in the 'srv' folder 58 | # add_service_files( 59 | # FILES 60 | # Service1.srv 61 | # Service2.srv 62 | # ) 63 | 64 | ## Generate actions in the 'action' folder 65 | # add_action_files( 66 | # FILES 67 | # Action1.action 68 | # Action2.action 69 | # ) 70 | 71 | ## Generate added messages and services with any dependencies listed here 72 | # generate_messages( 73 | # DEPENDENCIES 74 | # std_msgs # Or other packages containing msgs 75 | # ) 76 | 77 | ################################################ 78 | ## Declare ROS dynamic reconfigure parameters ## 79 | ################################################ 80 | 81 | ## To declare and build dynamic reconfigure parameters within this 82 | ## package, follow these steps: 83 | ## * In the file package.xml: 84 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 85 | ## * In this file (CMakeLists.txt): 86 | ## * add "dynamic_reconfigure" to 87 | ## find_package(catkin REQUIRED COMPONENTS ...) 
88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 89 | ## and list every .cfg file to be processed 90 | 91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 92 | # generate_dynamic_reconfigure_options( 93 | # cfg/DynReconf1.cfg 94 | # cfg/DynReconf2.cfg 95 | # ) 96 | 97 | ################################### 98 | ## catkin specific configuration ## 99 | ################################### 100 | ## The catkin_package macro generates cmake config files for your package 101 | ## Declare things to be passed to dependent projects 102 | ## INCLUDE_DIRS: uncomment this if your package contains header files 103 | ## LIBRARIES: libraries you create in this project that dependent projects also need 104 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 105 | ## DEPENDS: system dependencies of this project that dependent projects also need 106 | catkin_package( 107 | # INCLUDE_DIRS include 108 | # LIBRARIES mos4d 109 | # CATKIN_DEPENDS rospy 110 | # DEPENDS system_lib 111 | ) 112 | 113 | ########### 114 | ## Build ## 115 | ########### 116 | 117 | ## Specify additional locations of header files 118 | ## Your package locations should be listed before other locations 119 | include_directories( 120 | # include 121 | ${catkin_INCLUDE_DIRS} 122 | ) 123 | 124 | ## Declare a C++ library 125 | # add_library(${PROJECT_NAME} 126 | # src/${PROJECT_NAME}/mos4d.cpp 127 | # ) 128 | 129 | ## Add cmake target dependencies of the library 130 | ## as an example, code may need to be generated before libraries 131 | ## either from message generation or dynamic reconfigure 132 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 133 | 134 | ## Declare a C++ executable 135 | ## With catkin_make all packages are built within a single CMake context 136 | ## The recommended prefix ensures that target names across packages don't collide 137 | # add_executable(${PROJECT_NAME}_node src/mos4d_node.cpp) 
138 | 139 | ## Rename C++ executable without prefix 140 | ## The above recommended prefix causes long target names, the following renames the 141 | ## target back to the shorter version for ease of user use 142 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 143 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 144 | 145 | ## Add cmake target dependencies of the executable 146 | ## same as for the library above 147 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 148 | 149 | ## Specify libraries to link a library or executable target against 150 | # target_link_libraries(${PROJECT_NAME}_node 151 | # ${catkin_LIBRARIES} 152 | # ) 153 | 154 | ############# 155 | ## Install ## 156 | ############# 157 | 158 | # all install targets should use catkin DESTINATION variables 159 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 160 | 161 | ## Mark executable scripts (Python etc.) 
for installation 162 | ## in contrast to setup.py, you can choose the destination 163 | # catkin_install_python(PROGRAMS 164 | # scripts/my_python_script 165 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 166 | # ) 167 | 168 | ## Mark executables for installation 169 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 170 | # install(TARGETS ${PROJECT_NAME}_node 171 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 172 | # ) 173 | 174 | ## Mark libraries for installation 175 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 176 | # install(TARGETS ${PROJECT_NAME} 177 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 178 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 179 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 180 | # ) 181 | 182 | ## Mark cpp header files for installation 183 | # install(DIRECTORY include/${PROJECT_NAME}/ 184 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 185 | # FILES_MATCHING PATTERN "*.h" 186 | # PATTERN ".svn" EXCLUDE 187 | # ) 188 | 189 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
190 | # install(FILES 191 | # # myfile1 192 | # # myfile2 193 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 194 | # ) 195 | 196 | ############# 197 | ## Testing ## 198 | ############# 199 | 200 | ## Add gtest based cpp test target and link libraries 201 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_mos4d.cpp) 202 | # if(TARGET ${PROJECT_NAME}-test) 203 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 204 | # endif() 205 | 206 | ## Add folders to be run by python nosetests 207 | # catkin_add_nosetests(test) 208 | -------------------------------------------------------------------------------- /c_ws/src/mos4d/checkpoints/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibrahimhroob/SPS/49b8204bb0a1c643aea6f134b4c8d96644d85ed0/c_ws/src/mos4d/checkpoints/.gitkeep -------------------------------------------------------------------------------- /c_ws/src/mos4d/launch/mos4d.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /c_ws/src/mos4d/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mos4d 4 | 0.0.1 5 | The mos4d package 6 | 7 | root 8 | 9 | 10 | 11 | 12 | MIT 13 | 14 | catkin 15 | roscpp 16 | rospy 17 | sensor_msgs 18 | std_msgs 19 | roscpp 20 | rospy 21 | sensor_msgs 22 | std_msgs 23 | roscpp 24 | rospy 25 | sensor_msgs 26 | std_msgs 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /c_ws/src/mos4d/scripts/mos4d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # @file mos4d.py 3 | # @author Benedikt Mersch [mersch@igg.uni-bonn.de] 4 | # Copyright (c) 2022 Benedikt Mersch, 
class MOS4DNet(LightningModule):
    """LightningModule wrapper around CustomMinkUNet for 4D moving-object
    segmentation: consumes per-point 4D coordinates and returns a per-point
    logit for one output channel.
    """

    def __init__(self, voxel_size):
        super().__init__()
        # Spatial voxel size used to quantize x/y/z before sparse convolution.
        self.ds = voxel_size
        # 4D (D=4) sparse U-Net: 1 constant input feature, 3 output channels.
        self.MinkUNet = CustomMinkUNet(in_channels=1, out_channels=3, D=4)

    def forward(self, coordinates: torch.Tensor):
        """Run the sparse network on raw point coordinates.

        Args:
            coordinates: (N, 5) tensor — the 5-element quantization vector
                below implies rows of [batch_idx, x, y, z, t].

        Returns:
            1-D tensor of length N holding the logit of output channel 2
            (presumably the "unstable/moving" class — verify against the
            training code).
        """
        # Quantize only the spatial dims; batch index and time stay as-is.
        quantization = torch.Tensor([1.0, self.ds, self.ds, self.ds, 1.0]).type_as(
            coordinates
        )
        coordinates = torch.div(coordinates, quantization)
        # Constant 0.5 feature per point: the network uses geometry only.
        features = 0.5 * torch.ones(len(coordinates), 1).type_as(coordinates)

        tensor_field = ME.TensorField(
            features=features, coordinates=coordinates.type_as(features)
        )
        # Voxelize (multiple points may share one voxel)...
        sparse_tensor = tensor_field.sparse()

        predicted_sparse_tensor = self.MinkUNet(sparse_tensor)
        # ...then map voxel-level predictions back onto the original points.
        out = predicted_sparse_tensor.slice(tensor_field)

        # Keep only channel 2 as a flat per-point score.
        return out.features[:, 2].reshape(-1)
class MOS4D():
    """ROS node: synchronizes raw LiDAR scans with odometry, buffers the last
    N transformed scans, runs MOS4DNet on the merged buffer, and republishes
    the current scan with dynamic points removed.
    """

    def __init__(self):
        rospy.init_node('mos4d_node')

        ''' Retrieve parameters from ROS parameter server '''
        raw_cloud_topic = rospy.get_param('~raw_cloud', '/os1_points_cropped') #"/os_cloud_node/points")
        filtered_cloud_topic = rospy.get_param('~filtered_cloud', "/cloud_filtered")
        predicted_pose_topic = rospy.get_param('~predicted_pose', "/odometry_node/odometry_estimate")

        weights_pth = rospy.get_param('~model_weights_pth', "/sps/c_ws/src/mos4d/checkpoints/10_scans.ckpt")

        self.odom_frame = rospy.get_param('~odom_frame', "map")
        self.filter = rospy.get_param('~filter' , True )

        # The scan-buffer length is encoded in the checkpoint filename,
        # e.g. "10_scans.ckpt" -> buffer of 10 scans.
        self.buffer_size = re.search(r'(\d+)_scans\.ckpt', weights_pth)

        if self.buffer_size:
            self.buffer_size = int(self.buffer_size.group(1))
            rospy.loginfo('Scan buffer size: %d' % self.buffer_size)
        else:
            rospy.logerr("Buffer size not found in the path.")
            sys.exit()

        ''' Subscribe to ROS topics '''
        odom_sub = message_filters.Subscriber(predicted_pose_topic, Odometry)
        scan_sub = message_filters.Subscriber(raw_cloud_topic, PointCloud2)

        # Exact-time synchronization: callback fires only when a scan and an
        # odometry message carry identical stamps.
        ts = message_filters.TimeSynchronizer([scan_sub, odom_sub], 10)
        ts.registerCallback(self.callback)

        ''' Initialize the publisher '''
        self.scan_pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=10)
        self.mos4d_pub = rospy.Publisher('/debug/raw_cloud_tr', PointCloud2, queue_size=10)

        # Sliding window of the last `buffer_size` map-frame scans.
        self.lidar_buffer = []

        ''' Load the model '''
        self.model = self.load_model(weights_pth, voxel_size = 0.2)

        # Monotonic scan counter, used as the time coordinate fed to the net.
        self.scan_index = 0

        rospy.spin()


    def load_model(self, path, voxel_size=0.1):
        """Load MOS4DNet weights from a Lightning checkpoint.

        Strips the "model.MinkUNet." prefix from checkpoint keys and drops
        the loss-related entries, then moves the model to GPU in eval mode.
        """
        state_dict = {
            k.replace("model.MinkUNet.", ""): v
            for k, v in torch.load(path)["state_dict"].items()
        }
        state_dict = {k: v for k, v in state_dict.items() if "MOSLoss" not in k}
        model = MOS4DNet(voxel_size)
        model.MinkUNet.load_state_dict(state_dict)
        model = model.cuda()
        model.eval()
        model.freeze()

        rospy.loginfo("Model loaded successfully!")

        return model

    def callback(self, scan_msg, odom_msg):
        """Process one synchronized (scan, odom) pair end to end."""
        self.scan = util.to_numpy(scan_msg)
        self.scan_msg_header = scan_msg.header
        # Ground-truth label derived from the scan's 4th channel; the 0.84
        # threshold presumably separates stable/unstable points — TODO confirm
        # against the labelling convention used by the dataset.
        gt = np.where(self.scan[:,3] < 0.84, 0, 1)

        start_time = time.time()
        odom_msg_stamp = odom_msg.header.stamp.to_sec()

        transformation_matrix = util.to_tr_matrix(odom_msg)

        ''' Make sure the time stamp for odom and scan are the same!'''
        df = odom_msg_stamp - self.scan_msg_header.stamp.to_sec()
        if df != 0:
            rospy.logwarn(f"Odom and scan time stamp is out of sync! time difference is {df} sec.")

        ''' Transform the scan '''
        scan_tr = util.transform_point_cloud(self.scan[:,:3], transformation_matrix)

        ''' Append scan to the buffer '''
        # Fourth column = scan index, acting as the per-point time coordinate.
        scan_tr = np.hstack(
            [
                scan_tr,
                np.ones(len(scan_tr)).reshape(-1, 1)*self.scan_index
            ]
        )
        self.scan_index += 1
        self.lidar_buffer.append(scan_tr)

        # Keep only the most recent `buffer_size` scans.
        if len(self.lidar_buffer) > self.buffer_size:
            self.lidar_buffer.pop(0)

        # Merge the scans in the buffer into a single tensor
        merged_scans = np.vstack(self.lidar_buffer)
        merged_scans = torch.from_numpy(merged_scans).squeeze().to(torch.float32).cuda()

        # Add batch index and pass through the model
        coordinates = torch.hstack([torch.zeros(len(merged_scans)).reshape(-1, 1).type_as(merged_scans), merged_scans])
        # When filtering is disabled, fake all-zero logits (nothing dynamic).
        predicted_logits = self.model.forward(coordinates) if self.filter else torch.zeros(len(merged_scans))
        torch.cuda.empty_cache()

        end_time = time.time()
        elapsed_time = end_time - start_time

        # Binarize logits; the latest scan occupies the tail of the buffer.
        predicted_logits = (predicted_logits > 0).int().cpu().data.numpy()
        scan_len = len(self.scan)
        scan_labels = predicted_logits[-scan_len:]
        self.scan = np.hstack((self.scan[:,:3], scan_labels.reshape(-1, 1)))
        # Label 0 = keep (static); label 1 = drop (predicted dynamic).
        filtered_scan = self.scan[(scan_labels == 0)] if self.filter else self.scan
        self.scan_pub.publish(util.to_rosmsg(filtered_scan, self.scan_msg_header))
        self.mos4d_pub.publish(util.to_rosmsg(np.hstack((scan_tr[:,:3], scan_labels.reshape(-1, 1))), self.scan_msg_header, self.odom_frame))

        ### -> mIoU start
        pred = scan_labels

        precision, recall, f1, accuracy, dIoU = util.calculate_metrics(gt, pred)

        log_message = (
            f"dIoU: {dIoU:.3f} "
            f"accuracy: {accuracy:.3f} "
            f"precision: {precision:.3f} "
            f"recall: {recall:.3f} "
            f"f1: {f1:.3f} "
        )
        rospy.loginfo(log_message)
        ### <- mIoU ends

        # Report processing time and point counts (N raw, n kept).
        hz = lambda t: 1 / t if t else 0
        rospy.loginfo(f"T: {elapsed_time:.3f} [{hz(elapsed_time):.2f} Hz], N: {len(self.scan):d}, n: {len(filtered_scan):d}")

if __name__ == "__main__":
    MOS4D()
std_msgs, actionlib_msgs, ...). 34 | ## * In the file package.xml: 35 | ## * add a build_depend tag for "message_generation" 36 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 37 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 38 | ## but can be declared for certainty nonetheless: 39 | ## * add a exec_depend tag for "message_runtime" 40 | ## * In this file (CMakeLists.txt): 41 | ## * add "message_generation" and every package in MSG_DEP_SET to 42 | ## find_package(catkin REQUIRED COMPONENTS ...) 43 | ## * add "message_runtime" and every package in MSG_DEP_SET to 44 | ## catkin_package(CATKIN_DEPENDS ...) 45 | ## * uncomment the add_*_files sections below as needed 46 | ## and list every .msg/.srv/.action file to be processed 47 | ## * uncomment the generate_messages entry below 48 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 49 | 50 | ## Generate messages in the 'msg' folder 51 | # add_message_files( 52 | # FILES 53 | # Message1.msg 54 | # Message2.msg 55 | # ) 56 | 57 | ## Generate services in the 'srv' folder 58 | # add_service_files( 59 | # FILES 60 | # Service1.srv 61 | # Service2.srv 62 | # ) 63 | 64 | ## Generate actions in the 'action' folder 65 | # add_action_files( 66 | # FILES 67 | # Action1.action 68 | # Action2.action 69 | # ) 70 | 71 | ## Generate added messages and services with any dependencies listed here 72 | # generate_messages( 73 | # DEPENDENCIES 74 | # sensor_msgs# std_msgs 75 | # ) 76 | 77 | ################################################ 78 | ## Declare ROS dynamic reconfigure parameters ## 79 | ################################################ 80 | 81 | ## To declare and build dynamic reconfigure parameters within this 82 | ## package, follow these steps: 83 | ## * In the file package.xml: 84 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 85 | ## * In this file (CMakeLists.txt): 86 | ## * add "dynamic_reconfigure" to 87 
| ## find_package(catkin REQUIRED COMPONENTS ...) 88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 89 | ## and list every .cfg file to be processed 90 | 91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 92 | # generate_dynamic_reconfigure_options( 93 | # cfg/DynReconf1.cfg 94 | # cfg/DynReconf2.cfg 95 | # ) 96 | 97 | ################################### 98 | ## catkin specific configuration ## 99 | ################################### 100 | ## The catkin_package macro generates cmake config files for your package 101 | ## Declare things to be passed to dependent projects 102 | ## INCLUDE_DIRS: uncomment this if your package contains header files 103 | ## LIBRARIES: libraries you create in this project that dependent projects also need 104 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 105 | ## DEPENDS: system dependencies of this project that dependent projects also need 106 | catkin_package( 107 | # INCLUDE_DIRS include 108 | # LIBRARIES scans_pub 109 | # CATKIN_DEPENDS roscpp rospy sensor_msgs std_msgs 110 | # DEPENDS system_lib 111 | ) 112 | 113 | ########### 114 | ## Build ## 115 | ########### 116 | 117 | ## Specify additional locations of header files 118 | ## Your package locations should be listed before other locations 119 | include_directories( 120 | # include 121 | ${catkin_INCLUDE_DIRS} 122 | ) 123 | 124 | ## Declare a C++ library 125 | # add_library(${PROJECT_NAME} 126 | # src/${PROJECT_NAME}/scans_pub.cpp 127 | # ) 128 | 129 | ## Add cmake target dependencies of the library 130 | ## as an example, code may need to be generated before libraries 131 | ## either from message generation or dynamic reconfigure 132 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 133 | 134 | ## Declare a C++ executable 135 | ## With catkin_make all packages are built within a single CMake context 136 | ## The recommended prefix ensures that target names across 
packages don't collide 137 | # add_executable(${PROJECT_NAME}_node src/scans_pub_node.cpp) 138 | 139 | ## Rename C++ executable without prefix 140 | ## The above recommended prefix causes long target names, the following renames the 141 | ## target back to the shorter version for ease of user use 142 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 143 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 144 | 145 | ## Add cmake target dependencies of the executable 146 | ## same as for the library above 147 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 148 | 149 | ## Specify libraries to link a library or executable target against 150 | # target_link_libraries(${PROJECT_NAME}_node 151 | # ${catkin_LIBRARIES} 152 | # ) 153 | 154 | ############# 155 | ## Install ## 156 | ############# 157 | 158 | # all install targets should use catkin DESTINATION variables 159 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 160 | 161 | ## Mark executable scripts (Python etc.) 
for installation 162 | ## in contrast to setup.py, you can choose the destination 163 | # catkin_install_python(PROGRAMS 164 | # scripts/my_python_script 165 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 166 | # ) 167 | 168 | ## Mark executables for installation 169 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 170 | # install(TARGETS ${PROJECT_NAME}_node 171 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 172 | # ) 173 | 174 | ## Mark libraries for installation 175 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 176 | # install(TARGETS ${PROJECT_NAME} 177 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 178 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 179 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 180 | # ) 181 | 182 | ## Mark cpp header files for installation 183 | # install(DIRECTORY include/${PROJECT_NAME}/ 184 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 185 | # FILES_MATCHING PATTERN "*.h" 186 | # PATTERN ".svn" EXCLUDE 187 | # ) 188 | 189 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
190 | # install(FILES 191 | # # myfile1 192 | # # myfile2 193 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 194 | # ) 195 | 196 | ############# 197 | ## Testing ## 198 | ############# 199 | 200 | ## Add gtest based cpp test target and link libraries 201 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_scans_pub.cpp) 202 | # if(TARGET ${PROJECT_NAME}-test) 203 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 204 | # endif() 205 | 206 | ## Add folders to be run by python nosetests 207 | # catkin_add_nosetests(test) 208 | -------------------------------------------------------------------------------- /c_ws/src/scans_pub/launch/pub_scans.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /c_ws/src/scans_pub/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | scans_pub 4 | 0.0.0 5 | The scans_pub package 6 | 7 | 8 | 9 | 10 | root 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | roscpp 53 | rospy 54 | sensor_msgs 55 | std_msgs 56 | roscpp 57 | rospy 58 | sensor_msgs 59 | std_msgs 60 | roscpp 61 | rospy 62 | sensor_msgs 63 | std_msgs 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /c_ws/src/scans_pub/scripts/pub_scans.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import time 6 | import tqdm 7 | import numpy as np 8 | 9 | import rospy 10 | 11 | from sensor_msgs.msg import PointCloud2, PointField 12 | 13 | from nav_msgs.msg import Odometry 14 | from geometry_msgs.msg import Pose, Quaternion 15 | 
from tf.transformations import quaternion_from_matrix 16 | ''' 17 | Note, this code is mainly for debugging in order to debug the filter node, so here we 18 | publish the true labelled scan and the true pose, this will help dubugging the submap 19 | generation and the filter health overall. 20 | 21 | ''' 22 | 23 | class PubScans: 24 | def __init__(self): 25 | rospy.init_node('Labelled_scans_publisher') 26 | 27 | root_dir = str(os.environ.get("DATA")) 28 | 29 | ''' Retrieve parameters from ROS parameter server ''' 30 | cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points") 31 | sequence = rospy.get_param('~seq', "115") 32 | pub_rate = rospy.get_param('~rate', 2) 33 | 34 | scans_pth = os.path.join(root_dir, 'sequence', sequence, 'scans') 35 | poses_pth = os.path.join(root_dir, 'sequence', sequence, 'poses') 36 | map_tr_pth = os.path.join(root_dir, 'sequence', sequence, 'map_transform') 37 | 38 | self.pub = rospy.Publisher(cloud_topic, PointCloud2, queue_size=10) 39 | self.pose_pub = rospy.Publisher('/ndt/predicted/odom', Odometry, queue_size=10) 40 | 41 | self.rate = rospy.Rate(pub_rate) # Set the publishing rate 42 | self.scans_pth = scans_pth 43 | self.poses_pth = poses_pth 44 | 45 | self.timer = rospy.Timer(rospy.Duration(1.0 / pub_rate), self.timer_callback) 46 | 47 | self.scans = self.load_scans() 48 | self.poses = self.load_poses() 49 | 50 | self.map_transform = np.loadtxt(map_tr_pth, delimiter=',') 51 | 52 | self.total_scans = len(self.scans) 53 | 54 | assert len(self.scans) == len(self.poses), 'Must have the same length!!' 
55 | 56 | self.index = 0 57 | 58 | rospy.spin() 59 | 60 | 61 | def timer_callback(self, event): 62 | scan = self.scans[self.index] 63 | pose = self.poses[self.index] 64 | timestamp_str = os.path.splitext(scan)[0] 65 | scan_data = np.load(os.path.join(self.scans_pth, scan)) 66 | pose_data = np.loadtxt(os.path.join(self.poses_pth, pose), delimiter=',') 67 | 68 | '''Transform the scan_data using the transformed pose''' 69 | scan_data[:,:3] = self.transform_point_cloud(scan_data[:,:3], pose_data) 70 | scan_data[:,:3] = self.transform_point_cloud(scan_data[:,:3], self.map_transform) 71 | 72 | msg = self.to_pointmsg(scan_data, timestamp_str) 73 | print('Publish: %s\t\t\r' % (timestamp_str), end='') 74 | self.pub.publish(msg) 75 | self.pose_pub.publish(self.to_posemsg(pose_data, timestamp_str)) 76 | 77 | #update index 78 | if(self.index < self.total_scans): 79 | self.index += 1 80 | 81 | if(self.index >= self.total_scans): 82 | rospy.signal_shutdown("No more scans to publish.") 83 | 84 | def load_scans(self): 85 | scans = sorted(os.listdir(self.scans_pth)) 86 | return scans 87 | 88 | def load_poses(self): 89 | poses = sorted(os.listdir(self.poses_pth)) 90 | return poses 91 | 92 | def transform_point_cloud(self, point_cloud, transformation_matrix): 93 | # Convert point cloud to homogeneous coordinates 94 | homogeneous_coords = np.hstack((point_cloud, np.ones((point_cloud.shape[0], 1)))) 95 | # Apply transformation matrix 96 | transformed_coords = np.dot(homogeneous_coords, transformation_matrix.T) 97 | # Convert back to Cartesian coordinates 98 | transformed_point_cloud = transformed_coords[:, :3] / transformed_coords[:, 3][:, np.newaxis] 99 | return transformed_point_cloud 100 | 101 | def to_pointmsg(self, data, timestamp_str): 102 | cloud = PointCloud2() 103 | timestamp = float(timestamp_str) 104 | scan_time = rospy.Time.from_sec(timestamp) 105 | cloud.header.stamp = scan_time 106 | cloud.header.frame_id = 'map' 107 | 108 | filtered_fields = [ 109 | PointField('x', 0, 
    def to_posemsg(self, pose_data, timestamp_str):
        """Convert a 4x4 homogeneous pose matrix into a nav_msgs/Odometry.

        Args:
            pose_data: 4x4 homogeneous transformation matrix (map -> base_link).
            timestamp_str: POSIX timestamp as a string (scan filename stem).

        Returns:
            Odometry message stamped with the given time, frame 'map',
            child frame 'base_link'.
        """
        odom_msg = Odometry()
        timestamp = float(timestamp_str)
        pose_time = rospy.Time.from_sec(timestamp)
        odom_msg.header.stamp = pose_time
        odom_msg.header.frame_id = 'map'
        odom_msg.child_frame_id = 'base_link'

        # Extract position (translation) from the 4x4 transformation matrix
        pose_msg = Pose()
        pose_msg.position.x = pose_data[0, 3]
        pose_msg.position.y = pose_data[1, 3]
        pose_msg.position.z = pose_data[2, 3]

        # Extract orientation (quaternion) from the 4x4 transformation matrix
        quaternion = quaternion_from_matrix(pose_data)
        pose_msg.orientation = Quaternion(*quaternion)

        odom_msg.pose.pose = pose_msg

        # Fill in the covariance matrix if available (optional)
        # odom_msg.pose.covariance = [0.0] * 36

        return odom_msg
import Odometry 14 | from geometry_msgs.msg import Pose, Quaternion 15 | from tf.transformations import quaternion_from_matrix 16 | ''' 17 | Note: this script is mainly for debugging the filter node; here we 18 | publish the ground-truth labelled scan and the true pose, which helps debugging the submap 19 | generation and the filter health overall. 20 | 21 | ''' 22 | 23 | class PubScans: 24 | def __init__(self): 25 | rospy.init_node('Labelled_scans_publisher') 26 | 27 | root_dir = str(os.environ.get("DATA")) 28 | 29 | ''' Retrieve parameters from ROS parameter server ''' 30 | cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points") 31 | sequence = rospy.get_param('~seq', "219") 32 | pub_rate = rospy.get_param('~rate', 10) 33 | 34 | scans_pth = os.path.join(root_dir, 'sequence', sequence, 'scans') 35 | 36 | self.pub = rospy.Publisher(cloud_topic, PointCloud2, queue_size=10) 37 | 38 | self.rate = rospy.Rate(pub_rate) # Set the publishing rate 39 | self.scans_pth = scans_pth 40 | 41 | self.scans_data = [] 42 | self.num_scan_to_load = 1000 43 | self.scans = self.load_scans(self.num_scan_to_load) 44 | 45 | self.total_scans = len(self.scans) 46 | 47 | self.index = 0 48 | 49 | self.timer = rospy.Timer(rospy.Duration(1.0 / pub_rate), self.timer_callback) 50 | 51 | rospy.spin() 52 | 53 | 54 | def timer_callback(self, event): 55 | scan = self.scans[self.index] 56 | timestamp_str = os.path.splitext(scan)[0] 57 | # np_load = np.load if scan[:-3] == 'npy' else np.loadtxt 58 | # scan_data = np_load(os.path.join(self.scans_pth, scan)) 59 | scan_data = self.scans_data[self.index] 60 | 61 | msg = self.to_pointmsg(scan_data, timestamp_str) 62 | print('Publish: %s\t\t\r' % (timestamp_str), end='') 63 | self.pub.publish(msg) 64 | 65 | #update index 66 | if(self.index < self.total_scans): 67 | self.index += 1 68 | 69 | if(self.index >= self.total_scans): 70 | rospy.signal_shutdown("No more scans to publish.") 71 | 72 | def load_scans(self, num_scan): 
scans = sorted(os.listdir(self.scans_pth)) 74 | 75 | scans = scans[:num_scan] 76 | 77 | for i in tqdm(range(num_scan), desc="Loading scans"): 78 | scan = scans[i] 79 | np_load = np.load if scan[:-3] == 'npy' else np.loadtxt 80 | scan_data = np_load(os.path.join(self.scans_pth, scan)) 81 | scan_data = np.unique(scan_data, axis=0) 82 | self.scans_data.append(scan_data) 83 | 84 | return scans 85 | 86 | def to_pointmsg(self, data, timestamp_str): 87 | cloud = PointCloud2() 88 | timestamp = float(timestamp_str) 89 | scan_time = rospy.Time.from_sec(timestamp) 90 | cloud.header.stamp = scan_time 91 | cloud.header.frame_id = 'os_sensor' 92 | 93 | filtered_fields = [ 94 | PointField('x', 0, PointField.FLOAT32, 1), 95 | PointField('y', 4, PointField.FLOAT32, 1), 96 | PointField('z', 8, PointField.FLOAT32, 1), 97 | PointField('intensity', 12, PointField.FLOAT32, 1) 98 | ] 99 | cloud.fields = filtered_fields 100 | cloud.is_bigendian = False 101 | cloud.point_step = 16 102 | cloud.row_step = cloud.point_step * len(data) 103 | cloud.is_bigendian = False 104 | cloud.is_dense = True 105 | cloud.width = len(data) 106 | cloud.height = 1 107 | 108 | point_data = np.array(data, dtype=np.float32) 109 | cloud.data = point_data.tobytes() 110 | 111 | return cloud 112 | 113 | if __name__ == '__main__': 114 | pub_scans_node = PubScans() -------------------------------------------------------------------------------- /c_ws/src/sps_filter/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.0.2) 2 | project(sps_filter) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | ## Find catkin macros and libraries 8 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 9 | ## is used, also find other catkin packages 10 | find_package(catkin REQUIRED COMPONENTS 11 | roscpp 12 | rospy 13 | sensor_msgs 14 | std_msgs 15 | ) 16 | 17 | ## System 
dependencies are found with CMake's conventions 18 | # find_package(Boost REQUIRED COMPONENTS system) 19 | 20 | 21 | ## Uncomment this if the package has a setup.py. This macro ensures 22 | ## modules and global scripts declared therein get installed 23 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 24 | # catkin_python_setup() 25 | 26 | ################################################ 27 | ## Declare ROS messages, services and actions ## 28 | ################################################ 29 | 30 | ## To declare and build messages, services or actions from within this 31 | ## package, follow these steps: 32 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 33 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 34 | ## * In the file package.xml: 35 | ## * add a build_depend tag for "message_generation" 36 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 37 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 38 | ## but can be declared for certainty nonetheless: 39 | ## * add a exec_depend tag for "message_runtime" 40 | ## * In this file (CMakeLists.txt): 41 | ## * add "message_generation" and every package in MSG_DEP_SET to 42 | ## find_package(catkin REQUIRED COMPONENTS ...) 43 | ## * add "message_runtime" and every package in MSG_DEP_SET to 44 | ## catkin_package(CATKIN_DEPENDS ...) 45 | ## * uncomment the add_*_files sections below as needed 46 | ## and list every .msg/.srv/.action file to be processed 47 | ## * uncomment the generate_messages entry below 48 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
49 | 50 | ## Generate messages in the 'msg' folder 51 | # add_message_files( 52 | # FILES 53 | # Message1.msg 54 | # Message2.msg 55 | # ) 56 | 57 | ## Generate services in the 'srv' folder 58 | # add_service_files( 59 | # FILES 60 | # Service1.srv 61 | # Service2.srv 62 | # ) 63 | 64 | ## Generate actions in the 'action' folder 65 | # add_action_files( 66 | # FILES 67 | # Action1.action 68 | # Action2.action 69 | # ) 70 | 71 | ## Generate added messages and services with any dependencies listed here 72 | # generate_messages( 73 | # DEPENDENCIES 74 | # sensor_msgs# std_msgs 75 | # ) 76 | 77 | ################################################ 78 | ## Declare ROS dynamic reconfigure parameters ## 79 | ################################################ 80 | 81 | ## To declare and build dynamic reconfigure parameters within this 82 | ## package, follow these steps: 83 | ## * In the file package.xml: 84 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 85 | ## * In this file (CMakeLists.txt): 86 | ## * add "dynamic_reconfigure" to 87 | ## find_package(catkin REQUIRED COMPONENTS ...) 
88 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 89 | ## and list every .cfg file to be processed 90 | 91 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 92 | # generate_dynamic_reconfigure_options( 93 | # cfg/DynReconf1.cfg 94 | # cfg/DynReconf2.cfg 95 | # ) 96 | 97 | ################################### 98 | ## catkin specific configuration ## 99 | ################################### 100 | ## The catkin_package macro generates cmake config files for your package 101 | ## Declare things to be passed to dependent projects 102 | ## INCLUDE_DIRS: uncomment this if your package contains header files 103 | ## LIBRARIES: libraries you create in this project that dependent projects also need 104 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 105 | ## DEPENDS: system dependencies of this project that dependent projects also need 106 | catkin_package( 107 | # INCLUDE_DIRS include 108 | # LIBRARIES sps_filter 109 | # CATKIN_DEPENDS roscpp rospy sensor_msgs std_msgs 110 | # DEPENDS system_lib 111 | ) 112 | 113 | ########### 114 | ## Build ## 115 | ########### 116 | 117 | ## Specify additional locations of header files 118 | ## Your package locations should be listed before other locations 119 | include_directories( 120 | # include 121 | ${catkin_INCLUDE_DIRS} 122 | ) 123 | 124 | ## Declare a C++ library 125 | # add_library(${PROJECT_NAME} 126 | # src/${PROJECT_NAME}/sps_filter.cpp 127 | # ) 128 | 129 | ## Add cmake target dependencies of the library 130 | ## as an example, code may need to be generated before libraries 131 | ## either from message generation or dynamic reconfigure 132 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 133 | 134 | ## Declare a C++ executable 135 | ## With catkin_make all packages are built within a single CMake context 136 | ## The recommended prefix ensures that target names across packages don't collide 137 | # 
add_executable(${PROJECT_NAME}_node src/sps_filter_node.cpp) 138 | 139 | ## Rename C++ executable without prefix 140 | ## The above recommended prefix causes long target names, the following renames the 141 | ## target back to the shorter version for ease of user use 142 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 143 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 144 | 145 | ## Add cmake target dependencies of the executable 146 | ## same as for the library above 147 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 148 | 149 | ## Specify libraries to link a library or executable target against 150 | # target_link_libraries(${PROJECT_NAME}_node 151 | # ${catkin_LIBRARIES} 152 | # ) 153 | 154 | ############# 155 | ## Install ## 156 | ############# 157 | 158 | # all install targets should use catkin DESTINATION variables 159 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 160 | 161 | ## Mark executable scripts (Python etc.) 
for installation 162 | ## in contrast to setup.py, you can choose the destination 163 | # catkin_install_python(PROGRAMS 164 | # scripts/my_python_script 165 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 166 | # ) 167 | 168 | ## Mark executables for installation 169 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 170 | # install(TARGETS ${PROJECT_NAME}_node 171 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 172 | # ) 173 | 174 | ## Mark libraries for installation 175 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 176 | # install(TARGETS ${PROJECT_NAME} 177 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 178 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 179 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 180 | # ) 181 | 182 | ## Mark cpp header files for installation 183 | # install(DIRECTORY include/${PROJECT_NAME}/ 184 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 185 | # FILES_MATCHING PATTERN "*.h" 186 | # PATTERN ".svn" EXCLUDE 187 | # ) 188 | 189 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
190 | # install(FILES 191 | # # myfile1 192 | # # myfile2 193 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 194 | # ) 195 | 196 | ############# 197 | ## Testing ## 198 | ############# 199 | 200 | ## Add gtest based cpp test target and link libraries 201 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_sps_filter.cpp) 202 | # if(TARGET ${PROJECT_NAME}-test) 203 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 204 | # endif() 205 | 206 | ## Add folders to be run by python nosetests 207 | # catkin_add_nosetests(test) 208 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/launch/mask.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/launch/sps.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | sps_filter 4 | 0.0.1 5 | The sps_filter package 6 | 7 | root 8 | 9 | 10 | 11 | 12 | MIT 13 | 14 | catkin 15 | roscpp 16 | rospy 17 | sensor_msgs 18 | std_msgs 19 | roscpp 20 | rospy 21 | sensor_msgs 22 | std_msgs 23 | roscpp 24 | rospy 25 | sensor_msgs 26 | std_msgs 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/scripts/mask.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import time 4 | import torch 5 | import numpy as np 6 | 7 | import rospy 8 | 9 | import message_filters 10 
| from std_msgs.msg import Float32 11 | from nav_msgs.msg import Odometry 12 | from sensor_msgs.msg import PointCloud2 13 | 14 | from torchmetrics import R2Score 15 | 16 | from sps.datasets import util 17 | 18 | class SPS(): 19 | def __init__(self): 20 | rospy.init_node('Stable_Points_Segmentation_node') 21 | 22 | ''' Retrieve parameters from ROS parameter server ''' 23 | raw_cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points") 24 | filtered_cloud_topic = rospy.get_param('~filtered_cloud', "/cloud_filtered") 25 | predicted_pose_topic = rospy.get_param('~predicted_pose', "/odometry_node/odometry_estimate") 26 | 27 | weights_pth = rospy.get_param('~model_weights_pth', "/sps/tb_logs/SPS_ME_Union/version_39/checkpoints/last.ckpt") 28 | 29 | self.odom_frame = rospy.get_param('~odom_frame', "map") 30 | self.epsilon = rospy.get_param('~epsilon', 0.84) 31 | self.pub_submap = rospy.get_param('~pub_submap', True) 32 | self.pub_cloud_tr = rospy.get_param('~pub_cloud_tr', True) 33 | 34 | ''' Subscribe to ROS topics ''' 35 | odom_sub = message_filters.Subscriber(predicted_pose_topic, Odometry) 36 | scan_sub = message_filters.Subscriber(raw_cloud_topic, PointCloud2) 37 | 38 | ts = message_filters.TimeSynchronizer([odom_sub, scan_sub], 1) 39 | ts.registerCallback(self.callback) 40 | 41 | ''' Initialize the publisher ''' 42 | self.scan_pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=0) 43 | self.submap_pub = rospy.Publisher('debug/cloud_submap', PointCloud2, queue_size=0) 44 | self.cloud_tr_pub = rospy.Publisher('debug/raw_cloud_tr', PointCloud2, queue_size=0) 45 | self.loss_pub = rospy.Publisher('debug/model_loss', Float32, queue_size=0) 46 | self.r2_pub = rospy.Publisher('debug/model_r2', Float32, queue_size=0) 47 | 48 | rospy.loginfo('raw_cloud: %s', raw_cloud_topic) 49 | rospy.loginfo('cloud_filtered: %s', filtered_cloud_topic) 50 | rospy.loginfo('predicted_pose: %s', predicted_pose_topic) 51 | rospy.loginfo('epsilon: %f', self.epsilon) 52 | 
53 | ''' Load configs ''' 54 | cfg = torch.load(weights_pth)["hyper_parameters"] 55 | rospy.loginfo(cfg) 56 | 57 | ''' Load the model ''' 58 | self.model = util.load_model(cfg, weights_pth) 59 | 60 | ''' Get VOXEL_SIZE for quantization ''' 61 | self.ds = cfg["MODEL"]["VOXEL_SIZE"] 62 | 63 | ''' Get available device ''' 64 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 65 | 66 | ''' Load point cloud map ''' 67 | cfg["TRAIN"]["MAP"] = 'base_map.asc.npy' 68 | pc_map_tensor = util.load_point_cloud_map(cfg) 69 | self.pc_map_coord_feat = util.to_coords_features(pc_map_tensor, 70 | feature_type='map', 71 | ds=self.ds, 72 | device=self.device) 73 | 74 | ''' Define loss and r2score for model evaluation ''' 75 | self.loss = torch.nn.MSELoss().to(self.device) 76 | self.r2score = R2Score().to(self.device) 77 | 78 | ''' Variable to hold point cloud frame ''' 79 | self.scan = None 80 | self.scan_msg_header = None 81 | self.scan_received = False 82 | 83 | rospy.spin() 84 | 85 | 86 | def callback(self, odom_msg, scan_msg): 87 | self.scan = util.to_numpy(scan_msg) 88 | self.scan_msg_header = scan_msg.header 89 | 90 | start_time = time.time() 91 | odom_msg_stamp = odom_msg.header.stamp.to_sec() 92 | 93 | transformation_matrix = util.to_tr_matrix(odom_msg) 94 | 95 | ''' Step 0: make sure the time stamp for odom and scan are the same!''' 96 | df = odom_msg_stamp - self.scan_msg_header.stamp.to_sec() 97 | if df != 0: 98 | rospy.logwarn(f"Odom and scan time stamp is out of sync! 
time difference is {df} sec.") 99 | 100 | ''' Step 1: Transform the scan ''' 101 | scan_tr = util.transform_point_cloud(self.scan[:,:3], transformation_matrix) 102 | 103 | ''' Step 2: convert scan points to torch tensor ''' 104 | scan_points = torch.tensor(scan_tr[:,:3], dtype=torch.float32).reshape(-1, 3).to(self.device) 105 | scan_labels = torch.tensor(self.scan[:, 3], dtype=torch.float32).reshape(-1, 1).to(self.device) 106 | 107 | ''' Step 3: Prune map points by keeping only the points that in the same voxel as of the scan points''' 108 | prune_start_time = time.time() 109 | scan_coord_feat = util.to_coords_features(scan_points, 110 | feature_type='scan', 111 | ds=self.ds, 112 | device=self.device) 113 | submap_points, len_scan_coord = util.prune(self.pc_map_coord_feat, scan_coord_feat, self.ds) 114 | 115 | prune_time = time.time() - prune_start_time 116 | 117 | submap_points = submap_points.cpu() 118 | submap_labels = torch.ones(submap_points.shape[0], 1) 119 | filtered_scan = torch.hstack([submap_points, submap_labels]) 120 | filtered_scan = filtered_scan.numpy() 121 | filtered_scan[:,:3] = util.inverse_transform_point_cloud(filtered_scan[:,:3], transformation_matrix) 122 | self.scan_pub.publish(util.to_rosmsg(filtered_scan, self.scan_msg_header)) 123 | 124 | ''' Publish the transformed point cloud for debugging ''' 125 | if self.pub_cloud_tr: 126 | scan_tr = np.hstack([scan_tr[:,:3], self.scan[:,3].reshape(-1,1)]) 127 | self.cloud_tr_pub.publish(util.to_rosmsg(scan_tr, self.scan_msg_header, self.odom_frame)) 128 | 129 | ''' Publish the submap points for debugging ''' 130 | submap_points = submap_points.cpu() 131 | submap_labels = torch.ones(submap_points.shape[0], 1) 132 | if self.pub_submap: 133 | submap = torch.hstack([submap_points, submap_labels]) 134 | self.submap_pub.publish(util.to_rosmsg(submap.numpy(), self.scan_msg_header, self.odom_frame)) 135 | 136 | ''' Print log message ''' 137 | end_time = time.time() 138 | elapsed_time = end_time - start_time 
139 | hz = lambda t: 1 / t if t else 0 140 | 141 | log_message = ( 142 | f"T: {elapsed_time:.3f} [{hz(elapsed_time):.2f} Hz] " 143 | f"P: {prune_time:.3f} [{hz(prune_time):.2f} Hz] " 144 | f"N: {len(self.scan):d} n: {len(filtered_scan):d} " 145 | f"S: {len_scan_coord:d} M: {len(submap_labels):d} " 146 | ) 147 | rospy.loginfo(log_message) 148 | 149 | 150 | if __name__ == '__main__': 151 | SPS_node = SPS() 152 | 153 | 154 | 155 | 156 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/scripts/sps_node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import time 4 | import yaml 5 | import torch 6 | import numpy as np 7 | 8 | import rospy 9 | 10 | import message_filters 11 | from std_msgs.msg import Float32 12 | from nav_msgs.msg import Odometry 13 | from sensor_msgs.msg import PointCloud2 14 | 15 | from torchmetrics import R2Score 16 | 17 | from sps.datasets import util 18 | 19 | class SPS(): 20 | def __init__(self): 21 | rospy.init_node('Stable_Points_Segmentation_node') 22 | 23 | ''' Retrieve parameters from ROS parameter server ''' 24 | raw_cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points") 25 | filtered_cloud_topic = rospy.get_param('~filtered_cloud', "/cloud_filtered") 26 | predicted_pose_topic = rospy.get_param('~predicted_pose', "/odometry_node/odometry_estimate") 27 | 28 | weights_pth = rospy.get_param('~model_weights_pth', "/sps/best_models/420_601.ckpt") 29 | config_pth = rospy.get_param('~config_pth', "/sps/config/config.yaml") 30 | 31 | self.odom_frame = rospy.get_param( '~odom_frame' , "map" ) 32 | self.epsilon = rospy.get_param( '~epsilon' , 0.84 ) 33 | self.pub_submap = rospy.get_param( '~pub_submap' , True ) 34 | self.pub_cloud_tr = rospy.get_param( '~pub_cloud_tr' , True ) 35 | 36 | ''' Subscribe to ROS topics ''' 37 | odom_sub = message_filters.Subscriber(predicted_pose_topic, Odometry) 38 | scan_sub = 
message_filters.Subscriber(raw_cloud_topic, PointCloud2) 39 | 40 | ts = message_filters.TimeSynchronizer([odom_sub, scan_sub], 1) 41 | ts.registerCallback(self.callback) 42 | 43 | ''' Initialize the publisher ''' 44 | self.scan_pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=0) 45 | self.submap_pub = rospy.Publisher('debug/cloud_submap', PointCloud2, queue_size=0) 46 | self.cloud_tr_pub = rospy.Publisher('debug/raw_cloud_tr', PointCloud2, queue_size=0) 47 | self.loss_pub = rospy.Publisher('debug/model_loss', Float32, queue_size=0) 48 | self.r2_pub = rospy.Publisher('debug/model_r2', Float32, queue_size=0) 49 | 50 | rospy.loginfo('raw_cloud: %s', raw_cloud_topic) 51 | rospy.loginfo('cloud_filtered: %s', filtered_cloud_topic) 52 | rospy.loginfo('predicted_pose: %s', predicted_pose_topic) 53 | rospy.loginfo('epsilon: %f', self.epsilon) 54 | 55 | ''' Load configs ''' 56 | cfg = yaml.safe_load(open(config_pth)) 57 | rospy.loginfo(cfg) 58 | 59 | ''' Load the model ''' 60 | self.model = util.load_model(cfg, weights_pth) 61 | 62 | ''' Get VOXEL_SIZE for quantization ''' 63 | self.ds = cfg["MODEL"]["VOXEL_SIZE"] 64 | 65 | ''' Get available device ''' 66 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 67 | 68 | ''' Load point cloud map ''' 69 | cfg["TRAIN"]["MAP"] = 'base_map.asc.npy' 70 | pc_map_tensor = util.load_point_cloud_map(cfg) 71 | self.pc_map_coord_feat = util.to_coords_features(pc_map_tensor, 72 | feature_type='map', 73 | ds=self.ds, 74 | device=self.device) 75 | 76 | ''' Define loss and r2score for model evaluation ''' 77 | self.loss = torch.nn.MSELoss().to(self.device) 78 | self.r2score = R2Score().to(self.device) 79 | 80 | ''' Variable to hold point cloud frame ''' 81 | self.scan = None 82 | self.scan_msg_header = None 83 | self.scan_received = False 84 | 85 | rospy.spin() 86 | 87 | 88 | def callback(self, odom_msg, scan_msg): 89 | self.scan = util.to_numpy(scan_msg) 90 | self.scan_msg_header = scan_msg.header 91 
| 92 | start_time = time.time() 93 | odom_msg_stamp = odom_msg.header.stamp.to_sec() 94 | 95 | transformation_matrix = util.to_tr_matrix(odom_msg) 96 | 97 | ''' Step 0: make sure the time stamp for odom and scan are the same!''' 98 | df = odom_msg_stamp - self.scan_msg_header.stamp.to_sec() 99 | if df != 0: 100 | rospy.logwarn(f"Odom and scan time stamp is out of sync! time difference is {df} sec.") 101 | 102 | ''' Step 1: Transform the scan ''' 103 | scan_tr = util.transform_point_cloud(self.scan[:,:3], transformation_matrix) 104 | 105 | ''' Step 2: convert scan points to torch tensor ''' 106 | scan_points = torch.tensor(scan_tr[:,:3], dtype=torch.float32).reshape(-1, 3).to(self.device) 107 | scan_labels = torch.tensor(self.scan[:, 3], dtype=torch.float32).reshape(-1, 1).to(self.device) 108 | 109 | ''' Step 3: Prune map points by keeping only the points that in the same voxel as of the scan points''' 110 | prune_start_time = time.time() 111 | scan_coord_feat = util.to_coords_features(scan_points, 112 | feature_type='scan', 113 | ds=self.ds, 114 | device=self.device) 115 | submap_points, len_scan_coord = util.prune(self.pc_map_coord_feat, scan_coord_feat, self.ds) 116 | 117 | prune_time = time.time() - prune_start_time 118 | 119 | ''' Step 4: Infer the stability labels''' 120 | predicted_scan_labels, infer_time = util.infer(scan_points, submap_points, self.model) 121 | 122 | ''' Step 5: Calculate loss and r2 ''' 123 | loss = self.loss(predicted_scan_labels.view(-1), scan_labels.view(-1)) 124 | r2 = self.r2score(predicted_scan_labels.view(-1), scan_labels.view(-1)) 125 | self.loss_pub.publish(loss) 126 | self.r2_pub.publish(r2) 127 | 128 | predicted_scan_labels = predicted_scan_labels.cpu() 129 | 130 | ### -> mIoU start 131 | pred = np.where(predicted_scan_labels.view(-1) < self.epsilon, 0, 1) 132 | gt = np.where( scan_labels.cpu().view(-1) < self.epsilon, 0, 1) 133 | 134 | precision, recall, f1, accuracy, dIoU = util.calculate_metrics(gt, pred) 135 | 136 | 
log_message = ( 137 | f"dIoU: {dIoU:.3f} " 138 | f"accuracy: {accuracy:.3f} " 139 | f"precision: {precision:.3f} " 140 | f"recall: {recall:.3f} " 141 | f"f1: {f1:.3f} " 142 | ) 143 | rospy.loginfo(log_message) 144 | ### <- mIoU ends 145 | 146 | ''' Step 6: Filter the scan points based on the threshold''' 147 | assert len(predicted_scan_labels) == len(self.scan), f"Predicted scans labels len ({len(predicted_scan_labels)}) does not equal scan len ({len(self.scan)})" 148 | filtered_scan = self.scan[(predicted_scan_labels.cpu() <= self.epsilon)] 149 | self.scan_pub.publish(util.to_rosmsg(filtered_scan, self.scan_msg_header)) 150 | 151 | ''' Publish the transformed point cloud for debugging ''' 152 | if self.pub_cloud_tr: 153 | scan_tr = np.hstack([scan_tr[:,:3], pred.reshape(-1,1)]) 154 | self.cloud_tr_pub.publish(util.to_rosmsg(scan_tr, self.scan_msg_header, self.odom_frame)) 155 | 156 | ''' Publish the submap points for debugging ''' 157 | submap_points = submap_points.cpu() 158 | submap_labels = torch.ones(submap_points.shape[0], 1) 159 | if self.pub_submap: 160 | submap = torch.hstack([submap_points, submap_labels]) 161 | self.submap_pub.publish(util.to_rosmsg(submap.numpy(), self.scan_msg_header, self.odom_frame)) 162 | 163 | ''' Print log message ''' 164 | end_time = time.time() 165 | elapsed_time = end_time - start_time 166 | hz = lambda t: 1 / t if t else 0 167 | 168 | log_message = ( 169 | f"T: {elapsed_time:.3f} [{hz(elapsed_time):.2f} Hz] " 170 | f"P: {prune_time:.3f} [{hz(prune_time):.2f} Hz] " 171 | f"I: {infer_time:.3f} [{hz(infer_time):.2f} Hz] " 172 | f"L: {loss:.3f} r2: {r2:.3f} " 173 | f"N: {len(self.scan):d} n: {len(filtered_scan):d} " 174 | f"S: {len_scan_coord:d} M: {len(submap_labels):d} " 175 | ) 176 | rospy.loginfo(log_message) 177 | 178 | if __name__ == '__main__': 179 | SPS_node = SPS() 180 | 181 | 182 | 183 | 184 | -------------------------------------------------------------------------------- /c_ws/src/sps_filter/scripts/sps_node_cvm.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | #here I add an implementation of a very simple constant velocity model 4 | 5 | import time 6 | import yaml 7 | import torch 8 | import numpy as np 9 | 10 | import rospy 11 | 12 | from std_msgs.msg import Float32 13 | from nav_msgs.msg import Odometry 14 | from sensor_msgs.msg import PointCloud2 15 | 16 | from torchmetrics import R2Score 17 | from sps.datasets import util 18 | 19 | class SPS(): 20 | def __init__(self): 21 | rospy.init_node('Stable_Points_Segmentation_node') 22 | 23 | ''' Retrieve parameters from ROS parameter server ''' 24 | raw_cloud_topic = rospy.get_param('~raw_cloud', "/os_cloud_node/points") 25 | filtered_cloud_topic = rospy.get_param('~filtered_cloud', "/cloud_filtered") 26 | corrected_pose_topic = rospy.get_param('~corrected_pose', "/odometry_node/odometry") 27 | 28 | weights_pth = rospy.get_param('~model_weights_pth', "/sps/best_models/420_601.ckpt") 29 | config_pth = rospy.get_param('~config_pth', "/sps/config/config.yaml") 30 | 31 | self.epsilon = rospy.get_param('~epsilon' , 0.84 ) 32 | self.odom_frame = rospy.get_param('~odom_frame' , 'map' ) 33 | self.pub_submap = rospy.get_param('~pub_submap' , True ) 34 | self.pub_cloud_tr = rospy.get_param('~pub_cloud_tr' , True ) 35 | 36 | ''' Subscribe to ROS topics ''' 37 | rospy.Subscriber(corrected_pose_topic, Odometry, self.odom_callback) 38 | rospy.Subscriber(raw_cloud_topic, PointCloud2, self.cloud_callback) 39 | 40 | ''' Initialize the publisher ''' 41 | self.scan_pub = rospy.Publisher(filtered_cloud_topic, PointCloud2, queue_size=10) 42 | self.submap_pub = rospy.Publisher('debug/cloud_submap', PointCloud2, queue_size=10) 43 | self.cloud_tr_pub = rospy.Publisher('debug/raw_cloud_tr', PointCloud2, queue_size=10) 44 | self.loss_pub = rospy.Publisher('debug/model_loss' , Float32, queue_size=10) 45 | self.r2_pub = rospy.Publisher('debug/model_r2' , Float32, queue_size=10) 46 | 47 | 
rospy.loginfo('raw_cloud: %s', raw_cloud_topic) 48 | rospy.loginfo('cloud_filtered: %s', filtered_cloud_topic) 49 | rospy.loginfo('epsilon: %f', self.epsilon) 50 | 51 | ''' Load configs ''' 52 | cfg = yaml.safe_load(open(config_pth)) 53 | 54 | rospy.loginfo(cfg) 55 | 56 | ''' Load the model ''' 57 | self.model = util.load_model(cfg, weights_pth) 58 | 59 | ''' Get VOXEL_SIZE for quantization ''' 60 | self.ds = 0.2 #cfg["MODEL"]["VOXEL_SIZE"] 61 | 62 | ''' Get available device ''' 63 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 64 | 65 | ''' Load point cloud map ''' 66 | cfg["TRAIN"]["MAP"] = 'base_map.asc.npy' 67 | pc_map_tensor = util.load_point_cloud_map(cfg) 68 | self.pc_map_coord_feat = util.to_coords_features(pc_map_tensor, 69 | feature_type='map', 70 | ds=self.ds, 71 | device=self.device) 72 | 73 | ''' Define loss and r2score for model evaluation ''' 74 | self.loss = torch.nn.MSELoss().to(self.device) 75 | self.r2score = R2Score().to(self.device) 76 | 77 | ''' Variable to hold point cloud frame ''' 78 | self.scan = None 79 | self.scan_msg_header = None 80 | self.scan_received = False 81 | 82 | ''' List to save corrected poses''' 83 | self.poses = [np.eye(4)] 84 | 85 | rospy.spin() 86 | 87 | def get_prediction_model(self): 88 | identity_matrix = np.eye(4) # Identity matrix for SE(3) 89 | num_poses = len(self.poses) 90 | 91 | if num_poses < 4: 92 | return identity_matrix 93 | 94 | num_predictions = 3 if num_poses <= 10 else 9 95 | 96 | # Initialize lists to store inverses and predictions 97 | inverse_poses = [np.linalg.inv(self.poses[num_poses - i]) for i in range(2, 2 + num_predictions)] 98 | predictions = [np.dot(inverse_poses[i - 2], self.poses[num_poses - i + 1]) for i in range(2, 2 + num_predictions)] 99 | 100 | mean_prediction = np.mean(predictions, axis=0) 101 | 102 | # Uncomment the line below if you want to calculate the median prediction 103 | # median_prediction = np.median(predictions, axis=0) 104 | 105 | prediction = 
def odom_callback(self, odom_msg):
    """Record every incoming odometry message as a 4x4 pose matrix.

    The accumulated history feeds get_prediction_model().
    """
    pose = util.to_tr_matrix(odom_msg)
    self.poses.append(pose)

def cloud_callback(self, scan_msg):
    """Filter one LiDAR scan with the stability model and publish results.

    Pipeline: predict the current pose from odometry history, transform
    the scan into the map frame, prune the global map to a submap around
    the scan, infer per-point stability labels, then publish the points
    classified as stable plus optional debug clouds.
    """
    # Scan is converted to an (N, >=4) numpy array; column 3 holds the
    # ground-truth stability label used for the metrics below
    # (presumably -- TODO confirm against util.to_numpy).
    self.scan = util.to_numpy(scan_msg)
    self.scan_msg_header = scan_msg.header

    start_time = time.time()

    # Pose estimate for the not-yet-registered scan.
    transformation_matrix = self.get_prediction_model()

    ''' Step 1: Transform the scan '''
    scan_tr = util.transform_point_cloud(self.scan[:,:3], transformation_matrix)

    ''' Step 2: convert scan points to torch tensor '''
    scan_points = torch.tensor(scan_tr[:,:3], dtype=torch.float32).reshape(-1, 3).to(self.device)
    scan_labels = torch.tensor(self.scan[:, 3], dtype=torch.float32).reshape(-1, 1).to(self.device)

    ''' Step 3: Prune map points by keeping only the points that in the same voxel as of the scan points'''
    prune_start_time = time.time()
    scan_coord_feat = util.to_coords_features(scan_points,
                                              feature_type='scan',
                                              ds=self.ds,
                                              device=self.device)
    submap_points, len_scan_coord = util.prune(self.pc_map_coord_feat, scan_coord_feat, self.ds)

    prune_time = time.time() - prune_start_time

    ''' Step 4: Infer the stability labels'''
    predicted_scan_labels, infer_time = util.infer(scan_points, submap_points, self.model)

    ''' Step 5: Calculate loss and r2 '''
    # MSE / R2 against the labels carried in the scan's 4th column.
    loss = self.loss(predicted_scan_labels.view(-1), scan_labels.view(-1))
    r2 = self.r2score(predicted_scan_labels.view(-1), scan_labels.view(-1))
    # NOTE(review): torch scalars are published directly; assumes the
    # publishers accept them as std_msgs Float values -- confirm.
    self.loss_pub.publish(loss)
    self.r2_pub.publish(r2)

    predicted_scan_labels = predicted_scan_labels.cpu()

    ### -> mIoU start
    # Binarize predictions and ground truth at the epsilon threshold:
    # 0 = stable, 1 = unstable.
    pred = np.where(predicted_scan_labels.view(-1) < self.epsilon, 0, 1)
    gt = np.where( scan_labels.cpu().view(-1) < self.epsilon, 0, 1)

    precision, recall, f1, accuracy, dIoU = util.calculate_metrics(gt, pred)

    log_message = (
        f"dIoU: {dIoU:.3f} "
        f"accuracy: {accuracy:.3f} "
        f"precision: {precision:.3f} "
        f"recall: {recall:.3f} "
        f"f1: {f1:.3f} "
    )
    rospy.loginfo(log_message)
    ### <- mIoU ends


    ''' Step 6: Filter the scan points based on the threshold'''
    assert len(predicted_scan_labels) == len(self.scan), f"Predicted scans labels len ({len(predicted_scan_labels)}) does not equal scan len ({len(self.scan)})"
    # Keep only the points predicted as stable (label 0).
    filtered_scan = self.scan[(pred == 0)]
    self.scan_pub.publish(util.to_rosmsg(filtered_scan, self.scan_msg_header))

    ''' Publish the transformed point cloud for debugging '''
    if self.pub_cloud_tr:
        # Attach the binary prediction as the intensity channel.
        scan_tr = np.hstack([scan_tr[:,:3], pred.reshape(-1,1) ])
        self.cloud_tr_pub.publish(util.to_rosmsg(scan_tr, self.scan_msg_header, self.odom_frame))

    ''' Publish the submap points for debugging '''
    submap_points = submap_points.cpu()
    submap_labels = torch.ones(submap_points.shape[0], 1)
    if self.pub_submap:
        submap = torch.hstack([submap_points, submap_labels])
        self.submap_pub.publish(util.to_rosmsg(submap.numpy(), self.scan_msg_header, self.odom_frame))

    ''' Print log message '''
    end_time = time.time()
    elapsed_time = end_time - start_time
    # Convert a duration to a rate, guarding against division by zero.
    hz = lambda t: 1 / t if t else 0

    log_message = (
        f"T: {elapsed_time:.3f} [{hz(elapsed_time):.2f} Hz] "
        f"P: {prune_time:.3f} [{hz(prune_time):.2f} Hz] "
        f"I: {infer_time:.3f} [{hz(infer_time):.2f} Hz] "
        f"L: {loss:.3f} r2: {r2:.3f} "
        f"N: {len(self.scan):d} n: {len(filtered_scan):d} "
        f"S: {len_scan_coord:d} M: {len(submap_labels):d} "
    )
    rospy.loginfo(log_message)


if __name__ == '__main__':
    # Instantiating the node registers subscribers and spins forever.
    SPS_node = SPS()
-------------------------------------------------------------------------------- /config/config.yaml: -------------------------------------------------------------------------------- 1 | EXPERIMENT: 2 | ID: BLT 3 | 4 | ##Data 5 | DATA: 6 | SHUFFLE: True 7 | NUM_WORKER: 8 8 | SPLIT: 9 | TRAIN: 10 | - '20220420' 11 | - '20220601' 12 | VAL: 13 | - '20220608' 14 | TEST: 15 | - '20220629' 16 | 17 | ##Training 18 | TRAIN: 19 | MAP: base_map.asc.npy 20 | MAX_EPOCH: 80 21 | LR: 0.00007 22 | LR_EPOCH: 1 23 | LR_DECAY: 0.99 24 | WEIGHT_DECAY: 0.0001 25 | BATCH_SIZE: 2 26 | AUGMENTATION: True 27 | 28 | ##Network 29 | MODEL: 30 | VOXEL_SIZE: 0.1 # Used for discretization in x,y,z when creating a sparse tensor 31 | 32 | ##Unstable points filter 33 | FILTER: 34 | THRESHOLD: 0.84 -------------------------------------------------------------------------------- /config/rviz/debug.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 79 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Global Options1 8 | - /Odometry1/Shape1 9 | - /PointCloud21 10 | - /PointCloud22 11 | - /PointCloud23 12 | Splitter Ratio: 0.5 13 | Tree Height: 654 14 | - Class: rviz/Selection 15 | Name: Selection 16 | - Class: rviz/Tool Properties 17 | Expanded: 18 | - /2D Pose Estimate1 19 | - /2D Nav Goal1 20 | - /Publish Point1 21 | Name: Tool Properties 22 | Splitter Ratio: 0.5886790156364441 23 | - Class: rviz/Views 24 | Expanded: 25 | - /Current View1 26 | Name: Views 27 | Splitter Ratio: 0.5 28 | - Class: rviz/Time 29 | Experimental: false 30 | Name: Time 31 | SyncMode: 0 32 | SyncSource: PointCloud2 33 | Preferences: 34 | PromptSaveOnExit: true 35 | Toolbars: 36 | toolButtonStyle: 2 37 | Visualization Manager: 38 | Class: "" 39 | Displays: 40 | - Angle Tolerance: 0.10000000149011612 41 | Class: rviz/Odometry 42 | Covariance: 43 | Orientation: 44 | Alpha: 0.5 45 | Color: 255; 255; 127 46 | Color Style: 
Unique 47 | Frame: Local 48 | Offset: 1 49 | Scale: 1 50 | Value: true 51 | Position: 52 | Alpha: 0.30000001192092896 53 | Color: 204; 51; 204 54 | Scale: 1 55 | Value: true 56 | Value: true 57 | Enabled: false 58 | Keep: 20000 59 | Name: Odometry 60 | Position Tolerance: 0.10000000149011612 61 | Shape: 62 | Alpha: 0.699999988079071 63 | Axes Length: 1 64 | Axes Radius: 0.10000000149011612 65 | Color: 255; 25; 0 66 | Head Length: 0.30000001192092896 67 | Head Radius: 0.20000000298023224 68 | Shaft Length: 0.5 69 | Shaft Radius: 0.05000000074505806 70 | Value: Arrow 71 | Topic: /ndt/predicted/odom 72 | Unreliable: false 73 | Value: false 74 | - Alpha: 0.5 75 | Autocompute Intensity Bounds: true 76 | Autocompute Value Bounds: 77 | Max Value: 10 78 | Min Value: -10 79 | Value: true 80 | Axis: Z 81 | Channel Name: intensity 82 | Class: rviz/PointCloud2 83 | Color: 252; 233; 79 84 | Color Transformer: Intensity 85 | Decay Time: 0 86 | Enabled: true 87 | Invert Rainbow: false 88 | Max Color: 32; 74; 135 89 | Min Color: 32; 74; 135 90 | Name: PointCloud2 91 | Position Transformer: XYZ 92 | Queue Size: 10 93 | Selectable: true 94 | Size (Pixels): 2 95 | Size (m): 0.20000000298023224 96 | Style: Spheres 97 | Topic: /cloud_submap 98 | Unreliable: false 99 | Use Fixed Frame: true 100 | Use rainbow: false 101 | Value: true 102 | - Alpha: 1 103 | Autocompute Intensity Bounds: true 104 | Autocompute Value Bounds: 105 | Max Value: 10 106 | Min Value: -10 107 | Value: true 108 | Axis: Z 109 | Channel Name: intensity 110 | Class: rviz/PointCloud2 111 | Color: 255; 255; 255 112 | Color Transformer: FlatColor 113 | Decay Time: 0 114 | Enabled: true 115 | Invert Rainbow: false 116 | Max Color: 164; 0; 0 117 | Min Color: 32; 74; 135 118 | Name: PointCloud2 119 | Position Transformer: XYZ 120 | Queue Size: 10 121 | Selectable: true 122 | Size (Pixels): 3 123 | Size (m): 0.10000000149011612 124 | Style: Spheres 125 | Topic: /cloud_filtered 126 | Unreliable: false 127 | Use Fixed Frame: 
true 128 | Use rainbow: false 129 | Value: true 130 | - Alpha: 1 131 | Autocompute Intensity Bounds: true 132 | Autocompute Value Bounds: 133 | Max Value: 10 134 | Min Value: -10 135 | Value: true 136 | Axis: Z 137 | Channel Name: intensity 138 | Class: rviz/PointCloud2 139 | Color: 255; 255; 255 140 | Color Transformer: Intensity 141 | Decay Time: 0 142 | Enabled: false 143 | Invert Rainbow: false 144 | Max Color: 164; 0; 0 145 | Min Color: 32; 74; 135 146 | Name: PointCloud2 147 | Position Transformer: XYZ 148 | Queue Size: 10 149 | Selectable: true 150 | Size (Pixels): 3 151 | Size (m): 0.10000000149011612 152 | Style: Flat Squares 153 | Topic: /ndt/predicted/aligned_points 154 | Unreliable: false 155 | Use Fixed Frame: true 156 | Use rainbow: false 157 | Value: false 158 | Enabled: true 159 | Global Options: 160 | Background Color: 48; 48; 48 161 | Default Light: true 162 | Fixed Frame: map 163 | Frame Rate: 30 164 | Name: root 165 | Tools: 166 | - Class: rviz/Interact 167 | Hide Inactive Objects: true 168 | - Class: rviz/MoveCamera 169 | - Class: rviz/Select 170 | - Class: rviz/FocusCamera 171 | - Class: rviz/Measure 172 | - Class: rviz/SetInitialPose 173 | Theta std deviation: 0.2617993950843811 174 | Topic: /initialpose 175 | X std deviation: 0.5 176 | Y std deviation: 0.5 177 | - Class: rviz/SetGoal 178 | Topic: /move_base_simple/goal 179 | - Class: rviz/PublishPoint 180 | Single click: true 181 | Topic: /clicked_point 182 | - Class: rviz_visual_tools/KeyTool 183 | Value: true 184 | Views: 185 | Current: 186 | Class: rviz/XYOrbit 187 | Distance: 10.5123872756958 188 | Enable Stereo Rendering: 189 | Stereo Eye Separation: 0.05999999865889549 190 | Stereo Focal Distance: 1 191 | Swap Stereo Eyes: false 192 | Value: false 193 | Focal Point: 194 | X: 3.8349480628967285 195 | Y: -4.608598709106445 196 | Z: 1.6316440451191738e-5 197 | Focal Shape Fixed Size: false 198 | Focal Shape Size: 0.05000000074505806 199 | Invert Z Axis: false 200 | Name: Current View 201 
| Near Clip Distance: 0.009999999776482582 202 | Pitch: 0.7497968077659607 203 | Target Frame: os_sensor 204 | Value: XYOrbit (rviz) 205 | Yaw: 5.410818099975586 206 | Saved: ~ 207 | Window Geometry: 208 | Displays: 209 | collapsed: false 210 | Height: 976 211 | Hide Left Dock: false 212 | Hide Right Dock: false 213 | QMainWindow State: 000000ff00000000fd0000000400000000000001560000031afc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000031a000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000002290000012e0000000000000000000000010000011a0000031afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000031a000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000078000000056fc0100000002fb0000000800540069006d0065010000000000000780000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005040000031a00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 214 | Selection: 215 | collapsed: false 216 | Time: 217 | collapsed: false 218 | Tool Properties: 219 | collapsed: false 220 | Views: 221 | 
collapsed: false 222 | Width: 1920 223 | X: 398 224 | Y: 137 225 | -------------------------------------------------------------------------------- /config/rviz/hdl_nclt.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 79 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /AlignedFrame1 8 | - /Odometry2/Shape1 9 | Splitter Ratio: 0.5 10 | Tree Height: 703 11 | - Class: rviz/Selection 12 | Name: Selection 13 | - Class: rviz/Tool Properties 14 | Expanded: 15 | - /2D Pose Estimate1 16 | - /2D Nav Goal1 17 | - /Publish Point1 18 | Name: Tool Properties 19 | Splitter Ratio: 0.5886790156364441 20 | - Class: rviz/Views 21 | Expanded: 22 | - /Current View1 23 | Name: Views 24 | Splitter Ratio: 0.5 25 | - Class: rviz/Time 26 | Experimental: false 27 | Name: Time 28 | SyncMode: 0 29 | SyncSource: GlobalMap 30 | Preferences: 31 | PromptSaveOnExit: true 32 | Toolbars: 33 | toolButtonStyle: 2 34 | Visualization Manager: 35 | Class: "" 36 | Displays: 37 | - Alpha: 0.5 38 | Cell Size: 10 39 | Class: rviz/Grid 40 | Color: 160; 160; 164 41 | Enabled: false 42 | Line Style: 43 | Line Width: 0.029999999329447746 44 | Value: Lines 45 | Name: Grid 46 | Normal Cell Count: 0 47 | Offset: 48 | X: 0 49 | Y: 0 50 | Z: 0 51 | Plane: XY 52 | Plane Cell Count: 50 53 | Reference Frame: 54 | Value: false 55 | - Class: rviz/TF 56 | Enabled: false 57 | Filter (blacklist): "" 58 | Filter (whitelist): "" 59 | Frame Timeout: 50 60 | Frames: 61 | All Enabled: true 62 | Marker Scale: 3 63 | Name: TF 64 | Show Arrows: true 65 | Show Axes: true 66 | Show Names: true 67 | Tree: 68 | {} 69 | Update Interval: 0 70 | Value: false 71 | - Alpha: 0.30000001192092896 72 | Autocompute Intensity Bounds: false 73 | Autocompute Value Bounds: 74 | Max Value: 10 75 | Min Value: -10 76 | Value: true 77 | Axis: Z 78 | Channel Name: intensity 79 | Class: rviz/PointCloud2 80 | Color: 0; 0; 0 81 | Color Transformer: 
Intensity 82 | Decay Time: 0 83 | Enabled: true 84 | Invert Rainbow: false 85 | Max Color: 186; 189; 182 86 | Max Intensity: 1 87 | Min Color: 255; 255; 255 88 | Min Intensity: 0 89 | Name: GlobalMap 90 | Position Transformer: XYZ 91 | Queue Size: 10 92 | Selectable: true 93 | Size (Pixels): 2 94 | Size (m): 0.10000000149011612 95 | Style: Boxes 96 | Topic: /ndt/globalmap 97 | Unreliable: false 98 | Use Fixed Frame: true 99 | Use rainbow: false 100 | Value: true 101 | - Alpha: 0.699999988079071 102 | Autocompute Intensity Bounds: true 103 | Autocompute Value Bounds: 104 | Max Value: 7.400000095367432 105 | Min Value: -1.3000000715255737 106 | Value: true 107 | Axis: Z 108 | Channel Name: intensity 109 | Class: rviz/PointCloud2 110 | Color: 237; 212; 0 111 | Color Transformer: FlatColor 112 | Decay Time: 0 113 | Enabled: true 114 | Invert Rainbow: false 115 | Max Color: 52; 101; 164 116 | Min Color: 32; 74; 135 117 | Name: SubMap 118 | Position Transformer: XYZ 119 | Queue Size: 10 120 | Selectable: true 121 | Size (Pixels): 3 122 | Size (m): 0.10000000149011612 123 | Style: Boxes 124 | Topic: /debug/cloud_submap 125 | Unreliable: false 126 | Use Fixed Frame: true 127 | Use rainbow: true 128 | Value: true 129 | - Alpha: 1 130 | Autocompute Intensity Bounds: true 131 | Autocompute Value Bounds: 132 | Max Value: 9.977742195129395 133 | Min Value: -1.7571426630020142 134 | Value: true 135 | Axis: Z 136 | Channel Name: intensity 137 | Class: rviz/PointCloud2 138 | Color: 0; 0; 0 139 | Color Transformer: Intensity 140 | Decay Time: 0 141 | Enabled: true 142 | Invert Rainbow: false 143 | Max Color: 239; 41; 41 144 | Min Color: 46; 52; 54 145 | Name: AlignedFrame 146 | Position Transformer: XYZ 147 | Queue Size: 10 148 | Selectable: true 149 | Size (Pixels): 3 150 | Size (m): 0.14000000059604645 151 | Style: Spheres 152 | Topic: /odometry_node/frame_registered 153 | Unreliable: false 154 | Use Fixed Frame: true 155 | Use rainbow: false 156 | Value: true 157 | - Alpha: 1 
158 | Autocompute Intensity Bounds: true 159 | Autocompute Value Bounds: 160 | Max Value: 9.977742195129395 161 | Min Value: -1.7571426630020142 162 | Value: true 163 | Axis: Z 164 | Channel Name: intensity 165 | Class: rviz/PointCloud2 166 | Color: 0; 0; 0 167 | Color Transformer: Intensity 168 | Decay Time: 0 169 | Enabled: false 170 | Invert Rainbow: false 171 | Max Color: 239; 41; 41 172 | Min Color: 0; 0; 0 173 | Name: EstimatedFrame 174 | Position Transformer: XYZ 175 | Queue Size: 10 176 | Selectable: true 177 | Size (Pixels): 3 178 | Size (m): 0.14000000059604645 179 | Style: Points 180 | Topic: /debug/raw_cloud_tr 181 | Unreliable: false 182 | Use Fixed Frame: true 183 | Use rainbow: false 184 | Value: false 185 | - Angle Tolerance: 0.10000000149011612 186 | Class: rviz/Odometry 187 | Covariance: 188 | Orientation: 189 | Alpha: 0.5 190 | Color: 255; 255; 127 191 | Color Style: Unique 192 | Frame: Local 193 | Offset: 1 194 | Scale: 1 195 | Value: true 196 | Position: 197 | Alpha: 0.30000001192092896 198 | Color: 204; 51; 204 199 | Scale: 1 200 | Value: true 201 | Value: true 202 | Enabled: false 203 | Keep: 20000 204 | Name: Odometry 205 | Position Tolerance: 0.10000000149011612 206 | Shape: 207 | Alpha: 0.6000000238418579 208 | Axes Length: 1 209 | Axes Radius: 0.10000000149011612 210 | Color: 78; 154; 6 211 | Head Length: 0.30000001192092896 212 | Head Radius: 0.20000000298023224 213 | Shaft Length: 0.4000000059604645 214 | Shaft Radius: 0.10000000149011612 215 | Value: Arrow 216 | Topic: /odometry_node/odometry 217 | Unreliable: false 218 | Value: false 219 | - Angle Tolerance: 0.10000000149011612 220 | Class: rviz/Odometry 221 | Covariance: 222 | Orientation: 223 | Alpha: 0.5 224 | Color: 255; 255; 127 225 | Color Style: Unique 226 | Frame: Local 227 | Offset: 1 228 | Scale: 1 229 | Value: true 230 | Position: 231 | Alpha: 0.30000001192092896 232 | Color: 204; 51; 204 233 | Scale: 1 234 | Value: true 235 | Value: true 236 | Enabled: false 237 | Keep: 
20000 238 | Name: Odometry 239 | Position Tolerance: 0.10000000149011612 240 | Shape: 241 | Alpha: 0.699999988079071 242 | Axes Length: 1 243 | Axes Radius: 0.10000000149011612 244 | Color: 255; 25; 0 245 | Head Length: 0.30000001192092896 246 | Head Radius: 0.20000000298023224 247 | Shaft Length: 0.5 248 | Shaft Radius: 0.05000000074505806 249 | Value: Arrow 250 | Topic: /odometry_node/odometry_estimate 251 | Unreliable: false 252 | Value: false 253 | - Alpha: 0.699999988079071 254 | Autocompute Intensity Bounds: true 255 | Autocompute Value Bounds: 256 | Max Value: 10 257 | Min Value: -10 258 | Value: true 259 | Axis: Z 260 | Channel Name: intensity 261 | Class: rviz/PointCloud2 262 | Color: 255; 255; 255 263 | Color Transformer: Intensity 264 | Decay Time: 0 265 | Enabled: true 266 | Invert Rainbow: false 267 | Max Color: 255; 255; 255 268 | Min Color: 0; 0; 0 269 | Name: map_cropped 270 | Position Transformer: XYZ 271 | Queue Size: 10 272 | Selectable: true 273 | Size (Pixels): 3 274 | Size (m): 0.10000000149011612 275 | Style: Spheres 276 | Topic: /debug/mos_map 277 | Unreliable: false 278 | Use Fixed Frame: true 279 | Use rainbow: true 280 | Value: true 281 | Enabled: true 282 | Global Options: 283 | Background Color: 255; 255; 255 284 | Default Light: true 285 | Fixed Frame: map 286 | Frame Rate: 30 287 | Name: root 288 | Tools: 289 | - Class: rviz/Interact 290 | Hide Inactive Objects: true 291 | - Class: rviz/MoveCamera 292 | - Class: rviz/Select 293 | - Class: rviz/FocusCamera 294 | - Class: rviz/Measure 295 | - Class: rviz/SetInitialPose 296 | Theta std deviation: 0.2617993950843811 297 | Topic: /initialpose 298 | X std deviation: 0.5 299 | Y std deviation: 0.5 300 | - Class: rviz/SetGoal 301 | Topic: /move_base_simple/goal 302 | - Class: rviz/PublishPoint 303 | Single click: true 304 | Topic: /clicked_point 305 | - Class: rviz_visual_tools/KeyTool 306 | Value: true 307 | Views: 308 | Current: 309 | Class: rviz/XYOrbit 310 | Distance: 19.07025909423828 
311 | Enable Stereo Rendering: 312 | Stereo Eye Separation: 0.05999999865889549 313 | Stereo Focal Distance: 1 314 | Swap Stereo Eyes: false 315 | Value: false 316 | Focal Point: 317 | X: 4.501746654510498 318 | Y: -8.03341293334961 319 | Z: 1.238536242453847e-5 320 | Focal Shape Fixed Size: false 321 | Focal Shape Size: 0.05000000074505806 322 | Invert Z Axis: false 323 | Name: Current View 324 | Near Clip Distance: 0.009999999776482582 325 | Pitch: 0.4353986382484436 326 | Target Frame: os_sensor 327 | Value: XYOrbit (rviz) 328 | Yaw: 4.388580799102783 329 | Saved: ~ 330 | Window Geometry: 331 | Displays: 332 | collapsed: false 333 | Height: 1025 334 | Hide Left Dock: false 335 | Hide Right Dock: true 336 | QMainWindow State: 000000ff00000000fd0000000400000000000001560000034bfc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000034b000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d006100670065000000025a0000012e0000000000000000000000010000011a0000034bfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000034b000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000074d00000056fc0100000002fb000000
0800540069006d006501000000000000074d000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005f10000034b00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 337 | Selection: 338 | collapsed: false 339 | Time: 340 | collapsed: false 341 | Tool Properties: 342 | collapsed: false 343 | Views: 344 | collapsed: true 345 | Width: 1869 346 | X: 51 347 | Y: 27 348 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | project: 4 | image: sps 5 | build: 6 | context: . 7 | dockerfile: ./Dockerfile 8 | args: 9 | USER_ID: ${USER_ID:-1000} 10 | GROUP_ID: ${GROUP_ID:-1000} 11 | ipc: host 12 | network_mode: host 13 | volumes: 14 | - ${PWD}/:/sps 15 | - ${DATA}:/sps/data 16 | stdin_open: true 17 | tty: true 18 | runtime: nvidia 19 | environment: 20 | - NVIDIA_DRIVER_CAPABILITIES=all 21 | - ROS_MASTER_URI=$ROS_MASTER_URI 22 | working_dir: /sps -------------------------------------------------------------------------------- /exp_pipeline/loc_exp_general.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ######### BASH TERMINAL COLORS ############################################### 4 | # Black 0;30 Dark Gray 1;30 5 | # Red 0;31 Light Red 1;31 6 | # Green 0;32 Light Green 1;32 7 | # Brown/Orange 0;33 Yellow 1;33 8 | # Blue 0;34 Light Blue 1;34 9 | # Purple 0;35 Light Purple 1;35 10 | # Cyan 0;36 Light Cyan 1;36 11 | # Light Gray 0;37 White 1;37 12 | 13 | RED='\033[0;31m' 14 | BLUE='\033[0;34m' 15 | CYAN='\033[0;36m' 16 | GREEN='\033[0;32m' 17 | BROWN='\033[0;33m' 18 | PURPLE='\033[0;35m' 19 | NC='\033[0m' # No Color 20 | 21 | printf "\n${PURPLE}*** Script ID: $(basename "$0") ***${NC}\n\n" 22 | 23 | ######### DIRECTORIES & FILES 
######### LOG DIRECTORY #######################################################
# Create a per-run log directory and export it via the global LOG_DIR,
# which every launcher function below redirects its output into.
#   $1 - experiment directory
#   $2 - experiment number (directory becomes $1/log_$2)
log() { #params are: (1)dir
    LOG_DIR=$1/log_$2
    mkdir -p $LOG_DIR
    printf "log dir: ${BLUE}${LOG_DIR}${NC}\n"
}

######### Clean up code ######################################################
# Kill every running ROS node, then pause briefly so shutdown completes
# before the next experiment starts.
kill_ros_nodes(){
    rosnode kill -a >/dev/null # kill all the nodes
    sleep 2
}
##############################################################################
# Filter launchers. Each function starts one point-cloud filter inside the
# running docker container (DOCKER_IMAGE), in the background, logging to
# LOG_DIR (set by log()). They all consume the raw cloud topic TOPIC_CLOUD.

# SPS filter with the trained model weights; epsilon is the stability
# threshold used to binarize predictions.
sps(){
    docker exec ${DOCKER_IMAGE} \
        /bin/bash -c "source /opt/ros/noetic/setup.bash && \
        source /sps/c_ws/devel/setup.bash && \
        roslaunch sps_filter sps.launch raw_cloud:=$TOPIC_CLOUD odom_frame:=$odom_frame epsilon:=0.84 model_weights_pth:=$model_weights_pth" &> "$LOG_DIR/sps_log.txt" &
}

# 4DMOS moving-object segmentation baseline.
mos4d(){
    docker exec ${DOCKER_IMAGE} \
        /bin/bash -c "source /opt/ros/noetic/setup.bash && \
        source /sps/c_ws/devel/setup.bash && \
        roslaunch mos4d mos4d.launch raw_cloud:=$TOPIC_CLOUD odom_frame:=$odom_frame" &> "$LOG_DIR/mos4d_log.txt" &
}

# MapMOS moving-object segmentation baseline.
mapmos(){
    docker exec ${DOCKER_IMAGE} \
        /bin/bash -c "source /opt/ros/noetic/setup.bash && \
        source /sps/c_ws/devel/setup.bash && \
        roslaunch mapmos mapmos.launch raw_cloud:=$TOPIC_CLOUD odom_frame:=$odom_frame" &> "$LOG_DIR/mapmos_log.txt" &
}

# Long-term stability (LTS) filter baseline.
lts(){
    docker exec ${DOCKER_IMAGE} \
        /bin/bash -c "source /opt/ros/noetic/setup.bash && \
        source /sps/c_ws/devel/setup.bash && \
        roslaunch lts_filter filter.launch raw_cloud:=$TOPIC_CLOUD epsilon_0:=0 epsilon_1:=0.84 lidar:=vlp-16" &> "$LOG_DIR/lts_log.txt" &
}

# Mask-based filter; epsilon:=2 effectively disables thresholding
# (presumably passes everything through -- confirm in mask.launch).
mask(){
    docker exec ${DOCKER_IMAGE} \
        /bin/bash -c "source /opt/ros/noetic/setup.bash && \
        source /sps/c_ws/devel/setup.bash && \
        roslaunch sps_filter mask.launch raw_cloud:=$TOPIC_CLOUD odom_frame:=$odom_frame epsilon:=2" &> "$LOG_DIR/mask_log.txt" &
}
##############################################################################
#NOTE: Please update the following paths based on the correct paths on your machine
# Launch the hdl_localization localizer together with an rviz instance.
# Both run in the background and log under LOG_DIR (set by log()).
# The rviz config path and the catkin workspace path are machine-specific.
hdl(){
    rviz -d /home/ibrahim/SPS/config/rviz/hdl.rviz &>$LOG_DIR/rviz.txt&
    source /home/ibrahim/Neptune/catkin_ws/devel/setup.bash
    roslaunch hdl_localization hdl_localization.launch odom_child_frame_id:=$LIDAR_FRAME points_topic:=$TOPIC_CLOUD downsample_resolution:=0.2 &> "$LOG_DIR/hdl_log.txt" &
}
######### Main program #######################################################
# For every experiment repetition x sequence x filter method:
#   1. create the experiment and log directories,
#   2. run the localizer with the chosen filter while recording odometry,
#   3. (optionally) compute APE metrics against the ground-truth trajectory.
for exp_num in $(seq 0 "$NUM_OF_EXP"); do
    for seq in "${SEQS[@]}"; do
        gt_traj_pth="$DATA/gt/$seq.tum"
        bag_play_pth="$DATA/bags/$seq.bag"

        for filter in "${filter_method[@]}"; do
            # Fix: the script defines EXP_ID, not EXP; the undefined $EXP
            # expanded to nothing and produced paths like "$DATA//hdl/...".
            exp_dir="$DATA/$EXP_ID/$LOCALIZER/$seq/${filter}"
            mkdir -p "$exp_dir"

            printf "Exp dir: ${BLUE}${exp_dir}${NC}, exp num: ${exp_num}\n"
            log "$exp_dir" "$exp_num"

            bag_record_pth="$exp_dir/$exp_num.bag"

            run_exp "$filter" "$bag_play_pth" "$bag_record_pth"
            # calculate_metrics "$gt_traj_pth" "$exp_dir" "$exp_num"

            echo ""
            echo "----------------------------------------"
            echo "----------------------------------------"
            echo ""
        done
    done
done
@click.command()
### Add your options here
@click.option(
    "--weights",
    "-w",
    type=str,
    help="path to checkpoint file (.ckpt) to do inference.",
    default='/sps/best_models/420_601.ckpt',
    # required=True,
)
@click.option(
    "--sequence",
    "-seq",
    type=str,
    help="Run inference on specific sequences. Otherwise, test split from config is used.",
    default=None,
    multiple=False,
)
@click.option(
    "--config",
    "-c",
    type=str,
    help="Path to the config file (.yaml)",
    default=DEFAULT_CONFIG_PATH,
)
def main(weights, sequence, config):
    """Run inference with a trained SPS model and print evaluation metrics.

    Args:
        weights: Path to the model checkpoint (.ckpt).
        sequence: Optional single test sequence overriding the config split.
        config: Path to the YAML configuration file.
    """
    # Close the config file deterministically.
    with open(config) as cfg_file:
        cfg = yaml.safe_load(cfg_file)

    if sequence:
        # Fix: with multiple=False click passes `sequence` as a plain
        # string, so list(sequence) split it into single characters
        # (e.g. '20220629' -> ['2', '0', ...]) and the one-sequence
        # assertion below could never hold. Wrap it in a list instead.
        cfg["DATA"]["SPLIT"]["TEST"] = [sequence]

    print('Test seq: ', cfg["DATA"]["SPLIT"]["TEST"])
    assert len(cfg["DATA"]["SPLIT"]["TEST"]) == 1, "Only one test SEQ is allowed at a time!"

    # Inference processes one scan at a time.
    cfg["TRAIN"]["BATCH_SIZE"] = 1

    # Load data and model
    data = datasets.BacchusModule(cfg, test=True)
    data.setup()

    ckpt = torch.load(weights)
    model = models.SPSNet(cfg, len(data.test_scans))
    model.load_state_dict(ckpt["state_dict"])
    model = model.cuda()
    model.eval()
    model.freeze()

    # Setup trainer
    trainer = Trainer(accelerator="gpu", devices=1, logger=None)

    # Infer!
    trainer.predict(model, data.test_dataloader())

    # Metrics are accumulated on the model during predict; average them.
    metrics = {
        "Loss": model.predict_loss,
        "R2": model.predict_r2,
        "dIoU": model.dIoU,
        "Precision": model.precision,
        "Recall": model.recall,
        "F1": model.F1
    }

    print('\n########## Inference Metrics ##########')
    for metric_name, metric_values in metrics.items():
        mean_value = sum(metric_values) / len(metric_values)
        space_fill = '.' * (12 - len(metric_name))  # Calculate the number of dots needed for filling
        print(f'{metric_name} {space_fill} {mean_value:.3f}')  # Fixed width for metric names

if __name__ == "__main__":
    main()
@click.command()
@click.option(
    "--config",
    "-c",
    type=str,
    help="Path to the config file (.yaml)",
    default=DEFAULT_CONFIG_PATH,
)
def main(config):
    """Train the SPS model with the settings from the given config file.

    Args:
        config: Path to the YAML configuration file.
    """
    # Fix: close the config file deterministically instead of leaking the
    # open() handle until garbage collection.
    with open(config) as cfg_file:
        cfg = yaml.safe_load(cfg_file)

    # Load data and model
    data = datasets.BacchusModule(cfg)
    model = models.SPSNet(cfg)

    # Callbacks: track the learning-rate schedule and checkpoint on the
    # lowest validation loss (the last epoch is always kept too).
    lr_monitor = LearningRateMonitor(logging_interval="step")
    checkpoint_saver_loss = ModelCheckpoint(
        monitor="val_loss",
        # NOTE(review): the filename template interpolates
        # {val_moving_iou_step0}, a metric this script never logs --
        # confirm the metric name against models.SPSNet.
        filename=cfg["EXPERIMENT"]["ID"] + "_{epoch:03d}_{val_moving_iou_step0:.3f}",
        mode="min",
        save_last=True,
    )

    # TensorBoard logger writes under LOG_DIR/<experiment id>.
    os.makedirs(LOG_DIR, exist_ok=True)
    tb_logger = pl_loggers.TensorBoardLogger(
        LOG_DIR, name=cfg["EXPERIMENT"]["ID"], default_hp_metric=False
    )

    # Setup trainer
    trainer = Trainer(
        accelerator="gpu",
        devices=1,
        logger=[tb_logger],
        max_epochs=cfg["TRAIN"]["MAX_EPOCH"],
        callbacks=[
            lr_monitor,
            checkpoint_saver_loss,
        ],
    )

    # Train!
    trainer.fit(model, data)
66 | trainer.fit(model, data) 67 | 68 | if __name__ == "__main__": 69 | main() 70 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name="sps", 5 | version="0.1", 6 | authors="Ibrahim Hroob, Benedikt Mersch", 7 | package_dir={"": "src"}, 8 | description="Stable Points Segmentation", 9 | packages=find_packages(where="src"), 10 | install_requires=[ 11 | "Click==7.0", 12 | "numpy==1.20.1", 13 | "pytorch_lightning==1.6.4", 14 | "PyYAML==6.0", 15 | "tqdm==4.62.3", 16 | "torch", 17 | "ninja", 18 | ], 19 | ) 20 | -------------------------------------------------------------------------------- /src/sps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibrahimhroob/SPS/49b8204bb0a1c643aea6f134b4c8d96644d85ed0/src/sps/__init__.py -------------------------------------------------------------------------------- /src/sps/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ibrahimhroob/SPS/49b8204bb0a1c643aea6f134b4c8d96644d85ed0/src/sps/datasets/__init__.py -------------------------------------------------------------------------------- /src/sps/datasets/augmentation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import torch 4 | 5 | def rotate_point_cloud(points): 6 | """Randomly rotate the point clouds to augment the dataset""" 7 | rotation_angle = torch.rand(1) * 2 * torch.pi 8 | cosval = torch.cos(rotation_angle) 9 | sinval = torch.sin(rotation_angle) 10 | rotation_matrix = torch.Tensor([[cosval, -sinval, 0], [sinval, cosval, 0], [0, 0, 1]]) 11 | points = points @ rotation_matrix.type_as(points) 12 | return points 13 | 14 | 15 | def 
def rotate_perturbation_point_cloud(points, angle_sigma=0.2, angle_clip=0.5):
    """Jitter the cloud with a small random rotation about every axis.

    Per-axis angles are drawn from N(0, angle_sigma) and clipped to
    [-angle_clip, angle_clip]; the rotations are composed as Rz @ Ry @ Rx.
    """
    angles = torch.clip(angle_sigma * torch.randn(3), -angle_clip, angle_clip)
    ax, ay, az = angles[0], angles[1], angles[2]
    rot_x = torch.Tensor(
        [
            [1, 0, 0],
            [0, torch.cos(ax), -torch.sin(ax)],
            [0, torch.sin(ax), torch.cos(ax)],
        ]
    )
    rot_y = torch.Tensor(
        [
            [torch.cos(ay), 0, torch.sin(ay)],
            [0, 1, 0],
            [-torch.sin(ay), 0, torch.cos(ay)],
        ]
    )
    rot_z = torch.Tensor(
        [
            [torch.cos(az), -torch.sin(az), 0],
            [torch.sin(az), torch.cos(az), 0],
            [0, 0, 1],
        ]
    )
    full_rotation = rot_z @ rot_y @ rot_x
    return points @ full_rotation.type_as(points)


def random_flip_point_cloud(points):
    """Randomly mirror the cloud in x and/or y (independent 50% chance each)."""
    for axis in (0, 1):
        if torch.rand(1).item() > 0.5:
            sign = [1, 1, 1]
            sign[axis] = -1
            points = torch.multiply(points, torch.tensor(sign).type_as(points))
    return points


def random_scale_point_cloud(points, scale_low=0.8, scale_high=1.2):
    """Scale each coordinate axis by an independent factor in [scale_low, scale_high]."""
    per_axis_scale = (scale_low - scale_high) * torch.rand(1, 3) + scale_high
    return torch.multiply(points, per_axis_scale.type_as(points))
#!/usr/bin/env python3
"""Lightning data module for the Bacchus stable-points dataset.

Reads scans/poses from the directory named by the DATA environment variable,
transforms every scan into global map coordinates, and serves fused
(scan + pruned submap) point tensors through PyTorch Lightning data loaders.
"""

import os
import yaml
import time
import torch
import random
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from pytorch_lightning import LightningDataModule

from scipy.spatial import cKDTree

from sps.datasets.augmentation import (
    rotate_point_cloud,
    random_flip_point_cloud,
    random_scale_point_cloud,
    rotate_perturbation_point_cloud,
)

from sps.datasets import util

#####################################################################
class BacchusModule(LightningDataModule):
    """Loads, aligns and caches the train/val or test split of the dataset."""

    def __init__(self, cfg, test=False):
        super(BacchusModule, self).__init__()
        self.cfg = cfg
        self.test = test
        # Dataset root comes from the DATA environment variable.
        # NOTE(review): if DATA is unset this becomes the literal string "None".
        self.root_dir = str(os.environ.get("DATA"))


        if self.test:
            print('Loading testing data ...')
            test_seqs = self.cfg['DATA']['SPLIT']['TEST']
            test_scans, test_poses, test_map_tr = self.get_scans_poses(test_seqs)
            # NOTE(review): "cash_scans" is presumably a typo for "cache_scans".
            self.test_scans = self.cash_scans(test_scans, test_poses, test_map_tr)
        else:
            print('Loading training data ...')
            train_seqs = self.cfg['DATA']['SPLIT']['TRAIN']
            train_scans, train_poses, train_map_tr = self.get_scans_poses(train_seqs)
            self.train_scans = self.cash_scans(train_scans, train_poses, train_map_tr)

            print('Loading validating data ...')
            val_seqs = self.cfg['DATA']['SPLIT']['VAL']
            val_scans, val_poses, val_map_tr = self.get_scans_poses(val_seqs)
            self.val_scans = self.cash_scans(val_scans, val_poses, val_map_tr)

        map_str = self.cfg["TRAIN"]["MAP"]

        # Load map data points, data structure: [x,y,z,label]
        # .npy maps are loaded directly, any other extension via loadtxt.
        map_pth = os.path.join(self.root_dir, "maps", map_str)
        filename, file_extension = os.path.splitext(map_pth)
        self.map = np.load(map_pth) if file_extension == '.npy' else np.loadtxt(map_pth)
        self.map = self.map[:,:4]

    def cash_scans(self, scans_pth, poses_pth, map_tr_pths):
        """Load every scan, transform it into map coordinates and keep it in RAM.

        Args:
            scans_pth: list of scan .npy paths.
            poses_pth: list of per-scan pose files (comma-separated matrices).
            map_tr_pths: list of map-alignment transform files (one per scan).

        Returns:
            List of numpy arrays, one aligned scan each.
        """
        scans_data = []
        # Zip the two lists together and iterate with tqdm
        for scan_pth, pose_pth, map_tr_pth in tqdm(zip(scans_pth, poses_pth, map_tr_pths), total=len(scans_pth)):
            # Load scan and poses:
            scan_data = np.load(scan_pth)
            pose_data = np.loadtxt(pose_pth, delimiter=',')

            # Load map transformation
            map_transform = np.loadtxt(map_tr_pth, delimiter=',')

            # Transform the scan to the map coordinates
            # (1) First we transform the scan using the pose from SLAM system
            # (2) Second we align the transformed scan to the map using map_transform matrix
            scan_data[:,:3] = util.transform_point_cloud(scan_data[:,:3], pose_data)
            scan_data[:,:3] = util.transform_point_cloud(scan_data[:,:3], map_transform)

            scans_data.append(scan_data)

        return scans_data


    def get_scans_poses(self, seqs):
        """Collect scan paths, pose paths and the per-sequence map transform.

        Args:
            seqs: iterable of sequence names under <root>/sequence/.

        Returns:
            (scan_paths, pose_paths, map_transform_paths) -- equal-length lists.
        """
        seq_scans = []
        seq_poses = []
        map_transform = []  #path to the transformation matrix that is used to align
                            #the transformed scans (using their poses) to the base map

        for sequence in seqs:
            scans_dir = os.path.join(self.root_dir, "sequence", sequence, "scans")
            poses_dir = os.path.join(self.root_dir, "sequence", sequence, "poses")

            scans = sorted([os.path.join(scans_dir, file) for file in os.listdir(scans_dir)])
            poses = sorted([os.path.join(poses_dir, file) for file in os.listdir(poses_dir)])

            # One shared map_transform file per sequence, replicated per scan.
            map_transform_pth = os.path.join(self.root_dir, "sequence", sequence, "map_transform")
            transform_paths = [map_transform_pth] * len(scans)

            seq_scans.extend(scans)
            seq_poses.extend(poses)
            map_transform.extend(transform_paths)

        assert len(seq_scans) == len(seq_poses) == len(map_transform), 'The length of those arrays should be the same!'

        return seq_scans, seq_poses, map_transform


    def setup(self, stage=None):
        """Build datasets and dataloaders for the chosen mode (test vs train/val)."""
        ########## Point dataset splits
        if self.test:
            test_set = BacchusDataset(
                self.cfg,
                self.test_scans,
                self.map,
            )
        else:
            train_set = BacchusDataset(
                self.cfg,
                self.train_scans,
                self.map,
                split='train'
            )

            val_set = BacchusDataset(
                self.cfg,
                self.val_scans,
                self.map,
            )

        ########## Generate dataloaders and iterables
        if self.test:
            self.test_loader = DataLoader(
                dataset=test_set,
                batch_size=self.cfg["TRAIN"]["BATCH_SIZE"],
                collate_fn=self.collate_fn,
                shuffle=False,
                num_workers=self.cfg["DATA"]["NUM_WORKER"],
                pin_memory=True,
                drop_last=False,
                timeout=0,
            )
            self.test_iter = iter(self.test_loader)
        else:
            self.train_loader = DataLoader(
                dataset=train_set,
                batch_size=self.cfg["TRAIN"]["BATCH_SIZE"],
                collate_fn=self.collate_fn,
                shuffle=self.cfg["DATA"]["SHUFFLE"],
                num_workers=self.cfg["DATA"]["NUM_WORKER"],
                pin_memory=True,
                drop_last=False,
                timeout=0,
            )
            self.train_iter = iter(self.train_loader)

            self.valid_loader = DataLoader(
                dataset=val_set,
                batch_size=self.cfg["TRAIN"]["BATCH_SIZE"],
                collate_fn=self.collate_fn,
                shuffle=False,
                num_workers=self.cfg["DATA"]["NUM_WORKER"],
                pin_memory=True,
                drop_last=False,
                timeout=0,
            )
            self.valid_iter = iter(self.valid_loader)


    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.valid_loader

    def test_dataloader(self):
        return self.test_loader

    @staticmethod
    def collate_fn(batch):
        """Stack samples into one tensor, prepending the batch index as column 0."""
        tensor_batch = None

        for i, (scan_submap_data) in enumerate(batch):
            ones = torch.ones(len(scan_submap_data), 1, dtype=scan_submap_data.dtype)
            tensor = torch.hstack([i * ones, scan_submap_data])
            tensor_batch = tensor if tensor_batch is None else torch.vstack([tensor_batch, tensor])

        return tensor_batch

#####################################################################
class BacchusDataset(Dataset):
    """Dataset class for point cloud prediction"""

    def __init__(self, cfg, scans, pc_map, split = None):
        self.cfg = cfg
        self.scans = scans

        self.dataset_size = len(scans)

        self.map = pc_map

        # This is to find the closest map points to the scan points, this is mainly to prune the map points
        # Note: If the batch size exceeds 1, using the ME pruning function will result in an error; hence, kd_tree was employed instead.
        self.kd_tree_target = cKDTree(self.map[:,:3])

        # Augmentation is applied only for the training split and only when enabled.
        self.augment = self.cfg["TRAIN"]["AUGMENTATION"] and split == "train"

        # NOTE(review): `self.device` is set but not used anywhere in this class.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


    def __len__(self):
        return self.dataset_size


    def __getitem__(self, idx):
        """Return one fused (scan + submap) tensor: columns [x, y, z, t, label]."""
        # Load scan and poses:
        scan_data = self.scans[idx]

        scan_points = torch.tensor(scan_data[:, :3]).to(torch.float32).reshape(-1, 3)
        scan_labels = torch.tensor(scan_data[:, 3]).to(torch.float32).reshape(-1, 1)

        # Bind time stamp to scan points
        scan_points = self.add_timestamp(scan_points, util.SCAN_TIMESTAMP)

        # Bind points label in the same tensor
        scan_points = torch.hstack([scan_points, scan_labels])

        # Find the closest map to scan points (submap points), i.e. prune map points based on scan points
        # Note: this will find the closest points that are within the voxel size.
        kd_tree_scan = cKDTree(scan_data[:,:3])
        submap_idx = self.select_closest_points(kd_tree_scan, self.kd_tree_target)
        submap_points = torch.tensor(self.map[submap_idx, :3]).to(torch.float32).reshape(-1, 3)

        # Submap points labels are not important, so we just create a tensor of ones
        submap_labels = torch.ones(submap_points.shape[0], 1)

        # Bind time stamp to submap points
        submap_points = self.add_timestamp(submap_points, util.MAP_TIMESTAMP)

        # Bind points label in the same tensor
        submap_points = torch.hstack([submap_points, submap_labels])

        # Bind scans and map in the same tensor (scan rows first, then submap rows).
        scan_submap_data = torch.vstack([scan_points, submap_points])

        # Augment the points
        if self.augment:
            scan_submap_data[:,:3] = self.augment_data(scan_submap_data[:,:3])

        return scan_submap_data

    def add_timestamp(self, data, stamp):
        """Append a constant timestamp column to the point tensor."""
        ones = torch.ones(len(data), 1, dtype=data.dtype)
        data = torch.hstack([data, ones * stamp])
        return data

    def select_points_within_radius(self, coordinates, center, radius):
        """Return indexes of points within `radius` of `center` (Euclidean).

        NOTE(review): appears unused in this file.
        """
        # Calculate the Euclidean distance from each point to the center
        distances = np.sqrt(np.sum((coordinates - center) ** 2, axis=1))
        # Select the indexes of points within the radius
        indexes = np.where(distances <= radius)[0]
        return indexes

    def select_closest_points(self, kd_tree_ref, kd_tree_target):
        """Return target-tree point indexes that lie within one voxel of any ref point."""
        start_time = time.time()

        indexes = kd_tree_ref.query_ball_tree(kd_tree_target, self.cfg["MODEL"]["VOXEL_SIZE"])

        # Merge the indexes into one array using numpy.concatenate and list comprehension
        merged_indexes = np.concatenate([idx_list for idx_list in indexes])

        # Convert the merged_indexes to int data type
        merged_indexes = merged_indexes.astype(int)

        # NOTE(review): elapsed_time is computed but never used or logged.
        elapsed_time = time.time() - start_time

        return merged_indexes
    def augment_data(self, scan_map_batch):
        """Apply the random augmentation chain (yaw rotate, perturb, flip, scale) to xyz."""
        scan_map_batch = rotate_point_cloud(scan_map_batch)
        scan_map_batch = rotate_perturbation_point_cloud(scan_map_batch)
        scan_map_batch = random_flip_point_cloud(scan_map_batch)
        scan_map_batch = random_scale_point_cloud(scan_map_batch)
        return scan_map_batch

if __name__ == "__main__":
    pass

# --------------------------------------------------------------------------
# src/sps/models/MinkowskiEngine/__init__.py: empty package marker.
# --------------------------------------------------------------------------

# --------------------------------------------------------------------------
# src/sps/models/MinkowskiEngine/customminkunet.py
# --------------------------------------------------------------------------
#!/usr/bin/env python3
# @file customminkunet.py
# @author Benedikt Mersch [mersch@igg.uni-bonn.de]
# Copyright (c) 2022 Benedikt Mersch, all rights reserved


from .minkunet import MinkUNet14


class CustomMinkUNet(MinkUNet14):
    """MinkUNet14 slimmed down for SPS: 8-channel stem and narrower plane widths."""
    PLANES = (8, 16, 32, 64, 64, 32, 16, 8)
    INIT_DIM = 8

# --------------------------------------------------------------------------
# src/sps/models/MinkowskiEngine/minkunet.py
# (vendored from the MinkowskiEngine example code -- keep in upstream style)
# --------------------------------------------------------------------------
# Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD

import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck

from sps.models.MinkowskiEngine.resnet import ResNetBase


class MinkUNetBase(ResNetBase):
    """4D (space + time) MinkUNet encoder-decoder with skip connections."""
    BLOCK = None
    PLANES = None
    DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
    # NOTE(review): PLANES is assigned twice above/below; this second
    # assignment is the effective default.
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
    INIT_DIM = 32
    OUT_TENSOR_STRIDE = 1

    def m_space_n_time(self, m, n):
        # Kernel/stride helper: m in the 3 spatial dims, n in the time dim.
        return [m, m, m, n]

    # To use the model, must call initialize_coords before forward pass.
    # Once data is processed, call clear to reset the model before calling
    # initialize_coords
    def __init__(self, in_channels, out_channels, D=3):
        ResNetBase.__init__(self, in_channels, out_channels, D)

    def network_initialization(self, in_channels, out_channels, D):
        # Output of the first conv concated to conv6
        self.inplanes = self.INIT_DIM
        self.conv0p1s1 = ME.MinkowskiConvolution(
            in_channels,
            self.inplanes,
            kernel_size=self.m_space_n_time(5, 1),
            dimension=D,
        )

        self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)

        self.conv1p1s2 = ME.MinkowskiConvolution(
            self.inplanes,
            self.inplanes,
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)

        self.block1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0])

        self.conv2p2s2 = ME.MinkowskiConvolution(
            self.inplanes,
            self.inplanes,
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)

        self.block2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1])

        self.conv3p4s2 = ME.MinkowskiConvolution(
            self.inplanes,
            self.inplanes,
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )

        self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2])

        self.conv4p8s2 = ME.MinkowskiConvolution(
            self.inplanes,
            self.inplanes,
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
        self.block4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3])

        self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes,
            self.PLANES[4],
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])

        self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
        self.block5 = self._make_layer(self.BLOCK, self.PLANES[4], self.LAYERS[4])
        self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes,
            self.PLANES[5],
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])

        self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
        self.block6 = self._make_layer(self.BLOCK, self.PLANES[5], self.LAYERS[5])
        self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes,
            self.PLANES[6],
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])

        self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
        self.block7 = self._make_layer(self.BLOCK, self.PLANES[6], self.LAYERS[6])
        self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes,
            self.PLANES[7],
            kernel_size=self.m_space_n_time(2, 1),
            stride=self.m_space_n_time(2, 1),
            dimension=D,
        )
        self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])

        self.inplanes = self.PLANES[7] + self.INIT_DIM
        self.block8 = self._make_layer(self.BLOCK, self.PLANES[7], self.LAYERS[7])

        self.final = ME.MinkowskiConvolution(
            self.PLANES[7] * self.BLOCK.expansion,
            out_channels,
            kernel_size=1,
            bias=True,
            dimension=D,
        )
        self.relu = ME.MinkowskiReLU(inplace=True)

    def forward(self, x):
        out = self.conv0p1s1(x)
        out = self.bn0(out)
        out_p1 = self.relu(out)

        out = self.conv1p1s2(out_p1)
        out = self.bn1(out)
        out = self.relu(out)
        out_b1p2 = self.block1(out)

        out = self.conv2p2s2(out_b1p2)
        out = self.bn2(out)
        out = self.relu(out)
        out_b2p4 = self.block2(out)

        out = self.conv3p4s2(out_b2p4)
        out = self.bn3(out)
        out = self.relu(out)
        out_b3p8 = self.block3(out)

        # tensor_stride=16
        out = self.conv4p8s2(out_b3p8)
        out = self.bn4(out)
        out = self.relu(out)
        out = self.block4(out)

        # tensor_stride=8
        out = self.convtr4p16s2(out)
        out = self.bntr4(out)
        out = self.relu(out)

        out = ME.cat(out, out_b3p8)
        out = self.block5(out)

        # tensor_stride=4
        out = self.convtr5p8s2(out)
        out = self.bntr5(out)
        out = self.relu(out)

        out = ME.cat(out, out_b2p4)
        out = self.block6(out)

        # tensor_stride=2
        out = self.convtr6p4s2(out)
        out = self.bntr6(out)
        out = self.relu(out)

        out = ME.cat(out, out_b1p2)
        out = self.block7(out)

        # tensor_stride=1
        out = self.convtr7p2s2(out)
        out = self.bntr7(out)
        out = self.relu(out)

        out = ME.cat(out, out_p1)
        out = self.block8(out)

        return self.final(out)


class MinkUNet14(MinkUNetBase):
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)


class MinkUNet18(MinkUNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)


class MinkUNet34(MinkUNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet50(MinkUNetBase):
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet101(MinkUNetBase):
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)


class MinkUNet14A(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet14B(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet14C(MinkUNet14):
    PLANES = (32, 64, 128, 256, 192, 192, 128, 128)


class MinkUNet14D(MinkUNet14):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet18A(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet18B(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet18D(MinkUNet18):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet34A(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 64)


class MinkUNet34B(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 32)


class MinkUNet34C(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)


if __name__ == "__main__":
    # NOTE(review): this demo imports from the upstream MinkowskiEngine
    # repository layout and only runs inside that repo.
    from tests.python.common import data_loader

    # loss and network
    criterion = nn.CrossEntropyLoss()
    net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
    print(net)

    # a data loader must return a tuple of coords, features, and labels.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    net = net.to(device)
    optimizer = SGD(net.parameters(), lr=1e-2)

    for i in range(10):
        optimizer.zero_grad()

        # Get new data
        coords, feat, label = data_loader(is_classification=False)
        input = ME.SparseTensor(feat, coordinates=coords, device=device)
        label = label.to(device)

        # Forward
        output = net(input)

        # Loss
        loss = criterion(output.F, label)
        print("Iteration: ", i, ", Loss: ", loss.item())

        # Gradient
        loss.backward()
        optimizer.step()

    # Saving and loading a network
    torch.save(net.state_dict(), "test.pth")
    net.load_state_dict(torch.load("test.pth"))

# --------------------------------------------------------------------------
# src/sps/models/MinkowskiEngine/resnet.py
# (vendored from the MinkowskiEngine example code -- keep in upstream style)
# --------------------------------------------------------------------------
# Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
from urllib.request import urlretrieve
import numpy as np

import torch
import torch.nn as nn
from torch.optim import SGD

import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck


def load_file(file_name):
    # NOTE(review): `o3d` (open3d) is never imported in this file -- calling
    # load_file raises NameError; needs `import open3d as o3d`.
    pcd = o3d.io.read_point_cloud(file_name)
    coords = np.array(pcd.points)
    colors = np.array(pcd.colors)
    return coords, colors, pcd


class ResNetBase(nn.Module):
    """Sparse (Minkowski) ResNet backbone; subclasses set BLOCK/LAYERS/PLANES."""
    BLOCK = None
    LAYERS = ()
    INIT_DIM = 64
    PLANES = (64, 128, 256, 512)

    def __init__(self, in_channels, out_channels, D=3):
        nn.Module.__init__(self)
        self.D = D
        assert self.BLOCK is not None

        self.network_initialization(in_channels, out_channels, D)
        self.weight_initialization()

    def network_initialization(self, in_channels, out_channels, D):

        self.inplanes = self.INIT_DIM
        self.conv1 = nn.Sequential(
            ME.MinkowskiConvolution(
                in_channels, self.inplanes, kernel_size=3, stride=2, dimension=D
            ),
            ME.MinkowskiInstanceNorm(self.inplanes),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D),
        )

        self.layer1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=2)
        self.layer2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=2)
        self.layer3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=2)
        self.layer4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=2)

        self.conv5 = nn.Sequential(
            ME.MinkowskiDropout(),
            ME.MinkowskiConvolution(
                self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D
            ),
            ME.MinkowskiInstanceNorm(self.inplanes),
            ME.MinkowskiGELU(),
        )

        self.glob_pool = ME.MinkowskiGlobalMaxPooling()

        self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)

    def weight_initialization(self):
        # Kaiming init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, ME.MinkowskiConvolution):
                ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")

            if isinstance(m, ME.MinkowskiBatchNorm):
                nn.init.constant_(m.bn.weight, 1)
                nn.init.constant_(m.bn.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, bn_momentum=0.1):
        # Standard ResNet layer builder; adds a 1x1 downsample branch when the
        # stride or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                ME.MinkowskiConvolution(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    dimension=self.D,
                ),
                ME.MinkowskiBatchNorm(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride=stride,
                dilation=dilation,
                downsample=downsample,
                dimension=self.D,
            )
        )
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, stride=1, dilation=dilation, dimension=self.D)
            )

        return nn.Sequential(*layers)

    def forward(self, x: ME.SparseTensor):
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.conv5(x)
        x = self.glob_pool(x)
        return self.final(x)


class ResNet14(ResNetBase):
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1)


class ResNet18(ResNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2)


class ResNet34(ResNetBase):
    BLOCK = BasicBlock
    LAYERS = (3, 4, 6, 3)


class ResNet50(ResNetBase):
    BLOCK = Bottleneck
    LAYERS = (3, 4, 6, 3)


class ResNet101(ResNetBase):
    BLOCK = Bottleneck
    LAYERS = (3, 4, 23, 3)


class ResFieldNetBase(ResNetBase):
    """ResNet preceded by a sinusoidal field network operating on TensorFields."""
    def network_initialization(self, in_channels, out_channels, D):
        field_ch = 32
        field_ch2 = 64
        self.field_network = nn.Sequential(
            ME.MinkowskiSinusoidal(in_channels, field_ch),
            ME.MinkowskiBatchNorm(field_ch),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiLinear(field_ch, field_ch),
            ME.MinkowskiBatchNorm(field_ch),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiToSparseTensor(),
        )
        self.field_network2 = nn.Sequential(
            ME.MinkowskiSinusoidal(field_ch + in_channels, field_ch2),
            ME.MinkowskiBatchNorm(field_ch2),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiLinear(field_ch2, field_ch2),
            ME.MinkowskiBatchNorm(field_ch2),
            ME.MinkowskiReLU(inplace=True),
            ME.MinkowskiToSparseTensor(),
        )

        ResNetBase.network_initialization(self, field_ch2, out_channels, D)

    def forward(self, x: ME.TensorField):
        otensor = self.field_network(x)
        otensor2 = self.field_network2(otensor.cat_slice(x))
        return ResNetBase.forward(self, otensor2)


class ResFieldNet14(ResFieldNetBase):
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1)


class ResFieldNet18(ResFieldNetBase):
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2)


class ResFieldNet34(ResFieldNetBase):
    BLOCK = BasicBlock
    LAYERS = (3, 4, 6, 3)


class ResFieldNet50(ResFieldNetBase):
    BLOCK = Bottleneck
    LAYERS = (3, 4, 6, 3)


class ResFieldNet101(ResFieldNetBase):
    BLOCK = Bottleneck
    LAYERS = (3, 4, 23, 3)


if __name__ == "__main__":
    # loss and network
    voxel_size = 0.02
    N_labels = 10

    criterion = nn.CrossEntropyLoss()
    net = ResNet14(in_channels=3, out_channels=N_labels, D=3)
    print(net)

    # a data loader must return a tuple of coords, features, and labels.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    net = net.to(device)
    optimizer = SGD(net.parameters(), lr=1e-2)

    # NOTE(review): requires a local "1.ply" file and the missing open3d import.
    coords, colors, pcd = load_file("1.ply")
    coords = torch.from_numpy(coords)
    # Get new data
    coordinates = ME.utils.batched_coordinates(
        [coords / voxel_size, coords / 2 / voxel_size, coords / 4 / voxel_size],
        dtype=torch.float32,
    )
    features = torch.rand((len(coordinates), 3), device=device)
    for i in range(10):
        optimizer.zero_grad()

        input = ME.SparseTensor(features, coordinates, device=device)
        dummy_label = torch.randint(0, N_labels, (3,), device=device)

        # Forward
        output = net(input)

        # Loss
        loss = criterion(output.F, dummy_label)
        print("Iteration: ", i, ", Loss: ", loss.item())

        # Gradient
        loss.backward()
        optimizer.step()

    # Saving and loading a network
    torch.save(net.state_dict(), "test.pth")
    net.load_state_dict(torch.load("test.pth"))
#!/usr/bin/env python3
"""SPS models: a sparse 4D U-Net scoring point stability, plus its Lightning wrapper."""

import os
import torch
import numpy as np
import torch.nn as nn
import pytorch_lightning as pl
import MinkowskiEngine as ME
from sps.datasets import util
from sps.models.MinkowskiEngine.customminkunet import CustomMinkUNet
from torchmetrics import R2Score

class SPSModel(nn.Module):
    """Sparse 4D MinkUNet that predicts one sigmoid stability score per point.

    Input rows are 5-column coordinates; judging by the quantization vector,
    the layout is (batch_idx, x, y, z, flag/time): the first and last columns
    are divided by 1.0 (untouched) while x, y, z are scaled into voxel units.
    """

    def __init__(self, voxel_size: float):
        super().__init__()
        # Per-column divisors applied before voxelization (see class docstring).
        self.quantization = torch.Tensor([1.0, voxel_size, voxel_size, voxel_size, 1.0])
        self.MinkUNet = CustomMinkUNet(in_channels=1, out_channels=1, D=4)
        self.sigmoid = nn.Sigmoid()

    def forward(self, coordinates: torch.Tensor) -> torch.Tensor:
        """Return a 1-D tensor of scores in [0, 1], one per row of `coordinates` (N, 5)."""
        coordinates = torch.div(coordinates, self.quantization.type_as(coordinates))
        # Constant 0.5 feature per point: the network learns from geometry alone.
        features = 0.5 * torch.ones(len(coordinates), 1).type_as(coordinates)

        tensor_field = ME.TensorField(features=features, coordinates=coordinates.type_as(features))
        sparse_tensor = tensor_field.sparse()

        predicted_sparse_tensor = self.MinkUNet(sparse_tensor)
        # Slice voxel predictions back onto the original points so the output
        # stays aligned 1:1 with the input rows.
        out = predicted_sparse_tensor.slice(tensor_field)
        scores = self.sigmoid(out.features.reshape(-1))
        return scores


class SPSNet(pl.LightningModule):
    """LightningModule wrapping SPSModel.

    Batches are (N, 6) tensors: columns 0-4 are the SPSModel coordinates
    (batch_idx, x, y, z, scan/map flag) and column 5 is the ground-truth
    stability label. Column 4 == 1 marks scan points (supervised); == 0 marks
    map points. Loss/metrics are computed on scan points only.
    """

    def __init__(self, hparams: dict, data_size = 0, save_vis = False):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.model = SPSModel(hparams['MODEL']['VOXEL_SIZE'])
        self.save_vis = save_vis

        # NOTE(review): if $DATA is unset this becomes the literal string
        # "None"; predictions would then be written under ./None/ — confirm
        # the variable is always exported before running.
        self.data_dir = str(os.environ.get("DATA"))
        self.test_seq = hparams["DATA"]["SPLIT"]["TEST"]
        self.epsilon = hparams['FILTER']['THRESHOLD']

        self.loss = nn.MSELoss()
        self.r2score = R2Score()

        # Per-batch metric accumulators filled by predict_step.
        self.predict_loss = []
        self.predict_r2 = []
        self.dIoU = []
        self.precision = []
        self.recall = []
        self.F1 = []

        self.data_size = data_size

    def forward(self, batch):
        """Score every point in `batch`; only the first 5 columns are used."""
        coordinates = batch[:, :5].reshape(-1, 5)
        scores = self.model(coordinates)
        # torch.cuda.empty_cache()
        return scores

    def common_step(self, batch):
        """Shared train/val logic: MSE loss and R2 over scan points only."""
        coordinates = batch[:, :5].reshape(-1, 5)
        gt_labels = batch[:, 5].reshape(-1)
        # Positions of scan points (flag column == 1); computed on CPU/numpy.
        scan_indices = np.where(coordinates[:, 4].cpu().data.numpy() == 1)[0]
        scores = self.model(coordinates)
        loss = self.loss(scores[scan_indices], gt_labels[scan_indices])
        r2 = self.r2score(scores[scan_indices], gt_labels[scan_indices])
        torch.cuda.empty_cache()
        return loss, r2

    def training_step(self, batch, batch_idx, dataloader_idx=0):
        loss, r2 = self.common_step(batch)
        self.log("train_loss", loss.item(), on_step=True, prog_bar=True)
        self.log("train_r2", r2.item(), on_step=True, prog_bar=False)
        # NOTE(review): the returned dict key is "val_r2" even in training;
        # kept as-is in case a callback depends on it — looks like a typo.
        return {"loss": loss, "val_r2": r2}

    def validation_step(self, batch, batch_idx):
        loss, r2 = self.common_step(batch)
        self.log("val_loss", loss.item(), on_step=True, prog_bar=True)
        self.log("val_r2", r2.item(), on_step=True, prog_bar=False)
        return {"val_loss": loss, "val_r2": r2}

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """Accumulate loss/R2 plus thresholded classification metrics; optionally dump visuals."""
        coordinates = batch[:, :5].reshape(-1, 5)
        gt_labels = batch[:, 5].reshape(-1)
        scan_indices = np.where(coordinates[:, 4].cpu().data.numpy() == 1)[0]
        scores = self.model(coordinates)
        scan_gt_labels = gt_labels[scan_indices]
        loss = self.loss(scores[scan_indices], scan_gt_labels)
        r2 = self.r2score(scores[scan_indices], scan_gt_labels)

        self.predict_loss.append(loss)
        self.predict_r2.append(r2)

        ### -> mIoU start
        # Binarize predictions and labels at the configured threshold.
        pred = np.where(scores[scan_indices].cpu().view(-1) < self.epsilon, 0, 1)
        gt = np.where( scan_gt_labels.cpu().view(-1) < self.epsilon, 0, 1)

        precision, recall, f1, accuracy, dIoU = util.calculate_metrics(gt, pred)
        self.dIoU.append(dIoU)
        self.precision.append(precision)
        self.recall.append(recall)
        self.F1.append(f1)
        ### <- mIoU ends


        if self.save_vis:
            self.__save_vis(batch, batch_idx, scores)

        torch.cuda.empty_cache()

    def __save_vis(self, batch, batch_idx, scores):
        """Save per-batch-element scan (points, gt, predicted) and map (points, gt)
        arrays as .npy under $DATA/predictions/<test_seq>/{scans,maps}."""
        # create log_dir to save some visuals
        s_path = os.path.join(
            self.data_dir,
            'predictions',
            self.test_seq[0],
            'scans'
        )
        m_path = os.path.join(
            self.data_dir,
            'predictions',
            self.test_seq[0],
            'maps'
        )

        os.makedirs(s_path, exist_ok=True)
        os.makedirs(m_path, exist_ok=True)

        batch_indices = [unique.item() for unique in torch.unique(batch[:, 0])]
        for b in batch_indices:
            mask_batch = batch[:, 0] == b
            mask_scan = batch[:, -2] == 1
            mask_map = batch[:, -2] == 0

            scan_mask = torch.logical_and(mask_batch, mask_scan)
            map_mask = torch.logical_and(mask_batch, mask_map)

            scan_points = batch[scan_mask, 1:4].cpu().data.numpy()
            scan_labels_gt = batch[scan_mask, -1].cpu().data.numpy()
            # BUGFIX: previously this indexed `scores` with the *global* scan
            # indices, mixing points from every element of the batch; for any
            # batch with >1 element the arrays were misaligned and the assert
            # below fired. Select only this element's scan points instead.
            scan_labels_hat = scores[scan_mask].cpu().data.numpy()

            map_points = batch[map_mask, 1:4].cpu().data.numpy()
            map_labels_gt = batch[map_mask, -1].cpu().data.numpy()

            assert len(scan_points) == len(scan_labels_gt) == len(scan_labels_hat), "Lengths of arrays are not equal."

            scan_data = np.column_stack((scan_points, scan_labels_gt, scan_labels_hat))
            map_data = np.column_stack((map_points, map_labels_gt))

            scan_pth = os.path.join(s_path, str(batch_idx) + '_' + str(b) + '.npy')
            map_pth = os.path.join(m_path, str(batch_idx) + '_' + str(b) + '.npy')
            np.save(scan_pth, scan_data)
            np.save(map_pth, map_data)

    def configure_optimizers(self):
        """Adam with step-decay LR schedule, both driven by TRAIN hparams."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams['TRAIN']['LR'],
                                     weight_decay=self.hparams['TRAIN']['WEIGHT_DECAY'])
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=self.hparams['TRAIN']['LR_EPOCH'], gamma=self.hparams['TRAIN']['LR_DECAY']
        )
        return [optimizer], [scheduler]

if __name__ == "__main__":
    # Example usage or script execution logic
    pass