├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   └── workflows
│       ├── base.yml
│       └── simple_checks.yml
├── .gitignore
├── LICENSE
├── README.md
├── docker-env
│   ├── .dockerignore
│   ├── Dockerfile
│   ├── Dockerfile.jupyter-pcl
│   ├── fix-permissions
│   ├── jupyter_notebook_config.py
│   ├── requirements.txt
│   ├── start-notebook.sh
│   ├── start-singleuser.sh
│   └── start.sh
├── obstacle-detection
│   ├── .ipynb_checkpoints
│   │   └── obstacle detection-checkpoint.ipynb
│   ├── config.yaml
│   ├── examples
│   │   ├── Segmentation + Obstacle Detection.ipynb
│   │   ├── img
│   │   │   ├── MainGif.gif
│   │   │   ├── MainGifforGit.gif
│   │   │   ├── MainGifwithLabels.gif
│   │   │   ├── about_project.gif
│   │   │   ├── bounding_boxes_step.png
│   │   │   ├── clustering_step.png
│   │   │   ├── darknet12_scan_000000_seq_00.png
│   │   │   ├── darknet12_scan_000000_seq_00_obstacles.png
│   │   │   ├── seg_od_clustering.png
│   │   │   ├── seg_od_clustering_bbox_000066.png
│   │   │   ├── squeezeseg_scan_000000_seq_00.png
│   │   │   ├── squeezeseg_scan_000000_seq_00_obstacles.png
│   │   │   ├── step1.png
│   │   │   ├── step2.png
│   │   │   └── step3.png
│   │   ├── inference_example.ipynb
│   │   ├── obstacle detection.ipynb
│   │   └── pcl_pipeline.ipynb
│   ├── model
│   │   ├── backbones
│   │   │   ├── __init__.py
│   │   │   ├── darknet.py
│   │   │   ├── squeezeseg.py
│   │   │   └── squeezesegV2.py
│   │   ├── common
│   │   │   ├── __init__.py
│   │   │   ├── avgmeter.py
│   │   │   ├── laserscan.py
│   │   │   ├── laserscanvis.py
│   │   │   ├── logger.py
│   │   │   ├── onehot.py
│   │   │   ├── sync_batchnorm
│   │   │   │   ├── __init__.py
│   │   │   │   ├── batchnorm.py
│   │   │   │   ├── comm.py
│   │   │   │   └── replicate.py
│   │   │   └── warmupLR.py
│   │   ├── darknet21
│   │   │   ├── arch_cfg.yaml
│   │   │   ├── backbone
│   │   │   ├── data_cfg.yaml
│   │   │   ├── segmentation_decoder
│   │   │   └── segmentation_head
│   │   ├── squeezeseg-crf
│   │   │   ├── arch_cfg.yaml
│   │   │   ├── backbone
│   │   │   ├── data_cfg.yaml
│   │   │   ├── segmentation_CRF
│   │   │   ├── segmentation_decoder
│   │   │   └── segmentation_head
│   │   ├── squeezeseg
│   │   │   ├── arch_cfg.yaml
│   │   │   ├── backbone
│   │   │   ├── data_cfg.yaml
│   │   │   ├── segmentation_decoder
│   │   │   └── segmentation_head
│   │   └── tasks
│   │       └── semantic
│   │           ├── README(1).md
│   │           ├── __init__.py
│   │           ├── config
│   │           │   ├── arch
│   │           │   │   ├── darknet21.yaml
│   │           │   │   ├── darknet53-1024px.yaml
│   │           │   │   ├── darknet53-512px.yaml
│   │           │   │   ├── darknet53-crf-1024px.yaml
│   │           │   │   ├── darknet53-crf-512px.yaml
│   │           │   │   ├── darknet53-crf.yaml
│   │           │   │   ├── darknet53.yaml
│   │           │   │   ├── squeezeseg.yaml
│   │           │   │   ├── squeezesegV2.yaml
│   │           │   │   ├── squeezesegV2_crf.yaml
│   │           │   │   └── squeezeseg_crf.yaml
│   │           │   └── labels
│   │           │       ├── semantic-kitti-all.yaml
│   │           │       └── semantic-kitti.yaml
│   │           ├── dataset
│   │           │   └── kitti
│   │           │       ├── __init__.py
│   │           │       └── parser.py
│   │           ├── decoders
│   │           │   ├── __init__.py
│   │           │   ├── darknet.py
│   │           │   ├── squeezeseg.py
│   │           │   └── squeezesegV2.py
│   │           ├── evaluate_biou.py
│   │           ├── evaluate_iou.py
│   │           ├── infer.py
│   │           ├── infer2.py
│   │           ├── modules
│   │           │   ├── __init__.py
│   │           │   ├── ioueval.py
│   │           │   ├── segmentator.py
│   │           │   ├── trainer.py
│   │           │   └── user.py
│   │           ├── postproc
│   │           │   ├── CRF.py
│   │           │   ├── KNN.py
│   │           │   ├── __init__.py
│   │           │   └── borderMask.py
│   │           ├── readme.md
│   │           ├── train.py
│   │           └── visualize.py
│   ├── pipeline
│   │   ├── common.py
│   │   ├── pcl_pipeline.py
│   │   ├── pcl_utils.py
│   │   └── pipeline.py
│   └── scripts
│       ├── draw.py
│       └── test.py
├── setup.cfg
├── test
│   ├── config.yaml
│   ├── data
│   │   ├── 000100.bin
│   │   ├── 000100.label
│   │   ├── 000101.bin
│   │   ├── 000101.label
│   │   ├── 000102.bin
│   │   └── 000102.label
│   ├── requirements.txt
│   └── simple_checks.py
└── visualization
    ├── README.md
    ├── __init__.py
    ├── config
    │   └── labels
    │       ├── semantic-kitti-all.yaml
    │       └── semantic-kitti.yaml
    ├── laserscan.py
    ├── laserscanvis.py
    └── visualize.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Environment:**
27 | - OS version
28 | - Docker version
29 | - GPU / CPU
30 |
31 | **Additional context**
32 | Add any other context about the problem here.
33 |
--------------------------------------------------------------------------------
/.github/workflows/base.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | env:
4 | PROJECT_NAME: point_cloud_segmentation
5 | BUILD_DIR: docker-env
6 |
7 | on:
8 | pull_request:
9 | branches: master
10 |
11 | jobs:
12 |
13 | codeStyles:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@master
17 | - name: Python Style Checker
18 | uses: andymckay/pycodestyle-action@0.1.3
19 | env:
20 | PRECOMMAND_MESSAGE: Vaba Laba Dab Dab!
21 |
22 | - name: CodeStyle VK Notify
23 | uses: alphamusic/VK-Notifications@1.0.2
24 | env:
25 | VK_USERS : ${{ secrets.VK_ME }}, ${{ secrets.VK_POLINA }}, ${{ secrets.VK_ILYA }}
26 | VK_MESSAGE: \#github_notify %0A ${{ github.actor}} on ${{ github.event_name }} %0A CodeStyle status✅
27 | VK_TOKEN: ${{ secrets.VK_TOKEN }}
28 |
29 | build:
30 | runs-on: ubuntu-latest
31 | needs: codeStyles
32 |
33 | steps:
34 | - uses: actions/checkout@v2
35 |
36 | - name: Docker version
37 | run: docker -v
38 |
39 | - name: Docker build
40 | run: |
41 | docker system prune -a -f
42 | docker build -t ${{ secrets.DOCKER_LOGIN }}/${{ secrets.DOCKER_NAME }}:ver_$GITHUB_RUN_NUMBER $BUILD_DIR
43 | docker images
44 | continue-on-error: true
45 | - name: DockerBuild VK Notify
46 | uses: alphamusic/VK-Notifications@1.0.2
47 | env:
48 | VK_USERS : ${{ secrets.VK_ME }}, ${{ secrets.VK_POLINA }}, ${{ secrets.VK_ILYA }}
49 | VK_MESSAGE: \#github_notify %0A ${{ github.actor}} on ${{ github.event_name }} %0A Building status✅
50 | VK_TOKEN: ${{ secrets.VK_TOKEN }}
51 |
52 | - name: Docker Upload
53 | run: |
54 | docker login --username=${{ secrets.DOCKER_LOGIN }} --password=${{ secrets.DOCKER_PASSWORD}}
55 | docker push ${{ secrets.DOCKER_LOGIN }}/${{ secrets.DOCKER_NAME }}:ver_$GITHUB_RUN_NUMBER
56 | continue-on-error: true
57 | - name: Docker Push VK Notify
58 | uses: alphamusic/VK-Notifications@1.0.2
59 | env:
60 | VK_USERS : ${{ secrets.VK_ME }}, ${{ secrets.VK_POLINA }}, ${{ secrets.VK_ILYA }}
61 | VK_MESSAGE: \#github_notify %0A ${{ github.actor}} on ${{ github.event_name }} %0A Uploading status✅
62 | VK_TOKEN: ${{ secrets.VK_TOKEN }}
63 |
--------------------------------------------------------------------------------
/.github/workflows/simple_checks.yml:
--------------------------------------------------------------------------------
1 | name: simple_checks
2 |
3 | env:
4 | PROJECT_NAME: PointCloudSegmentation
5 | on:
6 | pull_request:
7 | branches: [ master ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout repo
14 | uses: actions/checkout@v2
15 | - name: Set up Python
16 | uses: actions/setup-python@v1
17 | with:
18 | python-version: '3.x'
19 | - name: Install dependencies
20 | run: |
21 | python -m pip install --upgrade pip
22 | pip install -r test/requirements.txt
23 | pip freeze
24 | - name: Simple Checks
25 | run: |
26 | pwd
27 | pip install pytest
28 | pytest test/simple_checks.py -v
29 |
30 | - name: Docker Push VK Notify
31 | uses: alphamusic/VK-Notifications@1.0.2
32 | env:
33 | VK_USERS : ${{ secrets.VK_ME }}, ${{ secrets.VK_POLINA }}, ${{ secrets.VK_ILYA }}
34 | VK_MESSAGE: \#github_notify %0A ${{ github.actor}} on ${{ github.event_name }} %0A Testing status✅
35 | VK_TOKEN: ${{ secrets.VK_TOKEN }}
36 |
37 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Datasets, notebook checkpoints, and byte-compiled / cache files
2 | dataset/*
3 | dataset/sequences/
4 | obstacle-detection/dataset/
5 | obstacle-detection/.ipynb_checkpoints/*
6 | __pycache__/
7 | *.ipynb_checkpoints*
8 | *__pycache__*
9 | .vscode
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 valperovich
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PointCloudSegmentation
2 |
3 | ---
4 |
5 |  
6 |
7 | ---
8 |
9 |
10 | ---
11 | **Project structure:**
12 | ```
13 | ├───docker-env/
14 | ├───obstacle-detection/
15 | │   ├───dataset/
16 | │   │   └───sequences/
17 | │   │       └───00/
18 | │   │           ├───clusters/
19 | │   │           ├───labels/
20 | │   │           └───velodyne/
21 | │   ├───model/
22 | │   │
23 | │   ├───examples/
24 | │   │
25 | │   ├───pipeline/
26 | │   │
27 | │   └───scripts/
28 | │
29 | └───visualization/
30 | ```
31 |
32 |
33 | ## How to dockerize this:
34 | ---
35 | - In the *docker-env/* folder start Docker and build an image:
36 | `$ docker build -t jupyter .`
37 | - After that you can verify the build succeeded by running: `$ docker images`
38 | - Then start a container by running:
39 | `$ docker run -it --rm -p 8888:8888 -v /path/to/obstacle-detection:/home/jovyan/work jupyter`
40 | **NOTE:** on Windows you need to convert your path into a quasi-Linux format (*e.g. //c/path/to/obstacle-detection*). More details [here](https://medium.com/@kale.miller96/how-to-mount-your-current-working-directory-to-your-docker-container-in-windows-74e47fa104d7).
41 | Also, if you want to use drive *D:/*, check whether it is mounted and, if not, mount it manually. More details [here](http://support.divio.com/en/articles/646695-how-to-use-a-directory-outside-c-users-with-docker-toolbox-docker-for-windows) if you use Docker Toolbox.
42 | - Once the container is running you will see a URL for accessing Jupyter, e.g.:
43 | *http://127.0.0.1:8888?token=0cccd15e74216ed2dbe681738ed0f9c78bf65515e94f27a8*
44 | - To access Jupyter you need to open **Docker IP**:8888?token=xxxx... (e.g. http://192.168.99.100:8888/?token=0cccd15e74216ed2dbe681738ed0f9c78bf65515e94f27a8)
45 | - To enter the running container run `$ docker exec -it *CONTAINER ID* bash` (find the ID by running `$ docker ps`). A consolidated sketch of these commands follows below.
46 |
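For convenience, here is the same flow as a single shell sketch (the image tag *jupyter* and the host path are placeholders; adjust them to your checkout):

```
# build the image from the folder containing the Dockerfile
cd docker-env
docker build -t jupyter .

# verify the image exists
docker images

# start Jupyter with the obstacle-detection folder mounted into the container
docker run -it --rm -p 8888:8888 \
    -v /path/to/obstacle-detection:/home/jovyan/work jupyter

# in another terminal: find the container ID and open a shell inside it
docker ps
docker exec -it <CONTAINER_ID> bash
```
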
47 | ## Pre-trained Models
48 |
49 | ### [SemanticKITTI](http://semantic-kitti.org)
50 |
51 | - [squeezeseg](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/squeezeseg.tar.gz)
52 | - [squeezeseg + crf](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/squeezeseg-crf.tar.gz)
53 | - [squeezesegV2](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/squeezesegV2.tar.gz)
54 | - [squeezesegV2 + crf](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/squeezesegV2-crf.tar.gz)
55 | - [darknet21](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/darknet21.tar.gz)
56 | - [darknet53](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/darknet53.tar.gz)
57 | - [darknet53-1024](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/darknet53-1024.tar.gz)
58 | - [darknet53-512](http://www.ipb.uni-bonn.de/html/projects/bonnetal/lidar/semantic/models/darknet53-512.tar.gz)
59 |
60 | ## References and useful links:
61 | ---
62 | Dataset:
63 |
64 | 1. [Web-site Semantic KITTI](http://semantic-kitti.org/)
65 | 2. [Paper Semantic KITTI](https://arxiv.org/abs/1904.01416)
66 |
67 | Segmentation:
68 |
69 | 3. [Segmentation approaches Point Clouds](https://habr.com/ru/post/459088/)
70 | 4. [Also about point cloud segmentation](http://primo.ai/index.php?title=Point_Cloud)
71 | 5. [PointNet](http://stanford.edu/~rqi/pointnet/)
72 | 6. [PointNet++ from Stanford](http://stanford.edu/~rqi/pointnet2/)
73 | 7. [PointNet++](https://towardsdatascience.com/understanding-machine-learning-on-point-clouds-through-pointnet-f8f3f2d53cc3)
74 | 8. [RangeNet++](http://www.ipb.uni-bonn.de/wp-content/papercite-data/pdf/milioto2019iros.pdf)
75 |
76 | Obstacle detection:
77 |
78 | 9. [Obstacle Detection and Avoidance System for Drones](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5469666/)
79 | 10. [3D Lidar-based Static and Moving Obstacle Detection](https://home.isr.uc.pt/~cpremebida/files_cp/3D%20Lidar-based%20static%20and%20moving%20obstacle%20detection%20in%20driving%20environments_Preprint.pdf)
80 | 11. [USER-TRAINABLE OBJECT RECOGNITION SYSTEMS](http://www.alexteichman.com/files/dissertation.pdf)
81 | 12. [Real-Time Plane Segmentation and Obstacle Detection](https://vk.com/doc136761433_537895530?hash=da67f1d282ddb72f49&dl=957ba302f8b35cd695)
82 |
83 | Useful Github links:
84 |
85 | 13. https://github.com/PRBonn/semantic-kitti-api
86 | 14. https://github.com/jbehley/point_labeler
87 | 15. https://github.com/daavoo/pyntcloud
88 | 16. https://github.com/strawlab/python-pcl
89 | 17. https://github.com/kuixu/kitti_object_vis
90 | 18. https://github.com/lilyhappily/SFND-P1-Lidar-Obstacle-Detection
91 | 19. https://github.com/kcg2015/lidar_ground_plane_and_obstacles_detections
92 | 20. https://github.com/enginBozkurt/LidarObstacleDetection
93 |
--------------------------------------------------------------------------------
/docker-env/.dockerignore:
--------------------------------------------------------------------------------
1 | # Documentation
2 | README.md
3 |
--------------------------------------------------------------------------------
/docker-env/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM vadikalp/jupyter-pcl
2 |
3 | USER root
4 |
5 | ENV CONDA_DIR=/opt/conda
6 |
7 | COPY requirements.txt requirements.txt
8 |
9 | RUN pip install -r requirements.txt
10 |
11 | RUN pip install --no-cache-dir torch
12 |
13 | RUN pip install --no-cache-dir torchvision
14 |
15 | USER jovyan
16 |
--------------------------------------------------------------------------------
/docker-env/Dockerfile.jupyter-pcl:
--------------------------------------------------------------------------------
1 | FROM ubuntu:xenial
2 |
3 |
4 | LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"
5 | ARG NB_USER="jovyan"
6 | ARG NB_UID="1000"
7 | ARG NB_GID="100"
8 |
9 | USER root
10 |
11 | # Install all OS dependencies for notebook server that starts but lacks all
12 | # features (e.g., download as all possible file formats)
13 | ENV DEBIAN_FRONTEND noninteractive
14 | RUN apt-get update \
15 | && apt-get install -yq --no-install-recommends \
16 | wget \
17 | bzip2 \
18 | ca-certificates \
19 | sudo \
20 | locales \
21 | fonts-liberation \
22 | run-one \
23 | && apt-get clean && rm -rf /var/lib/apt/lists/*
24 |
25 | RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \
26 | locale-gen
27 |
28 | # Configure environment
29 | ENV CONDA_DIR=/opt/conda \
30 | SHELL=/bin/bash \
31 | NB_USER=$NB_USER \
32 | NB_UID=$NB_UID \
33 | NB_GID=$NB_GID \
34 | LC_ALL=en_US.UTF-8 \
35 | LANG=en_US.UTF-8 \
36 | LANGUAGE=en_US.UTF-8
37 | ENV PATH=$CONDA_DIR/bin:$PATH \
38 | HOME=/home/$NB_USER
39 |
40 | # Copy a script that we will use to correct permissions after running certain commands
41 | COPY fix-permissions /usr/local/bin/fix-permissions
42 | RUN chmod a+rx /usr/local/bin/fix-permissions
43 |
44 | # Enable prompt color in the skeleton .bashrc before creating the default NB_USER
45 | RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc
46 |
47 | # Create NB_USER (jovyan) with UID=1000 in the 'users' group
48 | # and make sure these dirs are writable by the `users` group.
49 | RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
50 | sed -i.bak -e 's/^%admin/#%admin/' /etc/sudoers && \
51 | sed -i.bak -e 's/^%sudo/#%sudo/' /etc/sudoers && \
52 | useradd -m -s /bin/bash -N -u $NB_UID $NB_USER && \
53 | mkdir -p $CONDA_DIR && \
54 | chown $NB_USER:$NB_GID $CONDA_DIR && \
55 | chmod g+w /etc/passwd && \
56 | fix-permissions $HOME && \
57 | fix-permissions $CONDA_DIR
58 |
59 | USER $NB_UID
60 | WORKDIR $HOME
61 | ARG PYTHON_VERSION=default
62 |
63 | # Setup work directory for backward-compatibility
64 | RUN mkdir /home/$NB_USER/work && \
65 | fix-permissions /home/$NB_USER
66 |
67 | # Install conda as jovyan and check the md5 sum provided on the download site
68 | ENV MINICONDA_VERSION=4.7.12.1 \
69 | MINICONDA_MD5=81c773ff87af5cfac79ab862942ab6b3 \
70 | CONDA_VERSION=4.7.12
71 |
72 | RUN cd /tmp && \
73 | wget --quiet https://repo.continuum.io/miniconda/Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \
74 | echo "${MINICONDA_MD5} *Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh" | md5sum -c - && \
75 | /bin/bash Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
76 | rm Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \
77 | echo "conda ${CONDA_VERSION}" >> $CONDA_DIR/conda-meta/pinned && \
78 | conda config --system --prepend channels conda-forge && \
79 | conda config --system --set auto_update_conda false && \
80 | conda config --system --set show_channel_urls true && \
81 | conda config --system --set channel_priority strict && \
82 | if [ ! $PYTHON_VERSION = 'default' ]; then conda install --yes python=$PYTHON_VERSION; fi && \
83 | conda list python | grep '^python ' | tr -s ' ' | cut -d '.' -f 1,2 | sed 's/$/.*/' >> $CONDA_DIR/conda-meta/pinned && \
84 | conda install --quiet --yes conda && \
85 | conda install --quiet --yes pip && \
86 | conda update --all --quiet --yes && \
87 | conda clean --all -f -y && \
88 | rm -rf /home/$NB_USER/.cache/yarn && \
89 | fix-permissions $CONDA_DIR && \
90 | fix-permissions /home/$NB_USER
91 |
92 | # Install Tini
93 | RUN conda install --quiet --yes 'tini=0.18.0' && \
94 | conda list tini | grep tini | tr -s ' ' | cut -d ' ' -f 1,2 >> $CONDA_DIR/conda-meta/pinned && \
95 | conda clean --all -f -y && \
96 | fix-permissions $CONDA_DIR && \
97 | fix-permissions /home/$NB_USER
98 |
99 | # Install Jupyter Notebook, Lab, and Hub
100 | # Generate a notebook server config
101 | # Cleanup temporary files
102 | # Correct permissions
103 | # Do all this in a single RUN command to avoid duplicating all of the
104 | # files across image layers when the permissions change
105 | RUN conda install --quiet --yes \
106 | 'notebook=6.0.3' \
107 | 'jupyterhub=1.1.0' \
108 | 'jupyterlab=1.2.5' && \
109 | conda clean --all -f -y && \
110 | npm cache clean --force && \
111 | jupyter notebook --generate-config && \
112 | rm -rf $CONDA_DIR/share/jupyter/lab/staging && \
113 | rm -rf /home/$NB_USER/.cache/yarn && \
114 | fix-permissions $CONDA_DIR && \
115 | fix-permissions /home/$NB_USER
116 |
117 | # COPY /obstacle-detection home/work
118 |
119 | EXPOSE 8888
120 |
121 | # Copy local files as late as possible to avoid cache busting
122 | COPY start.sh start-notebook.sh start-singleuser.sh /usr/local/bin/
123 | COPY jupyter_notebook_config.py /etc/jupyter/
124 |
125 | # Fix permissions on /etc/jupyter as root
126 | USER root
127 |
128 | RUN fix-permissions /etc/jupyter/
129 |
130 | RUN apt-get update && apt-get install -y libpcl-dev \
131 | && pip install python-pcl \
132 | && apt-get clean \
133 | && rm -rf /var/lib/apt/lists/*
134 |
135 | USER jovyan
136 |
137 | # Configure container startup
138 | ENTRYPOINT ["tini", "-g", "--"]
139 | CMD ["start-notebook.sh"]
140 |
--------------------------------------------------------------------------------
/docker-env/fix-permissions:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # set permissions on a directory
3 | # after any installation, if a directory needs to be (human) user-writable,
4 | # run this script on it.
5 | # It will make everything in the directory owned by the group $NB_GID
6 | # and writable by that group.
7 | # Deployments that want to set a specific user id can preserve permissions
8 | # by adding the `--group-add users` line to `docker run`.
9 |
10 | # uses find to avoid touching files that already have the right permissions,
11 | # which would cause massive image explosion
12 |
13 | # right permissions are:
14 | # group=$NB_GID
15 | # AND permissions include group rwX (directory-execute)
16 | # AND directories have setuid,setgid bits set
17 |
18 | set -e
19 |
20 | for d in "$@"; do
21 | find "$d" \
22 | ! \( \
23 | -group $NB_GID \
24 | -a -perm -g+rwX \
25 | \) \
26 | -exec chgrp $NB_GID {} \; \
27 | -exec chmod g+rwX {} \;
28 | # setuid,setgid *on directories only*
29 | find "$d" \
30 | \( \
31 | -type d \
32 | -a ! -perm -6000 \
33 | \) \
34 | -exec chmod +6000 {} \;
35 | done
36 |
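# Example usage (a sketch; "numpy" below is just a stand-in package): call this
# script after any layer that writes into a user-writable directory, e.g. in a
# Dockerfile:
#   RUN conda install --quiet --yes numpy && \
#       fix-permissions $CONDA_DIR && \
#       fix-permissions /home/$NB_USER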
--------------------------------------------------------------------------------
/docker-env/jupyter_notebook_config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Jupyter Development Team.
2 | # Distributed under the terms of the Modified BSD License.
3 |
4 | from jupyter_core.paths import jupyter_data_dir
5 | import subprocess
6 | import os
7 | import errno
8 | import stat
9 |
10 | c = get_config()
11 | c.NotebookApp.ip = '0.0.0.0'
12 | c.NotebookApp.port = 8888
13 | c.NotebookApp.open_browser = False
14 |
15 | # https://github.com/jupyter/notebook/issues/3130
16 | c.FileContentsManager.delete_to_trash = False
17 |
18 | # Generate a self-signed certificate
19 | if 'GEN_CERT' in os.environ:
20 | dir_name = jupyter_data_dir()
21 | pem_file = os.path.join(dir_name, 'notebook.pem')
22 | try:
23 | os.makedirs(dir_name)
24 | except OSError as exc: # Python >2.5
25 | if exc.errno == errno.EEXIST and os.path.isdir(dir_name):
26 | pass
27 | else:
28 | raise
29 |
30 | # Generate an openssl.cnf file to set the distinguished name
31 | cnf_file = os.path.join(os.getenv('CONDA_DIR', '/usr/lib'), 'ssl',
32 | 'openssl.cnf')
33 | if not os.path.isfile(cnf_file):
34 | with open(cnf_file, 'w') as fh:
35 | fh.write('''\
36 | [req]
37 | distinguished_name = req_distinguished_name
38 | [req_distinguished_name]
39 | ''')
40 |
41 | # Generate a certificate if one doesn't exist on disk
42 | subprocess.check_call([
43 | 'openssl', 'req', '-new', '-newkey', 'rsa:2048', '-days',
44 | '365', '-nodes', '-x509', '-subj',
45 | '/C=XX/ST=XX/L=XX/O=generated/CN=generated', '-keyout',
46 | pem_file, '-out', pem_file
47 | ])
48 | # Restrict access to the file
49 | os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR)
50 | c.NotebookApp.certfile = pem_file
51 |
52 | # Change default umask for all subprocesses of the notebook server if set in
53 | # the environment
54 | if 'NB_UMASK' in os.environ:
55 | os.umask(int(os.environ['NB_UMASK'], 8))
56 |
--------------------------------------------------------------------------------
/docker-env/requirements.txt:
--------------------------------------------------------------------------------
1 | #torch
2 | #torchvision
3 | pyntcloud
4 | scikit-learn
5 | pandas
6 | pythreejs
7 | ipywidgets
8 | pyyaml
9 | matplotlib
10 |
--------------------------------------------------------------------------------
/docker-env/start-notebook.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Jupyter Development Team.
3 | # Distributed under the terms of the Modified BSD License.
4 |
5 | set -e
6 |
7 | wrapper=""
8 | if [[ "${RESTARTABLE}" == "yes" ]]; then
9 | wrapper="run-one-constantly"
10 | fi
11 |
12 | if [[ ! -z "${JUPYTERHUB_API_TOKEN}" ]]; then
13 | # launched by JupyterHub, use single-user entrypoint
14 | exec /usr/local/bin/start-singleuser.sh "$@"
15 | elif [[ ! -z "${JUPYTER_ENABLE_LAB}" ]]; then
16 | . /usr/local/bin/start.sh $wrapper jupyter lab "$@"
17 | else
18 | . /usr/local/bin/start.sh $wrapper jupyter notebook "$@"
19 | fi
20 |
--------------------------------------------------------------------------------
/docker-env/start-singleuser.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Jupyter Development Team.
3 | # Distributed under the terms of the Modified BSD License.
4 |
5 | set -e
6 |
7 | # set default ip to 0.0.0.0
8 | if [[ "$NOTEBOOK_ARGS $@" != *"--ip="* ]]; then
9 | NOTEBOOK_ARGS="--ip=0.0.0.0 $NOTEBOOK_ARGS"
10 | fi
11 |
12 | # handle some deprecated environment variables
13 | # from DockerSpawner < 0.8.
14 | # These won't be passed from DockerSpawner 0.9,
15 | # so avoid specifying --arg=empty-string
16 | if [ ! -z "$NOTEBOOK_DIR" ]; then
17 | NOTEBOOK_ARGS="--notebook-dir='$NOTEBOOK_DIR' $NOTEBOOK_ARGS"
18 | fi
19 | if [ ! -z "$JPY_PORT" ]; then
20 | NOTEBOOK_ARGS="--port=$JPY_PORT $NOTEBOOK_ARGS"
21 | fi
22 | if [ ! -z "$JPY_USER" ]; then
23 | NOTEBOOK_ARGS="--user=$JPY_USER $NOTEBOOK_ARGS"
24 | fi
25 | if [ ! -z "$JPY_COOKIE_NAME" ]; then
26 | NOTEBOOK_ARGS="--cookie-name=$JPY_COOKIE_NAME $NOTEBOOK_ARGS"
27 | fi
28 | if [ ! -z "$JPY_BASE_URL" ]; then
29 | NOTEBOOK_ARGS="--base-url=$JPY_BASE_URL $NOTEBOOK_ARGS"
30 | fi
31 | if [ ! -z "$JPY_HUB_PREFIX" ]; then
32 | NOTEBOOK_ARGS="--hub-prefix=$JPY_HUB_PREFIX $NOTEBOOK_ARGS"
33 | fi
34 | if [ ! -z "$JPY_HUB_API_URL" ]; then
35 | NOTEBOOK_ARGS="--hub-api-url=$JPY_HUB_API_URL $NOTEBOOK_ARGS"
36 | fi
37 | if [ ! -z "$JUPYTER_ENABLE_LAB" ]; then
38 | NOTEBOOK_BIN="jupyter labhub"
39 | else
40 | NOTEBOOK_BIN="jupyterhub-singleuser"
41 | fi
42 |
43 | . /usr/local/bin/start.sh $NOTEBOOK_BIN $NOTEBOOK_ARGS "$@"
44 |
--------------------------------------------------------------------------------
/docker-env/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright (c) Jupyter Development Team.
3 | # Distributed under the terms of the Modified BSD License.
4 |
5 | set -e
6 |
7 | # Exec the specified command or fall back on bash
8 | if [ $# -eq 0 ]; then
9 | cmd=( "bash" )
10 | else
11 | cmd=( "$@" )
12 | fi
13 |
14 | run-hooks () {
15 | # Source scripts or run executable files in a directory
16 | if [[ ! -d "$1" ]] ; then
17 | return
18 | fi
19 | echo "$0: running hooks in $1"
20 | for f in "$1/"*; do
21 | case "$f" in
22 | *.sh)
23 | echo "$0: running $f"
24 | source "$f"
25 | ;;
26 | *)
27 | if [[ -x "$f" ]] ; then
28 | echo "$0: running $f"
29 | "$f"
30 | else
31 | echo "$0: ignoring $f"
32 | fi
33 | ;;
34 | esac
35 | done
36 | echo "$0: done running hooks in $1"
37 | }
38 |
39 | run-hooks /usr/local/bin/start-notebook.d
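# (example: an executable or *.sh file added as
#  /usr/local/bin/start-notebook.d/10-custom.sh, an illustrative name,
#  would be run or sourced by the call above before the server starts)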
40 |
41 | # Handle special flags if we're root
42 | if [ $(id -u) == 0 ] ; then
43 |
44 | # Only attempt to change the jovyan username if it exists
45 | if id jovyan &> /dev/null ; then
46 | echo "Set username to: $NB_USER"
47 | usermod -d /home/$NB_USER -l $NB_USER jovyan
48 | fi
49 |
50 | # Handle case where provisioned storage does not have the correct permissions by default
51 | # Ex: default NFS/EFS (no auto-uid/gid)
52 | if [[ "$CHOWN_HOME" == "1" || "$CHOWN_HOME" == 'yes' ]]; then
53 | echo "Changing ownership of /home/$NB_USER to $NB_UID:$NB_GID with options '${CHOWN_HOME_OPTS}'"
54 | chown $CHOWN_HOME_OPTS $NB_UID:$NB_GID /home/$NB_USER
55 | fi
56 | if [ ! -z "$CHOWN_EXTRA" ]; then
57 | for extra_dir in $(echo $CHOWN_EXTRA | tr ',' ' '); do
58 | echo "Changing ownership of ${extra_dir} to $NB_UID:$NB_GID with options '${CHOWN_EXTRA_OPTS}'"
59 | chown $CHOWN_EXTRA_OPTS $NB_UID:$NB_GID $extra_dir
60 | done
61 | fi
62 |
63 | # handle home and working directory if the username changed
64 | if [[ "$NB_USER" != "jovyan" ]]; then
65 | # changing username, make sure homedir exists
66 | # (it could be mounted, and we shouldn't create it if it already exists)
67 | if [[ ! -e "/home/$NB_USER" ]]; then
68 | echo "Relocating home dir to /home/$NB_USER"
69 | mv /home/jovyan "/home/$NB_USER" || ln -s /home/jovyan "/home/$NB_USER"
70 | fi
71 | # if workdir is in /home/jovyan, cd to /home/$NB_USER
72 | if [[ "$PWD/" == "/home/jovyan/"* ]]; then
73 | newcwd="/home/$NB_USER/${PWD:13}"
74 | echo "Setting CWD to $newcwd"
75 | cd "$newcwd"
76 | fi
77 | fi
78 |
79 | # Change UID:GID of NB_USER to NB_UID:NB_GID if it does not match
80 | if [ "$NB_UID" != $(id -u $NB_USER) ] || [ "$NB_GID" != $(id -g $NB_USER) ]; then
81 | echo "Set user $NB_USER UID:GID to: $NB_UID:$NB_GID"
82 | if [ "$NB_GID" != $(id -g $NB_USER) ]; then
83 | groupadd -g $NB_GID -o ${NB_GROUP:-${NB_USER}}
84 | fi
85 | userdel $NB_USER
86 | useradd --home /home/$NB_USER -u $NB_UID -g $NB_GID -G 100 -l $NB_USER
87 | fi
88 |
89 | # Enable sudo if requested
90 | if [[ "$GRANT_SUDO" == "1" || "$GRANT_SUDO" == 'yes' ]]; then
91 | echo "Granting $NB_USER sudo access and appending $CONDA_DIR/bin to sudo PATH"
92 | echo "$NB_USER ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/notebook
93 | fi
94 |
95 | # Add $CONDA_DIR/bin to sudo secure_path
96 | sed -r "s#Defaults\s+secure_path=\"([^\"]+)\"#Defaults secure_path=\"\1:$CONDA_DIR/bin\"#" /etc/sudoers | grep secure_path > /etc/sudoers.d/path
97 |
98 | # Exec the command as NB_USER with the PATH and the rest of
99 | # the environment preserved
100 | run-hooks /usr/local/bin/before-notebook.d
101 | echo "Executing the command: ${cmd[@]}"
102 | exec sudo -E -H -u $NB_USER PATH=$PATH XDG_CACHE_HOME=/home/$NB_USER/.cache PYTHONPATH=${PYTHONPATH:-} "${cmd[@]}"
103 | else
104 | if [[ "$NB_UID" == "$(id -u jovyan)" && "$NB_GID" == "$(id -g jovyan)" ]]; then
105 | # User is not attempting to override user/group via environment
106 | # variables, but they could still have overridden the uid/gid that
107 | # container runs as. Check that the user has an entry in the passwd
108 | # file and if not add an entry.
109 | STATUS=0 && whoami &> /dev/null || STATUS=$? && true
110 | if [[ "$STATUS" != "0" ]]; then
111 | if [[ -w /etc/passwd ]]; then
112 | echo "Adding passwd file entry for $(id -u)"
113 | cat /etc/passwd | sed -e "s/^jovyan:/nayvoj:/" > /tmp/passwd
114 | echo "jovyan:x:$(id -u):$(id -g):,,,:/home/jovyan:/bin/bash" >> /tmp/passwd
115 | cat /tmp/passwd > /etc/passwd
116 | rm /tmp/passwd
117 | else
118 | echo 'Container must be run with group "root" to update passwd file'
119 | fi
120 | fi
121 |
122 | # Warn if the user isn't going to be able to write files to $HOME.
123 | if [[ ! -w /home/jovyan ]]; then
124 | echo 'Container must be run with group "users" to update files'
125 | fi
126 | else
127 | # Warn if it looks like the user wants to override the uid/gid but hasn't
128 | # run the container as root.
129 | if [[ ! -z "$NB_UID" && "$NB_UID" != "$(id -u)" ]]; then
130 | echo 'Container must be run as root to set $NB_UID'
131 | fi
132 | if [[ ! -z "$NB_GID" && "$NB_GID" != "$(id -g)" ]]; then
133 | echo 'Container must be run as root to set $NB_GID'
134 | fi
135 | fi
136 |
137 | # Warn if it looks like the user wants to run in sudo mode but hasn't run
138 | # the container as root.
139 | if [[ "$GRANT_SUDO" == "1" || "$GRANT_SUDO" == 'yes' ]]; then
140 | echo 'Container must be run as root to grant sudo permissions'
141 | fi
142 |
143 | # Execute the command
144 | run-hooks /usr/local/bin/before-notebook.d
145 | echo "Executing the command: ${cmd[@]}"
146 | exec "${cmd[@]}"
147 | fi
148 |
--------------------------------------------------------------------------------
/obstacle-detection/config.yaml:
--------------------------------------------------------------------------------
1 | segments:
2 | 0 : unlabeled
3 | 1 : outlier
4 | 10: car
5 | 11: bicycle
6 | 13: bus
7 | 15: motorcycle
8 | 16: on-rails
9 | 18: truck
10 | 20: other-vehicle
11 | 30: person
12 | 31: bicyclist
13 | 32: motorcyclist
14 | 40: road
15 | 44: parking
16 | 48: sidewalk
17 | 49: other-ground
18 | 50: building
19 | 51: fence
20 | 52: other-structure
21 | 60: lane-marking
22 | 70: vegetation
23 | 71: trunk
24 | 72: terrain
25 | 80: pole
26 | 81: traffic-sign
27 | 99: other-object
28 | 252: moving-car
29 | 253: moving-bicyclist
30 | 254: moving-person
31 | 255: moving-motorcyclist
32 | 256: moving-on-rails
33 | 257: moving-bus
34 | 258: moving-truck
35 | 259: moving-other-vehicle
36 | obstacles:
37 | 10: car
38 | 11: bicycle
39 | 13: bus
40 | 15: motorcycle
41 | 16: on-rails
42 | 18: truck
43 | 20: other-vehicle
44 | 30: person
45 | 31: bicyclist
46 | 32: motorcyclist
47 | 252: moving-car
48 | 253: moving-bicyclist
49 | 254: moving-person
50 | 255: moving-motorcyclist
51 | 256: moving-on-rails
52 | 257: moving-bus
53 | 258: moving-truck
54 | 259: moving-other-vehicle
55 | learning_map:
56 | 0 : 0 # "unlabeled"
57 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
58 | 10: 1 # "car"
59 | 11: 2 # "bicycle"
60 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
61 | 15: 3 # "motorcycle"
62 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
63 | 18: 4 # "truck"
64 | 20: 5 # "other-vehicle"
65 | 30: 6 # "person"
66 | 31: 7 # "bicyclist"
67 | 32: 8 # "motorcyclist"
68 | 40: 9 # "road"
69 | 44: 10 # "parking"
70 | 48: 11 # "sidewalk"
71 | 49: 12 # "other-ground"
72 | 50: 13 # "building"
73 | 51: 14 # "fence"
74 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
75 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
76 | 70: 15 # "vegetation"
77 | 71: 16 # "trunk"
78 | 72: 17 # "terrain"
79 | 80: 18 # "pole"
80 | 81: 19 # "traffic-sign"
81 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
82 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
83 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
84 | 254: 6 # "moving-person" to "person" ------------------------------mapped
85 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
86 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
87 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
88 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
89 | 259: 5 # "moving-other-vehicle" to "other-vehicle" ----------------mapped
90 |
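# A minimal usage sketch (not part of this config): applying learning_map in
# Python to remap raw SemanticKITTI label ids to the train ids above.
#
#   import yaml
#   import numpy as np
#
#   cfg = yaml.safe_load(open("config.yaml"))
#   lut = np.zeros(max(cfg["learning_map"]) + 1, dtype=np.int32)
#   for raw_id, train_id in cfg["learning_map"].items():
#       lut[raw_id] = train_id
#
#   raw = np.fromfile("scan.label", dtype=np.uint32)  # any SemanticKITTI label file
#   train_ids = lut[raw & 0xFFFF]  # lower 16 bits hold the semantic label id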
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/MainGif.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/MainGif.gif
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/MainGifforGit.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/MainGifforGit.gif
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/MainGifwithLabels.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/MainGifwithLabels.gif
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/about_project.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/about_project.gif
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/bounding_boxes_step.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/bounding_boxes_step.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/clustering_step.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/clustering_step.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/darknet12_scan_000000_seq_00.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/darknet12_scan_000000_seq_00.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/darknet12_scan_000000_seq_00_obstacles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/darknet12_scan_000000_seq_00_obstacles.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/seg_od_clustering.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/seg_od_clustering.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/seg_od_clustering_bbox_000066.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/seg_od_clustering_bbox_000066.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/squeezeseg_scan_000000_seq_00.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/squeezeseg_scan_000000_seq_00.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/squeezeseg_scan_000000_seq_00_obstacles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/squeezeseg_scan_000000_seq_00_obstacles.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/step1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/step1.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/step2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/step2.png
--------------------------------------------------------------------------------
/obstacle-detection/examples/img/step3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/examples/img/step3.png
--------------------------------------------------------------------------------
/obstacle-detection/model/backbones/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/backbones/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/backbones/squeezeseg.py:
--------------------------------------------------------------------------------
1 | # Adapted from https://github.com/BichenWuUCB/SqueezeSeg
2 | from __future__ import print_function
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | class Fire(nn.Module):
9 | def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
10 | expand3x3_planes):
11 | super(Fire, self).__init__()
12 | self.inplanes = inplanes
13 | self.activation = nn.ReLU(inplace=True)
14 | self.squeeze = nn.Conv2d(inplanes,
15 | squeeze_planes,
16 | kernel_size=1)
17 | self.expand1x1 = nn.Conv2d(squeeze_planes,
18 | expand1x1_planes,
19 | kernel_size=1)
20 | self.expand3x3 = nn.Conv2d(squeeze_planes,
21 | expand3x3_planes,
22 | kernel_size=3,
23 | padding=1)
24 |
25 | def forward(self, x):
26 | x = self.activation(self.squeeze(x))
27 | return torch.cat([
28 | self.activation(self.expand1x1(x)),
29 | self.activation(self.expand3x3(x))
30 | ], 1)
31 |
32 |
33 | # ******************************************************************************
34 |
35 |
36 | class Backbone(nn.Module):
37 | """
38 | Class for Squeezeseg. Subclasses PyTorch's own "nn" module
39 | """
40 | def __init__(self, params):
41 | # Call the super constructor
42 | super(Backbone, self).__init__()
43 | print("Using SqueezeNet Backbone")
44 | self.use_range = params["input_depth"]["range"]
45 | self.use_xyz = params["input_depth"]["xyz"]
46 | self.use_remission = params["input_depth"]["remission"]
47 | self.drop_prob = params["dropout"]
48 | self.OS = params["OS"]
49 |
50 | # input depth calc
51 | self.input_depth = 0
52 | self.input_idxs = []
53 | if self.use_range:
54 | self.input_depth += 1
55 | self.input_idxs.append(0)
56 | if self.use_xyz:
57 | self.input_depth += 3
58 | self.input_idxs.extend([1, 2, 3])
59 | if self.use_remission:
60 | self.input_depth += 1
61 | self.input_idxs.append(4)
62 | print("Depth of backbone input = ", self.input_depth)
63 |
64 | # stride play
65 | self.strides = [2, 2, 2, 2]
66 | # check current stride
67 | current_os = 1
68 | for s in self.strides:
69 | current_os *= s
70 | print("Original OS: ", current_os)
71 |
72 | # make the new stride
73 | if self.OS > current_os:
74 | print("Can't do OS, ", self.OS,
75 | " because it is bigger than original ", current_os)
76 | else:
77 | # redo strides according to needed stride
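# e.g. with strides [2, 2, 2, 2] (OS 16) and a requested OS of 4, the loop
# below sets the last two strides to 1, leaving [2, 2, 1, 1]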
78 | for i, stride in enumerate(reversed(self.strides), 0):
79 | if int(current_os) != self.OS:
80 | if stride == 2:
81 | current_os /= 2
82 | self.strides[-1 - i] = 1
83 | if int(current_os) == self.OS:
84 | break
85 | print("New OS: ", int(current_os))
86 | print("Strides: ", self.strides)
87 |
88 | # encoder
89 | self.conv1a = nn.Sequential(
90 | nn.Conv2d(self.input_depth,
91 | 64,
92 | kernel_size=3,
93 | stride=[1, self.strides[0]],
94 | padding=1), nn.ReLU(inplace=True))
95 | self.conv1b = nn.Conv2d(self.input_depth,
96 | 64,
97 | kernel_size=1,
98 | stride=1,
99 | padding=0)
100 | self.fire23 = nn.Sequential(
101 | nn.MaxPool2d(kernel_size=3,
102 | stride=[1, self.strides[1]],
103 | padding=1), Fire(64, 16, 64, 64),
104 | Fire(128, 16, 64, 64))
105 | self.fire45 = nn.Sequential(
106 | nn.MaxPool2d(kernel_size=3,
107 | stride=[1, self.strides[2]],
108 | padding=1), Fire(128, 32, 128, 128),
109 | Fire(256, 32, 128, 128))
110 | self.fire6789 = nn.Sequential(
111 | nn.MaxPool2d(kernel_size=3,
112 | stride=[1, self.strides[3]],
113 | padding=1), Fire(256, 48, 192, 192),
114 | Fire(384, 48, 192, 192), Fire(384, 64, 256, 256),
115 | Fire(512, 64, 256, 256))
116 |
117 | # output
118 | self.dropout = nn.Dropout2d(self.drop_prob)
119 |
120 | # last channels
121 | self.last_channels = 512
122 |
123 | def run_layer(self, x, layer, skips, os):
124 | y = layer(x)
125 | if y.shape[2] < x.shape[2] or y.shape[3] < x.shape[3]:
126 | skips[os] = x.detach()
127 | os *= 2
128 | x = y
129 | return x, skips, os
130 |
131 | def forward(self, x):
132 | # filter input
133 | x = x[:, self.input_idxs]
134 |
135 | # run cnn
136 | # store for skip connections
137 | skips = {}
138 | os = 1
139 |
140 | # encoder
141 | skip_in = self.conv1b(x)
142 | x = self.conv1a(x)
143 | # first skip done manually
144 | skips[1] = skip_in.detach()
145 | os *= 2
146 |
147 | x, skips, os = self.run_layer(x, self.fire23, skips, os)
148 | x, skips, os = self.run_layer(x, self.dropout, skips, os)
149 | x, skips, os = self.run_layer(x, self.fire45, skips, os)
150 | x, skips, os = self.run_layer(x, self.dropout, skips, os)
151 | x, skips, os = self.run_layer(x, self.fire6789, skips, os)
152 | x, skips, os = self.run_layer(x, self.dropout, skips, os)
153 |
154 | return x, skips
155 |
156 | def get_last_depth(self):
157 | return self.last_channels
158 |
159 | def get_input_depth(self):
160 | return self.input_depth
161 |
--------------------------------------------------------------------------------
/obstacle-detection/model/common/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/common/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/common/avgmeter.py:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 |
3 |
4 | class AverageMeter(object):
5 | """Computes and stores the average and current value"""
6 | def __init__(self):
7 | self.reset()
8 |
9 | def reset(self):
10 | self.val = 0
11 | self.avg = 0
12 | self.sum = 0
13 | self.count = 0
14 |
15 | def update(self, val, n=1):
16 | self.val = val
17 | self.sum += val * n
18 | self.count += n
19 | self.avg = self.sum / self.count
20 |
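# Usage sketch: meter = AverageMeter(); meter.update(loss.item(), n=batch_size);
# meter.avg then holds the running (count-weighted) average across updates.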
--------------------------------------------------------------------------------
/obstacle-detection/model/common/logger.py:
--------------------------------------------------------------------------------
1 | # Code referenced from
2 | # https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
3 |
4 | import tensorflow as tf
5 | import numpy as np
6 | import scipy.misc
7 | try:
8 | from StringIO import StringIO # Python 2.7
9 | except ImportError:
10 | from io import BytesIO # Python 3.x
11 |
12 |
13 | class Logger(object):
14 | def __init__(self, log_dir):
15 | """Create a summary writer logging to log_dir."""
16 | self.writer = tf.summary.FileWriter(log_dir)
17 |
18 | def scalar_summary(self, tag, value, step):
19 | """Log a scalar variable."""
20 | summary = tf.Summary(
21 | value=[tf.Summary.Value(tag=tag, simple_value=value)])
22 | self.writer.add_summary(summary, step)
23 | self.writer.flush()
24 |
25 | def image_summary(self, tag, images, step):
26 | """Log a list of images."""
27 |
28 | img_summaries = []
29 | for i, img in enumerate(images):
30 | # Write the image to a string
31 | try:
32 | s = StringIO()
33 | except BaseException:
34 | s = BytesIO()
35 | scipy.misc.toimage(img).save(s, format="png")
36 |
37 | # Create an Image object
38 | img_sum = tf.Summary.Image(
39 | encoded_image_string=s.getvalue(),
40 | height=img.shape[0],
41 | width=img.shape[1])
42 | # Create a Summary value
43 | img_summaries.append(
44 | tf.Summary.Value(tag='%s/%d' % (tag, i),
45 | image=img_sum))
46 |
47 | # Create and write Summary
48 | summary = tf.Summary(value=img_summaries)
49 | self.writer.add_summary(summary, step)
50 | self.writer.flush()
51 |
52 | def histo_summary(self, tag, values, step, bins=1000):
53 | """Log a histogram of the tensor of values."""
54 |
55 | # Create a histogram using numpy
56 | counts, bin_edges = np.histogram(values, bins=bins)
57 |
58 | # Fill the fields of the histogram proto
59 | hist = tf.HistogramProto()
60 | hist.min = float(np.min(values))
61 | hist.max = float(np.max(values))
62 | hist.num = int(np.prod(values.shape))
63 | hist.sum = float(np.sum(values))
64 | hist.sum_squares = float(np.sum(values**2))
65 |
66 | # Drop the start of the first bin
67 | bin_edges = bin_edges[1:]
68 |
69 | # Add bin edges and counts
70 | for edge in bin_edges:
71 | hist.bucket_limit.append(edge)
72 | for c in counts:
73 | hist.bucket.append(c)
74 |
75 | # Create and write Summary
76 | summary = tf.Summary(
77 | value=[tf.Summary.Value(tag=tag, histo=hist)])
78 | self.writer.add_summary(summary, step)
79 | self.writer.flush()
80 |
--------------------------------------------------------------------------------
/obstacle-detection/model/common/onehot.py:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import __init__ as booger
6 |
7 |
8 | class oneHot(nn.Module):
9 | def __init__(self, device, nclasses, spatial_dim=2):
10 | super().__init__()
11 | self.device = device
12 | self.nclasses = nclasses
13 | self.spatial_dim = spatial_dim
14 |
15 | def onehot1dspatial(self, x):
16 | # we only handle 1d tensors, batched or not, so check
17 | assert (len(x.shape) == 1 or len(x.shape) == 2)
18 | # if not batched, batch
19 | remove_dim = False # flag to unbatch
20 | if len(x.shape) == 1:
21 | # add batch dimension
22 | x = x[None, ...]
23 | remove_dim = True
24 |
25 | # get tensor shape
26 | n, b = x.shape
27 |
28 | # scatter to onehot
29 | one_hot = torch.zeros(
30 | (n, self.nclasses, b),
31 | device=self.device).scatter_(1, x.unsqueeze(1), 1)
32 |
33 | # x is now [n,classes,b]
34 |
35 | # if it used to be unbatched, then unbatch it
36 | if remove_dim:
37 | one_hot = one_hot[0]
38 |
39 | return one_hot
40 |
41 | def onehot2dspatial(self, x):
42 | # we only handle 2d tensors, batched or not, so check
43 | assert (len(x.shape) == 2 or len(x.shape) == 3)
44 | # if not batched, batch
45 | remove_dim = False # flag to unbatch
46 | if len(x.shape) == 2:
47 | # add batch dimension
48 | x = x[None, ...]
49 | remove_dim = True
50 |
51 | # get tensor shape
52 | n, h, w = x.shape
53 |
54 | # scatter to onehot
55 | one_hot = torch.zeros(
56 | (n, self.nclasses, h, w),
57 | device=self.device).scatter_(1, x.unsqueeze(1), 1)
58 |
59 | # x is now [n,classes,h,w]
60 |
61 | # if it used to be unbatched, then unbatch it
62 | if remove_dim:
63 | one_hot = one_hot[0]
64 |
65 | return one_hot
66 |
67 | def forward(self, x):
68 | # do onehot here
69 | if self.spatial_dim == 1:
70 | return self.onehot1dspatial(x)
71 | elif self.spatial_dim == 2:
72 | return self.onehot2dspatial(x)
73 |
74 |
75 | if __name__ == "__main__":
76 | # get device
77 | if torch.cuda.is_available():
78 | device = torch.device('cuda')
79 | else:
80 | device = torch.device('cpu')
81 |
82 | # define number of classes
83 | nclasses = 6
84 | print("*" * 80)
85 | print("Num classes 1d =", nclasses)
86 | print("*" * 80)
87 |
88 | # test 1d unbatched case
89 | print("Tensor 1d spat dim, unbatched")
90 | tensor = torch.arange(0, nclasses).to(device) # [0,1,2,3,4,5]
91 | print("in:", tensor)
92 | module = oneHot(device, nclasses, spatial_dim=1)
93 | print("out:", module(tensor))
94 | print("*" * 80)
95 |
96 | # test 1d batched case
97 | print("*" * 80)
98 | print("Tensor 1d spat dim, batched")
99 | tensor = torch.arange(0, nclasses).to(device) # [0,1,2,3,4,5]
100 | # [[0,1,2,3,4,5], [0,1,2,3,4,5]]
101 | tensor = torch.cat([tensor.unsqueeze(0), tensor.unsqueeze(0)])
102 | print("in:", tensor)
103 | module = oneHot(device, nclasses, spatial_dim=1)
104 | print("out:", module(tensor))
105 | print("*" * 80)
106 |
107 | # for 2 use less classes
108 | nclasses = 3
109 | print("*" * 80)
110 | print("Num classes 2d =", nclasses)
111 | print("*" * 80)
112 |
113 | # test 2d unbatched case
114 | print("*" * 80)
115 | print("Tensor 2d spat dim, unbatched")
116 | tensor = torch.arange(0, nclasses).to(device) # [0,1,2]
117 | tensor = torch.cat([
118 | tensor.unsqueeze(0), # [[0,1,2],
119 | tensor.unsqueeze(0), # [0,1,2],
120 | tensor.unsqueeze(0), # [0,1,2],
121 | tensor.unsqueeze(0)
122 | ]) # [0,1,2]]
123 | print("in:", tensor)
124 | module = oneHot(device, nclasses, spatial_dim=2)
125 | print("out:", module(tensor))
126 | print("*" * 80)
127 |
128 | # test 2d batched case
129 | print("*" * 80)
130 | print("Tensor 2d spat dim, unbatched")
131 | tensor = torch.arange(0, nclasses).to(device) # [0,1,2]
132 | tensor = torch.cat([
133 | tensor.unsqueeze(0), # [[0,1,2],
134 | tensor.unsqueeze(0), # [0,1,2],
135 | tensor.unsqueeze(0), # [0,1,2],
136 | tensor.unsqueeze(0)
137 | ]) # [0,1,2]]
138 | tensor = torch.cat([tensor.unsqueeze(0),
139 | tensor.unsqueeze(0)
140 | ]) # 2 of the same 2d tensor
141 | print("in:", tensor)
142 | module = oneHot(device, nclasses, spatial_dim=2)
143 | print("out:", module(tensor))
144 | print("*" * 80)
145 |
--------------------------------------------------------------------------------
/obstacle-detection/model/common/sync_batchnorm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/common/sync_batchnorm/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/common/sync_batchnorm/comm.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : comm.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import queue
12 | import collections
13 | import threading
14 |
15 | __all__ = ['FutureResult', 'SlavePipe', 'SyncMaster']
16 |
17 |
18 | class FutureResult(object):
19 | """A thread-safe future implementation. Used only as one-to-one pipe."""
20 | def __init__(self):
21 | self._result = None
22 | self._lock = threading.Lock()
23 | self._cond = threading.Condition(self._lock)
24 |
25 | def put(self, result):
26 | with self._lock:
27 | assert self._result is None, 'Previous result hasn\'t been fetched.'
28 | self._result = result
29 | self._cond.notify()
30 |
31 | def get(self):
32 | with self._lock:
33 | if self._result is None:
34 | self._cond.wait()
35 |
36 | res = self._result
37 | self._result = None
38 | return res
39 |
40 |
41 | _MasterRegistry = collections.namedtuple('MasterRegistry', ['result'])
42 | _SlavePipeBase = collections.namedtuple(
43 | '_SlavePipeBase', ['identifier', 'queue', 'result'])
44 |
45 |
46 | class SlavePipe(_SlavePipeBase):
47 | """Pipe for master-slave communication."""
48 | def run_slave(self, msg):
49 | self.queue.put((self.identifier, msg))
50 | ret = self.result.get()
51 | self.queue.put(True)
52 | return ret
53 |
54 |
55 | class SyncMaster(object):
56 | """An abstract `SyncMaster` object.
57 |
58 | - During the replication, as data parallel triggers a callback on each module, all slave devices should
59 | call `register_slave(id)` and obtain a `SlavePipe` to communicate with the master.
60 | - During the forward pass, the master device invokes `run_master`; all messages from the slave devices are collected
61 | and passed to a registered callback.
62 | - After receiving the messages, the master device gathers the information and determines the message to be passed
63 | back to each slave device.
64 | """
65 | def __init__(self, master_callback):
66 | """
67 |
68 | Args:
69 | master_callback: a callback to be invoked after having collected messages from slave devices.
70 | """
71 | self._master_callback = master_callback
72 | self._queue = queue.Queue()
73 | self._registry = collections.OrderedDict()
74 | self._activated = False
75 |
76 | def __getstate__(self):
77 | return {'master_callback': self._master_callback}
78 |
79 | def __setstate__(self, state):
80 | self.__init__(state['master_callback'])
81 |
82 | def register_slave(self, identifier):
83 | """
84 |         Register a slave device.
85 | 
86 |         Args:
87 |             identifier: an identifier, usually the device id.
88 |
89 | Returns: a `SlavePipe` object which can be used to communicate with the master device.
90 |
91 | """
92 | if self._activated:
93 | assert self._queue.empty(
94 | ), 'Queue is not clean before next initialization.'
95 | self._activated = False
96 | self._registry.clear()
97 | future = FutureResult()
98 | self._registry[identifier] = _MasterRegistry(future)
99 | return SlavePipe(identifier, self._queue, future)
100 |
101 | def run_master(self, master_msg):
102 | """
103 | Main entry for the master device in each forward pass.
104 |         The messages are first collected from each device (including the master device), and then
105 |         the registered callback is invoked to compute the message to be sent back to each device
106 |         (including the master device).
107 |
108 | Args:
109 |             master_msg: the message that the master wants to send to itself. This will be placed as the first
110 | message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.
111 |
112 | Returns: the message to be sent back to the master device.
113 |
114 | """
115 | self._activated = True
116 |
117 | intermediates = [(0, master_msg)]
118 | for i in range(self.nr_slaves):
119 | intermediates.append(self._queue.get())
120 |
121 | results = self._master_callback(intermediates)
122 | assert results[0][
123 |             0] == 0, 'The first result should belong to the master.'
124 |
125 | for i, res in results:
126 | if i == 0:
127 | continue
128 | self._registry[i].result.put(res)
129 |
130 | for i in range(self.nr_slaves):
131 | assert self._queue.get() is True
132 |
133 | return results[0][1]
134 |
135 | @property
136 | def nr_slaves(self):
137 | return len(self._registry)
138 |
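A minimal sketch of the round-trip these classes implement, assuming the definitions above are in scope (one slave simulated on a thread; in the real code the slaves are per-GPU replicas inside a forward pass):

    import threading

    def master_callback(intermediates):
        # intermediates is [(id, msg), ...] with the master's (0, msg) first;
        # return one (id, result) pair per device, master first.
        total = sum(msg for _, msg in intermediates)
        return [(i, total) for i, _ in intermediates]

    master = SyncMaster(master_callback)
    pipe = master.register_slave(identifier=1)   # slaves register before the pass

    worker = threading.Thread(target=lambda: print("slave got:", pipe.run_slave(2)))
    worker.start()
    print("master got:", master.run_master(1))   # both sides receive 3
    worker.join()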
--------------------------------------------------------------------------------
/obstacle-detection/model/common/sync_batchnorm/replicate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # File : replicate.py
3 | # Author : Jiayuan Mao
4 | # Email : maojiayuan@gmail.com
5 | # Date : 27/01/2018
6 | #
7 | # This file is part of Synchronized-BatchNorm-PyTorch.
8 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9 | # Distributed under MIT License.
10 |
11 | import functools
12 |
13 | from torch.nn.parallel.data_parallel import DataParallel
14 |
15 | __all__ = [
16 | 'CallbackContext', 'execute_replication_callbacks',
17 | 'DataParallelWithCallback', 'patch_replication_callback'
18 | ]
19 |
20 |
21 | class CallbackContext(object):
22 | pass
23 |
24 |
25 | def execute_replication_callbacks(modules):
26 | """
27 |     Execute a replication callback `__data_parallel_replicate__` on each module created by the original replication.
28 |
29 | The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
30 |
31 |     Note that, as all replicas are isomorphic, we assign each sub-module a context
32 |     (shared among multiple copies of this module on different devices).
33 |     Through this context, different copies can share some information.
34 | 
35 |     We guarantee that the callback on the master copy (the first copy) will be invoked ahead of the callbacks
36 |     of any slave copies.
37 | """
38 | master_copy = modules[0]
39 | nr_modules = len(list(master_copy.modules()))
40 | ctxs = [CallbackContext() for _ in range(nr_modules)]
41 |
42 | for i, module in enumerate(modules):
43 | for j, m in enumerate(module.modules()):
44 | if hasattr(m, '__data_parallel_replicate__'):
45 | m.__data_parallel_replicate__(ctxs[j], i)
46 |
47 |
48 | class DataParallelWithCallback(DataParallel):
49 | """
50 | Data Parallel with a replication callback.
51 |
52 |     A replication callback `__data_parallel_replicate__` on each module will be invoked after the module is created by
53 |     the original `replicate` function.
54 | The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
55 |
56 | Examples:
57 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
58 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
59 | # sync_bn.__data_parallel_replicate__ will be invoked.
60 | """
61 | def replicate(self, module, device_ids):
62 | modules = super(DataParallelWithCallback,
63 | self).replicate(module, device_ids)
64 | execute_replication_callbacks(modules)
65 | return modules
66 |
67 |
68 | def patch_replication_callback(data_parallel):
69 | """
70 | Monkey-patch an existing `DataParallel` object. Add the replication callback.
71 |     Useful when you have a customized `DataParallel` implementation.
72 |
73 | Examples:
74 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
75 | > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
76 | > patch_replication_callback(sync_bn)
77 | # this is equivalent to
78 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
79 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
80 | """
81 |
82 | assert isinstance(data_parallel, DataParallel)
83 |
84 | old_replicate = data_parallel.replicate
85 |
86 | @functools.wraps(old_replicate)
87 | def new_replicate(module, device_ids):
88 | modules = old_replicate(module, device_ids)
89 | execute_replication_callbacks(modules)
90 | return modules
91 |
92 | data_parallel.replicate = new_replicate
93 |
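A minimal sketch of the callback protocol using a hypothetical `Replicable` module, assuming the definitions above are in scope (`copy.deepcopy` stands in for `DataParallel.replicate`, which needs multiple GPUs): every copy of a sub-module receives the same `CallbackContext`, and the master copy's callback runs first:

    import copy
    import torch.nn as nn

    class Replicable(nn.Module):            # hypothetical, for illustration
        def __data_parallel_replicate__(self, ctx, copy_id):
            if copy_id == 0:                # master copy runs first
                ctx.copy_ids = []
            ctx.copy_ids.append(copy_id)
            self.ctx = ctx                  # keep the shared context around

    module = Replicable()
    replicas = [module, copy.deepcopy(module)]    # stand-in for replicate()
    execute_replication_callbacks(replicas)
    assert replicas[0].ctx is replicas[1].ctx     # context shared across copies
    assert replicas[0].ctx.copy_ids == [0, 1]     # master first, then slaves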
--------------------------------------------------------------------------------
/obstacle-detection/model/common/warmupLR.py:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 |
3 | import torch.optim.lr_scheduler as toptim
4 |
5 |
6 | class warmupLR(toptim._LRScheduler):
7 | """ Warmup learning rate scheduler.
8 |     Initially increases the learning rate from 0 to the final value over a
9 |     given number of steps. After that, each step decays the LR
10 |     exponentially.
11 | """
12 | def __init__(self, optimizer, lr, warmup_steps, momentum, decay):
13 | # cyclic params
14 | self.optimizer = optimizer
15 | self.lr = lr
16 | self.warmup_steps = warmup_steps
17 | self.momentum = momentum
18 | self.decay = decay
19 |
20 | # cap to one
21 | if self.warmup_steps < 1:
22 | self.warmup_steps = 1
23 |
24 | # cyclic lr
25 | self.initial_scheduler = toptim.CyclicLR(
26 | self.optimizer,
27 | base_lr=0,
28 | max_lr=self.lr,
29 | step_size_up=self.warmup_steps,
30 | step_size_down=self.warmup_steps,
31 | cycle_momentum=False,
32 | base_momentum=self.momentum,
33 | max_momentum=self.momentum)
34 |
35 | # our params
36 | self.last_epoch = -1 # fix for pytorch 1.1 and below
37 | self.finished = False # am i done
38 | super().__init__(optimizer)
39 |
40 | def get_lr(self):
41 | return [
42 | self.lr * (self.decay**self.last_epoch)
43 |             for _ in self.base_lrs
44 | ]
45 |
46 | def step(self, epoch=None):
47 | if self.finished or self.initial_scheduler.last_epoch >= self.warmup_steps:
48 | if not self.finished:
49 |                 self.base_lrs = [self.lr for _ in self.base_lrs]
50 | self.finished = True
51 | return super(warmupLR, self).step(epoch)
52 | else:
53 | return self.initial_scheduler.step(epoch)
54 |
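A minimal usage sketch (the toy model and optimizer are assumptions; in this project the equivalent values come from the `lr`, `wup_epochs`, `momentum`, and `lr_decay` fields of the arch_cfg.yaml files below, with the scheduler stepped once per batch):

    import torch

    net = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    scheduler = warmupLR(optimizer, lr=0.01, warmup_steps=100,
                         momentum=0.9, decay=0.99)

    for step in range(200):
        optimizer.step()    # optimizer first, then scheduler (PyTorch >= 1.1)
        scheduler.step()    # LR ramps 0 -> 0.01 over 100 steps, then decays
                            # by a factor of 0.99 per step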
--------------------------------------------------------------------------------
/obstacle-detection/model/darknet21/arch_cfg.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.995 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 12 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 |
24 | ################################################################################
25 | # backbone parameters
26 | ################################################################################
27 | backbone:
28 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
29 | input_depth:
30 | range: True
31 | xyz: True
32 | remission: True
33 | dropout: 0.01
34 | bn_d: 0.01
35 | OS: 32 # output stride (only horizontally)
36 | train: False # train backbone?
37 | extra:
38 | layers: 21
39 |
40 | ################################################################################
41 | # decoder parameters
42 | ################################################################################
43 | decoder:
44 | name: "darknet"
45 | dropout: 0.01
46 | bn_d: 0.01
47 | train: False # train decoder?
48 | extra: False # nothing to add for this decoder, otherwise this is a dict
49 |
50 | ################################################################################
51 | # classification head parameters
52 | ################################################################################
53 | head:
54 | name: "segmentation"
55 | train: False
56 | dropout: 0.01
57 |
58 | ################################################################################
59 | # postproc parameters
60 | ################################################################################
61 | post:
62 | CRF:
63 | use: False
64 | train: False
65 | params: False # this should be a dict when in use
66 | KNN:
67 | use: False
68 | params:
69 | knn: 5
70 | search: 5
71 | sigma: 1.0
72 | cutoff: 1.0
73 |
74 | ################################################################################
75 | # dataset parameters
76 | ################################################################################
77 | # dataset (to find parser)
78 | dataset:
79 | labels: "kitti"
80 | scans: "kitti"
81 | max_points: 150000 # max of any scan in dataset
82 | sensor:
83 | name: "HDL64"
84 | type: "spherical" # projective
85 | fov_up: 3
86 | fov_down: -25
87 | img_prop:
88 | width: 1048
89 | height: 64
90 | img_means: #range,x,y,z,signal
91 | - 12.12
92 | - 10.88
93 | - 0.23
94 | - -1.04
95 | - 0.21
96 | img_stds: #range,x,y,z,signal
97 | - 12.32
98 | - 11.47
99 | - 6.91
100 | - 0.86
101 | - 0.16
102 |
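These configs are consumed as plain YAML; a minimal loading sketch (the path and `pyyaml` usage are assumptions, not taken from the repo's own loader):

    import yaml

    with open("obstacle-detection/model/darknet21/arch_cfg.yaml") as f:
        ARCH = yaml.safe_load(f)

    print(ARCH["backbone"]["name"])                # darknet
    print(ARCH["dataset"]["sensor"]["img_prop"])   # {'width': 1048, 'height': 64}
    print(ARCH["train"]["lr"], ARCH["train"]["lr_decay"])  # 0.01 0.995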
--------------------------------------------------------------------------------
/obstacle-detection/model/darknet21/backbone:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/darknet21/backbone
--------------------------------------------------------------------------------
/obstacle-detection/model/darknet21/data_cfg.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
138 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
139 | 254: 6 # "moving-person" to "person" ------------------------------mapped
140 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
141 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
142 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
143 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
144 | 259: 5 # "moving-other-vehicle" to "other-vehicle" ----------------mapped
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | learning_ignore: # Ignore classes
167 | 0: True # "unlabeled", and others ignored
168 | 1: False # "car"
169 | 2: False # "bicycle"
170 | 3: False # "motorcycle"
171 | 4: False # "truck"
172 | 5: False # "other-vehicle"
173 | 6: False # "person"
174 | 7: False # "bicyclist"
175 | 8: False # "motorcyclist"
176 | 9: False # "road"
177 | 10: False # "parking"
178 | 11: False # "sidewalk"
179 | 12: False # "other-ground"
180 | 13: False # "building"
181 | 14: False # "fence"
182 | 15: False # "vegetation"
183 | 16: False # "trunk"
184 | 17: False # "terrain"
185 | 18: False # "pole"
186 | 19: False # "traffic-sign"
187 | split: # sequence numbers
188 | train:
189 | valid:
190 | test:
191 | - 00
192 |
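The `content` ratios above feed the class weighting described in the arch_cfg files (`w = 1 / (content + epsilon_w)`), after folding raw labels through `learning_map`. A minimal sketch of that computation (the path and loader are assumptions):

    import yaml

    with open("obstacle-detection/model/darknet21/data_cfg.yaml") as f:
        DATA = yaml.safe_load(f)

    epsilon_w = 0.001                  # arch_cfg.yaml: train.epsilon_w
    n_classes = len(DATA["learning_map_inv"])

    # accumulate per-training-class frequency from raw-label frequencies
    content = [0.0] * n_classes
    for raw_label, ratio in DATA["content"].items():
        content[DATA["learning_map"][raw_label]] += ratio

    weights = [1.0 / (c + epsilon_w) for c in content]
    for cls, ignored in DATA["learning_ignore"].items():
        if ignored:
            weights[cls] = 0.0         # e.g. class 0, "unlabeled"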
--------------------------------------------------------------------------------
/obstacle-detection/model/darknet21/segmentation_decoder:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/darknet21/segmentation_decoder
--------------------------------------------------------------------------------
/obstacle-detection/model/darknet21/segmentation_head:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/darknet21/segmentation_head
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/arch_cfg.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.001 # sgd learning rate
8 | wup_epochs: 0.01 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: True # show scans during training
21 | workers: 4 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezeseg" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | train: True # train backbone?
35 | extra: False
36 |
37 | ################################################################################
38 | # decoder parameters
39 | ################################################################################
40 | decoder:
41 | name: "squeezeseg"
42 | dropout: 0.01
43 | train: True # train decoder?
44 | extra: False # nothing to add for this decoder, otherwise this is a dict
45 |
46 | ################################################################################
47 | # classification head parameters
48 | ################################################################################
49 | head:
50 | name: "segmentation"
51 | train: True
52 | dropout: 0.01
53 |
54 | ################################################################################
55 | # postproc parameters
56 | ################################################################################
57 | post:
58 | CRF:
59 | use: True
60 | train: True
61 | params:
62 | iter: 3
63 | lcn_size:
64 | h: 3
65 | w: 5
66 | xyz_coef: 0.1
67 | xyz_sigma: 0.7
68 | KNN:
69 | use: False
70 | params:
71 | knn: 5
72 | search: 5
73 | sigma: 1.0
74 | cutoff: 1.0
75 |
76 | ################################################################################
78 | # dataset parameters
78 | ################################################################################
79 | # dataset (to find parser)
80 | dataset:
81 | labels: "kitti"
82 | scans: "kitti"
83 | max_points: 150000 # max of any scan in dataset
84 | sensor:
85 | name: "HDL64"
86 | type: "spherical" # projective
87 | fov_up: 3
88 | fov_down: -25
89 | img_prop:
90 | width: 2048
91 | height: 64
92 | img_means: #range,x,y,z,signal
93 | - 12.12
94 | - 10.88
95 | - 0.23
96 | - -1.04
97 | - 0.21
98 | img_stds: #range,x,y,z,signal
99 | - 12.32
100 | - 11.47
101 | - 6.91
102 | - 0.86
103 | - 0.16
104 |
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/backbone:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg-crf/backbone
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/data_cfg.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
138 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
139 | 254: 6 # "moving-person" to "person" ------------------------------mapped
140 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
141 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
142 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
143 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
144 | 259: 5 # "moving-other-vehicle" to "other-vehicle" ----------------mapped
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | learning_ignore: # Ignore classes
167 | 0: True # "unlabeled", and others ignored
168 | 1: False # "car"
169 | 2: False # "bicycle"
170 | 3: False # "motorcycle"
171 | 4: False # "truck"
172 | 5: False # "other-vehicle"
173 | 6: False # "person"
174 | 7: False # "bicyclist"
175 | 8: False # "motorcyclist"
176 | 9: False # "road"
177 | 10: False # "parking"
178 | 11: False # "sidewalk"
179 | 12: False # "other-ground"
180 | 13: False # "building"
181 | 14: False # "fence"
182 | 15: False # "vegetation"
183 | 16: False # "trunk"
184 | 17: False # "terrain"
185 | 18: False # "pole"
186 | 19: False # "traffic-sign"
187 | split: # sequence numbers
188 | train:
189 | - 0
190 | - 1
191 | - 2
192 | - 3
193 | - 4
194 | - 5
195 | - 6
196 | - 7
197 | - 9
198 | - 10
199 | valid:
200 | - 8
201 | test:
202 | - 11
203 | - 12
204 | - 13
205 | - 14
206 | - 15
207 | - 16
208 | - 17
209 | - 18
210 | - 19
211 | - 20
212 | - 21
213 |
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/segmentation_CRF:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg-crf/segmentation_CRF
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/segmentation_decoder:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg-crf/segmentation_decoder
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg-crf/segmentation_head:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg-crf/segmentation_head
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg/arch_cfg.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.995 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 1 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezeseg" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | train: True # train backbone?
35 | extra: False
36 |
37 | ################################################################################
38 | # decoder parameters
39 | ################################################################################
40 | decoder:
41 | name: "squeezeseg"
42 | dropout: 0.01
43 | train: True # train decoder?
44 | extra: False # nothing to add for this decoder, otherwise this is a dict
45 |
46 | ################################################################################
47 | # classification head parameters
48 | ################################################################################
49 | head:
50 | name: "segmentation"
51 | train: True
52 | dropout: 0.3
53 |
54 | ################################################################################
55 | # postproc parameters
56 | ################################################################################
57 | post:
58 | CRF:
59 | use: False
60 | train: True
61 | params: False # this should be a dict when in use
62 | KNN:
63 | use: False
64 | params:
65 | knn: 5
66 | search: 5
67 | sigma: 1.0
68 | cutoff: 1.0
69 |
70 | ################################################################################
71 | # dataset parameters
72 | ################################################################################
73 | # dataset (to find parser)
74 | dataset:
75 | labels: "kitti"
76 | scans: "kitti"
77 | max_points: 150000 # max of any scan in dataset
78 | sensor:
79 | name: "HDL64"
80 | type: "spherical" # projective
81 | fov_up: 3
82 | fov_down: -25
83 | img_prop:
84 | width: 512
85 | height: 64
86 | img_means: #range,x,y,z,signal
87 | - 12.12
88 | - 10.88
89 | - 0.23
90 | - -1.04
91 | - 0.21
92 | img_stds: #range,x,y,z,signal
93 | - 12.32
94 | - 11.47
95 | - 6.91
96 | - 0.86
97 | - 0.16
98 |
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg/backbone:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg/backbone
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg/data_cfg.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
138 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
139 | 254: 6 # "moving-person" to "person" ------------------------------mapped
140 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
141 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
142 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
143 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
144 | 259: 5 # "moving-other-vehicle" to "other-vehicle" ----------------mapped
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | learning_ignore: # Ignore classes
167 | 0: True # "unlabeled", and others ignored
168 | 1: False # "car"
169 | 2: False # "bicycle"
170 | 3: False # "motorcycle"
171 | 4: False # "truck"
172 | 5: False # "other-vehicle"
173 | 6: False # "person"
174 | 7: False # "bicyclist"
175 | 8: False # "motorcyclist"
176 | 9: False # "road"
177 | 10: False # "parking"
178 | 11: False # "sidewalk"
179 | 12: False # "other-ground"
180 | 13: False # "building"
181 | 14: False # "fence"
182 | 15: False # "vegetation"
183 | 16: False # "trunk"
184 | 17: False # "terrain"
185 | 18: False # "pole"
186 | 19: False # "traffic-sign"
187 | split: # sequence numbers
188 | train:
189 | - 0
190 | - 1
191 | - 2
192 | - 3
193 | - 4
194 | - 5
195 | - 6
196 | - 7
197 | - 9
198 | - 10
199 | valid:
200 | - 8
201 | test:
202 | - 11
203 | - 12
204 | - 13
205 | - 14
206 | - 15
207 | - 16
208 | - 17
209 | - 18
210 | - 19
211 | - 20
212 | - 21
213 |
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg/segmentation_decoder:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg/segmentation_decoder
--------------------------------------------------------------------------------
/obstacle-detection/model/squeezeseg/segmentation_head:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/squeezeseg/segmentation_head
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | TRAIN_PATH = "../../"
3 | DEPLOY_PATH = "../../../deploy"
4 | sys.path.insert(0, TRAIN_PATH)
5 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet21.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.995 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 2 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 21
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.01
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.01
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: False
63 | train: True
64 | params: False # this should be a dict when in use
65 | KNN:
66 | use: False
67 | params:
68 | knn: 5
69 | search: 5
70 | sigma: 1.0
71 | cutoff: 1.0
72 |
73 | ################################################################################
74 | # dataset parameters
75 | ################################################################################
76 | # dataset (to find parser)
77 | dataset:
78 | labels: "kitti"
79 | scans: "kitti"
80 | max_points: 150000 # max of any scan in dataset
81 | sensor:
82 | name: "HDL64"
83 | type: "spherical" # projective
84 | fov_up: 3
85 | fov_down: -25
86 | img_prop:
87 | width: 2048
88 | height: 64
89 | img_means: #range,x,y,z,signal
90 | - 12.12
91 | - 10.88
92 | - 0.23
93 | - -1.04
94 | - 0.21
95 | img_stds: #range,x,y,z,signal
96 | - 12.32
97 | - 11.47
98 | - 6.91
99 | - 0.86
100 | - 0.16
101 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53-1024px.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 16 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.05
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.05
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.05
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: False
63 | train: True
64 | params: False # this should be a dict when in use
65 | KNN:
66 | use: False
67 | params:
68 | knn: 5
69 | search: 5
70 | sigma: 1.0
71 | cutoff: 1.0
72 |
73 | ################################################################################
74 | # dataset parameters
75 | ################################################################################
76 | # dataset (to find parser)
77 | dataset:
78 | labels: "kitti"
79 | scans: "kitti"
80 | max_points: 150000 # max of any scan in dataset
81 | sensor:
82 | name: "HDL64"
83 | type: "spherical" # projective
84 | fov_up: 3
85 | fov_down: -25
86 | img_prop:
87 | width: 1024
88 | height: 64
89 | img_means: #range,x,y,z,signal
90 | - 12.12
91 | - 10.88
92 | - 0.23
93 | - -1.04
94 | - 0.21
95 | img_stds: #range,x,y,z,signal
96 | - 12.32
97 | - 11.47
98 | - 6.91
99 | - 0.86
100 | - 0.16
101 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53-512px.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 32 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.05
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.05
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.05
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: False
63 | train: True
64 | params: False # this should be a dict when in use
65 | KNN:
66 | use: False
67 | params:
68 | knn: 5
69 | search: 5
70 | sigma: 1.0
71 | cutoff: 1.0
72 |
73 | ################################################################################
74 | # dataset parameters
75 | ################################################################################
76 | # dataset (to find parser)
77 | dataset:
78 | labels: "kitti"
79 | scans: "kitti"
80 | max_points: 150000 # max of any scan in dataset
81 | sensor:
82 | name: "HDL64"
83 | type: "spherical" # projective
84 | fov_up: 3
85 | fov_down: -25
86 | img_prop:
87 | width: 512
88 | height: 64
89 | img_means: #range,x,y,z,signal
90 | - 12.12
91 | - 10.88
92 | - 0.23
93 | - -1.04
94 | - 0.21
95 | img_stds: #range,x,y,z,signal
96 | - 12.32
97 | - 11.47
98 | - 6.91
99 | - 0.86
100 | - 0.16
101 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53-crf-1024px.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.001 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 12 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.05
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.05
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.05
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: True
63 | train: True
64 | params:
65 | iter: 3
66 | lcn_size:
67 | h: 3
68 | w: 5
69 | xyz_coef: 0.1
70 | xyz_sigma: 0.7
71 | KNN:
72 | use: False
73 | params:
74 | knn: 5
75 | search: 5
76 | sigma: 1.0
77 | cutoff: 1.0
78 |
79 | ################################################################################
80 | # dataset parameters
81 | ################################################################################
82 | # dataset (to find parser)
83 | dataset:
84 | labels: "kitti"
85 | scans: "kitti"
86 | max_points: 150000 # max of any scan in dataset
87 | sensor:
88 | name: "HDL64"
89 | type: "spherical" # projective
90 | fov_up: 3
91 | fov_down: -25
92 | img_prop:
93 | width: 1024
94 | height: 64
95 | img_means: #range,x,y,z,signal
96 | - 12.12
97 | - 10.88
98 | - 0.23
99 | - -1.04
100 | - 0.21
101 | img_stds: #range,x,y,z,signal
102 | - 12.32
103 | - 11.47
104 | - 6.91
105 | - 0.86
106 | - 0.16
107 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53-crf-512px.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.001 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 12 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: True # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.05
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.05
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.05
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: True
63 | train: True
64 | params:
65 | iter: 3
66 | lcn_size:
67 | h: 3
68 | w: 5
69 | xyz_coef: 0.1
70 | xyz_sigma: 0.7
71 | KNN:
72 | use: False
73 | params:
74 | knn: 5
75 | search: 5
76 | sigma: 1.0
77 | cutoff: 1.0
78 |
79 | ################################################################################
80 | # dataset parameters
81 | ################################################################################
82 | # dataset (to find parser)
83 | dataset:
84 | labels: "kitti"
85 | scans: "kitti"
86 | max_points: 150000 # max of any scan in dataset
87 | sensor:
88 | name: "HDL64"
89 | type: "spherical" # projective
90 | fov_up: 3
91 | fov_down: -25
92 | img_prop:
93 | width: 512
94 | height: 64
95 | img_means: #range,x,y,z,signal
96 | - 12.12
97 | - 10.88
98 | - 0.23
99 | - -1.04
100 | - 0.21
101 | img_stds: #range,x,y,z,signal
102 | - 12.32
103 | - 11.47
104 | - 6.91
105 | - 0.86
106 | - 0.16
107 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53-crf.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.002 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.01
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.01
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: True
63 | train: True
64 | params:
65 | iter: 3
66 | lcn_size:
67 | h: 3
68 | w: 5
69 | xyz_coef: 0.1
70 | xyz_sigma: 0.7
71 | KNN:
72 | use: False
73 | params:
74 | knn: 5
75 | search: 5
76 | sigma: 1.0
77 | cutoff: 1.0
78 |
79 | ################################################################################
80 | # dataset parameters
81 | ################################################################################
82 | # dataset (to find parser)
83 | dataset:
84 | labels: "kitti"
85 | scans: "kitti"
86 | max_points: 150000 # max of any scan in dataset
87 | sensor:
88 | name: "HDL64"
89 | type: "spherical" # projective
90 | fov_up: 3
91 | fov_down: -25
92 | img_prop:
93 | width: 2048
94 | height: 64
95 | img_means: #range,x,y,z,signal
96 | - 12.12
97 | - 10.88
98 | - 0.23
99 | - -1.04
100 | - 0.21
101 | img_stds: #range,x,y,z,signal
102 | - 12.32
103 | - 11.47
104 | - 6.91
105 | - 0.86
106 | - 0.16
107 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/darknet53.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.005 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "darknet" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | bn_d: 0.01
34 | OS: 32 # output stride (only horizontally)
35 | train: True # train backbone?
36 | extra:
37 | layers: 53
38 |
39 | ################################################################################
40 | # decoder parameters
41 | ################################################################################
42 | decoder:
43 | name: "darknet"
44 | dropout: 0.01
45 | bn_d: 0.01
46 | train: True # train decoder?
47 | extra: False # nothing to add for this decoder, otherwise this is a dict
48 |
49 | ################################################################################
50 | # classification head parameters
51 | ################################################################################
52 | head:
53 | name: "segmentation"
54 | train: True
55 | dropout: 0.01
56 |
57 | ################################################################################
58 | # postproc parameters
59 | ################################################################################
60 | post:
61 | CRF:
62 | use: False
63 | train: True
64 | params: False # this should be a dict when in use
65 | KNN:
66 | use: False
67 | params:
68 | knn: 5
69 | search: 5
70 | sigma: 1.0
71 | cutoff: 1.0
72 |
73 | ################################################################################
74 | # dataset parameters
75 | ################################################################################
76 | # dataset (to find parser)
77 | dataset:
78 | labels: "kitti"
79 | scans: "kitti"
80 | max_points: 150000 # max of any scan in dataset
81 | sensor:
82 | name: "HDL64"
83 | type: "spherical" # projective
84 | fov_up: 3
85 | fov_down: -25
86 | img_prop:
87 | width: 2048
88 | height: 64
89 | img_means: #range,x,y,z,signal
90 | - 12.12
91 | - 10.88
92 | - 0.23
93 | - -1.04
94 | - 0.21
95 | img_stds: #range,x,y,z,signal
96 | - 12.32
97 | - 11.47
98 | - 6.91
99 | - 0.86
100 | - 0.16
101 |
--------------------------------------------------------------------------------
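The epsilon_w comment in the train section describes inverse-frequency class weighting, w = 1 / (content + epsilon_w). A sketch of that computation, assuming the content ratios and learning_map come from the semantic-kitti labels file (both file paths below are placeholders):

import yaml

arch = yaml.safe_load(open("darknet53.yaml", 'r'))       # path assumed
data = yaml.safe_load(open("semantic-kitti.yaml", 'r'))  # path assumed

epsilon_w = arch["train"]["epsilon_w"]
num_classes = len(data["learning_map_inv"])

# fold the per-raw-label point ratios into the training class ids
content = [0.0] * num_classes
for raw_label, ratio in data["content"].items():
    content[data["learning_map"][raw_label]] += ratio

# w = 1 / (content + epsilon_w); ignored classes may additionally be zeroed
weights = [1.0 / (c + epsilon_w) for c in content]
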
/obstacle-detection/model/tasks/semantic/config/arch/squeezeseg.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.01 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.995 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 36 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezeseg" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | train: True # train backbone?
35 | extra: False
36 |
37 | ################################################################################
38 | # decoder parameters
39 | ################################################################################
40 | decoder:
41 | name: "squeezeseg"
42 | dropout: 0.01
43 | train: True # train decoder?
44 | extra: False # nothing to add for this decoder, otherwise this is a dict
45 |
46 | ################################################################################
47 | # classification head parameters
48 | ################################################################################
49 | head:
50 | name: "segmentation"
51 | train: True
52 | dropout: 0.3
53 |
54 | ################################################################################
55 | # postproc parameters
56 | ################################################################################
57 | post:
58 | CRF:
59 | use: False
60 | train: True
61 | params: False # this should be a dict when in use
62 | KNN:
63 | use: False
64 | params:
65 | knn: 5
66 | search: 5
67 | sigma: 1.0
68 | cutoff: 1.0
69 |
70 | ################################################################################
71 | # dataset parameters
72 | ################################################################################
73 | # dataset (to find parser)
74 | dataset:
75 | labels: "kitti"
76 | scans: "kitti"
77 | max_points: 150000 # max of any scan in dataset
78 | sensor:
79 | name: "HDL64"
80 | type: "spherical" # projective
81 | fov_up: 3
82 | fov_down: -25
83 | img_prop:
84 | width: 2048
85 | height: 64
86 | img_means: #range,x,y,z,signal
87 | - 12.12
88 | - 10.88
89 | - 0.23
90 | - -1.04
91 | - 0.21
92 | img_stds: #range,x,y,z,signal
93 | - 12.32
94 | - 11.47
95 | - 6.91
96 | - 0.86
97 | - 0.16
98 |
--------------------------------------------------------------------------------
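Note the shape of the post.CRF block in the file above: params is a dict only when the module is enabled, and a bare False otherwise. A loader therefore has to branch on the use flag before touching params; a sketch, with the arch dict and nclasses assumed to be already in scope and CRF standing for the module in postproc/CRF.py:

# honor the post.CRF toggle; `arch`, `nclasses`, and `CRF` assumed in scope
crf_cfg = arch["post"]["CRF"]
if crf_cfg["use"]:
    crf = CRF(crf_cfg["params"], nclasses)  # params is a dict here
else:
    crf = None                              # params may simply be False
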
/obstacle-detection/model/tasks/semantic/config/arch/squeezesegV2.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.002 # sgd learning rate
8 | wup_epochs: 1 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezesegV2" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | bn_d: 0.01
35 | train: True # train backbone?
36 | extra: False
37 |
38 | ################################################################################
39 | # decoder parameters
40 | ################################################################################
41 | decoder:
42 | name: "squeezesegV2"
43 | dropout: 0.01
44 | bn_d: 0.01
45 | train: True # train decoder?
46 | extra: False # nothing to add for this decoder, otherwise this is a dict
47 |
48 | ################################################################################
49 | # classification head parameters
50 | ################################################################################
51 | head:
52 | name: "segmentation"
53 | train: True
54 | dropout: 0.01
55 |
56 | ################################################################################
57 | # postproc parameters
58 | ################################################################################
59 | post:
60 | CRF:
61 | use: False
62 | train: True
63 | params: False # this should be a dict when in use
64 | KNN:
65 | use: False
66 | params:
67 | knn: 5
68 | search: 5
69 | sigma: 1.0
70 | cutoff: 1.0
71 |
72 | ################################################################################
73 | # dataset parameters
74 | ################################################################################
75 | # dataset (to find parser)
76 | dataset:
77 | labels: "kitti"
78 | scans: "kitti"
79 | max_points: 150000 # max of any scan in dataset
80 | sensor:
81 | name: "HDL64"
82 | type: "spherical" # projective
83 | fov_up: 3
84 | fov_down: -25
85 | img_prop:
86 | width: 2048
87 | height: 64
88 | img_means: #range,x,y,z,signal
89 | - 12.12
90 | - 10.88
91 | - 0.23
92 | - -1.04
93 | - 0.21
94 | img_stds: #range,x,y,z,signal
95 | - 12.32
96 | - 11.47
97 | - 6.91
98 | - 0.86
99 | - 0.16
100 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/squeezesegV2_crf.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.001 # sgd learning rate
8 | wup_epochs: 0.01 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: False # show scans during training
21 | workers: 12 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezesegV2" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | bn_d: 0.01
35 | train: True # train backbone?
36 | extra: False
37 |
38 | ################################################################################
39 | # decoder parameters
40 | ################################################################################
41 | decoder:
42 | name: "squeezesegV2"
43 | dropout: 0.01
44 | bn_d: 0.01
45 | train: True # train decoder?
46 | extra: False # nothing to add for this decoder, otherwise this is a dict
47 |
48 | ################################################################################
49 | # classification head parameters
50 | ################################################################################
51 | head:
52 | name: "segmentation"
53 | train: True
54 | dropout: 0.01
55 |
56 | ################################################################################
57 | # postproc parameters
58 | ################################################################################
59 | post:
60 | CRF:
61 | use: True
62 | train: True
63 | params:
64 | iter: 3
65 | lcn_size:
66 | h: 3
67 | w: 5
68 | xyz_coef: 0.1
69 | xyz_sigma: 0.7
70 | KNN:
71 | use: False
72 | params:
73 | knn: 5
74 | search: 5
75 | sigma: 1.0
76 | cutoff: 1.0
77 |
78 | ################################################################################
79 | # dataset parameters
80 | ################################################################################
81 | # dataset (to find parser)
82 | dataset:
83 | labels: "kitti"
84 | scans: "kitti"
85 | max_points: 150000 # max of any scan in dataset
86 | sensor:
87 | name: "HDL64"
88 | type: "spherical" # projective
89 | fov_up: 3
90 | fov_down: -25
91 | img_prop:
92 | width: 2048
93 | height: 64
94 | img_means: #range,x,y,z,signal
95 | - 12.12
96 | - 10.88
97 | - 0.23
98 | - -1.04
99 | - 0.21
100 | img_stds: #range,x,y,z,signal
101 | - 12.32
102 | - 11.47
103 | - 6.91
104 | - 0.86
105 | - 0.16
106 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/arch/squeezeseg_crf.yaml:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # training parameters
3 | ################################################################################
4 | train:
5 | loss: "xentropy" # must be either xentropy or iou
6 | max_epochs: 150
7 | lr: 0.001 # sgd learning rate
8 | wup_epochs: 0.01 # warmup during first XX epochs (can be float)
9 | momentum: 0.9 # sgd momentum
10 | lr_decay: 0.99 # learning rate decay per epoch after initial cycle (from min lr)
11 | w_decay: 0.0001 # weight decay
12 | batch_size: 8 # batch size
13 | report_batch: 1 # every x batches, report loss
14 | report_epoch: 1 # every x epochs, report validation set
15 | epsilon_w: 0.001 # class weight w = 1 / (content + epsilon_w)
16 | save_summary: False # Summary of weight histograms for tensorboard
17 | save_scans: True # False doesn't save anything, True saves some
18 | # sample images (one per batch of the last calculated batch)
19 | # in log folder
20 | show_scans: True # show scans during training
21 | workers: 4 # number of threads to get data
22 |
23 | ################################################################################
24 | # backbone parameters
25 | ################################################################################
26 | backbone:
27 | name: "squeezeseg" # ['squeezeseg', 'squeezesegV2', 'darknet']
28 | input_depth:
29 | range: True
30 | xyz: True
31 | remission: True
32 | dropout: 0.01
33 | OS: 16 # output stride (only horizontally)
34 | train: True # train backbone?
35 | extra: False
36 |
37 | ################################################################################
38 | # decoder parameters
39 | ################################################################################
40 | decoder:
41 | name: "squeezeseg"
42 | dropout: 0.01
43 | train: True # train decoder?
44 | extra: False # nothing to add for this decoder, otherwise this is a dict
45 |
46 | ################################################################################
47 | # classification head parameters
48 | ################################################################################
49 | head:
50 | name: "segmentation"
51 | train: True
52 | dropout: 0.01
53 |
54 | ################################################################################
55 | # postproc parameters
56 | ################################################################################
57 | post:
58 | CRF:
59 | use: True
60 | train: True
61 | params:
62 | iter: 3
63 | lcn_size:
64 | h: 3
65 | w: 5
66 | xyz_coef: 0.1
67 | xyz_sigma: 0.7
68 | KNN:
69 | use: False
70 | params:
71 | knn: 5
72 | search: 5
73 | sigma: 1.0
74 | cutoff: 1.0
75 |
76 | ################################################################################
77 | # dataset parameters
78 | ################################################################################
79 | # dataset (to find parser)
80 | dataset:
81 | labels: "kitti"
82 | scans: "kitti"
83 | max_points: 150000 # max of any scan in dataset
84 | sensor:
85 | name: "HDL64"
86 | type: "spherical" # projective
87 | fov_up: 3
88 | fov_down: -25
89 | img_prop:
90 | width: 2048
91 | height: 64
92 | img_means: #range,x,y,z,signal
93 | - 12.12
94 | - 10.88
95 | - 0.23
96 | - -1.04
97 | - 0.21
98 | img_stds: #range,x,y,z,signal
99 | - 12.32
100 | - 11.47
101 | - 6.91
102 | - 0.86
103 | - 0.16
104 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/config/labels/semantic-kitti.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
138 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
139 | 254: 6 # "moving-person" to "person" ------------------------------mapped
140 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
141 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
142 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
143 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
144 |   259: 5    # "moving-other-vehicle" to "other-vehicle" ----------------mapped
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | learning_ignore: # Ignore classes
167 | 0: True # "unlabeled", and others ignored
168 | 1: False # "car"
169 | 2: False # "bicycle"
170 | 3: False # "motorcycle"
171 | 4: False # "truck"
172 | 5: False # "other-vehicle"
173 | 6: False # "person"
174 | 7: False # "bicyclist"
175 | 8: False # "motorcyclist"
176 | 9: False # "road"
177 | 10: False # "parking"
178 | 11: False # "sidewalk"
179 | 12: False # "other-ground"
180 | 13: False # "building"
181 | 14: False # "fence"
182 | 15: False # "vegetation"
183 | 16: False # "trunk"
184 | 17: False # "terrain"
185 | 18: False # "pole"
186 | 19: False # "traffic-sign"
187 | split: # sequence numbers
188 | train:
189 | - 0
190 | - 1
191 | - 2
192 | - 3
193 | - 4
194 | - 5
195 | - 6
196 | - 7
197 | - 9
198 | - 10
199 | valid:
200 | - 8
201 | test:
202 | - 11
203 | - 12
204 | - 13
205 | - 14
206 | - 15
207 | - 16
208 | - 17
209 | - 18
210 | - 19
211 | - 20
212 | - 21
213 |
--------------------------------------------------------------------------------
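The learning_map above is typically applied as a dense lookup table so a whole scan can be remapped in one vectorized step. A sketch, assuming labels come from a SemanticKITTI .label file (a flat uint32 array whose lower 16 bits carry the semantic label, upper 16 the instance id); the file name below is just the sample from test/data:

import numpy as np
import yaml

data = yaml.safe_load(open("semantic-kitti.yaml", 'r'))  # path assumed

# dense lookup table over the raw label space (max key is 259)
lut = np.zeros(max(data["learning_map"]) + 1, dtype=np.int32)
for raw_label, train_id in data["learning_map"].items():
    lut[raw_label] = train_id

raw = np.fromfile("000100.label", dtype=np.uint32)  # sample scan labels
semantic = raw & 0xFFFF    # lower 16 bits: semantic label
train_ids = lut[semantic]  # remapped to the 20 training classes
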
/obstacle-detection/model/tasks/semantic/dataset/kitti/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/tasks/semantic/dataset/kitti/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/decoders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/tasks/semantic/decoders/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/decoders/darknet.py:
--------------------------------------------------------------------------------
1 | # This file was modified from https://github.com/BobLiu20/YOLOv3_PyTorch
2 | # It needed to be modified in order to accommodate the different strides in the decoder.
3 |
4 | import torch.nn as nn
5 | from collections import OrderedDict
6 | import torch.nn.functional as F
7 |
8 |
9 | class BasicBlock(nn.Module):
10 | def __init__(self, inplanes, planes, bn_d=0.1):
11 | super(BasicBlock, self).__init__()
12 | self.conv1 = nn.Conv2d(inplanes,
13 | planes[0],
14 | kernel_size=1,
15 | stride=1,
16 | padding=0,
17 | bias=False)
18 | self.bn1 = nn.BatchNorm2d(planes[0], momentum=bn_d)
19 | self.relu1 = nn.LeakyReLU(0.1)
20 | self.conv2 = nn.Conv2d(planes[0],
21 | planes[1],
22 | kernel_size=3,
23 | stride=1,
24 | padding=1,
25 | bias=False)
26 | self.bn2 = nn.BatchNorm2d(planes[1], momentum=bn_d)
27 | self.relu2 = nn.LeakyReLU(0.1)
28 |
29 | def forward(self, x):
30 | residual = x
31 |
32 | out = self.conv1(x)
33 | out = self.bn1(out)
34 | out = self.relu1(out)
35 |
36 | out = self.conv2(out)
37 | out = self.bn2(out)
38 | out = self.relu2(out)
39 |
40 | out += residual
41 | return out
42 |
43 |
44 | # ******************************************************************************
45 |
46 |
47 | class Decoder(nn.Module):
48 | """
49 | Class for DarknetSeg. Subclasses PyTorch's own "nn" module
50 | """
51 | def __init__(self, params, stub_skips, OS=32, feature_depth=1024):
52 | super(Decoder, self).__init__()
53 | self.backbone_OS = OS
54 | self.backbone_feature_depth = feature_depth
55 | self.drop_prob = params["dropout"]
56 | self.bn_d = params["bn_d"]
57 |
58 | # stride play
59 | self.strides = [2, 2, 2, 2, 2]
60 | # check current stride
61 | current_os = 1
62 | for s in self.strides:
63 | current_os *= s
64 | print("Decoder original OS: ", int(current_os))
65 | # redo strides according to needed stride
66 | for i, stride in enumerate(self.strides):
67 | if int(current_os) != self.backbone_OS:
68 | if stride == 2:
69 | current_os /= 2
70 | self.strides[i] = 1
71 | if int(current_os) == self.backbone_OS:
72 | break
73 | print("Decoder new OS: ", int(current_os))
74 | print("Decoder strides: ", self.strides)
75 |
76 | # decoder
77 | self.dec5 = self._make_dec_layer(
78 | BasicBlock, [self.backbone_feature_depth, 512],
79 | bn_d=self.bn_d,
80 | stride=self.strides[0])
81 | self.dec4 = self._make_dec_layer(BasicBlock, [512, 256],
82 | bn_d=self.bn_d,
83 | stride=self.strides[1])
84 | self.dec3 = self._make_dec_layer(BasicBlock, [256, 128],
85 | bn_d=self.bn_d,
86 | stride=self.strides[2])
87 | self.dec2 = self._make_dec_layer(BasicBlock, [128, 64],
88 | bn_d=self.bn_d,
89 | stride=self.strides[3])
90 | self.dec1 = self._make_dec_layer(BasicBlock, [64, 32],
91 | bn_d=self.bn_d,
92 | stride=self.strides[4])
93 |
94 | # layer list to execute with skips
95 | self.layers = [
96 | self.dec5, self.dec4, self.dec3, self.dec2, self.dec1
97 | ]
98 |
99 |         # dropout on the decoder output
100 | self.dropout = nn.Dropout2d(self.drop_prob)
101 |
102 | # last channels
103 | self.last_channels = 32
104 |
105 | def _make_dec_layer(self, block, planes, bn_d=0.1, stride=2):
106 | layers = []
107 |
108 |         # upsample
109 | if stride == 2:
110 | layers.append(("upconv",
111 | nn.ConvTranspose2d(planes[0],
112 | planes[1],
113 | kernel_size=[1, 4],
114 | stride=[1, 2],
115 | padding=[0, 1])))
116 | else:
117 | layers.append(("conv",
118 | nn.Conv2d(planes[0],
119 | planes[1],
120 | kernel_size=3,
121 | padding=1)))
122 | layers.append(("bn", nn.BatchNorm2d(planes[1],
123 | momentum=bn_d)))
124 | layers.append(("relu", nn.LeakyReLU(0.1)))
125 |
126 | # blocks
127 | layers.append(("residual", block(planes[1], planes, bn_d)))
128 |
129 | return nn.Sequential(OrderedDict(layers))
130 |
131 | def run_layer(self, x, layer, skips, os):
132 | feats = layer(x) # up
133 | if feats.shape[-1] > x.shape[-1]:
134 | os //= 2 # match skip
135 | feats = feats + skips[os].detach() # add skip
136 | x = feats
137 | return x, skips, os
138 |
139 | def forward(self, x, skips):
140 | os = self.backbone_OS
141 |
142 | # run layers
143 | x, skips, os = self.run_layer(x, self.dec5, skips, os)
144 | x, skips, os = self.run_layer(x, self.dec4, skips, os)
145 | x, skips, os = self.run_layer(x, self.dec3, skips, os)
146 | x, skips, os = self.run_layer(x, self.dec2, skips, os)
147 | x, skips, os = self.run_layer(x, self.dec1, skips, os)
148 |
149 | x = self.dropout(x)
150 |
151 | return x
152 |
153 | def get_last_depth(self):
154 | return self.last_channels
155 |
--------------------------------------------------------------------------------
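A quick smoke test for the Decoder above, assuming a 64x1024 range image, backbone OS 32, and feature depth 1024; the skips dict is keyed by the output stride at which the backbone cached each feature map, and the import path is a guess:

import torch
from decoders.darknet import Decoder  # import path assumed

dec = Decoder({"dropout": 0.01, "bn_d": 0.01},
              stub_skips=None, OS=32, feature_depth=1024)

# bottleneck features at OS=32, plus one skip per intermediate stride
x = torch.randn(1, 1024, 64, 1024 // 32)
skips = {os: torch.randn(1, ch, 64, 1024 // os)
         for os, ch in [(16, 512), (8, 256), (4, 128), (2, 64), (1, 32)]}

out = dec(x, skips)
print(out.shape)  # torch.Size([1, 32, 64, 1024])
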
/obstacle-detection/model/tasks/semantic/decoders/squeezeseg.py:
--------------------------------------------------------------------------------
1 | # Adapted from https://github.com/BichenWuUCB/SqueezeSeg
2 | from __future__ import print_function
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 | class FireUp(nn.Module):
9 | def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
10 | expand3x3_planes, stride):
11 | super(FireUp, self).__init__()
12 | self.inplanes = inplanes
13 | self.stride = stride
14 | self.activation = nn.ReLU(inplace=True)
15 | self.squeeze = nn.Conv2d(inplanes,
16 | squeeze_planes,
17 | kernel_size=1)
18 | if self.stride == 2:
19 | self.upconv = nn.ConvTranspose2d(squeeze_planes,
20 | squeeze_planes,
21 | kernel_size=[1, 4],
22 | stride=[1, 2],
23 | padding=[0, 1])
24 | self.expand1x1 = nn.Conv2d(squeeze_planes,
25 | expand1x1_planes,
26 | kernel_size=1)
27 | self.expand3x3 = nn.Conv2d(squeeze_planes,
28 | expand3x3_planes,
29 | kernel_size=3,
30 | padding=1)
31 |
32 | def forward(self, x):
33 | x = self.activation(self.squeeze(x))
34 | if self.stride == 2:
35 | x = self.activation(self.upconv(x))
36 | return torch.cat([
37 | self.activation(self.expand1x1(x)),
38 | self.activation(self.expand3x3(x))
39 | ], 1)
40 |
41 |
42 | # ******************************************************************************
43 |
44 |
45 | class Decoder(nn.Module):
46 | """
47 |     Decoder for SqueezeSeg. Subclasses PyTorch's own "nn" module
48 | """
49 | def __init__(self, params, stub_skips, OS=32, feature_depth=512):
50 | super(Decoder, self).__init__()
51 | self.backbone_OS = OS
52 | self.backbone_feature_depth = feature_depth
53 | self.drop_prob = params["dropout"]
54 |
55 | # stride play
56 | self.strides = [2, 2, 2, 2]
57 | # check current stride
58 | current_os = 1
59 | for s in self.strides:
60 | current_os *= s
61 | print("Decoder original OS: ", int(current_os))
62 | # redo strides according to needed stride
63 | for i, stride in enumerate(self.strides):
64 | if int(current_os) != self.backbone_OS:
65 | if stride == 2:
66 | current_os /= 2
67 | self.strides[i] = 1
68 | if int(current_os) == self.backbone_OS:
69 | break
70 | print("Decoder new OS: ", int(current_os))
71 | print("Decoder strides: ", self.strides)
72 |
73 | # decoder
75 | self.firedec10 = FireUp(self.backbone_feature_depth,
76 | 64,
77 | 128,
78 | 128,
79 | stride=self.strides[0])
80 | self.firedec11 = FireUp(256,
81 | 32,
82 | 64,
83 | 64,
84 | stride=self.strides[1])
85 | self.firedec12 = FireUp(128,
86 | 16,
87 | 32,
88 | 32,
89 | stride=self.strides[2])
90 | self.firedec13 = FireUp(64,
91 | 16,
92 | 32,
93 | 32,
94 | stride=self.strides[3])
95 |
96 | # layer list to execute with skips
97 | self.layers = [
98 | self.firedec10, self.firedec11, self.firedec12,
99 | self.firedec13
100 | ]
101 |
102 |         # dropout on the decoder output
103 | self.dropout = nn.Dropout2d(self.drop_prob)
104 |
105 | # last channels
106 | self.last_channels = 64
107 |
108 | def run_layer(self, x, layer, skips, os):
109 | feats = layer(x) # up
110 | if feats.shape[-1] > x.shape[-1]:
111 | os //= 2 # match skip
112 | feats = feats + skips[os].detach() # add skip
113 | x = feats
114 | return x, skips, os
115 |
116 | def forward(self, x, skips):
117 | os = self.backbone_OS
118 |
119 | # run layers
120 | x, skips, os = self.run_layer(x, self.firedec10, skips, os)
121 | x, skips, os = self.run_layer(x, self.firedec11, skips, os)
122 | x, skips, os = self.run_layer(x, self.firedec12, skips, os)
123 | x, skips, os = self.run_layer(x, self.firedec13, skips, os)
124 |
125 | x = self.dropout(x)
126 |
127 | return x
128 |
129 | def get_last_depth(self):
130 | return self.last_channels
131 |
--------------------------------------------------------------------------------
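FireUp concatenates its 1x1 and 3x3 expansions, so the output depth is expand1x1_planes + expand3x3_planes, and a stride of 2 doubles the width via the transposed convolution. A quick check mirroring the firedec10 configuration above (import path assumed):

import torch
from decoders.squeezeseg import FireUp  # import path assumed

up = FireUp(512, 64, 128, 128, stride=2)
y = up(torch.randn(1, 512, 64, 32))
print(y.shape)  # torch.Size([1, 256, 64, 64]): 128 + 128 channels, width doubled
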
/obstacle-detection/model/tasks/semantic/decoders/squeezesegV2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | from __future__ import print_function
5 | import torch
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 |
9 |
10 | class FireUp(nn.Module):
11 | def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
12 | expand3x3_planes, bn_d, stride):
13 | super(FireUp, self).__init__()
14 | self.inplanes = inplanes
15 | self.bn_d = bn_d
16 | self.stride = stride
17 | self.activation = nn.ReLU(inplace=True)
18 | self.squeeze = nn.Conv2d(inplanes,
19 | squeeze_planes,
20 | kernel_size=1)
21 | self.squeeze_bn = nn.BatchNorm2d(squeeze_planes,
22 | momentum=self.bn_d)
23 | if self.stride == 2:
24 | self.upconv = nn.ConvTranspose2d(squeeze_planes,
25 | squeeze_planes,
26 | kernel_size=[1, 4],
27 | stride=[1, 2],
28 | padding=[0, 1])
29 | self.expand1x1 = nn.Conv2d(squeeze_planes,
30 | expand1x1_planes,
31 | kernel_size=1)
32 | self.expand1x1_bn = nn.BatchNorm2d(expand1x1_planes,
33 | momentum=self.bn_d)
34 | self.expand3x3 = nn.Conv2d(squeeze_planes,
35 | expand3x3_planes,
36 | kernel_size=3,
37 | padding=1)
38 | self.expand3x3_bn = nn.BatchNorm2d(expand3x3_planes,
39 | momentum=self.bn_d)
40 |
41 | def forward(self, x):
42 | x = self.activation(self.squeeze_bn(self.squeeze(x)))
43 | if self.stride == 2:
44 | x = self.activation(self.upconv(x))
45 | return torch.cat([
46 | self.activation(self.expand1x1_bn(self.expand1x1(x))),
47 | self.activation(self.expand3x3_bn(self.expand3x3(x)))
48 | ], 1)
49 |
50 |
51 | # ******************************************************************************
52 |
53 |
54 | class Decoder(nn.Module):
55 | """
56 |     Decoder for SqueezeSegV2. Subclasses PyTorch's own "nn" module
57 | """
58 | def __init__(self, params, stub_skips, OS=32, feature_depth=512):
59 | super(Decoder, self).__init__()
60 | self.backbone_OS = OS
61 | self.backbone_feature_depth = feature_depth
62 | self.drop_prob = params["dropout"]
63 | self.bn_d = params["bn_d"]
64 |
65 | # stride play
66 | self.strides = [2, 2, 2, 2]
67 | # check current stride
68 | current_os = 1
69 | for s in self.strides:
70 | current_os *= s
71 | print("Decoder original OS: ", int(current_os))
72 | # redo strides according to needed stride
73 | for i, stride in enumerate(self.strides):
74 | if int(current_os) != self.backbone_OS:
75 | if stride == 2:
76 | current_os /= 2
77 | self.strides[i] = 1
78 | if int(current_os) == self.backbone_OS:
79 | break
80 | print("Decoder new OS: ", int(current_os))
81 | print("Decoder strides: ", self.strides)
82 |
83 | # decoder
85 | self.firedec10 = FireUp(self.backbone_feature_depth,
86 | 64,
87 | 128,
88 | 128,
89 | bn_d=self.bn_d,
90 | stride=self.strides[0])
91 | self.firedec11 = FireUp(256,
92 | 32,
93 | 64,
94 | 64,
95 | bn_d=self.bn_d,
96 | stride=self.strides[1])
97 | self.firedec12 = FireUp(128,
98 | 16,
99 | 32,
100 | 32,
101 | bn_d=self.bn_d,
102 | stride=self.strides[2])
103 | self.firedec13 = FireUp(64,
104 | 16,
105 | 32,
106 | 32,
107 | bn_d=self.bn_d,
108 | stride=self.strides[3])
109 |
110 |         # dropout on the decoder output
111 | self.dropout = nn.Dropout2d(self.drop_prob)
112 |
113 | # last channels
114 | self.last_channels = 64
115 |
116 | def run_layer(self, x, layer, skips, os):
117 | feats = layer(x) # up
118 | if feats.shape[-1] > x.shape[-1]:
119 | os //= 2 # match skip
120 | feats = feats + skips[os].detach() # add skip
121 | x = feats
122 | return x, skips, os
123 |
124 | def forward(self, x, skips):
125 | os = self.backbone_OS
126 |
127 | # run layers
128 | x, skips, os = self.run_layer(x, self.firedec10, skips, os)
129 | x, skips, os = self.run_layer(x, self.firedec11, skips, os)
130 | x, skips, os = self.run_layer(x, self.firedec12, skips, os)
131 | x, skips, os = self.run_layer(x, self.firedec13, skips, os)
132 |
133 | x = self.dropout(x)
134 |
135 | return x
136 |
137 | def get_last_depth(self):
138 | return self.last_channels
139 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/infer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | import argparse
5 | import subprocess
6 | import datetime
7 | import yaml
8 | from shutil import copyfile
9 | import os
10 | import shutil
11 | import __init__ as booger
12 |
13 | from tasks.semantic.modules.user import *
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser("./infer.py")
17 | parser.add_argument(
18 | '--dataset',
19 | '-d',
20 | type=str,
21 | required=True,
22 | help='Dataset to train with. No Default',
23 | )
24 | parser.add_argument(
25 | '--log',
26 | '-l',
27 | type=str,
28 | default=os.path.expanduser("~") + '/logs/' +
29 | datetime.datetime.now().strftime("%Y-%-m-%d-%H:%M") + '/',
30 | help=
31 | 'Directory to put the predictions. Default: ~/logs/date+time')
32 | parser.add_argument('--model',
33 | '-m',
34 | type=str,
35 | required=True,
36 | default=None,
37 | help='Directory to get the trained model.')
38 | FLAGS, unparsed = parser.parse_known_args()
39 |
40 | # print summary of what we will do
41 | print("----------")
42 | print("INTERFACE:")
43 | print("dataset", FLAGS.dataset)
44 | print("log", FLAGS.log)
45 | print("model", FLAGS.model)
46 | print("----------\n")
47 | print(
48 | "Commit hash (training version): ",
49 | str(
50 | subprocess.check_output(
51 | ['git', 'rev-parse', '--short', 'HEAD']).strip()))
52 | print("----------\n")
53 |
54 | # open arch config file
55 | try:
56 | print("Opening arch config file from %s" % FLAGS.model)
57 | ARCH = yaml.safe_load(
58 | open(FLAGS.model + "/arch_cfg.yaml", 'r'))
59 | except Exception as e:
60 | print(e)
61 | print("Error opening arch yaml file.")
62 | quit()
63 |
64 | # open data config file
65 | try:
66 | print("Opening data config file from %s" % FLAGS.model)
67 | DATA = yaml.safe_load(
68 | open(FLAGS.model + "/data_cfg.yaml", 'r'))
69 | except Exception as e:
70 | print(e)
71 | print("Error opening data yaml file.")
72 | quit()
73 |
74 | # create log folder
75 | try:
76 | if os.path.isdir(FLAGS.log):
77 | shutil.rmtree(FLAGS.log)
78 | os.makedirs(FLAGS.log)
79 | os.makedirs(os.path.join(FLAGS.log, "sequences"))
80 | for seq in DATA["split"]["train"]:
81 | seq = '{0:02d}'.format(int(seq))
82 | print("train", seq)
83 | os.makedirs(os.path.join(FLAGS.log, "sequences", seq))
84 | os.makedirs(
85 | os.path.join(FLAGS.log, "sequences", seq,
86 | "predictions"))
87 | for seq in DATA["split"]["valid"]:
88 | seq = '{0:02d}'.format(int(seq))
89 | print("valid", seq)
90 | os.makedirs(os.path.join(FLAGS.log, "sequences", seq))
91 | os.makedirs(
92 | os.path.join(FLAGS.log, "sequences", seq,
93 | "predictions"))
94 | for seq in DATA["split"]["test"]:
95 | seq = '{0:02d}'.format(int(seq))
96 | print("test", seq)
97 | os.makedirs(os.path.join(FLAGS.log, "sequences", seq))
98 | os.makedirs(
99 | os.path.join(FLAGS.log, "sequences", seq,
100 | "predictions"))
101 |     except Exception as e:
102 |         print(e)
103 |         print("Error creating log directory. Check permissions!")
104 |         quit()
110 |
111 | # does model folder exist?
112 | if os.path.isdir(FLAGS.model):
113 | print("model folder exists! Using model from %s" %
114 | (FLAGS.model))
115 | else:
116 |         print("model folder doesn't exist! Can't infer...")
117 | quit()
118 |
119 | # create user and infer dataset
120 | user = User(ARCH, DATA, FLAGS.dataset, FLAGS.log, FLAGS.model)
121 |     predict = user.infer()
122 |     predict2 = user.infer()  # run twice as a quick determinism sanity check
123 |     for i in range(len(predict)):
124 |         if predict[i] != predict2[i]:
125 |             print("warning: predictions differ at index", i)
126 |     print(predict.shape)
127 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/infer2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | import argparse
5 | import subprocess
6 | import datetime
7 | import yaml
8 | from shutil import copyfile
9 | import os
10 | import shutil
11 | import __init__ as booger
12 |
13 | from model.tasks.semantic.modules.user import *
14 |
15 |
16 | def get_user(model):
17 | # print summary of what we will do
18 | print("----------")
19 | print("INTERFACE:")
20 | print("model", model)
21 | print("----------\n")
22 |
23 | # open arch config file
24 | try:
25 | print("Opening arch config file from %s" % model)
26 | ARCH = yaml.safe_load(
27 | open(
28 | "/home/jovyan/work/model/" +
29 | model + "/arch_cfg.yaml", 'r'))
30 | except Exception as e:
31 | print(e)
32 | print("Error opening arch yaml file.")
33 | quit()
34 |
35 | # open data config file
36 | try:
37 | print("Opening data config file from %s" % model)
38 | DATA = yaml.safe_load(
39 | open(
40 | "/home/jovyan/work/model/" +
41 | model + "/data_cfg.yaml", 'r'))
42 | except Exception as e:
43 | print(e)
44 | print("Error opening data yaml file.")
45 | quit()
46 | model_dir = "/home/jovyan/work/model/" + model
47 | # does model folder exist?
48 | if os.path.isdir(model_dir):
49 | print("model folder exists! Using model from %s" % (model))
50 | else:
51 |         print("model folder doesn't exist! Can't infer...")
52 | quit()
53 |
54 | # create user and infer dataset
55 | user = Inference(ARCH, DATA, model_dir)
56 | return user
57 |
58 |
59 | #user = get_user()
60 | #predict = user.infer()
61 |
--------------------------------------------------------------------------------
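The commented-out lines at the bottom hint at the intended use of get_user. A hedged sketch: the Inference class lives in modules/user.py (not shown here), so the exact infer() signature is an assumption, and the model name must match a directory under the hard-coded /home/jovyan/work/model/ prefix:

# hypothetical usage; "squeezeseg" assumed to exist under /home/jovyan/work/model/
user = get_user("squeezeseg")  # loads the model's arch_cfg.yaml and data_cfg.yaml
predictions = user.infer()     # Inference API assumed from the commented stub
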
/obstacle-detection/model/tasks/semantic/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/tasks/semantic/modules/__init__.py
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/postproc/CRF.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | import numpy as np
5 | from scipy import signal
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | import __init__ as booger
10 |
11 |
12 | class LocallyConnectedXYZLayer(nn.Module):
13 | def __init__(self, h, w, sigma, nclasses):
14 | super().__init__()
15 | # size of window
16 | self.h = h
17 | self.padh = h // 2
18 | self.w = w
19 | self.padw = w // 2
20 | assert (self.h % 2 == 1
21 | and self.w % 2 == 1) # window must be odd
22 | self.sigma = sigma
23 | self.gauss_den = 2 * self.sigma**2
24 | self.nclasses = nclasses
25 |
26 | def forward(self, xyz, softmax, mask):
27 | # softmax size
28 | N, C, H, W = softmax.shape
29 |
30 |         # make softmax zero everywhere the input is invalid
31 | softmax = softmax * mask.unsqueeze(1).float()
32 |
33 | # get x,y,z for distance (shape N,1,H,W)
34 | x = xyz[:, 0].unsqueeze(1)
35 | y = xyz[:, 1].unsqueeze(1)
36 | z = xyz[:, 2].unsqueeze(1)
37 |
38 | # im2col in size of window of input (x,y,z separately)
39 | window_x = F.unfold(x,
40 | kernel_size=(self.h, self.w),
41 | padding=(self.padh, self.padw))
42 | center_x = F.unfold(x, kernel_size=(1, 1), padding=(0, 0))
43 | window_y = F.unfold(y,
44 | kernel_size=(self.h, self.w),
45 | padding=(self.padh, self.padw))
46 | center_y = F.unfold(y, kernel_size=(1, 1), padding=(0, 0))
47 | window_z = F.unfold(z,
48 | kernel_size=(self.h, self.w),
49 | padding=(self.padh, self.padw))
50 | center_z = F.unfold(z, kernel_size=(1, 1), padding=(0, 0))
51 |
52 | # sq distance to center (center distance is zero)
53 | unravel_dist2 = (window_x - center_x)**2 + \
54 | (window_y - center_y)**2 + \
55 | (window_z - center_z)**2
56 |
57 | # weight input distance by gaussian weights
58 | unravel_gaussian = torch.exp(-unravel_dist2 / self.gauss_den)
59 |
60 | # im2col in size of window of softmax to reweight by gaussian weights
61 | # from input
62 | cloned_softmax = softmax.clone()
63 | for i in range(self.nclasses):
64 | # get the softmax for this class
65 | c_softmax = softmax[:, i].unsqueeze(1)
66 | # unfold this class to weigh it by the proper gaussian weights
67 | unravel_softmax = F.unfold(c_softmax,
68 | kernel_size=(self.h, self.w),
69 | padding=(self.padh, self.padw))
70 | unravel_w_softmax = unravel_softmax * unravel_gaussian
71 |             # add dimension 1 to obtain the new softmax for this class
72 | unravel_added_softmax = unravel_w_softmax.sum(
73 | dim=1).unsqueeze(1)
74 | # fold it and put it in new tensor
75 | added_softmax = unravel_added_softmax.view(N, H, W)
76 | cloned_softmax[:, i] = added_softmax
77 |
78 | return cloned_softmax
79 |
80 |
81 | class CRF(nn.Module):
82 | def __init__(self, params, nclasses):
83 | super().__init__()
84 | self.params = params
85 | self.iter = torch.nn.Parameter(torch.tensor(params["iter"]),
86 | requires_grad=False)
87 | self.lcn_size = torch.nn.Parameter(torch.tensor(
88 | [params["lcn_size"]["h"], params["lcn_size"]["w"]]),
89 | requires_grad=False)
90 | self.xyz_coef = torch.nn.Parameter(
91 | torch.tensor(params["xyz_coef"]),
92 | requires_grad=False).float()
93 | self.xyz_sigma = torch.nn.Parameter(
94 | torch.tensor(params["xyz_sigma"]),
95 | requires_grad=False).float()
96 |
97 | self.nclasses = nclasses
98 | print("Using CRF!")
99 |
100 | # define layers here
101 | # compat init
102 | self.compat_kernel_init = np.reshape(
103 | np.ones((self.nclasses, self.nclasses)) -
104 | np.identity(self.nclasses),
105 | [self.nclasses, self.nclasses, 1, 1])
106 |
107 |         # bilateral compatibility matrices
108 | self.compat_conv = nn.Conv2d(self.nclasses, self.nclasses, 1)
109 | self.compat_conv.weight = torch.nn.Parameter(
110 | torch.from_numpy(self.compat_kernel_init).float() *
111 | self.xyz_coef,
112 | requires_grad=True)
113 |
114 | # locally connected layer for message passing
115 | self.local_conn_xyz = LocallyConnectedXYZLayer(
116 | params["lcn_size"]["h"], params["lcn_size"]["w"],
117 |             params["xyz_sigma"], self.nclasses)  # third argument is the gaussian sigma
118 |
119 | def forward(self, input, softmax, mask):
120 | # use xyz
121 | xyz = input[:, 1:4]
122 |
123 | # iteratively
124 |         for _ in range(self.iter):
125 | # message passing as locally connected layer
126 | locally_connected = self.local_conn_xyz(
127 | xyz, softmax, mask)
128 |
129 | # reweigh with the 1x1 convolution
130 | reweight_softmax = self.compat_conv(locally_connected)
131 |
132 | # add the new values to the original softmax
133 | reweight_softmax = reweight_softmax + softmax
134 |
135 | # lastly, renormalize
136 | softmax = F.softmax(reweight_softmax, dim=1)
137 |
138 | return softmax
139 |
--------------------------------------------------------------------------------
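Wiring the CRF module to the post.CRF.params block found in the arch YAMLs above; a smoke-test sketch, assuming 20 training classes and the usual 5-channel input layout (range, x, y, z, remission) so that input[:, 1:4] really is xyz (import path assumed):

import torch
from postproc.CRF import CRF  # import path assumed

params = {"iter": 3,
          "lcn_size": {"h": 3, "w": 5},
          "xyz_coef": 0.1,
          "xyz_sigma": 0.7}
crf = CRF(params, nclasses=20)

n, c, h, w = 1, 20, 64, 512
inputs = torch.randn(n, 5, h, w)              # ch 0: range, 1:4: xyz, 4: remission
softmax = torch.softmax(torch.randn(n, c, h, w), dim=1)
mask = torch.ones(n, h, w, dtype=torch.bool)  # all pixels valid

refined = crf(inputs, softmax, mask)
print(refined.shape)  # torch.Size([1, 20, 64, 512])
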
/obstacle-detection/model/tasks/semantic/postproc/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | TRAIN_PATH = "../"
3 | DEPLOY_PATH = "../../deploy"
4 | sys.path.insert(0, TRAIN_PATH)
5 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/obstacle-detection/model/tasks/semantic/readme.md
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | import argparse
5 | import subprocess
6 | import datetime
7 | import yaml
8 | from shutil import copyfile
9 | import os
10 | import shutil
11 | import __init__ as booger
12 |
13 | from tasks.semantic.modules.trainer import *
14 |
15 | if __name__ == '__main__':
16 | parser = argparse.ArgumentParser("./train.py")
17 | parser.add_argument(
18 | '--dataset',
19 | '-d',
20 | type=str,
21 | required=True,
22 | help='Dataset to train with. No Default',
23 | )
24 | parser.add_argument(
25 | '--arch_cfg',
26 | '-ac',
27 | type=str,
28 | required=True,
29 | help=
30 | 'Architecture yaml cfg file. See /config/arch for sample. No default!',
31 | )
32 | parser.add_argument(
33 | '--data_cfg',
34 | '-dc',
35 | type=str,
36 | required=False,
37 | default='config/labels/semantic-kitti.yaml',
38 | help=
39 | 'Classification yaml cfg file. See /config/labels for sample. No default!',
40 | )
41 | parser.add_argument(
42 | '--log',
43 | '-l',
44 | type=str,
45 | default=os.path.expanduser("~") + '/logs/' +
46 | datetime.datetime.now().strftime("%Y-%-m-%d-%H:%M") + '/',
47 | help='Directory to put the log data. Default: ~/logs/date+time'
48 | )
49 | parser.add_argument(
50 | '--pretrained',
51 | '-p',
52 | type=str,
53 | required=False,
54 | default=None,
55 | help=
56 | 'Directory to get the pretrained model. If not passed, do from scratch!'
57 | )
58 | FLAGS, unparsed = parser.parse_known_args()
59 |
60 | # print summary of what we will do
61 | print("----------")
62 | print("INTERFACE:")
63 | print("dataset", FLAGS.dataset)
64 | print("arch_cfg", FLAGS.arch_cfg)
65 | print("data_cfg", FLAGS.data_cfg)
66 | print("log", FLAGS.log)
67 | print("pretrained", FLAGS.pretrained)
68 | print("----------\n")
69 | print(
70 | "Commit hash (training version): ",
71 | subprocess.check_output(
72 | ['git', 'rev-parse', '--short', 'HEAD']
73 | ).strip().decode())
74 | print("----------\n")
75 |
76 | # open arch config file
77 | try:
78 | print("Opening arch config file %s" % FLAGS.arch_cfg)
79 | ARCH = yaml.safe_load(open(FLAGS.arch_cfg, 'r'))
80 | except Exception as e:
81 | print(e)
82 | print("Error opening arch yaml file.")
83 | quit()
84 |
85 | # open data config file
86 | try:
87 | print("Opening data config file %s" % FLAGS.data_cfg)
88 | DATA = yaml.safe_load(open(FLAGS.data_cfg, 'r'))
89 | except Exception as e:
90 | print(e)
91 | print("Error opening data yaml file.")
92 | quit()
93 |
94 | # create log folder
95 | try:
96 | if os.path.isdir(FLAGS.log):
97 | shutil.rmtree(FLAGS.log)
98 | os.makedirs(FLAGS.log)
99 | except Exception as e:
100 | print(e)
101 | print("Error creating log directory. Check permissions!")
102 | quit()
103 |
104 | # does model folder exist?
105 | if FLAGS.pretrained is not None:
106 | if os.path.isdir(FLAGS.pretrained):
107 | print("model folder exists! Using model from %s" %
108 | (FLAGS.pretrained))
109 | else:
110 | print(
111 | "model folder doesnt exist! Start with random weights..."
112 | )
113 | else:
114 | print("No pretrained directory found.")
115 |
116 | # copy all files to log folder (to remember what we did, and make inference
117 | # easier). Also, standardize name to be able to open it later
118 | try:
119 | print("Copying files to %s for further reference." %
120 | FLAGS.log)
121 | copyfile(FLAGS.arch_cfg, FLAGS.log + "/arch_cfg.yaml")
122 | copyfile(FLAGS.data_cfg, FLAGS.log + "/data_cfg.yaml")
123 | except Exception as e:
124 | print(e)
125 | print("Error copying files, check permissions. Exiting...")
126 | quit()
127 |
128 | # create trainer and start the training
129 | trainer = Trainer(ARCH, DATA, FLAGS.dataset, FLAGS.log,
130 | FLAGS.pretrained)
131 | trainer.train()
132 |
--------------------------------------------------------------------------------
/obstacle-detection/model/tasks/semantic/visualize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # This file is covered by the LICENSE file in the root of this project.
3 |
4 | import argparse
5 | import os
6 | import yaml
7 | import __init__ as booger
8 |
9 | from common.laserscan import LaserScan, SemLaserScan
10 | from common.laserscanvis import LaserScanVis
11 |
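    | # Example invocation (hypothetical paths):
    | #   ./visualize.py -d /path/to/dataset -s 00 -p /path/to/predictions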
12 | if __name__ == '__main__':
13 | parser = argparse.ArgumentParser("./visualize.py")
14 | parser.add_argument(
15 | '--dataset',
16 | '-d',
17 | type=str,
18 | required=True,
19 | help='Dataset to visualize. No Default',
20 | )
21 | parser.add_argument(
22 | '--config',
23 | '-c',
24 | type=str,
25 | required=False,
26 | default="config/labels/semantic-kitti.yaml",
27 | help='Dataset config file. Defaults to %(default)s',
28 | )
29 | parser.add_argument(
30 | '--sequence',
31 | '-s',
32 | type=str,
33 | default="00",
34 | required=False,
35 | help='Sequence to visualize. Defaults to %(default)s',
36 | )
37 | parser.add_argument(
38 | '--predictions',
39 | '-p',
40 | type=str,
41 | default=None,
42 | required=False,
43 | help=
44 | 'Alternate location for labels, to use predictions folder. '
45 | 'Must point to a directory containing the predictions in the proper format '
46 | '(see readme). '
47 | 'Defaults to %(default)s',
48 | )
49 | parser.add_argument(
50 | '--ignore_semantics',
51 | '-i',
52 | dest='ignore_semantics',
53 | default=False,
54 | action='store_true',
55 | help='Ignore semantics. Visualizes uncolored pointclouds. '
56 | 'Defaults to %(default)s',
57 | )
58 | parser.add_argument(
59 | '--offset',
60 | type=int,
61 | default=0,
62 | required=False,
63 | help='Scan number to start at. Defaults to %(default)s',
64 | )
65 | parser.add_argument(
66 | '--ignore_safety',
67 | dest='ignore_safety',
68 | default=False,
69 | action='store_true',
70 | help=
71 | 'Normally you want the number of labels and point clouds to be the same, '
72 | 'but if you are not done inferring this is not the case, so this disables '
73 | 'that safety check. '
74 | 'Defaults to %(default)s',
75 | )
76 | FLAGS, unparsed = parser.parse_known_args()
77 |
78 | # print summary of what we will do
79 | print("*" * 80)
80 | print("INTERFACE:")
81 | print("Dataset", FLAGS.dataset)
82 | print("Config", FLAGS.config)
83 | print("Sequence", FLAGS.sequence)
84 | print("Predictions", FLAGS.predictions)
85 | print("ignore_semantics", FLAGS.ignore_semantics)
86 | print("ignore_safety", FLAGS.ignore_safety)
87 | print("offset", FLAGS.offset)
88 | print("*" * 80)
89 |
90 | # open config file
91 | try:
92 | print("Opening config file %s" % FLAGS.config)
93 | CFG = yaml.safe_load(open(FLAGS.config, 'r'))
94 | except Exception as e:
95 | print(e)
96 | print("Error opening yaml file.")
97 | quit()
98 |
99 | # fix sequence name
100 | FLAGS.sequence = '{0:02d}'.format(int(FLAGS.sequence))
101 |
102 | # does sequence folder exist?
103 | scan_paths = os.path.join(FLAGS.dataset, "sequences",
104 | FLAGS.sequence, "velodyne")
105 | if os.path.isdir(scan_paths):
106 | print("Sequence folder exists! Using sequence from %s" %
107 | scan_paths)
108 | else:
109 | print("Sequence folder doesn't exist! Exiting...")
110 | quit()
111 |
112 | # populate the pointclouds
113 | scan_names = sorted([
114 | os.path.join(dp, f)
115 | for dp, dn, fn in os.walk(os.path.expanduser(scan_paths))
116 | for f in fn
117 | ])
118 |
119 | # does label folder exist?
120 | if not FLAGS.ignore_semantics:
121 | if FLAGS.predictions is not None:
122 | label_paths = os.path.join(FLAGS.predictions, "sequences",
123 | FLAGS.sequence, "predictions")
124 | else:
125 | label_paths = os.path.join(FLAGS.dataset, "sequences",
126 | FLAGS.sequence, "labels")
127 | if os.path.isdir(label_paths):
128 | print("Labels folder exists! Using labels from %s" %
129 | label_paths)
130 | else:
131 | print("Labels folder doesn't exist! Exiting...")
132 | quit()
133 | # populate the label names
134 | label_names = sorted([
135 | os.path.join(dp, f)
136 | for dp, dn, fn in os.walk(os.path.expanduser(label_paths))
137 | for f in fn
138 | ])
139 |
140 | # check that there are same amount of labels and scans
141 | if not FLAGS.ignore_safety:
142 | assert (len(label_names) == len(scan_names))
143 |
144 | # create a scan
145 | if FLAGS.ignore_semantics:
146 | # project all opened scans to spheric proj
147 | scan = LaserScan(project=True)
148 | else:
149 | color_dict = CFG["color_map"]
150 | scan = SemLaserScan(color_dict, project=True)
151 |
152 | # create a visualizer
153 | semantics = not FLAGS.ignore_semantics
154 | if not semantics:
155 | label_names = None
156 | vis = LaserScanVis(scan=scan,
157 | scan_names=scan_names,
158 | label_names=label_names,
159 | offset=FLAGS.offset,
160 | semantics=semantics,
161 | instances=False)
162 |
163 | # print instructions
164 | print("To navigate:")
165 | print("\tb: back (previous scan)")
166 | print("\tn: next (next scan)")
167 | print("\tq: quit (exit program)")
168 |
169 | # run the visualizer
170 | vis.run()
171 |
--------------------------------------------------------------------------------
/obstacle-detection/pipeline/pcl_pipeline.py:
--------------------------------------------------------------------------------
1 | from pipeline import common
2 | from datetime import datetime, timedelta
3 | from importlib import reload
4 | from pipeline import pcl_utils
6 | import pandas as pd
7 | import numpy as np
8 | import pcl
9 |
10 | pcl_utils = reload(pcl_utils)
11 |
12 |
13 | def pipeline_optimized_pcl(
14 | scan, label, obstacle_lst, verbose=False, exec_time=False, **params
15 | ):
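    | """Optimized obstacle-detection pipeline built on python-pcl.
    |
    | Steps: ROI filter -> obstacle-class filter -> voxel-grid downsampling
    | -> Euclidean clustering -> per-cluster outlier filtering and min/max
    | bounding boxes. Returns (boxes, cluster_data) and, when exec_time=True,
    | also a dict of per-stage timings in seconds.
    | """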
16 | # get segment id
17 | start_time = datetime.now()
18 | pcloud = pd.DataFrame(
19 | np.concatenate((scan, label.reshape(len(label), 1)), axis=1),
20 | columns=["x", "y", "z", "seg_id"],
21 | )
22 |
23 | pcloud = common.roi_filter(
24 | pcloud,
25 | min_x=params["roi_x_min"],
26 | max_x=params["roi_x_max"],
27 | min_y=params["roi_y_min"],
28 | max_y=params["roi_y_max"],
29 | min_z=params["roi_z_min"],
30 | max_z=params["roi_z_max"],
31 | verbose=False,
32 | )
33 |
34 | pcloud = common.obstacle_filter(
35 | pcloud, obstacle_lst, proc_labels=True, verbose=False
36 | )
37 | pcloud = pcloud.drop(["seg_id"], axis=1)
38 | pcloud = pcloud.drop(["camera"], axis=1)
39 | obstacle_time = datetime.now() - start_time
40 | if len(pcloud.index) > 0:
41 | start_time = datetime.now()
42 | pcloud_pcl = pcl.PointCloud()
43 | pcloud_pcl.from_array(pcloud.to_numpy(dtype=np.float32))
44 | convert_time = datetime.now() - start_time
45 |
46 | # get voxel grid
47 | start_time = datetime.now()
48 | voxelgrid_id = pcl_utils.voxel_filter(
49 | pcloud_pcl, [params["x_voxels"], params["y_voxels"], params["z_voxels"]]
50 | )
51 | # voxelgrid_id = pcloud_pcl
52 | voxel_time = datetime.now() - start_time
53 |
54 | # ROI filter
55 | start_time = datetime.now()
56 | pcloud_roi = pcl_utils.roi_filter(
57 | voxelgrid_id,
58 | [params["roi_x_min"], params["roi_x_max"]],
59 | [params["roi_y_min"], params["roi_y_max"]],
60 | [params["roi_z_min"], params["roi_z_max"]],
61 | )
62 | roi_time = datetime.now() - start_time
63 |
64 | # get cluster
65 | start_time = datetime.now()
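    | # extract([], negative=True) returns every point NOT in the (empty)
    | # index list, i.e. a copy of the whole filtered cloud for clustering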
66 | cluster_data = pcloud_roi.extract([], negative=True)
67 | cluster_indices = pcl_utils.clustering(
68 | cluster_data, params["tol_distance"], params["min_cluster_size"], 150000
69 | )
70 | clustering_time = datetime.now() - start_time
71 |
72 | # get bboxes
73 | start_time = datetime.now()
74 | box_min_max_list, _ = pcl_utils.get_cluster_box_list(
75 | cluster_indices,
76 | cluster_data,
77 | radius_search=params["radius_search"],
78 | min_neighbors_in_radius=params["min_neighbors_in_radius"],
79 | )
80 | bbox_time = datetime.now() - start_time
81 | else:
82 | box_min_max_list, cluster_data = np.empty((0, 0)), np.empty((0, 0))
83 | # zero timedeltas keep the .total_seconds() calls below from failing;
84 | # obstacle_time keeps the value measured above
85 | roi_time, voxel_time, convert_time = timedelta(0), timedelta(0), timedelta(0)
86 | clustering_time, bbox_time = timedelta(0), timedelta(0)
85 |
86 | if verbose:
87 | print("Execution time:")
88 | print("\n-ROI filtering: {:.5f}s".format(roi_time.total_seconds()))
89 | print("\n-Filtering obstacles: {:.5f}s".format(obstacle_time.total_seconds()))
90 | print("\n-Voxel grid: {:.5f}s".format(voxel_time.total_seconds()))
91 | print("\n-Clustering: {:.5f}s".format(clustering_time.total_seconds()))
92 | print(
93 | "\n-Min-max cluster points: {:.5f} s \n".format(bbox_time.total_seconds())
94 | )
95 |
96 | if exec_time:
97 | return (
98 | box_min_max_list,
99 | cluster_data,
100 | {
101 | "roi_time": roi_time.total_seconds(),
102 | "filter_obstacle_time": obstacle_time.total_seconds(),
103 | "voxel_grid_time": voxel_time.total_seconds(),
104 | "clustering_time": clustering_time.total_seconds(),
105 | "outlier_filter_bbox_time": bbox_time.total_seconds(),
106 | "convert_time": convert_time.total_seconds(),
107 | },
108 | )
109 | else:
110 | return box_min_max_list, cluster_data
111 |
--------------------------------------------------------------------------------
/obstacle-detection/pipeline/pipeline.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import importlib as imp
4 |
5 | from datetime import datetime
6 | from sklearn.cluster import DBSCAN
7 | from pipeline import common
8 |
9 | common = imp.reload(common)
12 |
13 |
14 | def pipeline(
15 | scan, label, obstacle_lst, verbose=False, OBBoxes=False, exec_time=False, **params
16 | ):
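    | """Pandas/scikit-learn obstacle-detection pipeline.
    |
    | Steps: ROI filter -> obstacle-class filter -> DBSCAN clustering ->
    | per-cluster outlier filtering and bounding boxes (axis-aligned min/max,
    | or oriented boxes when OBBoxes=True). Returns (clusters, cluster_data)
    | and, when exec_time=True, also a dict of per-stage timings in seconds.
    | """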
17 | """ ROI filtering """
18 | ##########################################################################
19 | start_time = datetime.now()
20 | pcloud = pd.DataFrame(
21 | np.concatenate((scan, label.reshape(len(label), 1)), axis=1),
22 | columns=["x", "y", "z", "seg_id"],
23 | )
24 | pcloud = common.roi_filter(
25 | pcloud,
26 | min_x=params["roi_x_min"],
27 | max_x=params["roi_x_max"],
28 | min_y=params["roi_y_min"],
29 | max_y=params["roi_y_max"],
30 | min_z=params["roi_z_min"],
31 | max_z=params["roi_z_max"],
32 | verbose=False,
33 | )
34 | roi_time = (datetime.now() - start_time).total_seconds()
35 | ##########################################################################
36 | """ Obstacles filtering """
37 | ##########################################################################
38 | start_time = datetime.now()
39 | pcloud = common.obstacle_filter(
40 | pcloud, obstacle_lst, proc_labels=True, verbose=False
41 | )
42 | obstacle_time = (datetime.now() - start_time).total_seconds()
43 | ##########################################################################
44 |
45 | if len(pcloud) > 200:
46 |
47 | # Voxel-grid step is skipped in this pandas-based pipeline; the timer
48 | # below only keeps the timing report consistent with the PCL pipeline
48 | start_time = datetime.now()
49 | voxel_time = (datetime.now() - start_time).total_seconds()
50 | """ Сlustering obstacles """
51 | #######################################################################
52 | start_time = datetime.now()
53 | clusterer = DBSCAN(
54 | eps=params["eps"],
55 | min_samples=params["min_samples"],
56 | algorithm="auto",
57 | leaf_size=params["leaf_size"],
58 | n_jobs=-1,
59 | )
60 | clusterer.fit(pcloud[["x", "y", "z"]])
61 | pcloud["cluster_id"] = clusterer.labels_
62 | cluster_time = (datetime.now() - start_time).total_seconds()
63 | #######################################################################
64 | """ Getting bounding boxes coord """
65 | #######################################################################
66 | start_time = datetime.now()
67 | pcloud["norm"] = np.sqrt(np.square(pcloud[["x", "y", "z"]]).sum(axis=1))
68 | cluster_data = pd.DataFrame.from_dict(
69 | {"x": [], "y": [], "z": [], "cluster_id": []}
70 | )
71 | clusters = []
72 | for _id in sorted(pcloud["cluster_id"].unique()):
73 | if _id == -1 or not 50 < len(pcloud[pcloud["cluster_id"] == _id]) < 5000:
74 | continue
75 | tcluster = pcloud[pcloud["cluster_id"] == _id]
76 | tcluster = common.outlier_filter(tcluster, verbose=False)
77 | cluster_data = cluster_data.append(tcluster)
78 | if OBBoxes:
79 | obb = common.get_OBB(tcluster[["x", "y", "z"]])
80 | clusters.append(obb)
81 | if not OBBoxes:
82 | clusters = (
83 | cluster_data.groupby(["cluster_id"])
84 | .agg({"x": ["min", "max"], "y": ["min", "max"], "z": ["min", "max"]})
85 | .values
86 | )
87 | bb_time = (datetime.now() - start_time).total_seconds()
88 | #######################################################################
89 | else:
90 | clusters, cluster_data = np.empty((0, 0)), np.empty((0, 0))
91 | voxel_time, cluster_time, bb_time = 0, 0, 0
92 |
93 | if verbose:
94 | print("Execution time:")
95 | print("\n - ROI filtering: {:.5f}s".format(roi_time))
96 | print("\n - Filtering obstacles: {:.5f}s".format(obstacle_time))
97 | print("\n - Voxel grid: {:.5f}s".format(voxel_time))
98 | print("\n - Clustering: {:.5f}s".format(cluster_time))
99 | print("\n - Min-max cluster points: {:.5f}s \n".format(bb_time))
100 |
101 | if exec_time:
102 | return (
103 | clusters,
104 | cluster_data,
105 | {
106 | "roi_time": roi_time,
107 | "filter_obstacle_time": obstacle_time,
108 | "voxel_grid_time": voxel_time,
109 | "clustering_time": cluster_time,
110 | "outlier_filter_bbox_time": bb_time,
111 | },
112 | )
113 | else:
114 | return clusters, cluster_data
115 |
--------------------------------------------------------------------------------
/obstacle-detection/scripts/draw.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Wed Sep 25 20:52:45 2019
3 | @author: kyleguan
4 |
5 | """
6 |
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 |
10 |
11 | def draw_box(pyplot_axis, vertices, axes=[0, 1, 2], color='red'):
12 | """
13 | Draws a bounding 3D box in a pyplot axis.
14 |
15 | Parameters
16 | ----------
17 | pyplot_axis : Pyplot axis to draw in.
18 | vertices : Array of 8 box vertices containing x, y, z coordinates.
19 | axes : Axes to use. Defaults to `[0, 1, 2]`, i.e. the x, y and z axes.
20 | color : Drawing color. Defaults to `red`.
21 | """
22 | vertices = vertices[axes, :]
23 | connections = [
24 | [0, 1],
25 | [1, 2],
26 | [2, 3],
27 | [3, 0], # Lower plane parallel to Z=0 plane
28 | [4, 5],
29 | [5, 6],
30 | [6, 7],
31 | [7, 4], # Upper plane parallel to Z=0 plane
32 | # Connections between upper and lower planes
33 | [0, 4],
34 | [1, 5],
35 | [2, 6],
36 | [3, 7]
37 | ]
38 | for connection in connections:
39 | pyplot_axis.plot(*vertices[:, connection], c=color, lw=0.5)
40 |
41 |
42 | def draw_point_cloud(cloud, ax, title, axes_str, axes=[0, 1, 2]):
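    | """Scatter-plot a point cloud on a pyplot axis, colored by intensity
    | when a fourth (I) channel is present, plain blue otherwise."""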
43 |
44 | cloud = np.array(cloud)  # Convert point cloud to numpy array
45 | no_points = np.shape(cloud)[0]
46 | # Adjust the point size based on the point cloud size
47 | point_size = 10**(3 - int(np.log10(no_points)))
48 | if np.shape(cloud)[1] == 4:  # point cloud in XYZI format (I stands for intensity)
51 | ax.scatter(*np.transpose(cloud[:, axes]),
52 | s=point_size,
53 | c=cloud[:, 3],
54 | cmap='gray')
55 | elif np.shape(cloud)[1] == 3: # If point cloud is XYZ format
56 | ax.scatter(*np.transpose(cloud[:, axes]),
57 | s=point_size,
58 | c='b',
59 | alpha=0.7)
60 | ax.set_xlabel('{} axis'.format(axes_str[axes[0]]))
61 | ax.set_ylabel('{} axis'.format(axes_str[axes[1]]))
62 | # if len(axes) > 2: # 3-D plot
63 | # ax.set_xlim3d(axes_limits[axes[0]])
64 | # ax.set_ylim3d(axes_limits[axes[1]])
65 | # ax.set_zlim3d(axes_limits[axes[2]])
66 | # ax.set_zlabel('{} axis'.format(axes_str[axes[2]]))
67 | # else: # 2-D plot
68 | # ax.set_xlim(*axes_limits[axes[0]])
69 | # ax.set_ylim(*axes_limits[axes[1]])
70 | # # User specified limits
71 | # if xlim3d!=None:
72 | # ax.set_xlim3d(xlim3d)
73 | # if ylim3d!=None:
74 | # ax.set_ylim3d(ylim3d)
75 | # if zlim3d!=None:
76 | # ax.set_zlim3d(zlim3d)
77 | ax.set_title(title)
78 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 79
3 | [pycodestyle]
4 | count = False
5 | ignore = E226, E302, E41, E501, E125, E126, E129, E251, E265, W503, W504, E402
6 | max-line-length = 160
7 | statistics = True
8 | [yapf]
9 | column_limit = 160
10 |
--------------------------------------------------------------------------------
/test/config.yaml:
--------------------------------------------------------------------------------
1 | segments:
2 | 0 : unlabeled
3 | 1 : outlier
4 | 10: car
5 | 11: bicycle
6 | 13: bus
7 | 15: motorcycle
8 | 16: on-rails
9 | 18: truck
10 | 20: other-vehicle
11 | 30: person
12 | 31: bicyclist
13 | 32: motorcyclist
14 | 40: road
15 | 44: parking
16 | 48: sidewalk
17 | 49: other-ground
18 | 50: building
19 | 51: fence
20 | 52: other-structure
21 | 60: lane-marking
22 | 70: vegetation
23 | 71: trunk
24 | 72: terrain
25 | 80: pole
26 | 81: traffic-sign
27 | 99: other-object
28 | 252: moving-car
29 | 253: moving-bicyclist
30 | 254: moving-person
31 | 255: moving-motorcyclist
32 | 256: moving-on-rails
33 | 257: moving-bus
34 | 258: moving-truck
35 | 259: moving-other-vehicle
36 | obstacles:
37 | 10: car
38 | 11: bicycle
39 | 13: bus
40 | 15: motorcycle
41 | 16: on-rails
42 | 18: truck
43 | 20: other-vehicle
44 | 30: person
45 | 31: bicyclist
46 | 32: motorcyclist
47 | 252: moving-car
48 | 253: moving-bicyclist
49 | 254: moving-person
50 | 255: moving-motorcyclist
51 | 256: moving-on-rails
52 | 257: moving-bus
53 | 258: moving-truck
54 | 259: moving-other-vehicle
55 | learning_map:
56 | 0 : 0 # "unlabeled"
57 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
58 | 10: 1 # "car"
59 | 11: 2 # "bicycle"
60 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
61 | 15: 3 # "motorcycle"
62 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
63 | 18: 4 # "truck"
64 | 20: 5 # "other-vehicle"
65 | 30: 6 # "person"
66 | 31: 7 # "bicyclist"
67 | 32: 8 # "motorcyclist"
68 | 40: 9 # "road"
69 | 44: 10 # "parking"
70 | 48: 11 # "sidewalk"
71 | 49: 12 # "other-ground"
72 | 50: 13 # "building"
73 | 51: 14 # "fence"
74 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
75 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
76 | 70: 15 # "vegetation"
77 | 71: 16 # "trunk"
78 | 72: 17 # "terrain"
79 | 80: 18 # "pole"
80 | 81: 19 # "traffic-sign"
81 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
82 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
83 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
84 | 254: 6 # "moving-person" to "person" ------------------------------mapped
85 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
86 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
87 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
88 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
89 | 259: 5 # "moving-other"-vehicle to "other-vehicle" ----------------mapped
90 |
--------------------------------------------------------------------------------
/test/data/000100.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000100.bin
--------------------------------------------------------------------------------
/test/data/000100.label:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000100.label
--------------------------------------------------------------------------------
/test/data/000101.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000101.bin
--------------------------------------------------------------------------------
/test/data/000101.label:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000101.label
--------------------------------------------------------------------------------
/test/data/000102.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000102.bin
--------------------------------------------------------------------------------
/test/data/000102.label:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VirtualRoyalty/PointCloudSegmentation/b984844a356960395988a9c23f13614973db971f/test/data/000102.label
--------------------------------------------------------------------------------
/test/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas==1.0.3
2 | scipy==1.4.1
3 | numpy==1.22.0
4 | pyyaml
5 |
--------------------------------------------------------------------------------
/test/simple_checks.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import yaml
3 | import glob
4 | import pytest
5 |
6 | import numpy as np
7 | import pandas as pd
8 |
9 | sys.path.append('./obstacle-detection/')
10 | from pipeline import common
11 |
12 | scan_lst = sorted(glob.glob("./test/data/*.bin"))
13 | label_lst = sorted(glob.glob("./test/data/*.label"))
14 |
15 | with open('./test/config.yaml') as file:
16 | config = yaml.load(file, Loader=yaml.FullLoader)
17 | obstacle_lst = config['obstacles']
18 |
19 |
20 | def get_pcloud(scan, label, proc_labels=True):
21 | scan = np.fromfile(scan, dtype=np.float32)
22 | scan = scan.reshape((-1, 4))
23 | scan = scan[:, :3]
24 | label = np.fromfile(label, dtype=np.uint32)
25 | label = label.reshape((-1))
26 | values = np.concatenate((scan, label.reshape(len(label), 1)), axis=1)
27 | pcloud = pd.DataFrame(values, columns=['x', 'y', 'z', 'seg_id'])
28 | if proc_labels:
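    | # SemanticKITTI packs labels into 32-bit ints: the lower 16 bits hold
    | # the semantic class, the upper 16 the instance id, hence the & 0xFFFF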
29 | pcloud.seg_id = pcloud.seg_id.astype("uint32")
30 | pcloud.seg_id = pcloud.seg_id.apply(lambda x: x & 0xFFFF)
31 | return pcloud
32 |
33 |
34 | class TestClass:
35 | def test_obstacle_filter_1(self):
36 | pcloud = get_pcloud(scan_lst[0], label_lst[0])
37 | cloud = common.obstacle_filter(pcloud, obstacle_lst, proc_labels=False)
38 | seg_lst = list(cloud['seg_id'].unique())
39 | for seg in seg_lst:
40 | assert seg in list(obstacle_lst.keys())
41 |
42 | def test_obstacle_filter_2(self):
43 | pcloud = get_pcloud(scan_lst[1], label_lst[1])
44 | cloud = common.obstacle_filter(pcloud, obstacle_lst, proc_labels=False)
45 | seg_lst = list(cloud['seg_id'].unique())
46 | for seg in seg_lst:
47 | assert seg in list(obstacle_lst.keys())
48 |
49 | def test_obstacle_filter_3(self):
50 | pcloud = get_pcloud(scan_lst[2], label_lst[2])
51 | cloud = common.obstacle_filter(pcloud, obstacle_lst, proc_labels=False)
52 | seg_lst = list(cloud['seg_id'].unique())
53 | for seg in seg_lst:
54 | assert seg in list(obstacle_lst.keys())
55 |
56 | def test_roi_filter_1(self):
57 | params = {'roi_x_min': -10, 'roi_x_max': 10, 'roi_y_min': -14, 'roi_y_max': 14, 'roi_z_min': -2, 'roi_z_max': 1}
58 | pcloud = get_pcloud(scan_lst[0], label_lst[0])
59 | cloud = common.roi_filter(pcloud,
60 | min_x=params["roi_x_min"],
61 | max_x=params["roi_x_max"],
62 | min_y=params["roi_y_min"],
63 | max_y=params["roi_y_max"],
64 | min_z=params["roi_z_min"],
65 | max_z=params["roi_z_max"],
66 | verbose=False)
67 | assert cloud['x'].min() >= params['roi_x_min']
68 | assert cloud['y'].min() >= params['roi_y_min']
69 | assert cloud['z'].min() >= params['roi_z_min']
70 | assert cloud['x'].max() <= params['roi_x_max']
71 | assert cloud['y'].max() <= params['roi_y_max']
72 | assert cloud['z'].max() <= params['roi_z_max']
73 |
--------------------------------------------------------------------------------
/visualization/README.md:
--------------------------------------------------------------------------------
1 | # Visualization
2 | To visualize the data (in this example sequence 00):
3 | ```sh
4 | $ ./visualize.py -d /path/to/dataset/ -s 00
5 | ```
6 |
7 | To visualize the predictions (in this example sequence 00):
8 |
9 | ```sh
10 | $ ./visualize.py -d /path/to/dataset/ -p /path/to/predictions/ -s 00
11 | ```
12 |
13 | If you want to visualize oriented bounding boxes using 8 vertex coordinates, use the flag -b or --bboxes (in this example sequence 00):
14 |
15 | ```sh
16 | $ ./visualize.py -d /path/to/dataset/ -p /path/to/predictions/ -s 00 -b
17 | ```
18 |
19 | If you want to visualize oriented bounding boxes using width, depth, height, center coordinate and angle of rotation, use the flag -m or --use_bbox_measurements (in this example sequence 00):
20 |
21 | ```sh
22 | $ ./visualize.py -d /path/to/dataset/ -p /path/to/predictions/ -s 00 -b -m
23 | ```
24 |
25 | If you want to add cluster labels, use the flag -l or --use_bbox_labels (in this example sequence 00):
26 |
27 | ```sh
28 | $ ./visualize.py -d /path/to/dataset/ -p /path/to/predictions/ -s 00 -b -l
29 | ```
30 |
31 | If you want to visualize the region of interest, use the flag -r or --roi_filter (in this example sequence 00):
32 |
33 | ```sh
34 | $ ./visualize.py -d /path/to/dataset/ -p /path/to/predictions/ -s 00 -r
35 | ```
36 |
--------------------------------------------------------------------------------
/visualization/__init__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | TRAIN_PATH = "../../"
3 | DEPLOY_PATH = "../../../deploy"
4 | sys.path.insert(0, TRAIN_PATH)
5 |
--------------------------------------------------------------------------------
/visualization/config/labels/semantic-kitti-all.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 20 # "moving-car"
138 | 253: 21 # "moving-bicyclist"
139 | 254: 22 # "moving-person"
140 | 255: 23 # "moving-motorcyclist"
141 | 256: 24 # "moving-on-rails" mapped to "moving-other-vehicle" ------mapped
142 | 257: 24 # "moving-bus" mapped to "moving-other-vehicle" -----------mapped
143 | 258: 25 # "moving-truck"
144 | 259: 24 # "moving-other-vehicle"
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | 20: 252 # "moving-car"
167 | 21: 253 # "moving-bicyclist"
168 | 22: 254 # "moving-person"
169 | 23: 255 # "moving-motorcyclist"
170 | 24: 259 # "moving-other-vehicle"
171 | 25: 258 # "moving-truck"
172 | learning_ignore: # Ignore classes
173 | 0: True # "unlabeled", and others ignored
174 | 1: False # "car"
175 | 2: False # "bicycle"
176 | 3: False # "motorcycle"
177 | 4: False # "truck"
178 | 5: False # "other-vehicle"
179 | 6: False # "person"
180 | 7: False # "bicyclist"
181 | 8: False # "motorcyclist"
182 | 9: False # "road"
183 | 10: False # "parking"
184 | 11: False # "sidewalk"
185 | 12: False # "other-ground"
186 | 13: False # "building"
187 | 14: False # "fence"
188 | 15: False # "vegetation"
189 | 16: False # "trunk"
190 | 17: False # "terrain"
191 | 18: False # "pole"
192 | 19: False # "traffic-sign"
193 | 20: False # "moving-car"
194 | 21: False # "moving-bicyclist"
195 | 22: False # "moving-person"
196 | 23: False # "moving-motorcyclist"
197 | 24: False # "moving-other-vehicle"
198 | 25: False # "moving-truck"
199 | split: # sequence numbers
200 | train:
201 | - 0
202 | - 1
203 | - 2
204 | - 3
205 | - 4
206 | - 5
207 | - 6
208 | - 7
209 | - 9
210 | - 10
211 | valid:
212 | - 8
213 | test:
214 | - 11
215 | - 12
216 | - 13
217 | - 14
218 | - 15
219 | - 16
220 | - 17
221 | - 18
222 | - 19
223 | - 20
224 | - 21
225 |
--------------------------------------------------------------------------------
/visualization/config/labels/semantic-kitti.yaml:
--------------------------------------------------------------------------------
1 | # This file is covered by the LICENSE file in the root of this project.
2 | name: "kitti"
3 | labels:
4 | 0 : "unlabeled"
5 | 1 : "outlier"
6 | 10: "car"
7 | 11: "bicycle"
8 | 13: "bus"
9 | 15: "motorcycle"
10 | 16: "on-rails"
11 | 18: "truck"
12 | 20: "other-vehicle"
13 | 30: "person"
14 | 31: "bicyclist"
15 | 32: "motorcyclist"
16 | 40: "road"
17 | 44: "parking"
18 | 48: "sidewalk"
19 | 49: "other-ground"
20 | 50: "building"
21 | 51: "fence"
22 | 52: "other-structure"
23 | 60: "lane-marking"
24 | 70: "vegetation"
25 | 71: "trunk"
26 | 72: "terrain"
27 | 80: "pole"
28 | 81: "traffic-sign"
29 | 99: "other-object"
30 | 252: "moving-car"
31 | 253: "moving-bicyclist"
32 | 254: "moving-person"
33 | 255: "moving-motorcyclist"
34 | 256: "moving-on-rails"
35 | 257: "moving-bus"
36 | 258: "moving-truck"
37 | 259: "moving-other-vehicle"
38 | color_map: # bgr
39 | 0 : [0, 0, 0]
40 | 1 : [0, 0, 255]
41 | 10: [245, 150, 100]
42 | 11: [245, 230, 100]
43 | 13: [250, 80, 100]
44 | 15: [150, 60, 30]
45 | 16: [255, 0, 0]
46 | 18: [180, 30, 80]
47 | 20: [255, 0, 0]
48 | 30: [30, 30, 255]
49 | 31: [200, 40, 255]
50 | 32: [90, 30, 150]
51 | 40: [255, 0, 255]
52 | 44: [255, 150, 255]
53 | 48: [75, 0, 75]
54 | 49: [75, 0, 175]
55 | 50: [0, 200, 255]
56 | 51: [50, 120, 255]
57 | 52: [0, 150, 255]
58 | 60: [170, 255, 150]
59 | 70: [0, 175, 0]
60 | 71: [0, 60, 135]
61 | 72: [80, 240, 150]
62 | 80: [150, 240, 255]
63 | 81: [0, 0, 255]
64 | 99: [255, 255, 50]
65 | 252: [245, 150, 100]
66 | 256: [255, 0, 0]
67 | 253: [200, 40, 255]
68 | 254: [30, 30, 255]
69 | 255: [90, 30, 150]
70 | 257: [250, 80, 100]
71 | 258: [180, 30, 80]
72 | 259: [255, 0, 0]
73 | content: # as a ratio with the total number of points
74 | 0: 0.018889854628292943
75 | 1: 0.0002937197336781505
76 | 10: 0.040818519255974316
77 | 11: 0.00016609538710764618
78 | 13: 2.7879693665067774e-05
79 | 15: 0.00039838616015114444
80 | 16: 0.0
81 | 18: 0.0020633612104619787
82 | 20: 0.0016218197275284021
83 | 30: 0.00017698551338515307
84 | 31: 1.1065903904919655e-08
85 | 32: 5.532951952459828e-09
86 | 40: 0.1987493871255525
87 | 44: 0.014717169549888214
88 | 48: 0.14392298360372
89 | 49: 0.0039048553037472045
90 | 50: 0.1326861944777486
91 | 51: 0.0723592229456223
92 | 52: 0.002395131480328884
93 | 60: 4.7084144280367186e-05
94 | 70: 0.26681502148037506
95 | 71: 0.006035012012626033
96 | 72: 0.07814222006271769
97 | 80: 0.002855498193863172
98 | 81: 0.0006155958086189918
99 | 99: 0.009923127583046915
100 | 252: 0.001789309418528068
101 | 253: 0.00012709999297008662
102 | 254: 0.00016059776092534436
103 | 255: 3.745553104802113e-05
104 | 256: 0.0
105 | 257: 0.00011351574470342043
106 | 258: 0.00010157861367183268
107 | 259: 4.3840131989471124e-05
108 | # classes that are indistinguishable from single scan or inconsistent in
109 | # ground truth are mapped to their closest equivalent
110 | learning_map:
111 | 0 : 0 # "unlabeled"
112 | 1 : 0 # "outlier" mapped to "unlabeled" --------------------------mapped
113 | 10: 1 # "car"
114 | 11: 2 # "bicycle"
115 | 13: 5 # "bus" mapped to "other-vehicle" --------------------------mapped
116 | 15: 3 # "motorcycle"
117 | 16: 5 # "on-rails" mapped to "other-vehicle" ---------------------mapped
118 | 18: 4 # "truck"
119 | 20: 5 # "other-vehicle"
120 | 30: 6 # "person"
121 | 31: 7 # "bicyclist"
122 | 32: 8 # "motorcyclist"
123 | 40: 9 # "road"
124 | 44: 10 # "parking"
125 | 48: 11 # "sidewalk"
126 | 49: 12 # "other-ground"
127 | 50: 13 # "building"
128 | 51: 14 # "fence"
129 | 52: 0 # "other-structure" mapped to "unlabeled" ------------------mapped
130 | 60: 9 # "lane-marking" to "road" ---------------------------------mapped
131 | 70: 15 # "vegetation"
132 | 71: 16 # "trunk"
133 | 72: 17 # "terrain"
134 | 80: 18 # "pole"
135 | 81: 19 # "traffic-sign"
136 | 99: 0 # "other-object" to "unlabeled" ----------------------------mapped
137 | 252: 1 # "moving-car" to "car" ------------------------------------mapped
138 | 253: 7 # "moving-bicyclist" to "bicyclist" ------------------------mapped
139 | 254: 6 # "moving-person" to "person" ------------------------------mapped
140 | 255: 8 # "moving-motorcyclist" to "motorcyclist" ------------------mapped
141 | 256: 5 # "moving-on-rails" mapped to "other-vehicle" --------------mapped
142 | 257: 5 # "moving-bus" mapped to "other-vehicle" -------------------mapped
143 | 258: 4 # "moving-truck" to "truck" --------------------------------mapped
144 | 259: 5 # "moving-other"-vehicle to "other-vehicle" ----------------mapped
145 | learning_map_inv: # inverse of previous map
146 | 0: 0 # "unlabeled", and others ignored
147 | 1: 10 # "car"
148 | 2: 11 # "bicycle"
149 | 3: 15 # "motorcycle"
150 | 4: 18 # "truck"
151 | 5: 20 # "other-vehicle"
152 | 6: 30 # "person"
153 | 7: 31 # "bicyclist"
154 | 8: 32 # "motorcyclist"
155 | 9: 40 # "road"
156 | 10: 44 # "parking"
157 | 11: 48 # "sidewalk"
158 | 12: 49 # "other-ground"
159 | 13: 50 # "building"
160 | 14: 51 # "fence"
161 | 15: 70 # "vegetation"
162 | 16: 71 # "trunk"
163 | 17: 72 # "terrain"
164 | 18: 80 # "pole"
165 | 19: 81 # "traffic-sign"
166 | learning_ignore: # Ignore classes
167 | 0: True # "unlabeled", and others ignored
168 | 1: False # "car"
169 | 2: False # "bicycle"
170 | 3: False # "motorcycle"
171 | 4: False # "truck"
172 | 5: False # "other-vehicle"
173 | 6: False # "person"
174 | 7: False # "bicyclist"
175 | 8: False # "motorcyclist"
176 | 9: False # "road"
177 | 10: False # "parking"
178 | 11: False # "sidewalk"
179 | 12: False # "other-ground"
180 | 13: False # "building"
181 | 14: False # "fence"
182 | 15: False # "vegetation"
183 | 16: False # "trunk"
184 | 17: False # "terrain"
185 | 18: False # "pole"
186 | 19: False # "traffic-sign"
187 | split: # sequence numbers
188 | train:
189 | - 0
190 | - 1
191 | - 2
192 | - 3
193 | - 4
194 | - 5
195 | - 6
196 | - 7
197 | - 9
198 | - 10
199 | valid:
200 | - 8
201 | test:
202 | - 11
203 | - 12
204 | - 13
205 | - 14
206 | - 15
207 | - 16
208 | - 17
209 | - 18
210 | - 19
211 | - 20
212 | - 21
213 |
--------------------------------------------------------------------------------