├── pcs_detection
│   ├── src_python
│   │   ├── pcs_detection
│   │   │   ├── __init__.py
│   │   │   ├── models
│   │   │   │   ├── __init__.py
│   │   │   │   └── histogram_backprojection.py
│   │   │   ├── preprocess.py
│   │   │   └── inference.py
│   │   ├── setup.py.in
│   │   └── CMakeLists.txt
│   ├── scripts
│   │   ├── data
│   │   │   ├── weights
│   │   │   │   ├── .gitignore
│   │   │   │   ├── fcn8_COMBINED_19_10_10_173416
│   │   │   │   │   ├── inference_config.json
│   │   │   │   │   └── full_config.json
│   │   │   │   ├── fcn8_STACKED_19_10_10_173601
│   │   │   │   │   ├── inference_config.json
│   │   │   │   │   └── full_config.json
│   │   │   │   └── using_negatives_fcn8_GREY_19_10_10_101037
│   │   │   │       ├── inference_config.json
│   │   │   │       └── full_config.json
│   │   │   ├── example_weld_image_grayscale.png
│   │   │   ├── example_dataset_1
│   │   │   │   ├── training
│   │   │   │   │   ├── 0000.png
│   │   │   │   │   ├── 0001.png
│   │   │   │   │   ├── 0002.png
│   │   │   │   │   ├── 0003.png
│   │   │   │   │   ├── 0004.png
│   │   │   │   │   ├── 0005.png
│   │   │   │   │   ├── 0007.png
│   │   │   │   │   ├── 0008.png
│   │   │   │   │   ├── 0009.png
│   │   │   │   │   ├── 0010.png
│   │   │   │   │   ├── 0011.png
│   │   │   │   │   ├── 0013.png
│   │   │   │   │   ├── 0014.png
│   │   │   │   │   ├── 0015.png
│   │   │   │   │   ├── 0016.png
│   │   │   │   │   ├── 0017.png
│   │   │   │   │   ├── 0019.png
│   │   │   │   │   ├── 0020.png
│   │   │   │   │   ├── 0022.png
│   │   │   │   │   ├── 0023.png
│   │   │   │   │   ├── 0024.png
│   │   │   │   │   ├── 0025.png
│   │   │   │   │   ├── 0026.png
│   │   │   │   │   ├── 0027.png
│   │   │   │   │   ├── 0028.png
│   │   │   │   │   ├── 0029.png
│   │   │   │   │   ├── 0030.png
│   │   │   │   │   ├── 0032.png
│   │   │   │   │   ├── 0033.png
│   │   │   │   │   ├── 0034.png
│   │   │   │   │   ├── 0035.png
│   │   │   │   │   ├── 0036.png
│   │   │   │   │   ├── 0037.png
│   │   │   │   │   ├── 0038.png
│   │   │   │   │   ├── 0039.png
│   │   │   │   │   ├── 0040.png
│   │   │   │   │   ├── 0041.png
│   │   │   │   │   ├── 0042.png
│   │   │   │   │   ├── 0043.png
│   │   │   │   │   ├── 0044.png
│   │   │   │   │   ├── 0046.png
│   │   │   │   │   └── 0047.png
│   │   │   │   ├── validation
│   │   │   │   │   ├── 0003.png
│   │   │   │   │   ├── 0017.png
│   │   │   │   │   ├── 0019.png
│   │   │   │   │   ├── 0059.png
│   │   │   │   │   ├── 0074.png
│   │   │   │   │   ├── 0077.png
│   │   │   │   │   ├── 0081.png
│   │   │   │   │   ├── 0094.png
│   │   │   │   │   ├── 0114.png
│   │   │   │   │   └── 0116.png
│   │   │   │   └── validation_labels.xml
│   │   │   └── example_dataset_2
│   │   │       ├── training
│   │   │       │   ├── 0000.png
│   │   │       │   ├── 0001.png
│   │   │       │   ├── 0002.png
│   │   │       │   ├── 0003.png
│   │   │       │   ├── 0004.png
│   │   │       │   ├── 0005.png
│   │   │       │   ├── 0006.png
│   │   │       │   ├── 0007.png
│   │   │       │   ├── 0008.png
│   │   │       │   ├── 0009.png
│   │   │       │   ├── 0010.png
│   │   │       │   ├── 0011.png
│   │   │       │   ├── 0012.png
│   │   │       │   ├── 0013.png
│   │   │       │   ├── 0014.png
│   │   │       │   ├── 0015.png
│   │   │       │   ├── 0016.png
│   │   │       │   ├── 0017.png
│   │   │       │   ├── 0020.png
│   │   │       │   ├── 0021.png
│   │   │       │   ├── 0022.png
│   │   │       │   ├── 0023.png
│   │   │       │   ├── 0024.png
│   │   │       │   ├── 0026.png
│   │   │       │   ├── 0027.png
│   │   │       │   ├── 0028.png
│   │   │       │   ├── 0029.png
│   │   │       │   ├── 0031.png
│   │   │       │   ├── 0032.png
│   │   │       │   ├── 0033.png
│   │   │       │   ├── 0034.png
│   │   │       │   ├── 0035.png
│   │   │       │   ├── 0036.png
│   │   │       │   ├── 0037.png
│   │   │       │   ├── 0038.png
│   │   │       │   ├── 0039.png
│   │   │       │   ├── 0040.png
│   │   │       │   ├── 0041.png
│   │   │       │   ├── 0043.png
│   │   │       │   ├── 0044.png
│   │   │       │   ├── 0045.png
│   │   │       │   ├── 0046.png
│   │   │       │   ├── 0047.png
│   │   │       │   ├── 0049.png
│   │   │       │   ├── 0050.png
│   │   │       │   ├── 0051.png
│   │   │       │   ├── 0052.png
│   │   │       │   ├── 0053.png
│   │   │       │   ├── 0054.png
│   │   │       │   ├── 0056.png
│   │   │       │   ├── 0057.png
│   │   │       │   ├── 0058.png
│   │   │       │   ├── 0059.png
│   │   │       │   ├── 0060.png
│   │   │       │   ├── 0061.png
│   │   │       │   ├── 0062.png
│   │   │       │   ├── 0063.png
│   │   │       │   ├── 0064.png
│   │   │       │   └── 0065.png
│   │   │       └── validation
│   │   │           ├── 0003.png
│   │   │           ├── 0006.png
│   │   │           ├── 0012.png
│   │   │           ├── 0017.png
│   │   │           ├── 0018.png
│   │   │           ├── 0019.png
│   │   │           ├── 0021.png
│   │   │           ├── 0025.png
│   │   │           ├── 0030.png
│   │   │           ├── 0031.png
│   │   │           ├── 0042.png
│   │   │           ├── 0045.png
│   │   │           ├── 0048.png
│   │   │           ├── 0055.png
│   │   │           ├── 0059.png
│   │   │           ├── 0074.png
│   │   │           ├── 0077.png
│   │   │           ├── 0081.png
│   │   │           ├── 0094.png
│   │   │           ├── 0114.png
│   │   │           ├── 0116.png
│   │   │           └── 0018_2.png
│   │   ├── histogram_training.py
│   │   ├── histogram_backprojection_example.py
│   │   ├── example_weights.py
│   │   ├── fcn8_train.py
│   │   ├── training_config.json
│   │   ├── fcn8_inference.py
│   │   └── masking_inference.py
│   ├── cmake
│   │   └── pcs_detection-config.cmake.in
│   ├── package.xml
│   ├── src
│   │   └── pcs_detection
│   │       └── point_cloud_annotator.cpp
│   ├── include
│   │   └── pcs_detection
│   │       ├── hsv_thresholding.h
│   │       ├── point_cloud_annotator.h
│   │       └── utils.h
│   ├── test
│   │   ├── utils_unit.cpp
│   │   ├── CMakeLists.txt
│   │   └── point_cloud_annotator_unit.cpp
│   ├── CMakeLists.txt
│   └── README.md
├── gh_pages
│   ├── requirements.txt
│   ├── _static
│   │   ├── footer.jpg
│   │   ├── logo.jpg
│   │   ├── pcs_scan_integration
│   │   │   ├── RETURN_INSIDE.png
│   │   │   ├── RETURN_OUTSIDE.png
│   │   │   └── RETURN_COLORIZED.png
│   │   └── override.css
│   ├── _source
│   │   ├── pcs_ros.rst
│   │   ├── pcs_msgs.rst
│   │   ├── pcs_detection.rst
│   │   ├── pcs_scan_integration.rst
│   │   └── FAQ.rst
│   ├── _themes
│   │   └── sphinx_rtd_theme
│   │       ├── static
│   │       │   ├── fonts
│   │       │   │   ├── FontAwesome.otf
│   │       │   │   ├── fontawesome-webfont.eot
│   │       │   │   ├── fontawesome-webfont.ttf
│   │       │   │   ├── fontawesome-webfont.woff
│   │       │   │   └── fontawesome-webfont.woff2
│   │       │   ├── css
│   │       │   │   └── badge_only.css
│   │       │   └── js
│   │       │       └── theme.js
│   │       ├── theme.conf
│   │       ├── searchbox.html
│   │       ├── __init__.py
│   │       ├── versions.html
│   │       ├── search.html
│   │       ├── footer.html
│   │       └── breadcrumbs.html
│   ├── Makefile
│   ├── index.rst
│   ├── _templates
│   │   ├── layout.html
│   │   └── footer.html
│   └── conf.py
├── pcs_scan_integration
│   ├── test
│   │   ├── results
│   │   │   └── .gitignore
│   │   ├── box_2m.bt
│   │   ├── plane_4m.stl
│   │   ├── box_2m.ply
│   │   └── CMakeLists.txt
│   ├── cmake
│   │   └── pcs_scan_integration-config.cmake.in
│   ├── package.xml
│   ├── README.md
│   └── CMakeLists.txt
├── .gitignore
├── pcs_msgs
│   ├── srv
│   │   └── ImageProcessing.srv
│   ├── package.xml
│   ├── action
│   │   └── ApplyOctomapMeshMask.action
│   └── CMakeLists.txt
├── .run-clang-format
├── pcs_ros
│   ├── test
│   │   └── data
│   │       ├── colorized_cloud1.bag
│   │       ├── colorized_cloud2.bag
│   │       └── colorized_cloud3.bag
│   ├── setup.py
│   ├── launch
│   │   ├── extract_all_bags.sh
│   │   ├── extract_images_from_bag.launch
│   │   └── demo.launch
│   ├── package.xml
│   ├── src
│   │   ├── hsv_thresholding_node.cpp
│   │   ├── histogram_backprojection_node
│   │   ├── image_channel_splitter_node.cpp
│   │   ├── image_extractor_node.cpp
│   │   ├── fcn8_node
│   │   ├── octomap_mesh_mask_node.cpp
│   │   └── point_cloud_xyzi_to_xyzrgb_node.cpp
│   ├── CMakeLists.txt
│   └── config
│       └── demo.rviz
├── dependencies.rosinstall
├── .travis.yml
├── README.md
└── .clang-format
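Each pretrained-weights directory under pcs_detection/scripts/data/weights pairs an inference_config.json with a full_config.json. As a minimal sketch of how a script might locate and read one of these configs (the run-directory name comes from the tree above; the available keys depend on a JSON schema not shown in this listing):

import json
from pathlib import Path

WEIGHTS_ROOT = Path("pcs_detection/scripts/data/weights")

def load_inference_config(run_name):
    # Each training run directory holds an inference_config.json
    # alongside the full training configuration.
    config_path = WEIGHTS_ROOT / run_name / "inference_config.json"
    with config_path.open() as f:
        return json.load(f)

if __name__ == "__main__":
    cfg = load_inference_config("fcn8_COMBINED_19_10_10_173416")
    print(sorted(cfg.keys()))  # inspect which settings the run defines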
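gh_pages/requirements.txt installs sphinx_rtd_theme for the documentation build (gitpython is presumably used by conf.py; the project's actual conf.py is not reproduced in this listing). For reference, a generic sketch of how a Sphinx conf.py enables this theme — not the repository's real configuration:

# conf.py (generic sketch, not the repository's actual gh_pages/conf.py)
project = "point_cloud_segmentation"

# Once installed via requirements.txt, sphinx_rtd_theme registers itself
# with Sphinx, so enabling it is a one-line setting.
html_theme = "sphinx_rtd_theme"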
--------------------------------------------------------------------------------
/pcs_scan_integration/test/results/.gitignore:
--------------------------------------------------------------------------------
*
!.gitignore
--------------------------------------------------------------------------------
/pcs_detection/scripts/data/weights/.gitignore:
--------------------------------------------------------------------------------
*.h5
!.gitignore
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.pyc
*.vscode*
*.workspace

# Catkin custom files
CATKIN_IGNORE
--------------------------------------------------------------------------------
/pcs_msgs/srv/ImageProcessing.srv:
--------------------------------------------------------------------------------
sensor_msgs/Image input_image
---
sensor_msgs/Image returned_image
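ImageProcessing.srv defines a simple request/response pair: the server receives a sensor_msgs/Image and returns a processed one. A minimal rospy client sketch for this service — the service name /image_processing and the camera topic are assumptions, since both depend on how the serving node is launched:

#!/usr/bin/env python
# Sketch of a pcs_msgs/ImageProcessing client. The service and topic
# names below are hypothetical; they depend on the launch configuration.
import rospy
from sensor_msgs.msg import Image
from pcs_msgs.srv import ImageProcessing

def process(image_msg):
    rospy.wait_for_service('/image_processing')
    client = rospy.ServiceProxy('/image_processing', ImageProcessing)
    response = client(input_image=image_msg)
    return response.returned_image

if __name__ == '__main__':
    rospy.init_node('image_processing_client')
    raw = rospy.wait_for_message('/camera/image_raw', Image)
    processed = process(raw)
    rospy.loginfo('Processed image is %dx%d', processed.width, processed.height)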
--------------------------------------------------------------------------------
/gh_pages/_static/footer.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_static/footer.jpg
--------------------------------------------------------------------------------
/gh_pages/_static/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_static/logo.jpg
--------------------------------------------------------------------------------
/.run-clang-format:
--------------------------------------------------------------------------------
#!/bin/bash
find . -type f -regex '.*\.\(cpp\|hpp\|cc\|cxx\|h\|hxx\)' -exec clang-format-8 -style=file -i {} \;
--------------------------------------------------------------------------------
/gh_pages/_source/pcs_ros.rst:
--------------------------------------------------------------------------------
***************************
pcs_ros
***************************

Features
========
--------------------------------------------------------------------------------
/pcs_scan_integration/test/box_2m.bt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_scan_integration/test/box_2m.bt
--------------------------------------------------------------------------------
/gh_pages/_source/pcs_msgs.rst:
--------------------------------------------------------------------------------
***************************
pcs_msgs
***************************

Features
========
--------------------------------------------------------------------------------
/pcs_ros/test/data/colorized_cloud1.bag:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_ros/test/data/colorized_cloud1.bag
--------------------------------------------------------------------------------
/pcs_ros/test/data/colorized_cloud2.bag:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_ros/test/data/colorized_cloud2.bag
--------------------------------------------------------------------------------
/pcs_ros/test/data/colorized_cloud3.bag:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_ros/test/data/colorized_cloud3.bag
--------------------------------------------------------------------------------
/pcs_scan_integration/test/plane_4m.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_scan_integration/test/plane_4m.stl
--------------------------------------------------------------------------------
/gh_pages/_source/pcs_detection.rst:
--------------------------------------------------------------------------------
***************************
pcs_detection
***************************

Features
========
--------------------------------------------------------------------------------
/gh_pages/_source/pcs_scan_integration.rst:
--------------------------------------------------------------------------------
***************************
pcs_scan_integration
***************************

Features
========
--------------------------------------------------------------------------------
/gh_pages/_static/pcs_scan_integration/RETURN_INSIDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_static/pcs_scan_integration/RETURN_INSIDE.png
--------------------------------------------------------------------------------
/gh_pages/_static/pcs_scan_integration/RETURN_OUTSIDE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_static/pcs_scan_integration/RETURN_OUTSIDE.png
--------------------------------------------------------------------------------
/gh_pages/_static/pcs_scan_integration/RETURN_COLORIZED.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_static/pcs_scan_integration/RETURN_COLORIZED.png
--------------------------------------------------------------------------------
/pcs_detection/scripts/data/example_weld_image_grayscale.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_weld_image_grayscale.png
--------------------------------------------------------------------------------
/gh_pages/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_themes/sphinx_rtd_theme/static/fonts/FontAwesome.otf
--------------------------------------------------------------------------------
/pcs_detection/scripts/data/example_dataset_1/training/0000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0000.png
--------------------------------------------------------------------------------
/pcs_detection/scripts/data/example_dataset_1/training/0001.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0001.png
--------------------------------------------------------------------------------
/pcs_detection/scripts/data/example_dataset_1/training/0002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0002.png
-------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0003.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0004.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0005.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0007.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0008.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0009.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0010.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0011.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0013.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0014.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0014.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0015.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0016.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0017.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0019.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0020.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0022.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0022.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0023.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0024.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0025.png -------------------------------------------------------------------------------- 
/pcs_detection/scripts/data/example_dataset_1/training/0026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0026.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0027.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0028.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0029.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0030.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0032.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0033.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0034.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0035.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0035.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0036.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0036.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0037.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0038.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0038.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0039.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0040.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0040.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0041.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0042.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0043.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0044.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/training/0046.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0046.png -------------------------------------------------------------------------------- 
/pcs_detection/scripts/data/example_dataset_1/training/0047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/training/0047.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0000.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0001.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0002.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0003.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0004.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0005.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0006.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0007.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0008.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0008.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0009.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0010.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0011.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0012.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0013.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0014.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0015.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0016.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0017.png -------------------------------------------------------------------------------- 
/pcs_detection/scripts/data/example_dataset_2/training/0020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0020.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0021.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0022.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0022.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0023.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0024.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0026.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0027.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0028.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0029.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0031.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0031.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0032.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0032.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0033.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0033.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0034.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0035.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0035.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0036.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0036.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0037.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0037.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0038.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0038.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0039.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0039.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0040.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0040.png -------------------------------------------------------------------------------- 
/pcs_detection/scripts/data/example_dataset_2/training/0041.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0041.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0043.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0043.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0044.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0044.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0045.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0046.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0046.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0047.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0047.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0049.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0049.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0050.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0051.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0052.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0052.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0053.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0054.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0056.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0057.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0058.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0059.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0060.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0060.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0061.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0061.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0062.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0062.png -------------------------------------------------------------------------------- 
/pcs_detection/scripts/data/example_dataset_2/training/0063.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0063.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0064.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0064.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/training/0065.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/training/0065.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0003.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0017.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0019.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0059.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0074.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0074.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0077.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0081.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0081.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0094.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0094.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0114.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation/0116.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_1/validation/0116.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0003.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0006.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0012.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0017.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0018.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0019.png 
-------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0021.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0025.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0030.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0031.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0031.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0042.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0042.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0045.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0045.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0048.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0048.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0055.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0059.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0074.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0074.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0077.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0077.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0081.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0094.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0094.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0114.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0116.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0116.png -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_2/validation/0018_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/pcs_detection/scripts/data/example_dataset_2/validation/0018_2.png -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swri-robotics/point_cloud_segmentation/HEAD/gh_pages/_themes/sphinx_rtd_theme/static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /pcs_detection/scripts/histogram_training.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | from pcs_detection.models.histogram_backprojection import HistogramBackprojection 5 | 6 | if __name__ == '__main__': 7 | test = HistogramBackprojection("hist.npy") 8 | -------------------------------------------------------------------------------- /pcs_detection/src_python/setup.py.in: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | 3 | if __name__ == '__main__': 4 | setup(name='pcs_detection', 5 | version='${PACKAGE_VERSION}', 6 | package_dir={ '': '${CMAKE_CURRENT_SOURCE_DIR}' }, 7 | packages=['pcs_detection', 'pcs_detection.models']) 8 | -------------------------------------------------------------------------------- /pcs_ros/setup.py: -------------------------------------------------------------------------------- 1 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD 2 | 3 | from distutils.core import setup 4 | from catkin_pkg.python_setup import generate_distutils_setup 5 | 6 | # fetch values from package.xml 7 | setup_args = generate_distutils_setup( 8 | packages=['pcs_ros'], 9 | package_dir={'': 'src'}, 10 | ) 11 | 12 | setup(**setup_args) 13 | 14 | -------------------------------------------------------------------------------- /gh_pages/_source/FAQ.rst: -------------------------------------------------------------------------------- 1 | Frequently Asked Questions 2 | =========================== 3 | This wiki highlights the frequently asked questions on the issue tracker. 4 | 5 | #. :ref:`Place Holder 1? <faq1>` 6 | #. :ref:`Place Holder 2? <faq2>` 7 | 8 | 9 | .. _faq1: 10 | 11 | Place Holder 1? 12 | --------------- 13 | 14 | TBD 15 | 16 | .. _faq2: 17 | 18 | Place Holder 2?
19 | --------------- 20 | 21 | TBD 22 | 23 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/theme.conf: -------------------------------------------------------------------------------- 1 | [theme] 2 | inherit = basic 3 | stylesheet = css/theme.css 4 | pygments_style = default 5 | 6 | [options] 7 | canonical_url = 8 | analytics_id = 9 | collapse_navigation = True 10 | sticky_navigation = True 11 | navigation_depth = 4 12 | includehidden = True 13 | titles_only = 14 | logo_only = 15 | display_version = True 16 | prev_next_buttons_location = bottom 17 | style_external_links = False 18 | -------------------------------------------------------------------------------- /pcs_detection/cmake/pcs_detection-config.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | set(@PROJECT_NAME@_FOUND ON) 4 | set_and_check(@PROJECT_NAME@_INCLUDE_DIRS "${PACKAGE_PREFIX_DIR}/include") 5 | set_and_check(@PROJECT_NAME@_LIBRARY_DIRS "${PACKAGE_PREFIX_DIR}/lib") 6 | 7 | include(CMakeFindDependencyMacro) 8 | 9 | find_dependency(PCL) 10 | find_dependency(console_bridge) 11 | 12 | include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@-targets.cmake") 13 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/searchbox.html: -------------------------------------------------------------------------------- 1 | {%- if builder != 'singlehtml' %} 2 |
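The `histogram_training.py` script shown earlier is only a stub: it loads an existing `hist.npy` but never builds one. Below is a minimal sketch of how such a histogram could be produced, following the OpenCV backprojection tutorial that `HistogramBackprojection` cites. The input file names are hypothetical, and the hand-labelled binary mask is an assumed input; only the histogram layout (2D hue/saturation, matching the ranges used by `calcBackProject` at annotation time) comes from the sources in this repo.

```python
#!/usr/bin/env python
# Hypothetical histogram-training sketch. File names below are placeholders;
# the saved .npy layout matches what HistogramBackprojection loads.
import numpy as np
import cv2 as cv

sample = cv.imread('example_data/example_image.png')                    # BGR image containing the feature
mask = cv.imread('example_data/example_mask.png', cv.IMREAD_GRAYSCALE)  # 255 where the feature is present

hsv = cv.cvtColor(sample, cv.COLOR_BGR2HSV)
# 2D histogram over hue and saturation, matching the [0, 180, 0, 255]
# ranges used by calcBackProject in annotate_image()
hist = cv.calcHist([hsv], [0, 1], mask, [180, 256], [0, 180, 0, 256])
cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
np.save('hist.npy', hist)
```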
3 |
4 | 5 | 6 | 7 |
8 |
9 | {%- endif %} 10 | -------------------------------------------------------------------------------- /pcs_ros/launch/extract_all_bags.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This bash script loops through all files in the directory and calls the image extractor launch file. 3 | # The results for each bag file are placed in a different directory 4 | 5 | for file in .* *; do 6 | echo "${file}" 7 | mkdir "${file}_images" 8 | roslaunch pcs_ros extract_images_from_bag.launch filepath:="${PWD}/${file}" results_dir:="${PWD}/${file}_images" 9 | done 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /pcs_scan_integration/cmake/pcs_scan_integration-config.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | set(@PROJECT_NAME@_FOUND ON) 4 | set_and_check(@PROJECT_NAME@_INCLUDE_DIRS "${PACKAGE_PREFIX_DIR}/include") 5 | set_and_check(@PROJECT_NAME@_LIBRARY_DIRS "${PACKAGE_PREFIX_DIR}/lib") 6 | 7 | include(CMakeFindDependencyMacro) 8 | 9 | find_dependency(PCL) 10 | find_dependency(console_bridge) 11 | find_dependency(tesseract_collision) 12 | 13 | include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@-targets.cmake") 14 | -------------------------------------------------------------------------------- /pcs_msgs/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pcs_msgs 4 | 0.0.0 5 | The pcs_msgs package 6 | 7 | Matthew Powelson 8 | 9 | Apache 2.0 10 | 11 | catkin 12 | actionlib_msgs 13 | message_generation 14 | message_runtime 15 | sensor_msgs 16 | std_msgs 17 | 18 | 19 | -------------------------------------------------------------------------------- /pcs_scan_integration/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pcs_scan_integration 4 | 0.0.0 5 | The pcs_scan_integration package 6 | 7 | Matthew Powelson 8 | 9 | Apache 2.0 10 | 11 | tesseract_collision 12 | libconsole-bridge-dev 13 | 14 | tesseract_support 15 | 16 | 17 | cmake 18 | 19 | 20 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/__init__.py: -------------------------------------------------------------------------------- 1 | """Sphinx ReadTheDocs theme. 2 | 3 | From https://github.com/ryan-roemer/sphinx-bootstrap-theme. 
4 | 5 | """ 6 | from os import path 7 | 8 | __version__ = '0.4.2' 9 | __version_full__ = __version__ 10 | 11 | 12 | def get_html_theme_path(): 13 | """Return list of HTML theme paths.""" 14 | cur_dir = path.abspath(path.dirname(path.dirname(__file__))) 15 | return cur_dir 16 | 17 | # See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package 18 | def setup(app): 19 | app.add_html_theme('sphinx_rtd_theme', path.abspath(path.dirname(__file__))) 20 | -------------------------------------------------------------------------------- /pcs_detection/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pcs_detection 4 | 0.0.0 5 | The pcs_detection package 6 | 7 | Matthew Powelson 8 | 9 | Apache 2.0 10 | 11 | python-numpy 12 | python-opencv 13 | libconsole-bridge-dev 14 | 15 | tesseract_common 16 | gtest 17 | 18 | 19 | cmake 20 | 21 | 22 | -------------------------------------------------------------------------------- /pcs_ros/launch/extract_images_from_bag.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /gh_pages/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = SphinxTest 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /pcs_scan_integration/test/box_2m.ply: -------------------------------------------------------------------------------- 1 | ply 2 | format ascii 1.0 3 | comment Created by Blender 2.79 (sub 0) - www.blender.org, source file: '' 4 | element vertex 8 5 | property float x 6 | property float y 7 | property float z 8 | element face 12 9 | property list uchar uint vertex_indices 10 | end_header 11 | 1.000000 1.000000 -1.000000 12 | -1.000000 -1.000000 -1.000000 13 | -1.000000 1.000000 -1.000000 14 | -1.000000 1.000000 1.000000 15 | 0.999999 -1.000001 1.000000 16 | 1.000000 0.999999 1.000000 17 | 1.000000 -1.000000 -1.000000 18 | -1.000000 -1.000000 1.000000 19 | 3 0 1 2 20 | 3 3 4 5 21 | 3 5 6 0 22 | 3 4 1 6 23 | 3 1 3 2 24 | 3 0 3 5 25 | 3 0 6 1 26 | 3 3 7 4 27 | 3 5 4 6 28 | 3 4 7 1 29 | 3 1 7 3 30 | 3 0 2 3 31 | -------------------------------------------------------------------------------- /pcs_msgs/action/ApplyOctomapMeshMask.action: -------------------------------------------------------------------------------- 1 | ### Goal 2 | # Filepath of the input mesh and associated frame 3 | string mesh_path 4 | string mesh_frame 5 | 6 | # Point Cloud Topic 7 | string point_cloud_topic 8 | 9 | # Parameters used to create the octomap 10 | float64 resolution 11 | int32 lower_limit 12 | int32 upper_limit 13 | bool limit_negative 14 | 15 | # Directory where the results will be saved 16 | string results_dir 17 | 18 | # Valid Mask types 19 | int32 RETURN_INSIDE = 0 20 | int32 RETURN_OUTSIDE = 1 21 | int32 RETURN_COLORIZED = 2 22 | 23 | # Determines the way the mask is applied 24 | int32 mask_type 25 | 26 | --- 27 | ### Result 28 | # Filepath of the resulting mesh PLY file 29 | string results_path 30 | string status_msg 31 | 32 | --- 33 | ### Feedback 34 | 35 | -------------------------------------------------------------------------------- /pcs_ros/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | pcs_ros 4 | 0.0.1 5 | The point_cloud_segmentation package. 
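The `ApplyOctomapMeshMask.action` definition above maps directly onto an actionlib client call. A sketch of such a client follows; the server name (`apply_octomap_mesh_mask`), the mesh path, and the point cloud topic are assumptions for illustration, while the goal fields and mask-type constants come from the action definition itself.

```python
#!/usr/bin/env python
# Illustrative client for pcs_msgs/ApplyOctomapMeshMask. The action server
# name and the paths/topics below are assumptions, not values from the repo.
import rospy
import actionlib
from pcs_msgs.msg import ApplyOctomapMeshMaskAction, ApplyOctomapMeshMaskGoal

if __name__ == '__main__':
    rospy.init_node('mesh_mask_client')
    client = actionlib.SimpleActionClient('apply_octomap_mesh_mask', ApplyOctomapMeshMaskAction)
    client.wait_for_server()

    goal = ApplyOctomapMeshMaskGoal()
    goal.mesh_path = '/tmp/scan_mesh.ply'                     # assumed mesh location
    goal.mesh_frame = 'world'
    goal.point_cloud_topic = '/octomap_point_cloud_centers'   # assumed colorized-cloud topic
    goal.resolution = 0.01
    goal.lower_limit = 0
    goal.upper_limit = 255
    goal.limit_negative = False
    goal.results_dir = '/tmp'
    goal.mask_type = ApplyOctomapMeshMaskGoal.RETURN_COLORIZED

    client.send_goal(goal)
    client.wait_for_result()
    rospy.loginfo('Masked mesh written to %s', client.get_result().results_path)
```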
6 | 7 | Matthew Powelson 8 | 9 | Apache 2.0 10 | 11 | catkin 12 | pcl_conversions 13 | pcl_ros 14 | roscpp 15 | sensor_msgs 16 | cv_bridge 17 | pcs_detection 18 | pcs_msgs 19 | pcs_scan_integration 20 | message_runtime 21 | 22 | python-opencv 23 | rospy 24 | 25 | 26 | -------------------------------------------------------------------------------- /dependencies.rosinstall: -------------------------------------------------------------------------------- 1 | # Descartes Light 2 | - git: {local-name: descartes_light, uri: 'https://github.com/swri-robotics/descartes_light.git', version: master} 3 | 4 | # OPW Kinematics 5 | - git: {local-name: opw_kinematics, uri: 'https://github.com/Jmeyer1292/opw_kinematics.git', version: master} 6 | 7 | # Tesseract 8 | - git: {local-name: tesseract, uri: 'https://github.com/ros-industrial-consortium/tesseract.git', version: master} 9 | 10 | # Tesseract External (Bullet) 11 | - git: {local-name: tesseract_ext, uri: 'https://github.com/ros-industrial-consortium/tesseract_ext.git', version: master} 12 | 13 | # Trajopt 14 | - git: {local-name: trajopt_ros, uri: 'https://github.com/ros-industrial-consortium/trajopt_ros.git', version: master} 15 | 16 | # Octomap Mapping 17 | - git: {local-name: octomap_mapping, uri: 'https://github.com/OctoMap/octomap_mapping.git', version: kinetic-devel} 18 | -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/fcn8_COMBINED_19_10_10_173416/inference_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "CONFIDENCE_THRESHOLD": 0.5, 3 | "BATCH_SIZE": 8, 4 | "MODEL": "fcn8", 5 | "CHANNEL": "COMBINED", 6 | "DISPLAY_SCALE_FACTOR": 1.25, 7 | "VAL_WEIGHT_PATH": "/fcn8_GREY_19_10_10_101037/23.h5", 8 | "PRE_PROCESS": { 9 | "edge": [ 10 | 0, 11 | 3 12 | ], 13 | "grey": [ 14 | 117 15 | ], 16 | "rgb": [ 17 | 182.11864, 18 | 155.26283, 19 | 107.5 20 | ], 21 | "ycr": [ 22 | 123, 23 | 126, 24 | 120 25 | ], 26 | "lab": [ 27 | 50, 28 | 0.1, 29 | 1.5 30 | ] 31 | }, 32 | "ORIG_DIMS": [ 33 | 480, 34 | 640 35 | ], 36 | "CLASS_NAMES": [ 37 | "weld" 38 | ], 39 | "MODE": "VALIDATE" 40 | } -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/fcn8_STACKED_19_10_10_173601/inference_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "VAL_WEIGHT_PATH": "/fcn8_COMBINED_19_10_10_173426/08.h5", 3 | "BATCH_SIZE": 8, 4 | "DISPLAY_SCALE_FACTOR": 1.25, 5 | "MODEL": "fcn8", 6 | "MODE": "VALIDATE", 7 | "PRE_PROCESS": { 8 | "edge": [ 9 | 0, 10 | 3 11 | ], 12 | "grey": [ 13 | 117 14 | ], 15 | "rgb": [ 16 | 182.11864, 17 | 155.26283, 18 | 107.5 19 | ], 20 | "ycr": [ 21 | 123, 22 | 126, 23 | 120 24 | ], 25 | "lab": [ 26 | 50, 27 | 0.1, 28 | 1.5 29 | ] 30 | }, 31 | "CHANNEL": "STACKED", 32 | "CLASS_NAMES": [ 33 | "weld" 34 | ], 35 | "ORIG_DIMS": [ 36 | 480, 37 | 640 38 | ], 39 | "CONFIDENCE_THRESHOLD": 0.5 40 | } -------------------------------------------------------------------------------- /pcs_detection/scripts/histogram_backprojection_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import numpy as np 4 | import cv2 as cv 5 | from pcs_detection.models.histogram_backprojection import HistogramBackprojection 6 | 7 | if __name__ == '__main__': 8 | # Load the histogram into the annotator 9 | annotator = HistogramBackprojection("example_data/trained_hist.npy") 10 | 11 
| # Load the image 12 | input_image = cv.imread('example_data/example_image.png') 13 | 14 | # Generate the annotation 15 | results_image = annotator.annotate_image(input_image) 16 | 17 | # Show the results 18 | print("Input image of size: " + str(input_image.shape)) 19 | print("Results image of size: " + str(results_image.shape)) 20 | print("Press ESC to exit") 21 | while True: 22 | cv.imshow("image", np.hstack((input_image, results_image))) 23 | k = cv.waitKey(1) & 0xFF 24 | if k == 27: 25 | cv.destroyWindow("image") 26 | break 27 | -------------------------------------------------------------------------------- /gh_pages/_static/override.css: -------------------------------------------------------------------------------- 1 | .icon:before { 2 | white-space: pre-wrap !important; 3 | } 4 | .header-override { 5 | margin-bottom: 20px; 6 | } 7 | .header-override p { 8 | padding: 5px; 9 | font-size: 1.2em; 10 | text-align: right; 11 | } 12 | 13 | .wy-breadcrumbs li.wy-breadcrumbs-aside { 14 | display: block; 15 | width: 100%; 16 | text-align: right; 17 | margin-bottom: -25px; 18 | } 19 | 20 | .btn-copyboard { 21 | opacity: 0.20; 22 | background-color: #00adef; 23 | color: #ffffff; 24 | white-space: nowrap; 25 | padding: 2px 2px; 26 | position: relative; 27 | float: right; 28 | transition:opacity 0.3s ease-in-out; 29 | -webkit-transition:opacity 0.3s ease-in-out; 30 | -o-transition:opacity 0.3s ease-in-out; 31 | } 32 | 33 | .btn-copyboard:hover { 34 | background-color: #00adef; 35 | color: #ffffff; 36 | opacity:1; 37 | } 38 | 39 | .btn-copyboard:hover::after, .btn-copyboard:focus, .btn-copyboard:active { 40 | background-color: #00adef; 41 | color: #ffffff; 42 | } 43 | -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/using_negatives_fcn8_GREY_19_10_10_101037/inference_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "MODE": "VALIDATE", 3 | "PRE_PROCESS": { 4 | "edge": [ 5 | 0, 6 | 3 7 | ], 8 | "grey": [ 9 | 117 10 | ], 11 | "rgb": [ 12 | 182.11864, 13 | 155.26283, 14 | 107.5 15 | ], 16 | "ycr": [ 17 | 123, 18 | 126, 19 | 120 20 | ], 21 | "lab": [ 22 | 50, 23 | 0.1, 24 | 1.5 25 | ] 26 | }, 27 | "VAL_WEIGHT_PATH": "/mnt/project_share/Weld_Detection/weld_weights/using_negatives/fcn8_GREY_19_10_10_101037/38.h5", 28 | "MODEL": "fcn8", 29 | "DISPLAY_SCALE_FACTOR": 1.25, 30 | "BATCH_SIZE": 8, 31 | "CHANNEL": "GREY", 32 | "CONFIDENCE_THRESHOLD": 0.5, 33 | "ORIG_DIMS": [ 34 | 480, 35 | 640 36 | ], 37 | "CLASS_NAMES": [ 38 | "weld" 39 | ] 40 | } -------------------------------------------------------------------------------- /pcs_detection/src_python/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # See: 2 | # https://bloerg.net/2012/11/10/cmake-and-distutils.html 3 | # https://stackoverflow.com/questions/13298504/using-cmake-with-setup-py?newreg=c95d86b7b27645e4b608b29352eb2ebb 4 | 5 | find_package(PythonInterp) 6 | 7 | if (PYTHONINTERP_FOUND) 8 | set(SETUP_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in") 9 | set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py") 10 | set(DEPS "${CMAKE_CURRENT_SOURCE_DIR}/pcs_detection/__init__.py") 11 | set(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/build/timestamp") 12 | 13 | configure_file(${SETUP_PY_IN} ${SETUP_PY}) 14 | 15 | add_custom_command(OUTPUT ${OUTPUT} 16 | COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} build 17 | COMMAND ${CMAKE_COMMAND} -E touch ${OUTPUT} 18 | DEPENDS ${DEPS}) 19 | 20 | 
add_custom_target(target ALL DEPENDS ${OUTPUT}) 21 | install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${SETUP_PY} install --install-lib ${CMAKE_INSTALL_PREFIX}/lib/python2.7/dist-packages)") 22 | endif() 23 | -------------------------------------------------------------------------------- /pcs_ros/src/hsv_thresholding_node.cpp: -------------------------------------------------------------------------------- 1 | #include <ros/ros.h> 2 | #include <cv_bridge/cv_bridge.h> 3 | #include <pcs_msgs/ImageProcessing.h> 4 | #include <pcs_detection/hsv_thresholding.h> 5 | 6 | namespace pcs_ros 7 | { 8 | bool process_image(pcs_msgs::ImageProcessing::Request& req, pcs_msgs::ImageProcessing::Response& res) 9 | { 10 | ROS_DEBUG("Processing image with hsv thresholding"); 11 | // Convert to OpenCV format 12 | cv_bridge::CvImagePtr cv_ptr; 13 | cv_ptr = cv_bridge::toCvCopy(req.input_image, req.input_image.encoding); 14 | 15 | // Perform Thresholding 16 | cv::Mat mask; 17 | pcs_detection::hsvThresholdingDetector(cv_ptr->image, mask); 18 | 19 | // Convert the single-channel mask to 3 channels to match the input 20 | cv::Mat mask_3channel; 21 | cv::Mat in[] = { mask, mask, mask }; 22 | cv::merge(in, 3, mask_3channel); 23 | cv_ptr->image = mask_3channel; 24 | 25 | // Convert to ROS msg 26 | res.returned_image = *cv_ptr->toImageMsg(); 27 | return true; 28 | } 29 | } // namespace pcs_ros 30 | 31 | int main(int argc, char** argv) 32 | { 33 | ros::init(argc, argv, "hsv_thresholding_server"); 34 | ros::NodeHandle nh; 35 | 36 | ros::ServiceServer service = nh.advertiseService("perform_detection", pcs_ros::process_image); 37 | ROS_INFO("HSV thresholding service is available"); 38 | ros::spin(); 39 | 40 | return 0; 41 | } 42 | -------------------------------------------------------------------------------- /gh_pages/index.rst: -------------------------------------------------------------------------------- 1 | .. SphinxTest documentation master file, created by 2 | sphinx-quickstart on Tue Oct 3 11:09:13 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ============================================ 7 | Welcome to the Point Cloud Segmentation wiki 8 | ============================================ 9 | 10 | 11 | Core Packages 12 | ----------------------- 13 | 14 | * **pcs_detection** – Contains functions for doing 2D detection 15 | * **pcs_scan_integration** - Contains functions used during scanning 16 | 17 | ROS Packages 18 | ---------------------- 19 | 20 | * **pcs_msgs** – Contains the ROS message, service, and action definitions used by the other packages. 21 | * **pcs_ros** – Exposes the functions in the other packages as ROS nodes and provides utility nodes for piecing together a complete system. 22 | 23 | .. Warning:: These packages are under heavy development and are subject to change. 24 | 25 | 26 | Packages 27 | ------------ 28 | 29 | .. toctree:: 30 | :maxdepth: 1 31 | 32 | pcs_detection <_source/pcs_detection.rst> 33 | pcs_msgs <_source/pcs_msgs.rst> 34 | pcs_ros <_source/pcs_ros.rst> 35 | pcs_scan_integration <_source/pcs_scan_integration.rst> 36 | 37 | FAQ 38 | --- 39 | .. toctree:: 40 | :maxdepth: 2 41 | 42 | Questions?<_source/FAQ.rst> 43 | -------------------------------------------------------------------------------- /gh_pages/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends '!layout.html' %} 2 | 3 | {% block extrabody %} 4 | 5 | {% endblock %} 6 | 7 | {% block extracontent %} 8 |
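Both `hsv_thresholding_node.cpp` above and the histogram backprojection node later in this repo advertise the same `perform_detection` service, so a single client works against either. A minimal rospy sketch follows; the service name and the request/response field names come from the sources here, while the camera topic name is an assumption.

```python
#!/usr/bin/env python
# Illustrative client for the perform_detection service. The image topic
# name below is an assumption; service and field names come from the repo.
import rospy
from sensor_msgs.msg import Image
from pcs_msgs.srv import ImageProcessing

if __name__ == '__main__':
    rospy.init_node('detection_client')
    rospy.wait_for_service('perform_detection')
    perform_detection = rospy.ServiceProxy('perform_detection', ImageProcessing)

    # Grab one frame and send it through the detector
    image = rospy.wait_for_message('camera/image_color', Image)
    response = perform_detection(image)
    mask = response.returned_image  # same size/encoding as the input image
    rospy.loginfo('Received %dx%d mask', mask.width, mask.height)
```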
9 | 10 | 11 | 12 |

13 | ROS-Industrial Website|Blog 14 |

15 |
16 | {% endblock %} 17 | 18 | {% block footer %} 19 | 39 | {% endblock %} 40 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/versions.html: -------------------------------------------------------------------------------- 1 | {% if READTHEDOCS %} 2 | {# Add rst-badge after rst-versions for small badge style. #} 3 |
4 | 5 | Read the Docs 6 | v: {{ current_version }} 7 | 8 | 9 |
10 |
11 |
{{ _('Versions') }}
12 | {% for slug, url in versions %} 13 |
{{ slug }}
14 | {% endfor %} 15 |
16 |
17 |
{{ _('Downloads') }}
18 | {% for type, url in downloads %} 19 |
{{ type }}
20 | {% endfor %} 21 |
22 |
23 |
{{ _('On Read the Docs') }}
24 |
25 | {{ _('Project Home') }} 26 |
27 |
28 | {{ _('Builds') }} 29 |
30 |
31 |
32 | {% trans %}Free document hosting provided by Read the Docs.{% endtrans %} 33 | 34 |
35 |
36 | {% endif %} 37 | 38 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # This config file for Travis CI 2 | sudo: required 3 | dist: trusty 4 | language: generic 5 | 6 | services: 7 | - docker 8 | 9 | cache: 10 | directories: 11 | - $HOME/.ccache 12 | 13 | git: 14 | quiet: true 15 | 16 | env: 17 | global: 18 | - CCACHE_DIR=$HOME/.ccache 19 | - UPSTREAM_WORKSPACE=file 20 | - ROSINSTALL_FILENAME=dependencies.rosinstall 21 | - ROS_REPO=ros 22 | - NOT_TEST_INSTALL=true 23 | - CMAKE_ARGS=-DENABLE_TESTS=ON 24 | - ROSDEP_SKIP_KEYS="bullet3 fcl benchmark" 25 | - BUILD_PKGS_WHITELIST="pcs_detection pcs_msgs pcs_ros pcs_scan_integration" 26 | 27 | matrix: 28 | include: 29 | - env: ROS_DISTRO=melodic CLANG_FORMAT_CHECK=file CLANG_FORMAT_VERSION=8 30 | git: 31 | submodules: false 32 | - env: ROS_DISTRO=kinetic 33 | - env: ROS_DISTRO=kinetic ROS_REPO=ros-shadow-fixed 34 | - env: ROS_DISTRO=melodic ROS_PARALLEL_JOBS=-j2 CATKIN_PARALLEL_JOBS=-p2 # Travis was running out of resources and hanging for melodic builds 35 | - env: ROS_DISTRO=melodic ROS_REPO=ros-shadow-fixed ROS_PARALLEL_JOBS=-j2 CATKIN_PARALLEL_JOBS=-p3 36 | allow_failures: 37 | - env: ROS_DISTRO=kinetic ROS_REPO=ros-shadow-fixed 38 | - env: ROS_DISTRO=melodic ROS_REPO=ros-shadow-fixed ROS_PARALLEL_JOBS=-j2 CATKIN_PARALLEL_JOBS=-p3 39 | 40 | install: 41 | - git clone --quiet --depth 1 https://github.com/ros-industrial/industrial_ci.git .industrial_ci -b legacy 42 | 43 | script: 44 | - .industrial_ci/travis.sh 45 | -------------------------------------------------------------------------------- /pcs_scan_integration/test/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package(GTest REQUIRED) 2 | find_package(tesseract_support REQUIRED) 3 | 4 | add_executable(${PROJECT_NAME}_octomap_mesh_masking_unit octomap_mesh_masking_unit.cpp) 5 | target_link_libraries(${PROJECT_NAME}_octomap_mesh_masking_unit ${GTEST_BOTH_LIBRARIES} ${PROJECT_NAME}_octomap_mesh_masking) 6 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE -Wsuggest-override -Wconversion -Wsign-conversion) 7 | if(CXX_FEATURE_FOUND EQUAL "-1") 8 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE -std=c++11) 9 | else() 10 | target_compile_features(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE cxx_std_11) 11 | endif() 12 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 13 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE -mno-avx) 14 | else() 15 | message(WARNING "Non-GNU compiler detected. 
If using AVX instructions, Eigen alignment issues may result.") 16 | endif() 17 | target_include_directories(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE ${GTEST_INCLUDE_DIRS}) 18 | if(${CMAKE_VERSION} VERSION_LESS "3.10.0") 19 | gtest_add_tests(${PROJECT_NAME}_octomap_mesh_masking_unit "" AUTO) 20 | else() 21 | gtest_discover_tests(${PROJECT_NAME}_octomap_mesh_masking_unit) 22 | endif() 23 | add_dependencies(${PROJECT_NAME}_octomap_mesh_masking_unit ${PACKAGE_LIBRARIES}) 24 | target_compile_definitions(${PROJECT_NAME}_octomap_mesh_masking_unit PRIVATE DATA_DIR="${CMAKE_SOURCE_DIR}/test") 25 | add_dependencies(run_tests ${PROJECT_NAME}_octomap_mesh_masking_unit) 26 | 27 | -------------------------------------------------------------------------------- /pcs_scan_integration/README.md: -------------------------------------------------------------------------------- 1 | # Scan Integration 2 | 3 | This package contains libraries useful for integrating images into scans 4 | 5 | ## Libraries 6 | 7 | All libraries in this package are exported as targets under the namespace `pcs` 8 | 9 | ### Octomap_mesh_masking 10 | 11 | This library provides a class that masks a mesh with an octomap. During scanning, an octomap may have been generated using octomap_server and functions in pcs_detection. The user may then wish to extract the areas of the mesh (from YAK for example) that are inside the octomap. 12 | 13 | One useful feature of this class is the ability to set the octree from a colorized pointcloud. During scanning, the octomap is often colorized based on regions where a certain feature is detected. This can be retrieved as a colorized pointcloud with a point at the center of each leaf. The setOctree function allows a threshold on color to be set whereby only points where the r, g, and b channel values are within the limits are added to the octree. 14 | 15 | When applying the mask there are 3 options. 16 | * RETURN_INSIDE - Returns only the mesh inside the masking 17 | * RETURN_OUTSIDE - Returns only the mesh outside the masking 18 | * RETURN_COLORIZED - Returns the entire mesh but colorizes it based on what is inside/outside the masking 19 | 20 | #### RETURN_INSIDE 21 | ![RETURN_INSIDE](../gh_pages/_static/pcs_scan_integration/RETURN_INSIDE.png) 22 | 23 | #### RETURN_OUTSIDE 24 | ![RETURN_OUTSIDE](../gh_pages/_static/pcs_scan_integration/RETURN_OUTSIDE.png) 25 | 26 | #### RETURN_COLORIZED 27 | ![RETURN_COLORIZED](../gh_pages/_static/pcs_scan_integration/RETURN_COLORIZED.png) 28 | 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # point_cloud_segmentation 2 | 3 | master: [![Build Status](https://travis-ci.org/swri-robotics/point_cloud_segmentation.svg?branch=master)](https://travis-ci.org/swri-robotics/point_cloud_segmentation) 4 | 5 | devel: [![Build Status](https://travis-ci.org/swri-robotics/point_cloud_segmentation.svg?branch=devel)](https://travis-ci.org/swri-robotics/point_cloud_segmentation) 6 | 7 | ## Description 8 | 9 | This package contains tools for annotating point clouds based on associated images. The idea is that 2D feature detectors are a mature technology, but detecting features in 3D point clouds is much harder. It is possible to detect features in the 2D images that are often associated with pointclouds (e.g. from depth cameras) and annotate the point clouds based on the 2D detectors. 
This data can then be aggregated over the course of a 3D scan to result in a semantically labelled 3D reconstruction. 10 | 11 | One important feature of this meta-package is that the majority of the subpackages are ROS-independent. They are pure CMake 3.5 packages that expose CMake targets that can be used in a variety of settings. While pcs_ros is a ROS 1 wrapper around many of the packages' functions, a ROS 2 wrapper pull request would also be welcome. 12 | 13 | ## Package Overview 14 | * pcs_detection - Contains functions for doing 2D detection 15 | * pcs_msgs - Contains the ROS message, service, and action definitions 16 | * pcs_ros - Exposes the functions in the other packages as ROS nodes and provides a variety of utility nodes that are useful for piecing together a complete system. 17 | * pcs_scan_integration - Contains functions used to aggregate multiple annotated point clouds over the course of a scan. 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/search.html: -------------------------------------------------------------------------------- 1 | {# 2 | basic/search.html 3 | ~~~~~~~~~~~~~~~~~ 4 | 5 | Template for the search page. 6 | 7 | :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. 8 | :license: BSD, see LICENSE for details. 9 | #} 10 | {%- extends "layout.html" %} 11 | {% set title = _('Search') %} 12 | {% set script_files = script_files + ['_static/searchtools.js'] %} 13 | {% block footer %} 14 | 17 | {# this is used when loading the search index using $.ajax fails, 18 | such as on Chrome for documents on localhost #} 19 | 20 | {{ super() }} 21 | {% endblock %} 22 | {% block body %} 23 | 31 | 32 | {% if search_performed %}
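The color-threshold rule described in the pcs_scan_integration README above (`setOctree` adds only points whose r, g, and b channel values are within the limits) is easy to state in array terms. The pcs API itself is C++; the numpy sketch below only illustrates the selection rule, with illustrative array shapes and threshold values.

```python
import numpy as np

def threshold_colorized_cloud(points, colors, lower, upper):
    """Keep points whose r, g, b values all fall within [lower, upper].

    points: (N, 3) float xyz; colors: (N, 3) uint8 rgb -- one row per
    octree leaf center, as described in the README.
    """
    lower = np.asarray(lower, dtype=np.uint8)
    upper = np.asarray(upper, dtype=np.uint8)
    keep = np.all((colors >= lower) & (colors <= upper), axis=1)
    return points[keep], colors[keep]

# e.g. keep only strongly red leaves before building the octree
# pts, cols = threshold_colorized_cloud(pts, cols, (200, 0, 0), (255, 60, 60))
```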

{{ _('Search Results') }}

34 | {% if not search_results %} 35 |

{{ _('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.') }}

36 | {% endif %} 37 | {% endif %} 38 |
39 | {% if search_results %} 40 |
    41 | {% for href, caption, context in search_results %} 42 |
  • 43 | {{ caption }} 44 |

    {{ context|e }}

    45 |
  • 46 | {% endfor %} 47 |
48 | {% endif %} 49 |
50 | {% endblock %} 51 | -------------------------------------------------------------------------------- /pcs_detection/scripts/example_weights.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ''' 3 | * @file example_weights.py 4 | * @brief Used for viewing the predictions of trained nets on the validation set 5 | * 6 | * @author Jake Janssen 7 | * @date November 8, 2019 8 | * @version TODO 9 | * @bug No known bugs 10 | * 11 | * @copyright Copyright (c) 2017, Southwest Research Institute 12 | * 13 | * @par License 14 | * Software License Agreement (Apache License) 15 | * @par 16 | * Licensed under the Apache License, Version 2.0 (the "License"); 17 | * you may not use this file except in compliance with the License. 18 | * You may obtain a copy of the License at 19 | * http://www.apache.org/licenses/LICENSE-2.0 20 | * @par 21 | * Unless required by applicable law or agreed to in writing, software 22 | * distributed under the License is distributed on an "AS IS" BASIS, 23 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 24 | * See the License for the specific language governing permissions and 25 | * limitations under the License. 26 | ''' 27 | 28 | import json 29 | import os 30 | from pcs_detection.process import validate 31 | 32 | 33 | class Config: 34 | def __init__(self, **entries): 35 | self.__dict__.update(entries) 36 | 37 | if __name__ == '__main__': 38 | # Import Config json file and convert into format we need 39 | dir_path = os.path.dirname(os.path.realpath(__file__)) 40 | with open(dir_path + '/data/weights/<weight_folder>/full_config.json') as json_data_file:  # replace <weight_folder> with a directory under data/weights, e.g. fcn8_COMBINED_19_10_10_173416 41 | data = json.load(json_data_file) 42 | config = Config(**data) 43 | 44 | if 'VAL_WEIGHT_PATH' in config.__dict__.keys(): 45 | validate(config) 46 | else: 47 | print('This config does not have an associated weight file') 48 | -------------------------------------------------------------------------------- /pcs_detection/scripts/fcn8_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ''' 3 | * @file fcn8_train.py 4 | * @brief Used for training neural nets and viewing the images passed into the network after preprocessing 5 | * 6 | * @author Jake Janssen 7 | * @date November 6, 2019 8 | * @version TODO 9 | * @bug No known bugs 10 | * 11 | * @copyright Copyright (c) 2017, Southwest Research Institute 12 | * 13 | * @par License 14 | * Software License Agreement (Apache License) 15 | * @par 16 | * Licensed under the Apache License, Version 2.0 (the "License"); 17 | * you may not use this file except in compliance with the License. 18 | * You may obtain a copy of the License at 19 | * http://www.apache.org/licenses/LICENSE-2.0 20 | * @par 21 | * Unless required by applicable law or agreed to in writing, software 22 | * distributed under the License is distributed on an "AS IS" BASIS, 23 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 24 | * See the License for the specific language governing permissions and 25 | * limitations under the License.
26 | ''' 27 | 28 | import json 29 | import os 30 | from pcs_detection.process import test_dataloader, train 31 | 32 | 33 | class Config: 34 | def __init__(self, **entries): 35 | self.__dict__.update(entries) 36 | 37 | if __name__ == '__main__': 38 | # Import Config json file and convert into format we need 39 | dir_path = os.path.dirname(os.path.realpath(__file__)) 40 | with open(dir_path + '/training_config.json') as json_data_file: 41 | data = json.load(json_data_file) 42 | config = Config(**data) 43 | 44 | # run the training process specified in the config 45 | if config.MODE == 'DEBUG': 46 | test_dataloader(config) 47 | elif config.MODE == 'TRAIN': 48 | train(config) 49 | else: 50 | print('Not a valid mode') 51 | -------------------------------------------------------------------------------- /pcs_ros/src/histogram_backprojection_node: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import rospy 4 | from cv_bridge import CvBridge 5 | import numpy as np 6 | 7 | from pcs_msgs.srv import ImageProcessing, ImageProcessingResponse 8 | from pcs_detection.models.histogram_backprojection import HistogramBackprojection 9 | 10 | class HistogramBackprojectionServer(object): 11 | """ 12 | Creates a ROS service that takes a sensor_msgs/Image and returns a 13 | sensor_msgs/Image resulting from using histogram backprojection 14 | detection. 15 | """ 16 | service = None 17 | 18 | def __init__(self, filepath): 19 | """ 20 | Constructor takes an input filepath to the .npy file 21 | containing the histogram 22 | """ 23 | self.histogram_backprojection = HistogramBackprojection(filepath) 24 | self.bridge = CvBridge() 25 | 26 | self.service = rospy.Service('perform_detection', ImageProcessing, self.annotateImage) 27 | rospy.logdebug("Histogram Backprojection service is available") 28 | 29 | def annotateImage(self, req): 30 | """ 31 | Callback function for ImageProcessing Service. Return image is the 32 | same size as the input image 33 | """ 34 | # Convert sensor_msgs/Image to cv2 Mat 35 | cv_image = self.bridge.imgmsg_to_cv2(req.input_image, "8UC3") 36 | 37 | # Convert to numpy array and send to annotator 38 | results = self.histogram_backprojection.annotate_image(np.asarray(cv_image)) 39 | 40 | # Convert results back into sensor_msgs/Image 41 | res_msg = self.bridge.cv2_to_imgmsg(results, "8UC3") 42 | response = ImageProcessingResponse(res_msg) 43 | 44 | return response 45 | 46 | 47 | if __name__ == '__main__': 48 | rospy.init_node('histogram_backprojection_node') 49 | 50 | filepath = rospy.get_param('~filepath') 51 | 52 | # Launch server 53 | server = HistogramBackprojectionServer(filepath) 54 | 55 | rospy.spin() 56 | -------------------------------------------------------------------------------- /pcs_detection/src_python/pcs_detection/models/histogram_backprojection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 as cv 3 | 4 | class HistogramBackprojection: 5 | """ 6 | Annotates an image based on a previously provided histogram file.
7 | 8 | For more information see 9 | https://docs.opencv.org/master/dc/df6/tutorial_py_histogram_backprojection.html 10 | """ 11 | histogram = None 12 | threshold_min = 30 13 | threshold_max = 150 14 | 15 | def __init__(self, hist_filepath): 16 | """ 17 | Takes an .npy file that contains the trained histogram as an input 18 | """ 19 | self.histogram = np.load(hist_filepath) 20 | print("Histogram loaded") 21 | 22 | def annotate_image(self, input_image): 23 | """ 24 | Returns a binary mask image where 255 corresponds to regions inside the 25 | histogram and 0 corresponds to regions outside. Mask will be the same 26 | size as the input image 27 | """ 28 | # Convert the image to hsv 29 | hsv_image = cv.cvtColor(input_image, cv.COLOR_BGR2HSV) 30 | dst = cv.calcBackProject([hsv_image], [0, 1], self.histogram, [0, 180, 0, 255], 1) 31 | 32 | # Now convolve with a circular disc 33 | disc = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5)) 34 | cv.filter2D(dst, -1, disc, dst) 35 | 36 | # Apply threshold based on limits 37 | _, thresholded = cv.threshold(dst, self.threshold_min, self.threshold_max, 0) 38 | 39 | # Apply erosion and dilation to clean up noise 40 | kernel = np.ones((5, 5), np.uint8) 41 | cleaned = cv.morphologyEx(thresholded, cv.MORPH_OPEN, kernel) 42 | 43 | # Normalize to 0 - 1 44 | cv.normalize(cleaned, cleaned, 0, 1, cv.NORM_MINMAX, cv.CV_8UC3) 45 | 46 | # Convert to 0/255 47 | cleaned = 255 * cleaned 48 | 49 | # Convert back to 3 Channel Image 50 | output_image = cv.merge((cleaned, cleaned, cleaned)) 51 | 52 | return output_image 53 | -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | BasedOnStyle: Google 3 | AccessModifierOffset: -2 4 | AlignEscapedNewlinesLeft: false 5 | AlignTrailingComments: true 6 | AlignAfterOpenBracket: Align 7 | AllowAllParametersOfDeclarationOnNextLine: false 8 | AllowShortFunctionsOnASingleLine: true 9 | AllowShortIfStatementsOnASingleLine: false 10 | AllowShortLoopsOnASingleLine: false 11 | AllowShortLoopsOnASingleLine: false 12 | AlwaysBreakBeforeMultilineStrings: false 13 | AlwaysBreakTemplateDeclarations: true 14 | BinPackArguments: false 15 | BinPackParameters: false 16 | BreakBeforeBinaryOperators: false 17 | BreakBeforeTernaryOperators: false 18 | BreakConstructorInitializersBeforeComma: true 19 | ColumnLimit: 120 20 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 21 | ConstructorInitializerIndentWidth: 2 22 | ContinuationIndentWidth: 4 23 | Cpp11BracedListStyle: false 24 | DerivePointerBinding: false 25 | ExperimentalAutoDetectBinPacking: false 26 | IndentCaseLabels: true 27 | IndentFunctionDeclarationAfterType: false 28 | IndentWidth: 2 29 | MaxEmptyLinesToKeep: 1 30 | NamespaceIndentation: None 31 | ObjCSpaceBeforeProtocolList: true 32 | PenaltyBreakBeforeFirstCallParameter: 19 33 | PenaltyBreakComment: 60 34 | PenaltyBreakFirstLessLess: 1000 35 | PenaltyBreakString: 1 36 | PenaltyExcessCharacter: 1000 37 | PenaltyReturnTypeOnItsOwnLine: 90 38 | PointerBindsToType: true 39 | SortIncludes: false 40 | SpaceAfterControlStatementKeyword: true 41 | SpaceAfterCStyleCast: false 42 | SpaceBeforeAssignmentOperators: true 43 | SpaceInEmptyParentheses: false 44 | SpacesBeforeTrailingComments: 2 45 | SpacesInAngles: false 46 | SpacesInCStyleCastParentheses: false 47 | SpacesInParentheses: false 48 | Standard: Auto 49 | TabWidth: 2 50 | UseTab: Never 51 | 52 | # Configure each individual brace in BraceWrapping 53 |
BreakBeforeBraces: Custom 54 | 55 | # Control of individual brace wrapping cases 56 | BraceWrapping: { 57 | AfterClass: 'true' 58 | AfterControlStatement: 'true' 59 | AfterEnum : 'true' 60 | AfterFunction : 'true' 61 | AfterNamespace : 'true' 62 | AfterStruct : 'true' 63 | AfterUnion : 'true' 64 | BeforeCatch : 'true' 65 | BeforeElse : 'true' 66 | IndentBraces : 'false' 67 | } 68 | ... 69 | -------------------------------------------------------------------------------- /pcs_ros/launch/demo.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /pcs_ros/src/image_channel_splitter_node.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | namespace pcs_ros 7 | { 8 | /** 9 | * @brief This class/node splits a single channel mono8 image into a 3 channel rgb8 image. Each channel will 10 | * simply be the same as the input mono8 image 11 | */ 12 | class ImageChannelSplitter 13 | { 14 | public: 15 | void callback(const sensor_msgs::Image::ConstPtr& monochrome_image) 16 | { 17 | // Convert to OpenCV and convert to rgb 18 | cv_bridge::CvImagePtr monochrome; 19 | monochrome = cv_bridge::toCvCopy(monochrome_image, sensor_msgs::image_encodings::MONO8); 20 | std::vector vChannels; 21 | vChannels.push_back(monochrome->image); 22 | vChannels.push_back(monochrome->image); 23 | vChannels.push_back(monochrome->image); 24 | cv::merge(vChannels, image_.image); 25 | 26 | // Convert back to ROS and publish 27 | image_.encoding = sensor_msgs::image_encodings::RGB8; 28 | image_.header = monochrome_image->header; 29 | pub_.publish(image_.toImageMsg()); 30 | } 31 | 32 | ImageChannelSplitter() : monochrome_topic_("input"), image_topic_("output") 33 | { 34 | // Create publishers and subscribers 35 | sub_ = nh_.subscribe(monochrome_topic_, 30, &ImageChannelSplitter::callback, this); 36 | pub_ = nh_.advertise(image_topic_, 30); 37 | 38 | // Print the topics we are using 39 | std::string t1 = nh_.resolveName(monochrome_topic_); 40 | std::string t2 = nh_.resolveName(image_topic_); 41 | ROS_INFO_STREAM("Subscribing to single channel image on: " << t1); 42 | ROS_INFO_STREAM("Publishing 3 channel image on: " << t2); 43 | } 44 | 45 | private: 46 | ros::NodeHandle nh_; 47 | cv_bridge::CvImage image_; 48 | std::string monochrome_topic_; 49 | std::string image_topic_; 50 | ros::Subscriber sub_; 51 | ros::Publisher pub_; 52 | }; 53 | } // namespace pcs_ros 54 | int main(int argc, char** argv) 55 | { 56 | ros::init(argc, argv, "image_channel_splitter_node"); 57 | pcs_ros::ImageChannelSplitter ics; 58 | ros::spin(); 59 | return 0; 60 | } 61 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/footer.html: -------------------------------------------------------------------------------- 1 |
2 | {% if (theme_prev_next_buttons_location == 'bottom' or theme_prev_next_buttons_location == 'both') and (next or prev) %} 3 | 11 | {% endif %} 12 | 13 |
14 | 15 |
16 |

17 | {%- if show_copyright %} 18 | {%- if hasdoc('copyright') %} 19 | {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}{% endtrans %} 20 | {%- else %} 21 | {% trans copyright=copyright|e %}© Copyright {{ copyright }}{% endtrans %} 22 | {%- endif %} 23 | {%- endif %} 24 | 25 | {%- if build_id and build_url %} 26 | {% trans build_url=build_url, build_id=build_id %} 27 | 28 | Build 29 | {{ build_id }}. 30 | 31 | {% endtrans %} 32 | {%- elif commit %} 33 | {% trans commit=commit %} 34 | 35 | Revision {{ commit }}. 36 | 37 | {% endtrans %} 38 | {%- elif last_updated %} 39 | {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} 40 | {%- endif %} 41 | 42 |

43 |
44 | 45 | {%- if show_sphinx %} 46 | {% trans %}Built with Sphinx using a theme provided by Read the Docs{% endtrans %}. 47 | {%- endif %} 48 | 49 | {%- block extrafooter %} {% endblock %} 50 | 51 |
52 | 53 | -------------------------------------------------------------------------------- /pcs_ros/src/image_extractor_node.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | namespace pcs_ros 7 | { 8 | /** 9 | * @brief This class is used for extracting an image from a point cloud for data collection 10 | * 11 | * This is similar in functionality to 12 | * but this uses the util function in pcs_ros that also extract the xyz position 13 | */ 14 | class ImageExtractor 15 | { 16 | public: 17 | /** 18 | * @brief Point cloud callback that extracts image and publishes it 19 | * @param cloud PointCloud from which the RGB image is extracted 20 | */ 21 | void callback(const pcl::PointCloud::ConstPtr& cloud) 22 | { 23 | // Extract the image 24 | cv::Mat position_image; 25 | pcs_detection::cloudToImage(cloud, position_image, image_.image); 26 | 27 | // Convert to ROS type and copy header 28 | image_.encoding = sensor_msgs::image_encodings::TYPE_8UC3; 29 | image_.header.frame_id = cloud->header.frame_id; 30 | image_.header.seq = cloud->header.seq; 31 | pcl_conversions::fromPCL(cloud->header.stamp, image_.header.stamp); 32 | image_pub_.publish(image_.toImageMsg()); 33 | } 34 | 35 | ImageExtractor() : cloud_topic_("input"), image_topic_("output") 36 | { 37 | // Create publishers and subscribers 38 | sub_ = nh_.subscribe(cloud_topic_, 30, &ImageExtractor::callback, this); 39 | image_pub_ = nh_.advertise(image_topic_, 30); 40 | 41 | // Print the topics we are using 42 | std::string r_ct = nh_.resolveName(cloud_topic_); 43 | std::string r_it = nh_.resolveName(image_topic_); 44 | ROS_INFO_STREAM("Subscribing to point cloud on: " << r_ct); 45 | ROS_INFO_STREAM("Publishing image on: " << r_it); 46 | } 47 | 48 | private: 49 | ros::NodeHandle nh_; 50 | cv_bridge::CvImage image_; 51 | std::string cloud_topic_; 52 | std::string image_topic_; 53 | ros::Subscriber sub_; 54 | ros::Publisher image_pub_; 55 | }; 56 | } // namespace pcs_ros 57 | int main(int argc, char** argv) 58 | { 59 | ros::init(argc, argv, "image_extractor_node"); 60 | pcs_ros::ImageExtractor pci; 61 | ros::spin(); 62 | return 0; 63 | } 64 | -------------------------------------------------------------------------------- /gh_pages/_templates/footer.html: -------------------------------------------------------------------------------- 1 |
2 | {% if (theme_prev_next_buttons_location == 'bottom' or theme_prev_next_buttons_location == 'both') and (next or prev) %} 3 | 11 | {% endif %} 12 | 13 |
14 | 15 | 20 | 21 |
22 |

23 | {%- if show_copyright %} 24 | {%- if hasdoc('copyright') %} 25 | {% trans path=pathto('copyright'), copyright=copyright|e %}© Copyright {{ copyright }}{% endtrans %} 26 | {%- else %} 27 | {% trans copyright=copyright|e %}© Copyright {{ copyright }}{% endtrans %} 28 | {%- endif %} 29 | {%- endif %} 30 | 31 | {%- if build_id and build_url %} 32 | {% trans build_url=build_url, build_id=build_id %} 33 | 34 | Build 35 | {{ build_id }}. 36 | 37 | {% endtrans %} 38 | {%- elif commit %} 39 | {% trans commit=commit %} 40 | 41 | Revision {{ commit }}. 42 | 43 | {% endtrans %} 44 | {%- elif last_updated %} 45 | {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} 46 | {%- endif %} 47 | 48 |

49 |
50 | 51 | {%- if show_sphinx %} 52 | {% trans %}Built with Sphinx using a theme provided by Read the Docs{% endtrans %}. 53 | {%- endif %} 54 | 55 | {%- block extrafooter %} {% endblock %} 56 | 57 |
58 | -------------------------------------------------------------------------------- /pcs_detection/scripts/training_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "MODE": "TRAIN", 3 | "WEIGHT_ID": "unique_id", 4 | "WEIGHT_DIR": "data/weights", 5 | 6 | "MODEL": "fcn8", 7 | 8 | "CHANNEL": "GREY", 9 | 10 | "CLASS_NAMES": ["weld"], 11 | 12 | "CONFIDENCE_THRESHOLD": 0.5, 13 | 14 | "LEARNING_RATE": { 15 | "reduce_patience": 1, 16 | "start": 0.008, 17 | "end": 0.0001, 18 | "reduce_factor": 0.6 19 | }, 20 | 21 | "N_EPOCHS": 100, 22 | "BATCH_SIZE": 8, 23 | 24 | "AUGMENTATIONS": { 25 | "number_aug": 10, 26 | "horizontal_flip": true, 27 | "brightness_range": null, 28 | "rotation_range": 45, 29 | "vertical_flip": true, 30 | "zoom_range": 0.3 31 | }, 32 | "MIN_PIXELS_IN_IMG": 100, 33 | 34 | "LABEL_THICKNESS": 5, 35 | 36 | "BACKGROUND_REDUCTION": 2, 37 | 38 | "IMG_DIMS": [ 39 | 256, 40 | 256 41 | ], 42 | "ORIG_DIMS": [ 43 | 480, 44 | 640 45 | ], 46 | 47 | "PRE_PROCESS": { 48 | "rgb": [ 49 | 182.11864, 50 | 155.26283, 51 | 107.5 52 | ], 53 | "edge": [ 54 | 0, 55 | 3 56 | ], 57 | "grey": [ 58 | 117 59 | ], 60 | "ycr": [ 61 | 123, 62 | 126, 63 | 120 64 | ], 65 | "lab": [ 66 | 50, 67 | 0.1, 68 | 1.5 69 | ] 70 | }, 71 | 72 | "USE_FULL_IMAGE": true, 73 | "DISPLAY_SCALE_FACTOR": 1.25, 74 | 75 | "TRAINING_DIRS": [ 76 | { 77 | "dir_path": "data/example_dataset_1/training", 78 | "num_imgs": "all", 79 | "class": "weld" 80 | }, 81 | { 82 | "dir_path": "data/example_dataset_2/training", 83 | "num_imgs": "all", 84 | "class": "weld" 85 | } 86 | ], 87 | "VALIDATION_DIRS": [ 88 | { 89 | "dir_path": "data/example_dataset_1/validation", 90 | "num_imgs": "all", 91 | "class": "weld" 92 | }, 93 | { 94 | "dir_path": "data/example_dataset_2/validation", 95 | "num_imgs": "all", 96 | "class": "weld" 97 | } 98 | ] 99 | } 100 | -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/using_negatives_fcn8_GREY_19_10_10_101037/full_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "AUGMENTATIONS": { 3 | "number_aug": 100, 4 | "horizontal_flip": true, 5 | "brightness_range": null, 6 | "rotation_range": 45, 7 | "vertical_flip": true, 8 | "zoom_range": 0.2 9 | }, 10 | "LABEL_THICKNESS": 5, 11 | "MODE": "VALIDATE", 12 | "BACKGROUND_REDUCTION": 30, 13 | "WEIGHT_ID": "using_negatives", 14 | "PRE_PROCESS": { 15 | "edge": [ 16 | 0, 17 | 3 18 | ], 19 | "grey": [ 20 | 117 21 | ], 22 | "rgb": [ 23 | 182.11864, 24 | 155.26283, 25 | 107.5 26 | ], 27 | "ycr": [ 28 | 123, 29 | 126, 30 | 120 31 | ], 32 | "lab": [ 33 | 50, 34 | 0.1, 35 | 1.5 36 | ] 37 | }, 38 | "MIN_PIXELS_IN_IMG": 100, 39 | "LEARNING_RATE": { 40 | "reduce_patience": 1, 41 | "start": 0.008, 42 | "end": 0.0001, 43 | "reduce_factor": 0.6 44 | }, 45 | "N_EPOCHS": 100, 46 | "IMG_DIMS": [ 47 | 256, 48 | 256 49 | ], 50 | "USE_FULL_IMAGE": false, 51 | "VAL_WEIGHT_PATH": "/mnt/project_share/Weld_Detection/weld_weights/using_negatives/fcn8_GREY_19_10_10_101037/38.h5", 52 | "TRAINING_DIRS": [ 53 | { 54 | "dir_path": "data/example_dataset_1/training", 55 | "num_imgs": "all", 56 | "label": true 57 | }, 58 | { 59 | "dir_path": "data/example_dataset_2/training", 60 | "num_imgs": "all", 61 | "label": true 62 | } 63 | ], 64 | "VALIDATION_DIRS": [ 65 | { 66 | "dir_path": "data/example_dataset_1/validation", 67 | "num_imgs": "all", 68 | "label": true 69 | }, 70 | { 71 | "dir_path": "data/example_dataset_2/validation", 72 | 
"num_imgs": "all", 73 | "label": true 74 | } 75 | ], 76 | "MODEL": "fcn8", 77 | "CLASS_NAMES": [ 78 | "weld" 79 | ], 80 | "ORIG_DIMS": [ 81 | 480, 82 | 640 83 | ], 84 | "DISPLAY_SCALE_FACTOR": 1.25, 85 | "BATCH_SIZE": 8, 86 | "SAVE_MODEL": true, 87 | "CHANNEL": "GREY", 88 | "CONFIDENCE_THRESHOLD": 0.5 89 | } -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/fcn8_COMBINED_19_10_10_173416/full_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "LABEL_THICKNESS": 5, 3 | "WEIGHT_SAVE_PATH": "/mnt/project_share/Weld_Detection/weld_weights/using_negatives_reduced/fcn8_COMBINED_19_10_10_173416/{epoch:02d}.h5", 4 | "MODEL": "fcn8", 5 | "MIN_PIXELS_IN_IMG": 100, 6 | "BACKGROUND_REDUCTION": 30, 7 | "USE_FULL_IMAGE": false, 8 | "CHANNEL": "COMBINED", 9 | "BATCH_SIZE": 8, 10 | "IMG_DIMS": [ 11 | 256, 12 | 256 13 | ], 14 | "DISPLAY_SCALE_FACTOR": 1.25, 15 | "AUGMENTATIONS": { 16 | "number_aug": 100, 17 | "horizontal_flip": true, 18 | "brightness_range": null, 19 | "vertical_flip": true, 20 | "zoom_range": 0.2, 21 | "rotation_range": 45 22 | }, 23 | "WEIGHT_ID": "using_negatives_reduced", 24 | "VAL_WEIGHT_PATH": "/fcn8_COMBINED_19_10_10_173426/08.h5", 25 | "PRE_PROCESS": { 26 | "edge": [ 27 | 0, 28 | 3 29 | ], 30 | "grey": [ 31 | 117 32 | ], 33 | "rgb": [ 34 | 182.11864, 35 | 155.26283, 36 | 107.5 37 | ], 38 | "ycr": [ 39 | 123, 40 | 126, 41 | 120 42 | ], 43 | "lab": [ 44 | 50, 45 | 0.1, 46 | 1.5 47 | ] 48 | }, 49 | "N_EPOCHS": 100, 50 | "LEARNING_RATE": { 51 | "reduce_factor": 0.6, 52 | "reduce_patience": 1, 53 | "end": 0.0001, 54 | "start": 0.008 55 | }, 56 | "SAVE_MODEL": true, 57 | "TRAINING_DIRS": [ 58 | { 59 | "dir_path": "data/example_dataset_1/training", 60 | "num_imgs": "all", 61 | "label": true 62 | }, 63 | { 64 | "dir_path": "data/example_dataset_2/training", 65 | "num_imgs": "all", 66 | "label": true 67 | } 68 | ], 69 | "VALIDATION_DIRS": [ 70 | { 71 | "dir_path": "data/example_dataset_1/validation", 72 | "num_imgs": "all", 73 | "label": true 74 | }, 75 | { 76 | "dir_path": "data/example_dataset_2/validation", 77 | "num_imgs": "all", 78 | "label": true 79 | } 80 | ], 81 | "MODE": "VALIDATE", 82 | "CLASS_NAMES": [ 83 | "weld" 84 | ], 85 | "ORIG_DIMS": [ 86 | 480, 87 | 640 88 | ] 89 | } -------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/fcn8_STACKED_19_10_10_173601/full_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "WEIGHT_SAVE_PATH": "/mnt/project_share/Weld_Detection/weld_weights/using_negatives_reduced/fcn8_STACKED_19_10_10_173601/{epoch:02d}.h5", 3 | "VAL_WEIGHT_PATH": "/fcn8_COMBINED_19_10_10_173426/08.h5", 4 | "WEIGHT_ID": "using_negatives_reduced", 5 | "BACKGROUND_REDUCTION": 30, 6 | "AUGMENTATIONS": { 7 | "number_aug": 100, 8 | "horizontal_flip": true, 9 | "brightness_range": null, 10 | "rotation_range": 45, 11 | "vertical_flip": true, 12 | "zoom_range": 0.2 13 | }, 14 | "TRAINING_DIRS": [ 15 | { 16 | "dir_path": "data/example_dataset_1/training", 17 | "num_imgs": "all", 18 | "label": true 19 | }, 20 | { 21 | "dir_path": "data/example_dataset_2/training", 22 | "num_imgs": "all", 23 | "label": true 24 | } 25 | ], 26 | "BATCH_SIZE": 8, 27 | "DISPLAY_SCALE_FACTOR": 1.25, 28 | "LEARNING_RATE": { 29 | "reduce_patience": 1, 30 | "reduce_factor": 0.6, 31 | "start": 0.008, 32 | "end": 0.0001 33 | }, 34 | "MODEL": "fcn8", 35 | "VALIDATION_DIRS": [ 36 | { 
-------------------------------------------------------------------------------- /pcs_detection/scripts/data/weights/fcn8_STACKED_19_10_10_173601/full_config.json: --------------------------------------------------------------------------------
1 | {
2 | "WEIGHT_SAVE_PATH": "/mnt/project_share/Weld_Detection/weld_weights/using_negatives_reduced/fcn8_STACKED_19_10_10_173601/{epoch:02d}.h5",
3 | "VAL_WEIGHT_PATH": "/fcn8_COMBINED_19_10_10_173426/08.h5",
4 | "WEIGHT_ID": "using_negatives_reduced",
5 | "BACKGROUND_REDUCTION": 30,
6 | "AUGMENTATIONS": {
7 | "number_aug": 100,
8 | "horizontal_flip": true,
9 | "brightness_range": null,
10 | "rotation_range": 45,
11 | "vertical_flip": true,
12 | "zoom_range": 0.2
13 | },
14 | "TRAINING_DIRS": [
15 | {
16 | "dir_path": "data/example_dataset_1/training",
17 | "num_imgs": "all",
18 | "label": true
19 | },
20 | {
21 | "dir_path": "data/example_dataset_2/training",
22 | "num_imgs": "all",
23 | "label": true
24 | }
25 | ],
26 | "BATCH_SIZE": 8,
27 | "DISPLAY_SCALE_FACTOR": 1.25,
28 | "LEARNING_RATE": {
29 | "reduce_patience": 1,
30 | "reduce_factor": 0.6,
31 | "start": 0.008,
32 | "end": 0.0001
33 | },
34 | "MODEL": "fcn8",
35 | "VALIDATION_DIRS": [
36 | {
37 | "dir_path": "data/example_dataset_1/validation",
38 | "num_imgs": "all",
39 | "label": true
40 | },
41 | {
42 | "dir_path": "data/example_dataset_2/validation",
43 | "num_imgs": "all",
44 | "label": true
45 | }
46 | ],
47 | "MIN_PIXELS_IN_IMG": 100,
48 | "MODE": "VALIDATE",
49 | "IMG_DIMS": [
50 | 256,
51 | 256
52 | ],
53 | "PRE_PROCESS": {
54 | "edge": [
55 | 0,
56 | 3
57 | ],
58 | "grey": [
59 | 117
60 | ],
61 | "rgb": [
62 | 182.11864,
63 | 155.26283,
64 | 107.5
65 | ],
66 | "ycr": [
67 | 123,
68 | 126,
69 | 120
70 | ],
71 | "lab": [
72 | 50,
73 | 0.1,
74 | 1.5
75 | ]
76 | },
77 | "CHANNEL": "STACKED",
78 | "SAVE_MODEL": true,
79 | "USE_FULL_IMAGE": false,
80 | "N_EPOCHS": 100,
81 | "LABEL_THICKNESS": 5,
82 | "CLASS_NAMES": [
83 | "weld"
84 | ],
85 | "ORIG_DIMS": [
86 | 480,
87 | 640
88 | ]
89 | }
-------------------------------------------------------------------------------- /pcs_detection/src/pcs_detection/point_cloud_annotator.cpp: --------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | 
5 | using namespace pcs_detection;
6 | 
7 | bool PointCloudAnnotator::addPointCloud(pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr input_cloud)
8 | {
9 | // Preprocess Images
10 | auto position_image = std::make_shared<cv::Mat>();
11 | auto image_2d = std::make_shared<cv::Mat>();
12 | cloudToImage(input_cloud, *position_image, *image_2d);
13 | PointCloudData data(input_cloud, position_image, image_2d);
14 | 
15 | // Add Data to buffer
16 | buffer_mutex_.lock();
17 | input_buffer_.push(data);
18 | buffer_mutex_.unlock();
19 | 
20 | // Check buffer size - This could be done in another thread
21 | if (input_buffer_.size() >= batch_size_)
22 | {
23 | if (!annotateImages())
24 | {
25 | CONSOLE_BRIDGE_logError("Annotate Images failed");
26 | return false;
27 | }
28 | }
29 | return true;
30 | }
31 | 
32 | bool PointCloudAnnotator::annotateImages()
33 | {
34 | assert(input_buffer_.size() >= batch_size_);
35 | 
36 | // Pull a copy of each queued image out of the buffer and place it in a vector
37 | buffer_mutex_.lock();
38 | std::vector<cv::Mat> vec(batch_size_);
39 | std::queue<PointCloudData> queue_copy = input_buffer_;  // walk a copy so the buffer itself is left intact
40 | for (std::size_t idx = 0; idx < batch_size_; idx++)
41 | {
42 | vec[idx] = *queue_copy.front().image_2d_;
43 | queue_copy.pop();
44 | }
45 | buffer_mutex_.unlock();
46 | 
47 | // Send that data to the annotator (blocking and potentially long-running)
48 | std::vector<cv::Mat> image_annotations;
49 | try
50 | {
51 | image_annotations = image_annotator_callback_(vec);
52 | }
53 | catch (...)
54 | {
55 | CONSOLE_BRIDGE_logError("Image Annotator Callback Exception");
56 | buffer_mutex_.lock();
57 | input_buffer_.pop();
58 | buffer_mutex_.unlock();
59 | return false;
60 | }
61 | 
62 | // Apply annotations
63 | buffer_mutex_.lock();
64 | pointCloudVec results(batch_size_);
65 | for (std::size_t idx = 0; idx < batch_size_; idx++)
66 | {
67 | results[idx] = pcs_detection::imageToCloud(
68 | image_annotations[idx], *input_buffer_.front().position_image_, input_buffer_.front().cloud_->header);
69 | input_buffer_.pop();
70 | }
71 | buffer_mutex_.unlock();
72 | 
73 | // Send the results to the results callback
74 | try
75 | {
76 | results_callback_(results);
77 | }
78 | catch (...)
79 | {
80 | CONSOLE_BRIDGE_logError("Results Callback Exception");
81 | return false;
82 | }
83 | return true;
84 | }
85 | 
-------------------------------------------------------------------------------- /pcs_detection/include/pcs_detection/hsv_thresholding.h: --------------------------------------------------------------------------------
1 | /**
2 | * @file hsv_thresholding.h
3 | * @brief Detects features where the hsv values fall within a threshold
4 | *
5 | * @author Matthew Powelson
6 | * @date Sept 18, 2019
7 | * @version TODO
8 | * @bug No known bugs
9 | *
10 | * @copyright Copyright (c) 2017, Southwest Research Institute
11 | *
12 | * @par License
13 | * Software License Agreement (Apache License)
14 | * @par
15 | * Licensed under the Apache License, Version 2.0 (the "License");
16 | * you may not use this file except in compliance with the License.
17 | * You may obtain a copy of the License at
18 | * http://www.apache.org/licenses/LICENSE-2.0
19 | * @par
20 | * Unless required by applicable law or agreed to in writing, software
21 | * distributed under the License is distributed on an "AS IS" BASIS,
22 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
23 | * See the License for the specific language governing permissions and
24 | * limitations under the License.
25 | */
26 | 
27 | #ifndef PCS_DETECTION_HSV_THRESHOLDING_H
28 | #define PCS_DETECTION_HSV_THRESHOLDING_H
29 | #include
30 | #include
31 | 
32 | namespace pcs_detection
33 | {
34 | /**
35 | * @brief Detects a color using color thresholding and returns an annotation: 255 where the color is detected, 0 elsewhere by default
36 | *
37 | * This is mostly copied from https://docs.opencv.org/3.4/da/d97/tutorial_threshold_inRange.html
38 | *
39 | * Use the Python script hsv_threshold_tuning.py to find suitable thresholds
40 | * @param input_image
41 | * @param mask Annotation the same size as the input
42 | * @param inverted Default = false. If true, 0's are returned where the color is detected
43 | * @return
44 | */
45 | inline bool hsvThresholdingDetector(const cv::Mat& input_image, cv::Mat& mask, bool inverted = false)
46 | {
47 | const int hue_lower = 95;
48 | const int hue_upper = 115;
49 | const int saturation_lower = 95;
50 | const int saturation_upper = 220;
51 | const int value_lower = 40;
52 | const int value_upper = 150;
53 | 
54 | cv::Mat hsv_image;
55 | cv::cvtColor(input_image, hsv_image, CV_BGR2HSV);
56 | inRange(hsv_image,
57 | cv::Scalar(hue_lower, saturation_lower, value_lower),
58 | cv::Scalar(hue_upper, saturation_upper, value_upper),
59 | mask);
60 | // Invert if the flag is set
61 | if (inverted)
62 | mask = (255 - mask);
63 | 
64 | // Uncomment to return a completely 255 mask.
65 | // cv::Mat output(480, 640, CV_8UC1, cv::Scalar(255));
66 | // mask = output;
67 | return true;
68 | }
69 | } // namespace pcs_detection
70 | #endif
71 | 
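Because the six threshold constants above are compiled in, it is easiest to dial them in interactively before editing the header (the doc comment points at hsv_threshold_tuning.py for exactly this). For reference, a rough Python equivalent of the same inRange call, handy for quick experiments; the image path is a placeholder:

import cv2 as cv

# Same bounds as hsvThresholdingDetector above: (hue, saturation, value)
lower = (95, 95, 40)
upper = (115, 220, 150)

input_image = cv.imread('example.png')  # placeholder path
hsv = cv.cvtColor(input_image, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, lower, upper)    # 255 where detected, 0 elsewhere
mask_inverted = 255 - mask              # matches the inverted=true branch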
-------------------------------------------------------------------------------- /pcs_detection/test/utils_unit.cpp: --------------------------------------------------------------------------------
1 | #include
2 | TESSERACT_COMMON_IGNORE_WARNINGS_PUSH
3 | #include
4 | #include
5 | TESSERACT_COMMON_IGNORE_WARNINGS_POP
6 | 
7 | #include
8 | 
9 | using namespace pcs_detection;
10 | 
11 | class UtilsUnit : public ::testing::Test
12 | {
13 | protected:
14 | pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud;
15 | 
16 | void SetUp() override
17 | {
18 | CONSOLE_BRIDGE_logInform("Setting up UtilsUnit");
19 | cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>());
20 | cloud->width = 255;
21 | cloud->height = 1;
22 | cloud->is_dense = false;
23 | cloud->points.resize(cloud->width * cloud->height);
24 | 
25 | // Create a point cloud which consists of points in a row increasing in color value from 0 to 255
26 | for (size_t i = 0; i < cloud->points.size(); ++i)
27 | {
28 | cloud->points[i].x = static_cast<float>(i) / 100.f;
29 | cloud->points[i].y = 0.0;
30 | cloud->points[i].z = 0.0;
31 | cloud->points[i].r = static_cast<uint8_t>(i);
32 | cloud->points[i].g = static_cast<uint8_t>(i);
33 | cloud->points[i].b = static_cast<uint8_t>(i);
34 | }
35 | }
36 | };
37 | 
38 | TEST_F(UtilsUnit, CloudImageManipulation)
39 | {
40 | CONSOLE_BRIDGE_logDebug("UtilsUnit, CloudImageManipulation");
41 | 
42 | auto position_image = std::make_shared<cv::Mat>();
43 | auto image_2d = std::make_shared<cv::Mat>();
44 | cloudToImage(cloud, *position_image, *image_2d);
45 | EXPECT_EQ(position_image->rows, image_2d->rows);
46 | EXPECT_EQ(position_image->cols, image_2d->cols);
47 | 
48 | pcl::PointCloud<pcl::PointXYZRGB>::Ptr output_cloud = imageToCloud(*image_2d, *position_image, cloud->header);
49 | 
50 | // Check that the results are the same as the input when the output of cloudToImage is passed into imageToCloud
51 | EXPECT_EQ(cloud->points.size(), output_cloud->points.size());
52 | EXPECT_EQ(cloud->width, output_cloud->width);
53 | EXPECT_EQ(cloud->height, output_cloud->height);
54 | EXPECT_EQ(cloud->is_dense, output_cloud->is_dense);
55 | for (std::size_t i = 0; i < cloud->points.size(); i++)
56 | {
57 | EXPECT_EQ(cloud->points[i].x, output_cloud->points[i].x);
58 | EXPECT_EQ(cloud->points[i].y, output_cloud->points[i].y);
59 | EXPECT_EQ(cloud->points[i].z, output_cloud->points[i].z);
60 | EXPECT_EQ(cloud->points[i].r, output_cloud->points[i].r);
61 | EXPECT_EQ(cloud->points[i].g, output_cloud->points[i].g);
62 | EXPECT_EQ(cloud->points[i].b, output_cloud->points[i].b);
63 | }
64 | }
65 | 
66 | int main(int argc, char** argv)
67 | {
68 | testing::InitGoogleTest(&argc, argv);
69 | 
70 | return RUN_ALL_TESTS();
71 | }
72 | 
-------------------------------------------------------------------------------- /pcs_detection/test/CMakeLists.txt: --------------------------------------------------------------------------------
1 | find_package(GTest REQUIRED)
2 | find_package(tesseract_common REQUIRED)
3 | 
4 | add_executable(${PROJECT_NAME}_point_cloud_annotator_unit point_cloud_annotator_unit.cpp)
5 | target_link_libraries(${PROJECT_NAME}_point_cloud_annotator_unit tesseract::tesseract_common ${GTEST_BOTH_LIBRARIES} ${PROJECT_NAME}_point_cloud_annotator)
6 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE -Wsuggest-override -Wconversion -Wsign-conversion)
7 | if(CXX_FEATURE_FOUND EQUAL "-1")
8 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE -std=c++11)
9 | else()
10 | target_compile_features(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE cxx_std_11)
11 | endif()
12 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
13 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE -mno-avx)
14 | else()
15 | message(WARNING "Non-GNU compiler detected. If using AVX instructions, Eigen alignment issues may result.")
16 | endif()
17 | target_include_directories(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE ${GTEST_INCLUDE_DIRS})
18 | if(${CMAKE_VERSION} VERSION_LESS "3.10.0")
19 | gtest_add_tests(${PROJECT_NAME}_point_cloud_annotator_unit "" AUTO)
20 | else()
21 | gtest_discover_tests(${PROJECT_NAME}_point_cloud_annotator_unit)
22 | endif()
23 | add_dependencies(${PROJECT_NAME}_point_cloud_annotator_unit ${PACKAGE_LIBRARIES})
24 | target_compile_definitions(${PROJECT_NAME}_point_cloud_annotator_unit PRIVATE DATA_DIR="${CMAKE_SOURCE_DIR}/test")
25 | add_dependencies(run_tests ${PROJECT_NAME}_point_cloud_annotator_unit)
26 | 
27 | add_executable(${PROJECT_NAME}_utils_unit utils_unit.cpp)
28 | target_link_libraries(${PROJECT_NAME}_utils_unit tesseract::tesseract_common ${GTEST_BOTH_LIBRARIES} ${PROJECT_NAME}_point_cloud_annotator)
29 | target_compile_options(${PROJECT_NAME}_utils_unit PRIVATE -Wsuggest-override -Wconversion -Wsign-conversion)
30 | if(CXX_FEATURE_FOUND EQUAL "-1")
31 | target_compile_options(${PROJECT_NAME}_utils_unit PRIVATE -std=c++11)
32 | else()
33 | target_compile_features(${PROJECT_NAME}_utils_unit PRIVATE cxx_std_11)
34 | endif()
35 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
36 | target_compile_options(${PROJECT_NAME}_utils_unit PRIVATE -mno-avx)
37 | else()
38 | message(WARNING "Non-GNU compiler detected. If using AVX instructions, Eigen alignment issues may result.")
39 | endif()
40 | target_include_directories(${PROJECT_NAME}_utils_unit PRIVATE ${GTEST_INCLUDE_DIRS})
41 | if(${CMAKE_VERSION} VERSION_LESS "3.10.0")
42 | gtest_add_tests(${PROJECT_NAME}_utils_unit "" AUTO)
43 | else()
44 | gtest_discover_tests(${PROJECT_NAME}_utils_unit)
45 | endif()
46 | add_dependencies(${PROJECT_NAME}_utils_unit ${PACKAGE_LIBRARIES})
47 | target_compile_definitions(${PROJECT_NAME}_utils_unit PRIVATE DATA_DIR="${CMAKE_SOURCE_DIR}/test")
48 | add_dependencies(run_tests ${PROJECT_NAME}_utils_unit)
49 | 
-------------------------------------------------------------------------------- /pcs_detection/scripts/fcn8_inference.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | '''
3 | * @file fcn8_inference.py
4 | * @brief Used for training neural nets, validating the nets by viewing the predictions, viewing the images passed into the network after preprocessing, and deployment
5 | *
6 | * @author Matthew Powelson
7 | * @date November 6, 2019
8 | * @version TODO
9 | * @bug No known bugs
10 | *
11 | * @copyright Copyright (c) 2017, Southwest Research Institute
12 | *
13 | * @par License
14 | * Software License Agreement (Apache License)
15 | * @par
16 | * Licensed under the Apache License, Version 2.0 (the "License");
17 | * you may not use this file except in compliance with the License.
18 | * You may obtain a copy of the License at
19 | * http://www.apache.org/licenses/LICENSE-2.0
20 | * @par
21 | * Unless required by applicable law or agreed to in writing, software
22 | * distributed under the License is distributed on an "AS IS" BASIS,
23 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 | * See the License for the specific language governing permissions and
25 | * limitations under the License.
26 | '''
27 | 
28 | # For data loading and visualization
29 | import numpy as np
30 | import cv2 as cv
31 | 
32 | # For importing the config
33 | import json
34 | import os
35 | import sys
36 | 
37 | # Inference module
38 | from pcs_detection.inference import Inference
39 | 
40 | # Hack because code was originally written to have configs as Python modules
41 | class Config:
42 | def __init__(self, **entries):
43 | self.__dict__.update(entries)
44 | 
45 | if __name__ == '__main__':
46 | if len(sys.argv) > 1 and sys.argv[1] == "NO_GPU":
47 | print("Disabling GPU")
48 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
49 | elif len(sys.argv) > 1:
50 | print("Invalid argument. Pass argument 'NO_GPU' to run in CPU mode")
51 | 
52 | # Import Config json file and convert into format we need
53 | dir_path = os.path.dirname(os.path.realpath(__file__))
54 | 
55 | with open(dir_path + '/data/weights/using_negatives_fcn8_GREY_19_10_10_101037/inference_config.json') as json_data_file:
56 | data = json.load(json_data_file)
57 | config = Config(**data)
58 | 
59 | # Construct the annotator
60 | annotator = Inference(config)
61 | 
62 | # Load the image
63 | input_image = cv.imread(dir_path + '/data/example_dataset_1/validation/0003.png')
64 | 
65 | # Generate the annotation and convert to 3 channel image
66 | res = annotator.make_prediction(input_image[:,:,0])
67 | results_image = cv.merge((res, res, res)) * 255
68 | 
69 | # Show the results
70 | print("Input image of size: " + str(input_image.shape))
71 | print("Results image of size: " + str(results_image.shape))
72 | print("Press ESC to exit")
73 | while True:
74 | cv.imshow("image", np.hstack((input_image, results_image)))
75 | k = cv.waitKey(1) & 0xFF
76 | if k == 27:
77 | cv.destroyWindow("image")
78 | break
79 | 
-------------------------------------------------------------------------------- /pcs_detection/scripts/masking_inference.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | '''
3 | * @file masking_inference.py
4 | * @brief Used for training neural nets, validating the nets by viewing the predictions, viewing the images passed into the network after preprocessing, and deployment
5 | *
6 | * @author Matthew Powelson
7 | * @date November 6, 2019
8 | * @version TODO
9 | * @bug No known bugs
10 | *
11 | * @copyright Copyright (c) 2017, Southwest Research Institute
12 | *
13 | * @par License
14 | * Software License Agreement (Apache License)
15 | * @par
16 | * Licensed under the Apache License, Version 2.0 (the "License");
17 | * you may not use this file except in compliance with the License.
18 | * You may obtain a copy of the License at
19 | * http://www.apache.org/licenses/LICENSE-2.0
20 | * @par
21 | * Unless required by applicable law or agreed to in writing, software
22 | * distributed under the License is distributed on an "AS IS" BASIS,
23 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 | * See the License for the specific language governing permissions and
25 | * limitations under the License.
26 | ''' 27 | 28 | # For data loading and visualization 29 | import numpy as np 30 | import cv2 as cv 31 | 32 | # For importing the config 33 | import json 34 | import os 35 | 36 | # Inference module 37 | from pcs_detection.inference import Inference 38 | 39 | # Hack because code was originally written to have configs as Python modules 40 | class Config: 41 | def __init__(self, **entries): 42 | self.__dict__.update(entries) 43 | 44 | if __name__ == '__main__': 45 | # Import Config json file and convert into format we need 46 | dir_path = os.path.dirname(os.path.realpath(__file__)) 47 | 48 | with open(dir_path + '/data/weights//inference_config.json') as json_data_file: 49 | data = json.load(json_data_file) 50 | config = Config(**data) 51 | 52 | # Construct the annotator 53 | annotator = Inference(config) 54 | 55 | # Load the image 56 | input_image = cv.imread('') 57 | # Generate the annotation and convert to 3 channel image 58 | res = annotator.make_prediction(input_image) 59 | 60 | display_image = np.zeros((res.shape[0], res.shape[1], 3)) 61 | colors = [(102, 204, 255), (255, 153, 102), (0,153,255)] 62 | for jj, color in enumerate(colors): 63 | temp_chnl_img = np.zeros((res.shape[0],res.shape[1], 2)) 64 | temp_chnl_img[:,:,1][res==jj+1] = 1 65 | display_image[temp_chnl_img[:,:,1]==1] = color 66 | display_image = display_image.astype(np.uint8) 67 | 68 | # Show the results 69 | print("Input image of size: " + str(input_image.shape)) 70 | print("Results image of size: " + str(display_image.shape)) 71 | print("Press ESC to exit") 72 | while True: 73 | cv.imshow("image", np.hstack((input_image, display_image))) 74 | k = cv.waitKey(1) & 0xFF 75 | if k == 27: 76 | cv.destroyWindow("image") 77 | break 78 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions 
.rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 2 | -------------------------------------------------------------------------------- /pcs_scan_integration/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.5.0) 2 | project(pcs_scan_integration VERSION 0.1.0 LANGUAGES CXX) 3 | 4 | find_package(console_bridge REQUIRED) 5 | find_package(PCL REQUIRED) 6 | find_package(tesseract_collision REQUIRED) 7 | 8 | list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_11 CXX_FEATURE_FOUND) 9 | 10 | ########### 11 | ## Build ## 12 | ########### 13 | 14 | add_library(${PROJECT_NAME}_octomap_mesh_masking src/${PROJECT_NAME}/octomap_mesh_masking.cpp) 15 | target_link_libraries(${PROJECT_NAME}_octomap_mesh_masking tesseract::tesseract_collision_bullet console_bridge ${PCL_LIBRARIES}) 16 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking PUBLIC -Wsuggest-override -Wconversion -Wsign-conversion) 17 | if(CXX_FEATURE_FOUND EQUAL "-1") 18 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking PUBLIC -std=c++11) 19 | else() 20 | target_compile_features(${PROJECT_NAME}_octomap_mesh_masking PRIVATE cxx_std_11) 21 | endif() 22 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 23 | target_compile_options(${PROJECT_NAME}_octomap_mesh_masking PUBLIC -mno-avx) 24 | else() 25 | message(WARNING "Non-GNU compiler detected. 
If using AVX instructions, Eigen alignment issues may result.") 26 | endif() 27 | target_include_directories(${PROJECT_NAME}_octomap_mesh_masking PUBLIC 28 | "$" 29 | "$") 30 | target_include_directories(${PROJECT_NAME}_octomap_mesh_masking SYSTEM PUBLIC 31 | ${PCL_INCLUDE_DIRS}) 32 | 33 | list (APPEND PACKAGE_LIBRARIES ${PROJECT_NAME}_octomap_mesh_masking) 34 | 35 | ############# 36 | ## Install ## 37 | ############# 38 | # Mark executables and/or libraries for installation 39 | install(TARGETS ${PACKAGE_LIBRARIES} 40 | EXPORT ${PROJECT_NAME}-targets DESTINATION lib) 41 | install(EXPORT ${PROJECT_NAME}-targets 42 | NAMESPACE pcs:: DESTINATION lib/cmake/${PROJECT_NAME}) 43 | 44 | # Mark cpp header files for installation 45 | install(DIRECTORY include/${PROJECT_NAME} 46 | DESTINATION include 47 | FILES_MATCHING 48 | PATTERN "*.h" 49 | PATTERN "*.hpp" 50 | PATTERN ".svn" EXCLUDE 51 | ) 52 | 53 | install(FILES package.xml DESTINATION share/${PROJECT_NAME}) 54 | 55 | # Create cmake config files 56 | include(CMakePackageConfigHelpers) 57 | configure_package_config_file( 58 | ${CMAKE_CURRENT_LIST_DIR}/cmake/${PROJECT_NAME}-config.cmake.in 59 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake 60 | INSTALL_DESTINATION lib/cmake/${PROJECT_NAME} 61 | NO_CHECK_REQUIRED_COMPONENTS_MACRO) 62 | 63 | write_basic_package_version_file( 64 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake 65 | VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion) 66 | 67 | install(FILES 68 | "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" 69 | "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake" 70 | DESTINATION lib/cmake/${PROJECT_NAME}) 71 | 72 | export(EXPORT ${PROJECT_NAME}-targets 73 | FILE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-targets.cmake) 74 | 75 | if (ENABLE_TESTS) 76 | enable_testing() 77 | add_custom_target(run_tests ALL 78 | WORKING_DIRECTORY ${CMAKE_BINARY_DIR} 79 | COMMAND ${CMAKE_CTEST_COMMAND} -V -O "/tmp/${PROJECT_NAME}_ctest.log" -C ${CONFIGURATION}) 80 | 81 | add_subdirectory(test) 82 | endif() 83 | 84 | -------------------------------------------------------------------------------- /pcs_detection/scripts/data/example_dataset_1/validation_labels.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 1.1 4 | 5 | 6 | 231 7 | alum_val_1 8 | 10 9 | annotation 10 | 0 11 | 12 | False 13 | 2019-10-04 00:22:26.188456+03:00 14 | 2019-10-04 00:25:09.939912+03:00 15 | 10 images: 0003.png, 0017.png, ... 
16 | 17 | 22 | 23 | 24 | 25 | 202 26 | 0 27 | 9 28 | http://129.162.108.53:8080/?id=202 29 | 30 | 31 | 32 | admin 33 | ryan.mcbee@swri.org 34 | 35 | 36 | 2019-10-04 00:25:14.905545+03:00 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /pcs_detection/test/point_cloud_annotator_unit.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | TESSERACT_COMMON_IGNORE_WARNINGS_PUSH 3 | #include 4 | #include 5 | TESSERACT_COMMON_IGNORE_WARNINGS_POP 6 | 7 | #include 8 | 9 | using namespace pcs_detection; 10 | 11 | class PointCloudAnnotatorUnit : public ::testing::Test 12 | { 13 | protected: 14 | pcl::PointCloud::Ptr cloud; 15 | 16 | void SetUp() override 17 | { 18 | CONSOLE_BRIDGE_logInform("Setting up PointCloudAnnotatorUnit"); 19 | cloud.reset(new pcl::PointCloud()); 20 | cloud->width = 255; 21 | cloud->height = 1; 22 | cloud->is_dense = false; 23 | cloud->points.resize(cloud->width * cloud->height); 24 | 25 | // Create a point cloud which consists of points in a row increasing in color value from 0 to 255 26 | for (size_t i = 0; i < cloud->points.size(); ++i) 27 | { 28 | cloud->points[i].x = static_cast(i) / 100.f; 29 | cloud->points[i].y = 0.0; 30 | cloud->points[i].z = 0.0; 31 | cloud->points[i].r = static_cast(i); 32 | cloud->points[i].g = static_cast(i); 33 | cloud->points[i].b = static_cast(i); 34 | } 35 | } 36 | }; 37 | 38 | std::vector ImageAnnotatorCallback_ReturnInputs(const std::vector input_images) 39 | { 40 | CONSOLE_BRIDGE_logDebug("I am tired of annotating. I'm just going to return the inputs.."); 41 | return input_images; 42 | } 43 | 44 | std::vector ImageAnnotatorCallback_Throw(const std::vector input_images) 45 | { 46 | CONSOLE_BRIDGE_logDebug("Throwing a test exception. This is only a drill. This is only a drill."); 47 | throw std::exception(); 48 | } 49 | 50 | void ResultsCallback_ReturnCleanly(std::vector::ConstPtr> results) 51 | { 52 | CONSOLE_BRIDGE_logDebug("I got your results, and they are appreciated. Returning cleanly."); 53 | } 54 | 55 | void ResultsCallback_Throw(std::vector::ConstPtr> results) 56 | { 57 | CONSOLE_BRIDGE_logDebug("Throwing a test exception. This is only a drill. 
This is only a drill."); 58 | throw std::exception(); 59 | } 60 | 61 | TEST_F(PointCloudAnnotatorUnit, Construction) 62 | { 63 | CONSOLE_BRIDGE_logDebug("PointCloudAnnotatorUnit, Construction"); 64 | pcs_detection::PointCloudAnnotator pca1(&ImageAnnotatorCallback_ReturnInputs, &ResultsCallback_ReturnCleanly); 65 | pcs_detection::PointCloudAnnotator pca2(&ImageAnnotatorCallback_Throw, &ResultsCallback_Throw, 123); 66 | } 67 | 68 | TEST_F(PointCloudAnnotatorUnit, addPointCloud) 69 | { 70 | CONSOLE_BRIDGE_logDebug("PointCloudAnnotatorUnit, addPointCloud"); 71 | { 72 | pcs_detection::PointCloudAnnotator pca1(&ImageAnnotatorCallback_ReturnInputs, &ResultsCallback_ReturnCleanly, 2); 73 | EXPECT_TRUE(pca1.addPointCloud(cloud)); 74 | pca1.addPointCloud(cloud); 75 | } 76 | { 77 | // Should fail as soon as the buffer gets big enough to trigger annotateImages 78 | pcs_detection::PointCloudAnnotator pca1(&ImageAnnotatorCallback_Throw, &ResultsCallback_ReturnCleanly, 3); 79 | EXPECT_TRUE(pca1.addPointCloud(cloud)); 80 | EXPECT_TRUE(pca1.addPointCloud(cloud)); 81 | EXPECT_FALSE(pca1.addPointCloud(cloud)); 82 | } 83 | { 84 | // Should fail as soon as the buffer gets big enough to trigger annotateImages 85 | pcs_detection::PointCloudAnnotator pca1(&ImageAnnotatorCallback_ReturnInputs, &ResultsCallback_Throw, 3); 86 | EXPECT_TRUE(pca1.addPointCloud(cloud)); 87 | EXPECT_TRUE(pca1.addPointCloud(cloud)); 88 | EXPECT_FALSE(pca1.addPointCloud(cloud)); 89 | } 90 | } 91 | 92 | int main(int argc, char** argv) 93 | { 94 | testing::InitGoogleTest(&argc, argv); 95 | 96 | return RUN_ALL_TESTS(); 97 | } 98 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/breadcrumbs.html: -------------------------------------------------------------------------------- 1 | {# Support for Sphinx 1.3+ page_source_suffix, but don't break old builds. #} 2 | 3 | {% if page_source_suffix %} 4 | {% set suffix = page_source_suffix %} 5 | {% else %} 6 | {% set suffix = source_suffix %} 7 | {% endif %} 8 | 9 | {% if meta is defined and meta is not none %} 10 | {% set check_meta = True %} 11 | {% else %} 12 | {% set check_meta = False %} 13 | {% endif %} 14 | 15 | {% if check_meta and 'github_url' in meta %} 16 | {% set display_github = True %} 17 | {% endif %} 18 | 19 | {% if check_meta and 'bitbucket_url' in meta %} 20 | {% set display_bitbucket = True %} 21 | {% endif %} 22 | 23 | {% if check_meta and 'gitlab_url' in meta %} 24 | {% set display_gitlab = True %} 25 | {% endif %} 26 | 27 |
28 | 29 | 70 | 71 | {% if (theme_prev_next_buttons_location == 'top' or theme_prev_next_buttons_location == 'both') and (next or prev) %} 72 | 80 | {% endif %} 81 |
82 |
83 | -------------------------------------------------------------------------------- /gh_pages/_themes/sphinx_rtd_theme/static/js/theme.js: -------------------------------------------------------------------------------- 1 | /* sphinx_rtd_theme version 0.4.2 | MIT license */ 2 | /* Built 20181005 13:10 */ 3 | require=function r(s,a,l){function c(e,n){if(!a[e]){if(!s[e]){var i="function"==typeof require&&require;if(!n&&i)return i(e,!0);if(u)return u(e,!0);var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[e]={exports:{}};s[e][0].call(o.exports,function(n){return c(s[e][1][n]||n)},o,o.exports,r,s,a,l)}return a[e].exports}for(var u="function"==typeof require&&require,n=0;n"),i("table.docutils.footnote").wrap("
"),i("table.docutils.citation").wrap("
"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i(''),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e 31 | #include 32 | 33 | #include 34 | #include 35 | #include 36 | 37 | namespace pcs_detection 38 | { 39 | typedef std::vector::ConstPtr> pointCloudVec; 40 | /** 41 | * @brief Contains all data necessary to process a point cloud at a later time 42 | * 43 | * The idea is that these could go into a buffer for batch processing. This could be expanded to contain things like the 44 | * tranform if necessary. 45 | */ 46 | struct PointCloudData 47 | { 48 | PointCloudData() = default; 49 | 50 | PointCloudData(pcl::PointCloud::ConstPtr& cloud, 51 | std::shared_ptr& position_image, 52 | std::shared_ptr& image_2d) 53 | : cloud_(cloud), position_image_(position_image), image_2d_(image_2d) 54 | { 55 | } 56 | /** @brief Input point cloud */ 57 | pcl::PointCloud::ConstPtr cloud_; 58 | /** @brief cv::Mat (64FC3) with 3 64 bit channels encoding x, y, z position*/ 59 | std::shared_ptr position_image_; 60 | /** @brief cv::Mat (8UC3) encoding extracted 2D image */ 61 | std::shared_ptr image_2d_; 62 | }; 63 | 64 | /** 65 | * @brief The PointCloudAnnotator class 66 | */ 67 | class PointCloudAnnotator 68 | { 69 | public: 70 | PointCloudAnnotator(std::function(const std::vector)> image_annotator_callback, 71 | std::function results_callback, 72 | long unsigned int batch_size = 1) 73 | : image_annotator_callback_(std::move(image_annotator_callback)) 74 | , results_callback_(std::move(results_callback)) 75 | , batch_size_(batch_size) 76 | { 77 | // This is not a requirement. It can be as big as you want. This is mostly just a sanity check. 78 | assert(batch_size_ <= 1024); 79 | } 80 | 81 | /** @brief Adds a pointcloud to the processing queue and does any preprocessing necessary 82 | * @return false if failed*/ 83 | bool addPointCloud(pcl::PointCloud::ConstPtr input_cloud); 84 | 85 | /** @brief Remove data from the buffer, calls the annotate callback, and returns results 86 | * @return false if failed*/ 87 | bool annotateImages(); 88 | 89 | protected: 90 | /** @brief Called to annotate a buffer. */ 91 | std::function(const std::vector)> image_annotator_callback_; 92 | /** @brief Called when results are ready. */ 93 | std::function results_callback_; 94 | /** @brief Size at which the buffer submits a new batch of images to be annotated */ 95 | long unsigned int batch_size_; 96 | 97 | /** @brief This stores the data until there is enough of it to be batch processed. 
This will likely need to be a ring 98 | * buffer or something more intelligent if this becomes threaded */ 99 | std::queue input_buffer_; 100 | 101 | private: 102 | std::mutex buffer_mutex_; 103 | }; 104 | 105 | } // namespace pcs_detection 106 | 107 | #endif 108 | -------------------------------------------------------------------------------- /pcs_detection/src_python/pcs_detection/preprocess.py: -------------------------------------------------------------------------------- 1 | ''' 2 | * @file preprocess.py 3 | * @brief Applies laplacian preprocessing and mean subtraction prior to input to neural net 4 | * 5 | * @author Jake Janssen 6 | * @date Oct 24, 2019 7 | * @version TODO 8 | * @bug No known bugs 9 | * 10 | * @copyright Copyright (c) 2019, Southwest Research Institute 11 | * 12 | * @par License 13 | * Software License Agreement (Apache License) 14 | * @par 15 | * Licensed under the Apache License, Version 2.0 (the "License"); 16 | * you may not use this file except in compliance with the License. 17 | * You may obtain a copy of the License at 18 | * http://www.apache.org/licenses/LICENSE-2.0 19 | * @par 20 | * Unless required by applicable law or agreed to in writing, software 21 | * distributed under the License is distributed on an "AS IS" BASIS, 22 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 23 | * See the License for the specific language governing permissions and 24 | * limitations under the License. 25 | ''' 26 | 27 | import cv2 28 | import numpy as np 29 | 30 | def preprocessing(img_data, config): 31 | ''' 32 | Apply laplacian preprocessing to image if specified 33 | and mean subtraction on image 34 | ''' 35 | 36 | # only use the first channel if grey is being used 37 | if config.CHANNEL == 'GREY' and len(img_data.shape) != 2: 38 | img_data = img_data[:,:,0] 39 | 40 | # add third dimension to images with a single channel 41 | if len(img_data.shape) == 2: 42 | img_data = np.expand_dims(img_data, axis=-1) 43 | 44 | # convert image to the lab color space 45 | if config.CHANNEL == 'LAB': 46 | img_data = img_data.astype(np.float32) 47 | img_data /= 255 48 | img_data = cv2.cvtColor(img_data.astype(np.float32), cv2.COLOR_BGR2LAB) 49 | channels=cv2.split(img_data) 50 | img_data = img_data.astype(np.float32) 51 | # convert image channel to YCR_CB 52 | elif config.CHANNEL == 'YCR_CB': 53 | ycrcb=cv2.cvtColor(img_data.astype(np.uint8),cv2.COLOR_BGR2YCR_CB) 54 | # channels=cv2.split(ycrcb) 55 | # channels[0] = cv2.equalizeHist(channels[0].astype(np.uint8),channels[0].astype(np.uint8)) 56 | # img_data = cv2.merge(channels) 57 | # img_data = img_data.astype(np.float32) 58 | img_data = ycrcb.astype(np.float32) 59 | # add the laplacian as a second channel 60 | elif config.CHANNEL == 'COMBINED': 61 | edge_chnl = cv2.GaussianBlur(img_data, (3, 3), 0).astype(np.uint8) 62 | ddepth = cv2.CV_16S 63 | lap_chnl = cv2.Laplacian(edge_chnl, ddepth, ksize=3 ) 64 | lap_chnl = lap_chnl * config.PRE_PROCESS['edge'][1] 65 | combined_img = np.zeros((img_data.shape[0], img_data.shape[1], img_data.shape[2]+1)) 66 | combined_img[:,:,0:img_data.shape[2]] = img_data 67 | combined_img[:,:,img_data.shape[2]] = lap_chnl 68 | img_data = combined_img 69 | # add the laplacian onto the grey scale image 70 | elif config.CHANNEL == 'STACKED': 71 | # get the laplacian of the image 72 | edge_chnl = cv2.GaussianBlur(img_data, (3, 3), 0).astype(np.uint8) 73 | ddepth = cv2.CV_16S 74 | lap_chnl = cv2.Laplacian(edge_chnl, ddepth, ksize=3 ) 75 | img_data[:,:,0] += 2*lap_chnl 76 | 77 | # which 
mean subtraction values to use
78 | if config.CHANNEL == 'RGB':
79 | dataset_means = config.PRE_PROCESS['rgb']
80 | elif config.CHANNEL == 'THERMAL':
81 | dataset_means = config.PRE_PROCESS['thermal'][3]
82 | elif config.CHANNEL == 'GREY':
83 | dataset_means = config.PRE_PROCESS['grey']
84 | elif config.CHANNEL == 'COMBINED':
85 | dataset_means = config.PRE_PROCESS['grey'][:]
86 | dataset_means.append(config.PRE_PROCESS['edge'][0])
87 | elif config.CHANNEL == 'STACKED':
88 | dataset_means = config.PRE_PROCESS['grey']
89 | elif config.CHANNEL == 'LAB':
90 | dataset_means = config.PRE_PROCESS['lab']
91 | elif config.CHANNEL == 'YCR_CB':
92 | dataset_means = config.PRE_PROCESS['ycr']
93 | 
94 | else:
95 | raise ValueError('Invalid channel: ' + str(config.CHANNEL))
96 | 
97 | # mean subtraction to center image means around 0
98 | img_data -= np.asarray(dataset_means)
99 | 
100 | return img_data
101 | 
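The mean-subtraction step above is easy to sanity-check in isolation: whichever channel mode is selected, the per-channel dataset means stored under PRE_PROCESS end up subtracted so the input is centered near zero. A small standalone sketch, reusing the Config shim and the GREY mean value (117) from the example configs earlier:

import numpy as np

class Config:
    def __init__(self, **entries):
        self.__dict__.update(entries)

# GREY mean taken from the example full_config.json files above
config = Config(CHANNEL='GREY', PRE_PROCESS={'grey': [117]})

img = np.full((480, 640, 1), 140.0, dtype=np.float32)  # synthetic grey frame, HxWx1
img -= np.asarray(config.PRE_PROCESS['grey'])          # same subtraction as preprocessing()
print(img.mean())  # 23.0 = 140 - 117, i.e. centered around the dataset mean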
-------------------------------------------------------------------------------- /pcs_ros/CMakeLists.txt: --------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5.0)
2 | project(pcs_ros)
3 | 
4 | add_compile_options(-std=c++11)
5 | 
6 | find_package(pcs_scan_integration REQUIRED)
7 | find_package(pcs_detection REQUIRED)
8 | 
9 | find_package(catkin REQUIRED COMPONENTS
10 | cv_bridge
11 | pcl_conversions
12 | pcl_ros
13 | pcs_msgs
14 | roscpp
15 | sensor_msgs
16 | )
17 | 
18 | #find_package(PCL 1.8 REQUIRED)
19 | find_package(OpenCV 3 REQUIRED)
20 | 
21 | #catkin_python_setup()
22 | 
23 | catkin_package(
24 | # INCLUDE_DIRS include
25 | # LIBRARIES
26 | CATKIN_DEPENDS
27 | cv_bridge
28 | pcl_conversions
29 | pcl_ros
30 | pcs_msgs
31 | roscpp
32 | sensor_msgs
33 | # DEPENDS
34 | )
35 | 
36 | ###########
37 | ## Build ##
38 | ###########
39 | 
40 | include_directories(
41 | include
42 | ${catkin_INCLUDE_DIRS}
43 | ${PCL_INCLUDE_DIRS}
44 | )
45 | 
46 | add_executable(${PROJECT_NAME}_hsv_thresholding_node src/hsv_thresholding_node.cpp)
47 | add_dependencies(${PROJECT_NAME}_hsv_thresholding_node ${catkin_EXPORTED_TARGETS})
48 | target_link_libraries(${PROJECT_NAME}_hsv_thresholding_node ${catkin_LIBRARIES} pcs::pcs_detection_hsv_thresholding)
49 | 
50 | add_executable(${PROJECT_NAME}_image_channel_splitter_node src/image_channel_splitter_node.cpp)
51 | add_dependencies(${PROJECT_NAME}_image_channel_splitter_node ${catkin_EXPORTED_TARGETS})
52 | target_link_libraries(${PROJECT_NAME}_image_channel_splitter_node ${catkin_LIBRARIES})
53 | 
54 | add_executable(${PROJECT_NAME}_image_extractor_node src/image_extractor_node.cpp)
55 | add_dependencies(${PROJECT_NAME}_image_extractor_node ${catkin_EXPORTED_TARGETS})
56 | target_link_libraries(${PROJECT_NAME}_image_extractor_node ${catkin_LIBRARIES} pcs::pcs_detection_hsv_thresholding)
57 | 
58 | add_executable(${PROJECT_NAME}_octomap_mesh_mask_node src/octomap_mesh_mask_node.cpp)
59 | add_dependencies(${PROJECT_NAME}_octomap_mesh_mask_node ${catkin_EXPORTED_TARGETS})
60 | target_link_libraries(${PROJECT_NAME}_octomap_mesh_mask_node ${catkin_LIBRARIES} pcs::pcs_scan_integration_octomap_mesh_masking )
61 | 
62 | add_executable(${PROJECT_NAME}_point_cloud_annotator_node src/point_cloud_annotator_node.cpp)
63 | add_dependencies(${PROJECT_NAME}_point_cloud_annotator_node ${catkin_EXPORTED_TARGETS})
64 | target_link_libraries(${PROJECT_NAME}_point_cloud_annotator_node ${catkin_LIBRARIES} pcs::pcs_detection_point_cloud_annotator)
65 | 
66 | add_executable(${PROJECT_NAME}_point_cloud_xyzi_to_xyzrgb_node src/point_cloud_xyzi_to_xyzrgb_node.cpp)
67 | add_dependencies(${PROJECT_NAME}_point_cloud_xyzi_to_xyzrgb_node ${catkin_EXPORTED_TARGETS})
68 | target_link_libraries(${PROJECT_NAME}_point_cloud_xyzi_to_xyzrgb_node ${catkin_LIBRARIES})
69 | 
70 | #############
71 | ## Install ##
72 | #############
73 | install(PROGRAMS
74 | src/histogram_backprojection_node
75 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
76 | )
77 | 
78 | # Mark executables and/or libraries for installation
79 | install(TARGETS
80 | ${PROJECT_NAME}_hsv_thresholding_node
81 | ${PROJECT_NAME}_image_channel_splitter_node
82 | ${PROJECT_NAME}_image_extractor_node
83 | ${PROJECT_NAME}_octomap_mesh_mask_node
84 | ${PROJECT_NAME}_point_cloud_annotator_node
85 | ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
86 | LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
87 | RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
88 | )
89 | 
90 | ## Mark cpp header files for installation
91 | # install(
92 | #DIRECTORY include/${PROJECT_NAME}/
93 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
94 | # FILES_MATCHING PATTERN "*.h"
95 | # PATTERN ".svn" EXCLUDE
96 | # )
97 | 
98 | ## Mark other files for installation (e.g. launch and bag files, etc.)
99 | install(FILES
100 | launch/extract_images_from_bag.launch
101 | launch/demo.launch
102 | config/demo.rviz
103 | DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
104 | )
105 | 
106 | #############
107 | ## Testing ##
108 | #############
109 | 
110 | ## Add gtest based cpp test target and link libraries
111 | # catkin_add_gtest(${PROJECT_NAME}-test test/point_cloud_segmentation.cpp)
112 | # if(TARGET ${PROJECT_NAME}-test)
113 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
114 | # endif()
115 | 
116 | ## Add folders to be run by python nosetests
117 | # catkin_add_nosetests(test)
118 | 
-------------------------------------------------------------------------------- /pcs_detection/src_python/pcs_detection/inference.py: --------------------------------------------------------------------------------
1 | '''
2 | * @file inference.py
3 | * @brief Creates an object that holds the model instance and a method for making inferences
4 | * @author Jake Janssen
5 | * @date Oct 28, 2019
6 | * @version TODO
7 | * @bug No known bugs
8 | *
9 | * @copyright Copyright (c) 2019, Southwest Research Institute
10 | *
11 | * @par License
12 | * Software License Agreement (Apache License)
13 | * @par
14 | * Licensed under the Apache License, Version 2.0 (the "License");
15 | * you may not use this file except in compliance with the License.
16 | * You may obtain a copy of the License at
17 | * http://www.apache.org/licenses/LICENSE-2.0
18 | * @par
19 | * Unless required by applicable law or agreed to in writing, software
20 | * distributed under the License is distributed on an "AS IS" BASIS,
21 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 | * See the License for the specific language governing permissions and
23 | * limitations under the License.
24 | '''
25 | 
26 | import numpy as np
27 | from pcs_detection.preprocess import preprocessing
28 | 
29 | import tensorflow as tf
30 | import keras.backend as K
31 | 
32 | 
33 | class Inference():
34 | '''
35 | Edits the config based on the validation weights and builds the model
36 | '''
37 | def __init__(self, config):
38 | self.config=config
39 | 
40 | # evaluate the full image regardless of what is in config
41 | self.config.USE_FULL_IMAGE = True
42 | 
43 | # display the type of model and image channels
44 | print('___Config_Options_From_Training___')
45 | print('Using model:', config.MODEL)
46 | print('Using channel:',config.CHANNEL)
47 | print('___________________________________')
48 | 
49 | # load in the model
50 | if config.MODEL == 'fcn8':
51 | from pcs_detection.models.fcn8_model import fcn8
52 | elif config.MODEL == 'fcn_reduced':
53 | from pcs_detection.models.fcn8_reduced import fcn8
54 | 
55 | # Save the graph and session so it can be set if make_prediction is in another thread
56 | self.graph = tf.get_default_graph()
57 | cfg = tf.ConfigProto()
58 | # This allows GPU memory to dynamically grow. This is a workaround to fix this issue on RTX cards
59 | # https://github.com/tensorflow/tensorflow/issues/24496
60 | # However, this can be problematic when sharing memory between applications.
61 | # TODO: Check and see if issue 24496 has been closed, and change this. Note that since Tensorflow 1.15
62 | # is the final 1.x release, this might never happen until this code is upgraded to tensorflow 2.x
63 | cfg.gpu_options.allow_growth = True
64 | cfg.log_device_placement = False
65 | self.session = tf.Session(config = cfg)
66 | 
67 | # create the model
68 | K.set_session(self.session)
69 | weldDetector = fcn8(self.config)
70 | # load weights into the model file
71 | weldDetector.build_model(val=True, val_weights = self.config.VAL_WEIGHT_PATH)
72 | 
73 | self.model = weldDetector.model
74 | 
75 | self.model._make_predict_function()
76 | self.graph.finalize()
77 | 
78 | print("Model loaded and ready")
79 | 
80 | def make_prediction(self, img_data_original):
81 | '''
82 | Applies preprocessing, makes a prediction, and converts it to a boolean mask
83 | Returns np array of size img_height x img_width
84 | '''
85 | img_data_original = img_data_original.astype(np.float32)
86 | 
87 | if not img_data_original.any():
88 | print("Input image is invalid")
89 | return img_data_original
90 | 
91 | # do not edit the original image
92 | img_data = img_data_original.copy()
93 | 
94 | # preprocess data and make prediction
95 | # apply preprocessing
96 | img_data = preprocessing(img_data, self.config)
97 | # first dimension is used for batch size
98 | img_data = np.expand_dims(img_data, axis=0)
99 | 
100 | # make a prediction and convert it to a boolean mask
101 | with self.session.as_default():
102 | with self.graph.as_default():
103 | prediction = self.model.predict(img_data)
104 | prediction[:,:,0] += self.config.CONFIDENCE_THRESHOLD
105 | prediction = (np.argmax(prediction,axis=-1)).astype(np.uint8)
106 | prediction = prediction[0]
107 | 
108 | return prediction
109 | 
110 | 
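One subtlety in make_prediction above: adding CONFIDENCE_THRESHOLD to the background channel before the argmax means a pixel is labeled weld only when its weld score beats the background score by at least that margin. A standalone illustration with made-up scores for a single 2x2 image, channels last, where index 0 of the last axis is the background class (the script itself operates on a batched prediction array):

import numpy as np

# Fake per-pixel class scores: [background, weld]
scores = np.array([[[0.45, 0.55],    # weld ahead by 0.10
                    [0.20, 0.80]],   # weld ahead by 0.60
                   [[0.60, 0.40],    # background ahead
                    [0.49, 0.51]]])  # weld ahead by only 0.02

threshold = 0.5
scores[:, :, 0] += threshold                        # handicap the background class
mask = np.argmax(scores, axis=-1).astype(np.uint8)
print(mask)  # [[0 1]
             #  [0 0]] -- only the 0.60-margin pixel survives the threshold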
-------------------------------------------------------------------------------- /pcs_ros/src/fcn8_node: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | * @file fcn8_node.py
4 | * @brief This node exposes a service interface to perform FCN8 inference.
5 | *
6 | * @author Matthew Powelson
7 | * @date November 21, 2019
8 | * @version TODO
9 | * @bug No known bugs
10 | *
11 | * @copyright Copyright (c) 2019, Southwest Research Institute
12 | *
13 | * @par License
14 | * Software License Agreement (Apache License)
15 | * @par
16 | * Licensed under the Apache License, Version 2.0 (the "License");
17 | * you may not use this file except in compliance with the License.
18 | * You may obtain a copy of the License at
19 | * http://www.apache.org/licenses/LICENSE-2.0
20 | * @par
21 | * Unless required by applicable law or agreed to in writing, software
22 | * distributed under the License is distributed on an "AS IS" BASIS,
23 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 | * See the License for the specific language governing permissions and
25 | * limitations under the License.
26 | '''
27 | 
28 | import rospy
29 | from cv_bridge import CvBridge
30 | import cv2 as cv
31 | import numpy as np
32 | 
33 | from pcs_msgs.srv import *
34 | from pcs_detection.inference import Inference
35 | 
36 | import json
37 | import os
38 | 
39 | # Hack because code was originally written to have configs as Python modules
40 | class Config:
41 | def __init__(self, **entries):
42 | self.__dict__.update(entries)
43 | 
44 | class InferenceServer(object):
45 | """
46 | Creates a ROS service that takes a sensor_msgs/Image and returns a
47 | sensor_msgs/Image resulting from using FCN8
48 | 
49 | One note: rospy service callbacks are in their own thread. This can cause issues. They should have been resolved,
50 | but I am putting this here in case they pop up again in the future.
51 | https://stackoverflow.com/questions/46725323/keras-tensorflow-exception-while-predicting-from-multiple-threads
52 | """
53 | service = None
54 | 
55 | def __init__(self, filepath, invert_results = False):
56 | """
57 | Constructor takes an input filepath to the .h5 weights file
58 | """
59 | self.invert_results = invert_results
60 | 
61 | # Import Config json file and convert into format we need
62 | rospy.loginfo(filepath)
63 | rospy.loginfo("------------------------------")
64 | with open(filepath) as json_data_file:
65 | data = json.load(json_data_file)
66 | config = Config(**data)
67 | self.config = config
68 | 
69 | # Construct the annotator
70 | self.annotator = Inference(self.config)
71 | 
72 | self.service = rospy.Service('perform_detection', ImageProcessing, self.annotateImage)
73 | 
74 | self.bridge = CvBridge()
75 | rospy.logdebug("FCN8 Detection service is available")
76 | 
77 | def annotateImage(self, req):
78 | """
79 | Callback function for ImageProcessing Service. Return image is the
Return image is the 80 | same size as the input image 81 | """ 82 | rospy.logdebug("Annotating image using FCN8") 83 | 84 | # Convert sensor_msgs/Image to cv2 Mat 85 | cv_image = self.bridge.imgmsg_to_cv2(req.input_image, "8UC3") 86 | cv_image = np.asarray(cv_image[:,:,:]) 87 | 88 | # Convert to numpy array and send to annotator 89 | input_image = np.asarray(cv_image) 90 | 91 | # Generate the annotation and convert to 3 channel image 92 | res = self.annotator.make_prediction(input_image[:,:,0]) 93 | 94 | results_image = cv.merge((res, res, res)) * 255 95 | 96 | # Invert the image 97 | if self.invert_results: 98 | results_image = 255 - results_image 99 | 100 | # Convert results back into sensor_msgs/Image 101 | res_msg = self.bridge.cv2_to_imgmsg(results_image, "8UC3") 102 | response = ImageProcessingResponse(res_msg) 103 | 104 | rospy.logdebug("Annotation Complete") 105 | return response 106 | 107 | 108 | if __name__ == '__main__': 109 | rospy.init_node('fcn8_node') 110 | 111 | # Optionally disable the gpu 112 | disable_gpu = rospy.get_param('~disable_gpu', False) 113 | if disable_gpu: 114 | os.environ['CUDA_VISIBLE_DEVICES'] = '-1' 115 | 116 | # Filepath to the h5 weights file 117 | filepath = rospy.get_param('~filepath') 118 | invert_results = rospy.get_param('~invert_results', False) 119 | 120 | # Launch server 121 | server = InferenceServer(filepath, invert_results) 122 | 123 | rospy.loginfo("FCN8 Server is active") 124 | rospy.spin() 125 | -------------------------------------------------------------------------------- /pcs_detection/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.5.0) 2 | project(pcs_detection VERSION 0.1.0 LANGUAGES CXX) 3 | 4 | find_package(console_bridge REQUIRED) 5 | find_package(PCL REQUIRED) 6 | find_package(OpenCV REQUIRED) 7 | 8 | list(FIND CMAKE_CXX_COMPILE_FEATURES cxx_std_11 CXX_FEATURE_FOUND) 9 | 10 | ########### 11 | ## Build ## 12 | ########### 13 | 14 | add_library(${PROJECT_NAME}_hsv_thresholding INTERFACE) 15 | target_link_libraries(${PROJECT_NAME}_hsv_thresholding INTERFACE console_bridge ${OpenCV_LIBS}) 16 | target_compile_options(${PROJECT_NAME}_hsv_thresholding INTERFACE -Wsuggest-override -Wconversion -Wsign-conversion) 17 | if(CXX_FEATURE_FOUND EQUAL "-1") 18 | target_compile_options(${PROJECT_NAME}_hsv_thresholding INTERFACE -std=c++11) 19 | else() 20 | target_compile_features(${PROJECT_NAME}_hsv_thresholding INTERFACE cxx_std_11) 21 | endif() 22 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 23 | target_compile_options(${PROJECT_NAME}_hsv_thresholding INTERFACE -mno-avx) 24 | else() 25 | message(WARNING "Non-GNU compiler detected. 
If using AVX instructions, Eigen alignment issues may result.") 26 | endif() 27 | target_include_directories(${PROJECT_NAME}_hsv_thresholding INTERFACE 28 | "$" 29 | "$") 30 | target_include_directories(${PROJECT_NAME}_hsv_thresholding SYSTEM INTERFACE 31 | ${OpenCV_INCLUDE_DIRS}) 32 | 33 | list (APPEND PACKAGE_LIBRARIES ${PROJECT_NAME}_hsv_thresholding) 34 | 35 | add_library(${PROJECT_NAME}_point_cloud_annotator SHARED src/${PROJECT_NAME}/point_cloud_annotator.cpp) 36 | target_link_libraries(${PROJECT_NAME}_point_cloud_annotator PUBLIC console_bridge ${PCL_LIBRARIES} ${OpenCV_LIBS}) 37 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator PRIVATE -Wsuggest-override -Wconversion -Wsign-conversion) 38 | if(CXX_FEATURE_FOUND EQUAL "-1") 39 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator PUBLIC -std=c++11) 40 | else() 41 | target_compile_features(${PROJECT_NAME}_point_cloud_annotator PUBLIC cxx_std_11) 42 | endif() 43 | if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") 44 | target_compile_options(${PROJECT_NAME}_point_cloud_annotator PUBLIC -mno-avx) 45 | else() 46 | message(WARNING "Non-GNU compiler detected. If using AVX instructions, Eigen alignment issues may result.") 47 | endif() 48 | target_include_directories(${PROJECT_NAME}_point_cloud_annotator PUBLIC 49 | "$" 50 | "$") 51 | target_include_directories(${PROJECT_NAME}_point_cloud_annotator SYSTEM PUBLIC 52 | ${PCL_INCLUDE_DIRS} 53 | ${OpenCV_INCLUDE_DIRS}) 54 | 55 | list (APPEND PACKAGE_LIBRARIES ${PROJECT_NAME}_point_cloud_annotator) 56 | 57 | 58 | 59 | ############# 60 | ## Python ## 61 | ############# 62 | add_subdirectory(src_python) 63 | 64 | ############# 65 | ## Install ## 66 | ############# 67 | # Mark executables and/or libraries for installation 68 | install(TARGETS ${PACKAGE_LIBRARIES} 69 | EXPORT ${PROJECT_NAME}-targets DESTINATION lib) 70 | install(EXPORT ${PROJECT_NAME}-targets 71 | NAMESPACE pcs:: DESTINATION lib/cmake/${PROJECT_NAME}) 72 | 73 | # Mark cpp header files for installation 74 | install(DIRECTORY include/${PROJECT_NAME} 75 | DESTINATION include 76 | FILES_MATCHING 77 | PATTERN "*.h" 78 | PATTERN "*.hpp" 79 | PATTERN ".svn" EXCLUDE 80 | ) 81 | 82 | install(FILES package.xml DESTINATION share/${PROJECT_NAME}) 83 | 84 | # Create cmake config files 85 | include(CMakePackageConfigHelpers) 86 | configure_package_config_file( 87 | ${CMAKE_CURRENT_LIST_DIR}/cmake/${PROJECT_NAME}-config.cmake.in 88 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake 89 | INSTALL_DESTINATION lib/cmake/${PROJECT_NAME} 90 | NO_CHECK_REQUIRED_COMPONENTS_MACRO) 91 | 92 | write_basic_package_version_file( 93 | ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake 94 | VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion) 95 | 96 | install(FILES 97 | "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" 98 | "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config-version.cmake" 99 | DESTINATION lib/cmake/${PROJECT_NAME}) 100 | 101 | export(EXPORT ${PROJECT_NAME}-targets 102 | FILE ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-targets.cmake) 103 | 104 | ############# 105 | ## Test ## 106 | ############# 107 | if (ENABLE_TESTS) 108 | enable_testing() 109 | add_custom_target(run_tests ALL 110 | WORKING_DIRECTORY ${CMAKE_BINARY_DIR} 111 | COMMAND ${CMAKE_CTEST_COMMAND} -V -O "/tmp/${PROJECT_NAME}_ctest.log" -C ${CONFIGURATION}) 112 | 113 | add_subdirectory(test) 114 | endif() 115 | 116 | -------------------------------------------------------------------------------- /pcs_ros/src/octomap_mesh_mask_node.cpp: 
-------------------------------------------------------------------------------- 1 | #include <actionlib/server/simple_action_server.h> 2 | #include <pcl_conversions/pcl_conversions.h> 3 | #include <pcl_ros/transforms.h> 4 | #include <pcs_msgs/ApplyOctomapMeshMaskAction.h> 5 | #include <pcs_scan_integration/octomap_mesh_mask.h> 6 | #include <ros/ros.h> 7 | #include <tf/transform_listener.h> 8 | 9 | namespace pcs_ros 10 | { 11 | /** 12 | * @brief Exposes pcs_scan_integration::OctomapMeshMask as a ROS action server. 13 | * 14 | * This implementation takes the octomap as a point cloud that is **currently being published**. If it is not found in 5 15 | * seconds, it returns failed. It also takes a path to a PLY mesh file and returns a path to a PLY mesh file. Modifying it 16 | * to return a tesseract_geometry::Mesh would not be hard. 17 | */ 18 | class OctomapMeshMaskAction 19 | { 20 | protected: 21 | ros::NodeHandle nh_; 22 | actionlib::SimpleActionServer<pcs_msgs::ApplyOctomapMeshMaskAction> as_; 23 | std::string action_name_; 24 | pcs_msgs::ApplyOctomapMeshMaskFeedback feedback_; 25 | pcs_msgs::ApplyOctomapMeshMaskResult result_; 26 | tf::TransformListener tf_listener_; 27 | 28 | public: 29 | OctomapMeshMaskAction(std::string name) 30 | : as_(nh_, name, std::bind(&OctomapMeshMaskAction::execute_callback, this, std::placeholders::_1), false) 31 | , action_name_(name) 32 | { 33 | as_.start(); 34 | } 35 | 36 | /** 37 | * @brief Callback for action server 38 | * @param goal Action goal 39 | */ 40 | void execute_callback(const pcs_msgs::ApplyOctomapMeshMaskGoalConstPtr& goal) 41 | { 42 | pcs_scan_integration::OctomapMeshMask masker; 43 | 44 | try 45 | { 46 | // Get pointcloud on topic provided 47 | auto pointcloud_msg = 48 | ros::topic::waitForMessage<sensor_msgs::PointCloud2>(goal->point_cloud_topic, ros::Duration(5.0)); 49 | 50 | // Look up transform between octomap frame and mesh frame. Note that we look it up at time now because the octomap 51 | // message could be pretty old 52 | tf::StampedTransform transform; 53 | tf_listener_.lookupTransform(goal->mesh_frame, pointcloud_msg->header.frame_id, ros::Time::now(), transform); 54 | 55 | // Transform into mesh frame 56 | sensor_msgs::PointCloud2 pc_mesh_frame; 57 | pcl_ros::transformPointCloud(goal->mesh_frame, transform, *pointcloud_msg, pc_mesh_frame); 58 | 59 | // Convert to PCL 60 | pcl::PCLPointCloud2 pc_mesh_frame_pcl; 61 | pcl_conversions::toPCL(pc_mesh_frame, pc_mesh_frame_pcl); 62 | 63 | // Convert to PointXYZRGB 64 | pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>()); 65 | pcl::fromPCLPointCloud2(pc_mesh_frame_pcl, *pointcloud); 66 | 67 | // Set the octree based on the parameters given 68 | masker.setOctree(pointcloud, goal->resolution, goal->lower_limit, goal->upper_limit, goal->limit_negative); 69 | } 70 | catch (...)
71 | { 72 | ROS_ERROR("Octomap Mesh Mask Action did not receive a pointcloud on %s", goal->point_cloud_topic.c_str()); 73 | result_.status_msg = "Failed to get point cloud"; 74 | as_.setAborted(result_); 75 | return; 76 | } 77 | 78 | // Set the input mesh 79 | std::string input_filepath = goal->mesh_path; 80 | masker.setInputMesh(input_filepath); 81 | 82 | // Perform masking 83 | switch (goal->mask_type) 84 | { 85 | case 0: 86 | masker.maskMesh(pcs_scan_integration::OctomapMeshMask::MaskType::RETURN_INSIDE); 87 | break; 88 | case 1: 89 | masker.maskMesh(pcs_scan_integration::OctomapMeshMask::MaskType::RETURN_OUTSIDE); 90 | break; 91 | case 2: 92 | masker.maskMesh(pcs_scan_integration::OctomapMeshMask::MaskType::RETURN_COLORIZED); 93 | break; 94 | default: 95 | result_.status_msg = "Invalid mask type"; 96 | ROS_ERROR("Invalid mask type"); 97 | as_.setAborted(result_); 98 | return; 99 | } 100 | 101 | // Save the results 102 | std::string result_path = goal->results_dir + "/masked_mesh.ply"; 103 | if (!masker.saveMaskedMesh(result_path)) 104 | { 105 | result_.status_msg = "Save mesh failed"; 106 | ROS_ERROR("Save mesh failed"); 107 | as_.setAborted(result_); 108 | return; 109 | } 110 | result_.results_path = result_path; 111 | as_.setSucceeded(result_); 112 | return; 113 | } 114 | }; 115 | 116 | } // namespace pcs_ros 117 | 118 | int main(int argc, char** argv) 119 | { 120 | ros::init(argc, argv, "octomap_mesh_mask_node"); 121 | ros::NodeHandle nh; 122 | pcs_ros::OctomapMeshMaskAction omma("octomap_mesh_mask_server"); 123 | 124 | ROS_INFO("Octomap Mesh Mask Action is available"); 125 | ros::spin(); 126 | return 0; 127 | } 128 | -------------------------------------------------------------------------------- /gh_pages/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # SphinxTest documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Oct 3 11:09:13 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | import os 20 | import git 21 | curr_path = os.path.abspath('.') 22 | # The try/except is needed because of the subversion tool when it creates the master. 23 | try: 24 | repo = git.Repo(curr_path) 25 | current_branch = repo.active_branch.name 26 | except git.exc.InvalidGitRepositoryError: 27 | current_branch = '' 28 | 29 | 30 | # -- General configuration ------------------------------------------------ 31 | 32 | # If your documentation needs a minimal Sphinx version, state it here. 33 | # 34 | # needs_sphinx = '1.0' 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be 37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 38 | # ones. 39 | extensions = ['sphinx.ext.githubpages'] 40 | 41 | # Add any paths that contain templates here, relative to this directory. 
42 | templates_path = ['_templates'] 43 | 44 | from recommonmark.parser import CommonMarkParser 45 | 46 | source_parsers = { 47 | '.md': CommonMarkParser, 48 | } 49 | 50 | source_suffix = ['.rst', '.md'] 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # General information about the project. 56 | project = u'Industrial Training' 57 | copyright = u'2017, ROS-Industrial' 58 | 59 | # The version info for the project you're documenting, acts as replacement for 60 | # |version| and |release|, also used in various other places throughout the 61 | # built documents. 62 | # 63 | # The short X.Y version. 64 | version = current_branch 65 | # The full version, including alpha/beta/rc tags. 66 | release = current_branch 67 | 68 | # The language for content autogenerated by Sphinx. Refer to documentation 69 | # for a list of supported languages. 70 | # 71 | # This is also used if you do content translation via gettext catalogs. 72 | # Usually you set "language" from the command line for these cases. 73 | language = None 74 | 75 | # List of patterns, relative to source directory, that match files and 76 | # directories to ignore when looking for source files. 77 | # This pattern also affects html_static_path and html_extra_path . 78 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md', 'exercise'] 79 | 80 | # The name of the Pygments (syntax highlighting) style to use. 81 | pygments_style = 'manni' 82 | 83 | # If true, `todo` and `todoList` produce output, else they produce nothing. 84 | todo_include_todos = False 85 | 86 | download_support = True 87 | 88 | # -- Options for HTML output ---------------------------------------------- 89 | 90 | # The theme to use for HTML and HTML Help pages. See the documentation for 91 | # a list of builtin themes. 92 | # 93 | html_theme = 'sphinx_rtd_theme' 94 | html_theme_path = ['_themes',] 95 | 96 | # Theme options are theme-specific and customize the look and feel of a theme 97 | # further. For a list of options available for each theme, see the 98 | # documentation. 99 | # 100 | html_theme_options = { 101 | #'canonical_url': '', 102 | 'analytics_id': '', 103 | 'logo_only': False, 104 | 'display_version': True, 105 | 'prev_next_buttons_location': 'bottom', 106 | 'style_external_links': False, 107 | #'vcs_pageview_mode': '', 108 | # Toc options 109 | 'collapse_navigation': True, 110 | 'sticky_navigation': False, 111 | 'navigation_depth': 4, 112 | 'includehidden': True, 113 | 'titles_only': False, 114 | } 115 | 116 | # Add any paths that contain custom static files (such as style sheets) here, 117 | # relative to this directory. They are copied after the builtin static files, 118 | # so a file named "default.css" will overwrite the builtin "default.css". 119 | html_static_path = ['_static'] 120 | 121 | html_context = { 122 | #"wiki_title": "Tesseract", 123 | "display_github": True, 124 | "github_user": "ros-industrial-consortium", 125 | "github_repo": "tesseract", 126 | "github_version": current_branch, 127 | "conf_py_path": "gh_pages/", 128 | "source_suffix": source_suffix, 129 | "css_files": ['_static/override.css'], 130 | } 131 | 132 | # Output file base name for HTML help builder. 
133 | htmlhelp_basename = 'TesseractDocumentation' 134 | 135 | -------------------------------------------------------------------------------- /pcs_msgs/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(pcs_msgs) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | add_compile_options(-std=c++11) 6 | 7 | find_package(catkin REQUIRED COMPONENTS 8 | actionlib_msgs 9 | message_generation 10 | message_runtime 11 | sensor_msgs 12 | std_msgs 13 | ) 14 | 15 | ## Generate messages in the 'msg' folder 16 | # add_message_files( 17 | # FILES 18 | # Message1.msg 19 | # Message2.msg 20 | # ) 21 | 22 | # Generate services in the 'srv' folder 23 | add_service_files( 24 | FILES 25 | ImageProcessing.srv 26 | ) 27 | 28 | 29 | # Generate actions in the 'action' folder 30 | add_action_files( 31 | FILES 32 | ApplyOctomapMeshMask.action 33 | ) 34 | 35 | ## Generate added messages and services with any dependencies listed here 36 | generate_messages( 37 | DEPENDENCIES 38 | actionlib_msgs 39 | sensor_msgs 40 | std_msgs 41 | ) 42 | 43 | 44 | ################################### 45 | ## catkin specific configuration ## 46 | ################################### 47 | ## The catkin_package macro generates cmake config files for your package 48 | ## Declare things to be passed to dependent projects 49 | ## INCLUDE_DIRS: uncomment this if your package contains header files 50 | ## LIBRARIES: libraries you create in this project that dependent projects also need 51 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 52 | ## DEPENDS: system dependencies of this project that dependent projects also need 53 | catkin_package( 54 | # INCLUDE_DIRS include 55 | # LIBRARIES pcs_msgs 56 | CATKIN_DEPENDS 57 | actionlib_msgs 58 | message_generation 59 | sensor_msgs 60 | std_msgs 61 | message_runtime 62 | # DEPENDS system_lib 63 | ) 64 | 65 | ########### 66 | ## Build ## 67 | ########### 68 | 69 | ## Specify additional locations of header files 70 | ## Your package locations should be listed before other locations 71 | include_directories( 72 | # include 73 | ${catkin_INCLUDE_DIRS} 74 | ) 75 | 76 | ## Declare a C++ library 77 | # add_library(${PROJECT_NAME} 78 | # src/${PROJECT_NAME}/pcs_msgs.cpp 79 | # ) 80 | 81 | ## Add cmake target dependencies of the library 82 | ## as an example, code may need to be generated before libraries 83 | ## either from message generation or dynamic reconfigure 84 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 85 | 86 | ## Declare a C++ executable 87 | ## With catkin_make all packages are built within a single CMake context 88 | ## The recommended prefix ensures that target names across packages don't collide 89 | # add_executable(${PROJECT_NAME}_node src/pcs_msgs_node.cpp) 90 | 91 | ## Rename C++ executable without prefix 92 | ## The above recommended prefix causes long target names, the following renames the 93 | ## target back to the shorter version for ease of user use 94 | ## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 95 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 96 | 97 | ## Add cmake target dependencies of the executable 98 | ## same as for the library above 99 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 100 | 101 | ## Specify libraries to link a library or executable target against 102 | # target_link_libraries(${PROJECT_NAME}_node 103 | # ${catkin_LIBRARIES} 104 | # ) 105 | 106 | ############# 107 | ## Install ## 108 | ############# 109 | 110 | # all install targets should use catkin DESTINATION variables 111 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 112 | 113 | ## Mark executable scripts (Python etc.) for installation 114 | ## in contrast to setup.py, you can choose the destination 115 | # install(PROGRAMS 116 | # scripts/my_python_script 117 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 118 | # ) 119 | 120 | ## Mark executables and/or libraries for installation 121 | # install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node 122 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 123 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 124 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 125 | # ) 126 | 127 | ## Mark cpp header files for installation 128 | # install(DIRECTORY include/${PROJECT_NAME}/ 129 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 130 | # FILES_MATCHING PATTERN "*.h" 131 | # PATTERN ".svn" EXCLUDE 132 | # ) 133 | 134 | ## Mark other files for installation (e.g. launch and bag files, etc.) 135 | # install(FILES 136 | # # myfile1 137 | # # myfile2 138 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 139 | # ) 140 | 141 | ############# 142 | ## Testing ## 143 | ############# 144 | 145 | ## Add gtest based cpp test target and link libraries 146 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_pcs_msgs.cpp) 147 | # if(TARGET ${PROJECT_NAME}-test) 148 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 149 | # endif() 150 | 151 | ## Add folders to be run by python nosetests 152 | # catkin_add_nosetests(test) 153 | -------------------------------------------------------------------------------- /pcs_ros/src/point_cloud_xyzi_to_xyzrgb_node.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | namespace pcs_ros 5 | { 6 | /** 7 | * @brief Converts a PointI to a PointRGB setting each color channel equal to the intensity 8 | * @param in input XYZI cloud 9 | * @param out output XYZRGB cloud 10 | */ 11 | inline void PointItoRGB(const pcl::PointXYZI& in, pcl::PointXYZRGB& out) 12 | { 13 | // It appears that intensity is (in the case of the realsense) an 8 bit value padded with 3 extra bytes 14 | // http://pointclouds.org/documentation/tutorials/adding_custom_ptype.php 15 | uint8_t intensity = in._PointXYZI::intensity; 16 | uint8_t r = intensity; 17 | uint8_t g = intensity; 18 | uint8_t b = intensity; 19 | 20 | uint32_t rgb = ((uint32_t)r << 16 | (uint32_t)g << 8 | (uint32_t)b); 21 | out.rgb = *reinterpret_cast(&rgb); 22 | 23 | out._PointXYZRGB::x = in._PointXYZI::x; 24 | out._PointXYZRGB::y = in._PointXYZI::y; 25 | out._PointXYZRGB::z = in._PointXYZI::z; 26 | } 27 | 28 | /** 29 | * @brief This converts a PointCloud2 to a pointcloud XYZI for 8 bit intensity values. This should not be necessary, but 30 | * the fromROSMsg appears to be broken in this case. 
31 | * 32 | * Perhaps fromROSMsg does not work because it is expecting a float? When I tried using it I got the error "Failed to 33 | * find match for field intensity" 34 | * @param cloud_msg Input cloud to be converted 35 | * @return pcl::PointCloud<pcl::PointXYZI> with intensity interpreted as a uint8_t 36 | */ 37 | inline pcl::PointCloud<pcl::PointXYZI> fromROSMsgXYZI(const sensor_msgs::PointCloud2& cloud_msg) 38 | { 39 | pcl::PointCloud<pcl::PointXYZI> cloud; 40 | pcl_conversions::toPCL(cloud_msg.header, cloud.header); 41 | 42 | // Get the field structure of this point cloud 43 | int pointBytes = cloud_msg.point_step; 44 | int offset_x = 0; 45 | int offset_y = 0; 46 | int offset_z = 0; 47 | int offset_int = 0; 48 | for (int f = 0; f < cloud_msg.fields.size(); ++f) 49 | { 50 | if (cloud_msg.fields[f].name == "x") 51 | offset_x = cloud_msg.fields[f].offset; 52 | if (cloud_msg.fields[f].name == "y") 53 | offset_y = cloud_msg.fields[f].offset; 54 | if (cloud_msg.fields[f].name == "z") 55 | offset_z = cloud_msg.fields[f].offset; 56 | if (cloud_msg.fields[f].name == "intensity") 57 | offset_int = cloud_msg.fields[f].offset; 58 | } 59 | 60 | // populate point cloud object 61 | assert(cloud_msg.height == 1); 62 | cloud.points.resize(cloud_msg.width); 63 | for (int p = 0; p < cloud_msg.width; ++p) 64 | { 65 | pcl::PointXYZI newPoint; 66 | 67 | newPoint.x = *(float*)(&cloud_msg.data[0] + (pointBytes * p) + offset_x); 68 | newPoint.y = *(float*)(&cloud_msg.data[0] + (pointBytes * p) + offset_y); 69 | newPoint.z = *(float*)(&cloud_msg.data[0] + (pointBytes * p) + offset_z); 70 | newPoint.intensity = *(uint8_t*)(&cloud_msg.data[0] + (pointBytes * p) + offset_int); 71 | 72 | cloud.points[p] = newPoint; 73 | } 74 | 75 | return cloud; 76 | } 77 | 78 | /** 79 | * @brief Subscribes to a XYZI pointcloud and republishes it as an XYZRGB with each color channel equal to the 8 bit 80 | * intensity 81 | * 82 | * This is useful when a pointcloud is colorized with a greyscale image and published as an XYZI 83 | */ 84 | class PointCloudXYZItoXYZRGB 85 | { 86 | public: 87 | void callback(const sensor_msgs::PointCloud2::ConstPtr& cloud_pc2) 88 | { 89 | // Convert to XYZI from message 90 | pcl::PointCloud<pcl::PointXYZI> cloud = fromROSMsgXYZI(*cloud_pc2); 91 | 92 | // Convert to XYZRGB where each color channel is the intensity value 93 | pcl::PointCloud<pcl::PointXYZRGB>::Ptr results(new pcl::PointCloud<pcl::PointXYZRGB>()); 94 | 95 | results->width = cloud.width; 96 | results->height = cloud.height; 97 | results->header = cloud.header; 98 | results->is_dense = cloud.is_dense; 99 | 100 | results->points.resize(cloud.points.size()); 101 | for (int idx = 0; idx < cloud.points.size(); idx++) 102 | { 103 | PointItoRGB(cloud.points[idx], results->points[idx]); 104 | } 105 | 106 | pub_.publish(results); 107 | } 108 | 109 | PointCloudXYZItoXYZRGB() : input_topic_("input"), output_topic_("output") 110 | { 111 | // Create publishers and subscribers 112 | sub_ = nh_.subscribe(input_topic_, 1, &PointCloudXYZItoXYZRGB::callback, this); 113 | pub_ = nh_.advertise<pcl::PointCloud<pcl::PointXYZRGB>>(output_topic_, 1); 114 | 115 | // Print the topics we are using 116 | std::string t1 = nh_.resolveName(input_topic_); 117 | std::string t2 = nh_.resolveName(output_topic_); 118 | ROS_INFO_STREAM("Subscribing to XYZI pointcloud on: " << t1); 119 | ROS_INFO_STREAM("Publishing XYZRGB pointcloud on: " << t2); 120 | } 121 | 122 | private: 123 | ros::NodeHandle nh_; 124 | std::string input_topic_; 125 | std::string output_topic_; 126 | ros::Subscriber sub_; 127 | ros::Publisher pub_; 128 | }; 129 | } // namespace pcs_ros 130 | int main(int argc, char** argv) 131 | { 132 | 
ros::init(argc, argv, "point_cloud_xyzi_to_xyzrgb_node"); 133 | pcs_ros::PointCloudXYZItoXYZRGB converter; 134 | ros::spin(); 135 | return 0; 136 | } 137 | -------------------------------------------------------------------------------- /pcs_ros/config/demo.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 78 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Global Options1 8 | - /Status1 9 | - /PointCloud21 10 | - /PointCloud22 11 | Splitter Ratio: 0.5 12 | Tree Height: 901 13 | - Class: rviz/Selection 14 | Name: Selection 15 | - Class: rviz/Tool Properties 16 | Expanded: 17 | - /2D Pose Estimate1 18 | - /2D Nav Goal1 19 | - /Publish Point1 20 | Name: Tool Properties 21 | Splitter Ratio: 0.5886790156364441 22 | - Class: rviz/Views 23 | Expanded: 24 | - /Current View1 25 | Name: Views 26 | Splitter Ratio: 0.5 27 | - Class: rviz/Time 28 | Experimental: false 29 | Name: Time 30 | SyncMode: 0 31 | SyncSource: PointCloud2 32 | Toolbars: 33 | toolButtonStyle: 2 34 | Visualization Manager: 35 | Class: "" 36 | Displays: 37 | - Alpha: 0.5 38 | Cell Size: 1 39 | Class: rviz/Grid 40 | Color: 160; 160; 164 41 | Enabled: true 42 | Line Style: 43 | Line Width: 0.029999999329447746 44 | Value: Lines 45 | Name: Grid 46 | Normal Cell Count: 0 47 | Offset: 48 | X: 0 49 | Y: 0 50 | Z: 0 51 | Plane: XY 52 | Plane Cell Count: 10 53 | Reference Frame: 54 | Value: true 55 | - Alpha: 1 56 | Autocompute Intensity Bounds: true 57 | Autocompute Value Bounds: 58 | Max Value: 10 59 | Min Value: -10 60 | Value: true 61 | Axis: Z 62 | Channel Name: intensity 63 | Class: rviz/PointCloud2 64 | Color: 255; 255; 255 65 | Color Transformer: RGB8 66 | Decay Time: 0 67 | Enabled: true 68 | Invert Rainbow: false 69 | Max Color: 255; 255; 255 70 | Max Intensity: 4096 71 | Min Color: 0; 0; 0 72 | Min Intensity: 0 73 | Name: PointCloud2 74 | Position Transformer: XYZ 75 | Queue Size: 10 76 | Selectable: true 77 | Size (Pixels): 3 78 | Size (m): 0.009999999776482582 79 | Style: Flat Squares 80 | Topic: /pc_masker_node/unmasked_cloud 81 | Unreliable: false 82 | Use Fixed Frame: true 83 | Use rainbow: true 84 | Value: true 85 | - Alpha: 1 86 | Autocompute Intensity Bounds: true 87 | Autocompute Value Bounds: 88 | Max Value: 10 89 | Min Value: -10 90 | Value: true 91 | Axis: Z 92 | Channel Name: intensity 93 | Class: rviz/PointCloud2 94 | Color: 255; 255; 255 95 | Color Transformer: RGB8 96 | Decay Time: 0 97 | Enabled: true 98 | Invert Rainbow: false 99 | Max Color: 255; 255; 255 100 | Max Intensity: 4096 101 | Min Color: 0; 0; 0 102 | Min Intensity: 0 103 | Name: PointCloud2 104 | Position Transformer: XYZ 105 | Queue Size: 10 106 | Selectable: true 107 | Size (Pixels): 3 108 | Size (m): 0.10000000149011612 109 | Style: Points 110 | Topic: /pc_masker_node/masked_cloud 111 | Unreliable: false 112 | Use Fixed Frame: true 113 | Use rainbow: true 114 | Value: true 115 | Enabled: true 116 | Global Options: 117 | Background Color: 48; 48; 48 118 | Default Light: true 119 | Fixed Frame: camera_rgb_optical_frame 120 | Frame Rate: 30 121 | Name: root 122 | Tools: 123 | - Class: rviz/Interact 124 | Hide Inactive Objects: true 125 | - Class: rviz/MoveCamera 126 | - Class: rviz/Select 127 | - Class: rviz/FocusCamera 128 | - Class: rviz/Measure 129 | - Class: rviz/SetInitialPose 130 | Topic: /initialpose 131 | - Class: rviz/SetGoal 132 | Topic: /move_base_simple/goal 133 | - Class: rviz/PublishPoint 134 | Single click: true 135 | Topic: 
/clicked_point 136 | Value: true 137 | Views: 138 | Current: 139 | Class: rviz/Orbit 140 | Distance: 3.2491989135742188 141 | Enable Stereo Rendering: 142 | Stereo Eye Separation: 0.05999999865889549 143 | Stereo Focal Distance: 1 144 | Swap Stereo Eyes: false 145 | Value: false 146 | Focal Point: 147 | X: 0.05556504428386688 148 | Y: 0.14684289693832397 149 | Z: 0.5308224558830261 150 | Focal Shape Fixed Size: true 151 | Focal Shape Size: 0.05000000074505806 152 | Invert Z Axis: false 153 | Name: Current View 154 | Near Clip Distance: 0.009999999776482582 155 | Pitch: -1.0146015882492065 156 | Target Frame: 157 | Value: Orbit (rviz) 158 | Yaw: 4.683579921722412 159 | Saved: ~ 160 | Window Geometry: 161 | Displays: 162 | collapsed: false 163 | Height: 1176 164 | Hide Left Dock: false 165 | Hide Right Dock: false 166 | QMainWindow State: 000000ff00000000fd00000004000000000000015600000410fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002700000410000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002c6fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000027000002c6000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f0000003efc0100000002fb0000000800540069006d006501000000000000073f000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005e30000041000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 167 | Selection: 168 | collapsed: false 169 | Time: 170 | collapsed: false 171 | Tool Properties: 172 | collapsed: false 173 | Views: 174 | collapsed: false 175 | Width: 1855 176 | X: 65 177 | Y: 24 178 | -------------------------------------------------------------------------------- /pcs_detection/README.md: -------------------------------------------------------------------------------- 1 | # Running Instructions 2 | ### Training 3 | Training is done by running fcn8_train.py. 4 | The training_config.json file (see below) allows for control of the training. 5 | The model will be trained on the images within the TRAINING_DIRS specified in the config. The classes to be trained on must also be entered into the config under CLASS_NAMES. These names must correspond to the class names used within the label file. 6 | Pre-processing done before training is also selected within this config. To select the type of preprocessing, first select the image channels that should be used. The image channels do not have to be the original input to the algorithm (e.g. rgb images can be converted to lab or ycr_cb), though image inputs will not be guaranteed to work with all image channels (e.g. 
greyscale images cannot be converted to the lab color space). 7 | After selecting the image channel, the values under PRE_PROCESS may need to be adjusted. These values correspond to the mean of each image channel after preprocessing for the entire dataset. They can be verified by running the fcn8_train.py script in the debug mode (see config explanation below) and looking at the histogram of the image channel values. The goal is to have the mean for the whole dataset be zero. 8 | Other preprocessing techniques, such as only taking a section with a high intensity of labels, may also be configured in this config. 9 | 10 | Once training begins, the best weights from the training session will be saved off under the scripts/data/weights directory along with two config files. 11 | 12 | ### Verification 13 | Once training is complete, the example_weights.py script will aid in verifying the model. This script will make predictions on the validation sets specified in the training config. To run this script, first change the path inside the script to that of the full_config.json saved off with your weights. This config will load the weights that generated the highest accuracy on the validation set by default (this can be changed by going into the full_config file). Then run the script and press the spacebar to continue on to the next image. Pressing 'q' will cause the script to stop. 14 | 15 | ### Inference 16 | Inference is done in a similar way to verification, but instead of using full_config.json, inference_config.json should be used. Additionally, images will have to be supplied to this script instead of running off the verification directories. 17 | 18 | ### Interpreting Images 19 | The images generated for both the predictions and labels will follow the same color scheme: 20 | * Blue represents the background class 21 | * Red represents regions of the image that are ignored or not used in the loss calculation 22 | * Other colors will consistently be a class specified in the config 23 | 24 | # Data Requirements 25 | Training subsets must contain a folder holding images and a training_labels.xml file. 26 | Images will be resized to the ORIG_DIMS specified in the config (aspect ratio may not be preserved). 27 | 28 | # Config Descriptions 29 | 30 | ### training_config.json: 31 | This config file is paired with fcn8_train.py. It allows for the selection of a model, the types of image and label preprocessing, the image directories used for both training and validation, and various training parameters. Once the model is done training, a config file used for inference and a config file used for logging the training parameters will be created. 32 | 33 | ### /full_config.json 34 | This config is a copy of training_config.json used to train a specific set of weights. It is also used for viewing predictions of the model in example_weights.py. 35 | 36 | ### /inference_config.json 37 | A pruned version of full_config.json used for making inferences in fcn8_inference.py. 
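The exact contents of this file are generated by the training pipeline, so the sketch below is purely illustrative: the key names are drawn from the Config Keys list that follows, but the casing, the exact set of keys kept by the pruning, and every value shown are assumptions rather than output copied from the scripts.

    {
        "MODE": "VALIDATE",
        "CLASS_NAMES": ["weld"],
        "CHANNEL": "GREY",
        "PRE_PROCESS": [100.0],
        "CONFIDENCE_THRESHOLD": 0.5,
        "VAL_WEIGHT_PATH": "scripts/data/weights/<weight_id>/best_weights.h5"
    }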
38 | 39 | ### Config Keys 40 | Mode - which mode to execute when running 41 | * TRAIN is for training a new model 42 | * DEBUG will display training images, labels, and statistics on the training data 43 | * VALIDATE is used for loading in weights and making predictions 44 | Save_Model - whether or not to save the training weights 45 | Class_Names - the names of the classes that are present in the label files 46 | Note: background does not have to be put as a class 47 | Weight_Id - name of model weights 48 | Weight_Save_Path - directory to save weight files to 49 | Model - model (fcn8) used for training 50 | Channel - image channels to use 51 | * GREY is single channel greyscale 52 | * STACKED is a single channel with the Laplacian of the image added to the greyscale 53 | * COMBINED is a two channel image with the first being greyscale and the second the Laplacian 54 | * RGB is three channel color 55 | * LAB will convert an RGB image to the LAB color space 56 | * YCR_CB will convert an RGB image to the YCR_CB color space 57 | Augmentations - standard Keras augmentations: https://keras.io/preprocessing/image/ 58 | Learning_Rate - scheduler for the learning rate 59 | Batch_Size - how many images to process with a single prediction 60 | N_Epochs - maximum number of epochs before training ends 61 | Background_Reduction - the ratio of (label pixels / background pixels) per batch used in training labels 62 | Min_Pixels_In_Image - minimum number of labeled pixels per training image in the weld class 63 | Display_Scale_Factor - amount to scale images viewed in the pipeline 64 | Use_Full_Image - toggles resizing to IMG_DIMS 65 | IMG_DIMS - select a region of the image with specified size that has the majority of the label 66 | Label_Thickness - how thick to draw the weld lines in the label 67 | Pre_Process - values used for mean subtraction pre-processing (mean should be zero after subtraction) 68 | Confidence_Threshold - used to increase the threshold needed for predicting a weld pixel (post-processing) 69 | Val_Weight_Path - path to the weight file used for predictions 70 | Training_Dirs - directories of images used for training 71 | * dir_path - directory path 72 | * num_imgs - number of images to use from that directory ('all' will use all images) 73 | * labels - boolean value for if labels are paired with this directory 74 | Note: The only directories that should not have labels are the ones that do not contain any instances of the classes. 75 | Validation_Dirs - same structure as above, these directories will be used for generating validation metrics 76 | Note: Validation directories with no labels can be run by making the class background 77 | -------------------------------------------------------------------------------- /pcs_detection/include/pcs_detection/utils.h: -------------------------------------------------------------------------------- 1 | /** 2 | * @file utils.h 3 | * @brief Utilities primarily for point cloud annotation 4 | * 5 | * @author Matthew Powelson 6 | * @date October 4, 2019 7 | * @version TODO 8 | * @bug No known bugs 9 | * 10 | * @copyright Copyright (c) 2010, Southwest Research Institute 11 | * 12 | * @par License 13 | * Software License Agreement (Apache License) 14 | * @par 15 | * Licensed under the Apache License, Version 2.0 (the "License"); 16 | * you may not use this file except in compliance with the License. 
17 | * You may obtain a copy of the License at 18 | * http://www.apache.org/licenses/LICENSE-2.0 19 | * @par 20 | * Unless required by applicable law or agreed to in writing, software 21 | * distributed under the License is distributed on an "AS IS" BASIS, 22 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 23 | * See the License for the specific language governing permissions and 24 | * limitations under the License. 25 | */ 26 | #pragma once 27 | #include <string> 28 | 29 | #include <opencv2/core/core.hpp> 30 | #include <opencv2/imgproc/imgproc.hpp> 31 | 32 | #include <pcl/point_cloud.h> 33 | #include <pcl/point_types.h> 34 | 35 | namespace pcs_detection 36 | { 37 | /** 38 | * @brief Returns a string associated with the openCV typedef (i.e. CV_8U) 39 | * @param type OpenCV flags specified in typedef in interface.h 40 | * @return A human readable string corresponding to the typedef 41 | */ 42 | inline std::string type2str(const int type) 43 | { 44 | std::string r; 45 | 46 | uchar depth = CV_MAT_DEPTH(type); 47 | int chans = CV_MAT_CN(type); 48 | 49 | switch (depth) 50 | { 51 | case CV_8U: 52 | r = "8U"; 53 | break; 54 | case CV_8S: 55 | r = "8S"; 56 | break; 57 | case CV_16U: 58 | r = "16U"; 59 | break; 60 | case CV_16S: 61 | r = "16S"; 62 | break; 63 | case CV_32S: 64 | r = "32S"; 65 | break; 66 | case CV_32F: 67 | r = "32F"; 68 | break; 69 | case CV_64F: 70 | r = "64F"; 71 | break; 72 | default: 73 | r = "User"; 74 | break; 75 | } 76 | 77 | r += "C"; 78 | r += std::to_string(chans); 79 | 80 | return r; 81 | } 82 | /** @brief Element-wise multiplies input_image by mask and stores the result in masked_image. Always returns true. */ 83 | inline bool applyMask(const cv::Mat& input_image, const cv::Mat& mask, cv::Mat& masked_image) 84 | { 85 | masked_image = input_image.mul(mask); 86 | return true; 87 | } 88 | 89 | /** 90 | * @brief Converts a pointcloud to a color image and a position encoded image 91 | * 92 | * Note: There are also the functions in pcl_ros which are similar, but they only extract the rgb 93 | * http://docs.pointclouds.org/1.1.0/namespacepcl.html#a31460a4b07150db357c690a8ae27d1e6 94 | * @param cloud Input point cloud. 
Should be a dense XYZRGB cloud 95 | * @param position_image cv::Mat with 3 64 bit channels encoding x, y, z position 96 | * @param color_image cv::Mat encoding extracted rgb image 97 | */ 98 | inline void cloudToImage(const pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr cloud, 99 | cv::Mat& position_image, 100 | cv::Mat& color_image) 101 | { 102 | // Check that this is a structured point cloud 103 | assert(cloud->width != 1 || cloud->height != 1); 104 | // Check that the cloud is not so big as to cause a problem when converting uint32 to int32 105 | assert(cloud->width < std::numeric_limits<int>::max() && cloud->height < std::numeric_limits<int>::max()); 106 | 107 | // Resize coordinates to the size of the point cloud (stored in a 64 bit float 3 channel matrix) 108 | position_image = cv::Mat(static_cast<int>(cloud->height), static_cast<int>(cloud->width), CV_64FC3); 109 | // Resize image to the size of the depth image (stored in a 8 bit unsigned 3 channel matrix) 110 | color_image = cv::Mat(static_cast<int>(cloud->height), static_cast<int>(cloud->width), CV_8UC3); 111 | // Iterate over the rows and columns of the structured point cloud 112 | for (int y = 0; y < color_image.rows; y++) 113 | { 114 | for (int x = 0; x < color_image.cols; x++) 115 | { 116 | // Pull out the xyz values from the point cloud 117 | position_image.at<double>(y, x * 3 + 0) = cloud->points[(y * color_image.cols + x)].x; 118 | position_image.at<double>(y, x * 3 + 1) = cloud->points[(y * color_image.cols + x)].y; 119 | position_image.at<double>(y, x * 3 + 2) = cloud->points[(y * color_image.cols + x)].z; 120 | 121 | // Pull out the rgb values from the point cloud 122 | cv::Vec3b color = cv::Vec3b(cloud->points[(y * color_image.cols + x)].b, 123 | cloud->points[(y * color_image.cols + x)].g, 124 | cloud->points[(y * color_image.cols + x)].r); 125 | // Apply color to that point 126 | color_image.at<cv::Vec3b>(cv::Point(x, y)) = color; 127 | } 128 | } 129 | } 130 | 131 | /** 132 | * @brief Convert a color image and a position encoded image back to a point cloud 133 | * 134 | * @param color_image CV_8UC3 cv::Mat RGB image 135 | * @param position_image CV_64FC3 cv::Mat where the channels correspond to x, y, and z position 136 | * @return Returns an XYZRGB point cloud generated from the inputs 137 | */ 138 | inline pcl::PointCloud<pcl::PointXYZRGB>::Ptr imageToCloud(const cv::Mat& color_image, 139 | const cv::Mat& position_image, 140 | const pcl::PCLHeader& header = pcl::PCLHeader()) 141 | { 142 | assert(color_image.rows == position_image.rows); 143 | assert(color_image.cols == position_image.cols); 144 | 145 | pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>()); 146 | cloud->header = header; 147 | for (int y = 0; y < color_image.rows; y++) 148 | { 149 | for (int x = 0; x < color_image.cols; x++) 150 | { 151 | pcl::PointXYZRGB point; 152 | point.x = position_image.at<double>(y, x * 3 + 0); 153 | point.y = position_image.at<double>(y, x * 3 + 1); 154 | point.z = position_image.at<double>(y, x * 3 + 2); 155 | 156 | cv::Vec3b color = color_image.at<cv::Vec3b>(cv::Point(x, y)); 157 | uint8_t r = (color[2]); 158 | uint8_t g = (color[1]); 159 | uint8_t b = (color[0]); 160 | 161 | int32_t rgb = (r << 16) | (g << 8) | b; 162 | point.rgb = *reinterpret_cast<float*>(&rgb); 163 | 164 | cloud->points.push_back(point); 165 | } 166 | } 167 | cloud->height = color_image.rows; 168 | cloud->width = color_image.cols; 169 | cloud->is_dense = false; // Note: This could be checked above. 170 | return cloud; 171 | } 172 | 173 | } // namespace pcs_detection 174 | --------------------------------------------------------------------------------
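As a closing illustration of the two conversion helpers in utils.h, here is a minimal usage sketch. It is not part of the repository: it assumes the header is visible on the include path as pcs_detection/utils.h and that the template arguments shown above are correct, and the tiny hand-built cloud simply stands in for real structured sensor data.

#include <pcs_detection/utils.h>

#include <opencv2/core/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main()
{
  // Build a small 4x2 structured (organized) XYZRGB cloud as stand-in sensor data
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>());
  cloud->width = 4;
  cloud->height = 2;
  cloud->is_dense = true;
  cloud->points.resize(cloud->width * cloud->height);
  for (std::size_t i = 0; i < cloud->points.size(); ++i)
  {
    cloud->points[i].x = static_cast<float>(i % 4) * 0.01f;  // fake grid positions
    cloud->points[i].y = static_cast<float>(i / 4) * 0.01f;
    cloud->points[i].z = 1.0f;
    cloud->points[i].r = cloud->points[i].g = cloud->points[i].b = 128;  // mid-grey
  }

  // Split the cloud into a color image (suitable for any 2D detector) and a
  // position image that remembers where each pixel came from
  cv::Mat position_image, color_image;
  pcs_detection::cloudToImage(cloud, position_image, color_image);

  // ... a 2D detection / masking step would modify color_image here ...

  // Recombine the (possibly annotated) color image with the stored positions
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr rebuilt =
      pcs_detection::imageToCloud(color_image, position_image, cloud->header);

  // The round trip preserves the cloud dimensions
  return rebuilt->points.size() == cloud->points.size() ? 0 : 1;
}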