├── utils ├── nearest_neighbors │ ├── KNN_NanoFLANN.egg-info │ │ ├── dependency_links.txt │ │ ├── top_level.txt │ │ ├── PKG-INFO │ │ └── SOURCES.txt │ ├── lib │ │ └── python │ │ │ ├── KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg │ │ │ ├── EGG-INFO │ │ │ │ ├── not-zip-safe │ │ │ │ ├── dependency_links.txt │ │ │ │ ├── top_level.txt │ │ │ │ ├── native_libs.txt │ │ │ │ ├── PKG-INFO │ │ │ │ └── SOURCES.txt │ │ │ ├── __pycache__ │ │ │ │ └── nearest_neighbors.cpython-37.pyc │ │ │ ├── nearest_neighbors.cpython-37m-x86_64-linux-gnu.so │ │ │ └── nearest_neighbors.py │ │ │ ├── nearest_neighbors.cpython-35m-x86_64-linux-gnu.so │ │ │ ├── nearest_neighbors.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── KNN_NanoFLANN-0.0.0-py3.5.egg-info │ │ │ └── KNN_NanoFLANN-0.0.0-py3.6.egg-info │ ├── build │ │ ├── temp.linux-x86_64-3.5 │ │ │ ├── knn.o │ │ │ └── knn_.o │ │ ├── temp.linux-x86_64-3.6 │ │ │ ├── knn.o │ │ │ └── knn_.o │ │ ├── temp.linux-x86_64-3.7 │ │ │ ├── knn.o │ │ │ └── knn_.o │ │ ├── lib.linux-x86_64-3.5 │ │ │ └── nearest_neighbors.cpython-35m-x86_64-linux-gnu.so │ │ ├── lib.linux-x86_64-3.6 │ │ │ └── nearest_neighbors.cpython-36m-x86_64-linux-gnu.so │ │ └── lib.linux-x86_64-3.7 │ │ │ └── nearest_neighbors.cpython-37m-x86_64-linux-gnu.so │ ├── dist │ │ └── KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg │ ├── test.py │ ├── setup.py │ ├── .ipynb_checkpoints │ │ └── setup-checkpoint.py │ ├── knn_.h │ ├── knn.pyx │ ├── knn_.cxx │ └── KDTreeTableAdaptor.h └── cpp_wrappers │ ├── compile_wrappers.sh │ ├── cpp_subsampling │ ├── build │ │ ├── temp.linux-x86_64-3.5 │ │ │ ├── wrapper.o │ │ │ ├── grid_subsampling │ │ │ │ └── grid_subsampling.o │ │ │ └── cpp_wrappers │ │ │ │ └── cpp_utils │ │ │ │ └── cloud │ │ │ │ └── cloud.o │ │ ├── temp.linux-x86_64-3.6 │ │ │ ├── wrapper.o │ │ │ ├── grid_subsampling │ │ │ │ └── grid_subsampling.o │ │ │ └── cpp_wrappers │ │ │ │ └── cpp_utils │ │ │ │ └── cloud │ │ │ │ └── cloud.o │ │ ├── temp.linux-x86_64-3.7 │ │ │ ├── wrapper.o │ │ │ ├── grid_subsampling │ │ │ │ └── grid_subsampling.o │ │ │ └── cpp_wrappers │ │ │ │ └── cpp_utils │ │ │ │ └── cloud │ │ │ │ └── cloud.o │ │ └── lib.linux-x86_64-3.7 │ │ │ └── grid_subsampling.cpython-37m-x86_64-linux-gnu.so │ ├── grid_subsampling.cpython-35m-x86_64-linux-gnu.so │ ├── grid_subsampling.cpython-36m-x86_64-linux-gnu.so │ ├── grid_subsampling.cpython-37m-x86_64-linux-gnu.so │ ├── setup.py │ ├── grid_subsampling │ │ ├── grid_subsampling.h │ │ └── grid_subsampling.cpp │ └── wrapper.cpp │ └── cpp_utils │ └── cloud │ ├── cloud.cpp │ └── cloud.h ├── compile_op.sh ├── helper_requirements.txt ├── data_processing ├── multi_processing.sh ├── input_preparation_S3DIS.py ├── input_preparation_Toronto3D.py ├── .ipynb_checkpoints │ ├── input_preparation_S3DIS-checkpoint.py │ ├── input_preparation_Sensat-checkpoint.py │ ├── input_preparation_DALES-checkpoint.py │ └── input_preparation_Galaxy-checkpoint.py ├── input_preparation_Sensat.py ├── input_preparation_DALES.py ├── input_preparation_Teledyne_Test.py └── input_preparation_Galaxy.py ├── README.md ├── Lovasz_losses_tf.py ├── tester_Toronto3D.py ├── tester_DALES.py ├── helper_ply.py ├── tester_SensatUrban.py ├── LICENSE ├── main_DALES.py └── tf_util.py /utils/nearest_neighbors/KNN_NanoFLANN.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/KNN_NanoFLANN.egg-info/top_level.txt: 
-------------------------------------------------------------------------------- 1 | nearest_neighbors 2 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/not-zip-safe: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/top_level.txt: -------------------------------------------------------------------------------- 1 | nearest_neighbors 2 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/native_libs.txt: -------------------------------------------------------------------------------- 1 | nearest_neighbors.cpython-37m-x86_64-linux-gnu.so 2 | -------------------------------------------------------------------------------- /compile_op.sh: -------------------------------------------------------------------------------- 1 | cd utils/nearest_neighbors 2 | python setup.py install --home="." 3 | cd ../../ 4 | 5 | cd utils/cpp_wrappers 6 | sh compile_wrappers.sh 7 | cd ../../ -------------------------------------------------------------------------------- /utils/cpp_wrappers/compile_wrappers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Compile cpp subsampling 4 | cd cpp_subsampling 5 | python3 setup.py build_ext --inplace 6 | cd ..
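# build_ext --inplace drops grid_subsampling.cpython-*.so next to the
# cpp_subsampling sources; the prebuilt .so files for Python 3.5/3.6/3.7
# tracked in that directory are the output of exactly this step.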
7 | 8 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.5/knn.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.5/knn.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.5/knn_.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.5/knn_.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.6/knn.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.6/knn.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.6/knn_.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.6/knn_.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.7/knn.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.7/knn.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/temp.linux-x86_64-3.7/knn_.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/temp.linux-x86_64-3.7/knn_.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/wrapper.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/wrapper.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/wrapper.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/wrapper.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/wrapper.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/wrapper.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/dist/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/dist/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg 
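The egg above is the installed form of the compiled nearest_neighbors extension that utils/nearest_neighbors/test.py exercises later in this listing. For reference, a brute-force NumPy sketch of what its knn_batch call computes on small inputs (knn_batch_numpy and its argument names are illustrative, not part of the package):

import numpy as np

def knn_batch_numpy(support, queries, k):
    # support: (B, N, 3) float32, queries: (B, M, 3) float32.
    # Returns (B, M, k) indices of each query's k nearest support points
    # under squared Euclidean distance; querying a cloud against itself
    # returns each point first (distance zero), as in test.py's pc-vs-pc call.
    d2 = np.sum((queries[:, :, None, :] - support[:, None, :, :]) ** 2, axis=-1)
    return np.argsort(d2, axis=-1)[:, :, :k]

The extension computes the same result with a nanoflann KD-tree (declared in knn_.h below), optionally parallelized over the batch with OpenMP, which is what the omp=True flag in test.py selects.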
-------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/grid_subsampling.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/KNN_NanoFLANN.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: KNN-NanoFLANN 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | License: UNKNOWN 6 | Platform: UNKNOWN 7 | 8 | UNKNOWN 9 | 10 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/nearest_neighbors.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/lib/python/nearest_neighbors.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/nearest_neighbors.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/lib/python/nearest_neighbors.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /helper_requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.16.1 2 | h5py==2.10.0 3 | cython==0.29.15 4 | pandas==0.25.3 5 | scikit-learn==0.22.2.post1 6 | scipy==1.4.1 7 | PyYAML 8 | open3d-python==0.3.0.0 9 | tensorflow-gpu==1.11 10 | laspy==1.7.0 -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/grid_subsampling/grid_subsampling.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/grid_subsampling/grid_subsampling.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/grid_subsampling/grid_subsampling.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/grid_subsampling/grid_subsampling.o 
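The object files above build the grid_subsampling extension that every data-preparation script in this repository reaches through DP.grid_sub_sampling from tool.py (tool.py itself is not part of this listing). A minimal sketch of that wrapper, assuming the KPConv-style compute() entry point; the exact keyword names should be checked against wrapper.cpp:

import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling

def grid_sub_sampling(points, features=None, labels=None, grid_size=0.1, verbose=0):
    # Voxel-grid subsampling with cell size grid_size: per occupied cell,
    # coordinates and features are averaged and labels are majority-voted
    # (see grid_subsampling.cpp near the end of this listing).
    if features is None and labels is None:
        return cpp_subsampling.compute(points, sampleDl=grid_size, verbose=verbose)
    if labels is None:
        return cpp_subsampling.compute(points, features=features, sampleDl=grid_size, verbose=verbose)
    if features is None:
        return cpp_subsampling.compute(points, classes=labels, sampleDl=grid_size, verbose=verbose)
    return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=grid_size, verbose=verbose)

This matches the call sites below: grid_sub_sampling(xyz, rgb, labels, grid_size) returns (sub_xyz, sub_rgb, sub_labels), while passing None for features, as the DALES script does, drops the middle return value.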
-------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/grid_subsampling/grid_subsampling.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/grid_subsampling/grid_subsampling.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/cpp_wrappers/cpp_utils/cloud/cloud.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.5/cpp_wrappers/cpp_utils/cloud/cloud.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/cpp_wrappers/cpp_utils/cloud/cloud.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.6/cpp_wrappers/cpp_utils/cloud/cloud.o -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/cpp_wrappers/cpp_utils/cloud/cloud.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/temp.linux-x86_64-3.7/cpp_wrappers/cpp_utils/cloud/cloud.o -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/lib.linux-x86_64-3.5/nearest_neighbors.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/lib.linux-x86_64-3.5/nearest_neighbors.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/lib.linux-x86_64-3.6/nearest_neighbors.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/lib.linux-x86_64-3.6/nearest_neighbors.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/build/lib.linux-x86_64-3.7/nearest_neighbors.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/build/lib.linux-x86_64-3.7/nearest_neighbors.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/KNN_NanoFLANN.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | knn.pyx 2 | knn_.cxx 3 | setup.py 4 | KNN_NanoFLANN.egg-info/PKG-INFO 5 | KNN_NanoFLANN.egg-info/SOURCES.txt 6 | KNN_NanoFLANN.egg-info/dependency_links.txt 7 | KNN_NanoFLANN.egg-info/top_level.txt -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/PKG-INFO: 
-------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: KNN-NanoFLANN 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | License: UNKNOWN 6 | Platform: UNKNOWN 7 | 8 | UNKNOWN 9 | 10 | -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/build/lib.linux-x86_64-3.7/grid_subsampling.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/cpp_wrappers/cpp_subsampling/build/lib.linux-x86_64-3.7/grid_subsampling.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/EGG-INFO/SOURCES.txt: -------------------------------------------------------------------------------- 1 | knn.pyx 2 | knn_.cxx 3 | setup.py 4 | KNN_NanoFLANN.egg-info/PKG-INFO 5 | KNN_NanoFLANN.egg-info/SOURCES.txt 6 | KNN_NanoFLANN.egg-info/dependency_links.txt 7 | KNN_NanoFLANN.egg-info/top_level.txt -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/__pycache__/nearest_neighbors.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/__pycache__/nearest_neighbors.cpython-37.pyc -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.5.egg-info: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: KNN NanoFLANN 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | Home-page: UNKNOWN 6 | Author: UNKNOWN 7 | Author-email: UNKNOWN 8 | License: UNKNOWN 9 | Description: UNKNOWN 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.6.egg-info: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: KNN NanoFLANN 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | Home-page: UNKNOWN 6 | Author: UNKNOWN 7 | Author-email: UNKNOWN 8 | License: UNKNOWN 9 | Description: UNKNOWN 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/nearest_neighbors.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Yacovitch/EyeNet/HEAD/utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/nearest_neighbors.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /data_processing/multi_processing.sh: -------------------------------------------------------------------------------- 1 | python -B input_preparation_DALES.py --grid_size 0.050 2 | python -B input_preparation_DALES.py --grid_size 0.100 3 | python -B input_preparation_DALES.py --grid_size 0.150 4 | python -B input_preparation_DALES.py --grid_size 0.200 5 | python -B input_preparation_DALES.py --grid_size 0.300 
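Each invocation above writes into its own output folder, so runs at different grid sizes never collide; every preparation script below derives the folder name the same way. An illustrative helper (out_dir is not a function in the repo):

from os.path import join

def out_dir(output_path, grid_size, sample_type='grid'):
    # e.g. out_dir('/nas2/jacob/DALES', 0.05) -> '/nas2/jacob/DALES/grid_0.050'
    return join(output_path, sample_type + '_{:.3f}'.format(grid_size))

The -B flag merely tells Python not to write .pyc files during these one-off runs.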
-------------------------------------------------------------------------------- /utils/nearest_neighbors/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import lib.python.nearest_neighbors as nearest_neighbors 3 | import time 4 | 5 | batch_size = 16 6 | num_points = 81920 7 | K = 16 8 | pc = np.random.rand(batch_size, num_points, 3).astype(np.float32) 9 | 10 | # nearest neighbours 11 | start = time.time() 12 | neigh_idx = nearest_neighbors.knn_batch(pc, pc, K, omp=True) 13 | print(time.time() - start) 14 | 15 | 16 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/lib/python/KNN_NanoFLANN-0.0.0-py3.7-linux-x86_64.egg/nearest_neighbors.py: -------------------------------------------------------------------------------- 1 | def __bootstrap__(): 2 | global __bootstrap__, __loader__, __file__ 3 | import sys, pkg_resources, importlib.util 4 | __file__ = pkg_resources.resource_filename(__name__, 'nearest_neighbors.cpython-37m-x86_64-linux-gnu.so') 5 | __loader__ = None; del __bootstrap__, __loader__ 6 | spec = importlib.util.spec_from_file_location(__name__,__file__) 7 | mod = importlib.util.module_from_spec(spec) 8 | spec.loader.exec_module(mod) 9 | __bootstrap__() 10 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import numpy 5 | 6 | 7 | 8 | ext_modules = [Extension( 9 | "nearest_neighbors", 10 | sources=["knn.pyx", "knn_.cxx",], # source file(s) 11 | include_dirs=["./", numpy.get_include()], 12 | language="c++", 13 | extra_compile_args = [ "-std=c++11", "-fopenmp",], 14 | extra_link_args=["-std=c++11", '-fopenmp'], 15 | )] 16 | 17 | setup( 18 | name = "KNN NanoFLANN", 19 | ext_modules = ext_modules, 20 | cmdclass = {'build_ext': build_ext}, 21 | ) 22 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/.ipynb_checkpoints/setup-checkpoint.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import numpy 5 | 6 | 7 | 8 | ext_modules = [Extension( 9 | "nearest_neighbors", 10 | sources=["knn.pyx", "knn_.cxx",], # source file(s) 11 | include_dirs=["./", numpy.get_include()], 12 | language="c++", 13 | extra_compile_args = [ "-std=c++11", "-fopenmp",], 14 | extra_link_args=["-std=c++11", '-fopenmp'], 15 | )] 16 | 17 | setup( 18 | name = "KNN NanoFLANN", 19 | ext_modules = ext_modules, 20 | cmdclass = {'build_ext': build_ext}, 21 | ) 22 | -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup, Extension 2 | import numpy.distutils.misc_util 3 | 4 | # Adding OpenCV to project 5 | # ************************ 6 | 7 | # Adding sources of the project 8 | # ***************************** 9 | 10 | m_name = "grid_subsampling" 11 | 12 | SOURCES = ["../cpp_utils/cloud/cloud.cpp", 13 | "grid_subsampling/grid_subsampling.cpp", 14 | "wrapper.cpp"] 15 | 16 | module = Extension(m_name, 17 | sources=SOURCES, 18 | 
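# Assumed rationale: _GLIBCXX_USE_CXX11_ABI=0 (below) selects the pre-GCC5
# libstdc++ ABI so the extension stays link-compatible with binaries built
# against the old ABI, e.g. the TF 1.x wheels pinned in helper_requirements.txt.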
extra_compile_args=['-std=c++11', 19 | '-D_GLIBCXX_USE_CXX11_ABI=0']) 20 | 21 | setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs()) 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /utils/nearest_neighbors/knn_.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | #include 4 | void cpp_knn(const float* points, const size_t npts, const size_t dim, 5 | const float* queries, const size_t nqueries, 6 | const size_t K, long* indices); 7 | 8 | void cpp_knn_omp(const float* points, const size_t npts, const size_t dim, 9 | const float* queries, const size_t nqueries, 10 | const size_t K, long* indices); 11 | 12 | 13 | void cpp_knn_batch(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim, 14 | const float* queries, const size_t nqueries, 15 | const size_t K, long* batch_indices); 16 | 17 | void cpp_knn_batch_omp(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim, 18 | const float* queries, const size_t nqueries, 19 | const size_t K, long* batch_indices); 20 | 21 | void cpp_knn_batch_distance_pick(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim, 22 | float* queries, const size_t nqueries, 23 | const size_t K, long* batch_indices); 24 | 25 | void cpp_knn_batch_distance_pick_omp(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim, 26 | float* batch_queries, const size_t nqueries, 27 | const size_t K, long* batch_indices); -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_utils/cloud/cloud.cpp: -------------------------------------------------------------------------------- 1 | // 2 | // 3 | // 0==========================0 4 | // | Local feature test | 5 | // 0==========================0 6 | // 7 | // version 1.0 : 8 | // > 9 | // 10 | //--------------------------------------------------- 11 | // 12 | // Cloud source : 13 | // Define usefull Functions/Methods 14 | // 15 | //---------------------------------------------------- 16 | // 17 | // Hugues THOMAS - 10/02/2017 18 | // 19 | 20 | 21 | #include "cloud.h" 22 | 23 | 24 | // Getters 25 | // ******* 26 | 27 | PointXYZ max_point(std::vector points) 28 | { 29 | // Initiate limits 30 | PointXYZ maxP(points[0]); 31 | 32 | // Loop over all points 33 | for (auto p : points) 34 | { 35 | if (p.x > maxP.x) 36 | maxP.x = p.x; 37 | 38 | if (p.y > maxP.y) 39 | maxP.y = p.y; 40 | 41 | if (p.z > maxP.z) 42 | maxP.z = p.z; 43 | } 44 | 45 | return maxP; 46 | } 47 | 48 | PointXYZ min_point(std::vector points) 49 | { 50 | // Initiate limits 51 | PointXYZ minP(points[0]); 52 | 53 | // Loop over all points 54 | for (auto p : points) 55 | { 56 | if (p.x < minP.x) 57 | minP.x = p.x; 58 | 59 | if (p.y < minP.y) 60 | minP.y = p.y; 61 | 62 | if (p.z < minP.z) 63 | minP.z = p.z; 64 | } 65 | 66 | return minP; 67 | } -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling/grid_subsampling.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | #include "../../cpp_utils/cloud/cloud.h" 4 | 5 | #include 6 | #include 7 | 8 | using namespace std; 9 | 10 | class SampledData 11 | { 12 | public: 13 | 14 | // Elements 15 | // ******** 16 | 17 | int count; 18 | PointXYZ point; 19 | vector features; 20 | vector> labels; 21 
| 22 | 23 | // Methods 24 | // ******* 25 | 26 | // Constructor 27 | SampledData() 28 | { 29 | count = 0; 30 | point = PointXYZ(); 31 | } 32 | 33 | SampledData(const size_t fdim, const size_t ldim) 34 | { 35 | count = 0; 36 | point = PointXYZ(); 37 | features = vector(fdim); 38 | labels = vector>(ldim); 39 | } 40 | 41 | // Method Update 42 | void update_all(const PointXYZ p, vector::iterator f_begin, vector::iterator l_begin) 43 | { 44 | count += 1; 45 | point += p; 46 | transform (features.begin(), features.end(), f_begin, features.begin(), plus()); 47 | int i = 0; 48 | for(vector::iterator it = l_begin; it != l_begin + labels.size(); ++it) 49 | { 50 | labels[i][*it] += 1; 51 | i++; 52 | } 53 | return; 54 | } 55 | void update_features(const PointXYZ p, vector::iterator f_begin) 56 | { 57 | count += 1; 58 | point += p; 59 | transform (features.begin(), features.end(), f_begin, features.begin(), plus()); 60 | return; 61 | } 62 | void update_classes(const PointXYZ p, vector::iterator l_begin) 63 | { 64 | count += 1; 65 | point += p; 66 | int i = 0; 67 | for(vector::iterator it = l_begin; it != l_begin + labels.size(); ++it) 68 | { 69 | labels[i][*it] += 1; 70 | i++; 71 | } 72 | return; 73 | } 74 | void update_points(const PointXYZ p) 75 | { 76 | count += 1; 77 | point += p; 78 | return; 79 | } 80 | }; 81 | 82 | 83 | 84 | void grid_subsampling(vector& original_points, 85 | vector& subsampled_points, 86 | vector& original_features, 87 | vector& subsampled_features, 88 | vector& original_classes, 89 | vector& subsampled_classes, 90 | float sampleDl, 91 | int verbose); 92 | 93 | -------------------------------------------------------------------------------- /data_processing/input_preparation_S3DIS.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from tool import DataProcessing as DP 12 | from helper_ply import write_ply 13 | 14 | 15 | if __name__ == '__main__': 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/S3DIS_merged/original_ply', help='original dataset path') 18 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/S3DIS_merged', help='original dataset path') 19 | parser.add_argument('--grid_size', type=float, default=0.04, help='sampling grid size default=0.2') 20 | FLAGS = parser.parse_args() 21 | dataset_path = FLAGS.dataset_path 22 | preparation_types = ['grid'] # Grid sampling & Random sampling 23 | grid_size = FLAGS.grid_size 24 | random_sample_ratio = 10 25 | files =np.sort([join(dataset_path, i) for i in os.listdir(dataset_path)]) 26 | 27 | for sample_type in preparation_types: 28 | for pc_path in files: 29 | cloud_name = pc_path.split('/')[-1][:-4] 30 | print('start to process:', cloud_name) 31 | 32 | # create output directory 33 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 34 | os.makedirs(out_folder) if not exists(out_folder) else None 35 | 36 | # check if it has already calculated 37 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 38 | print(cloud_name, 'already exists, skipped') 39 | continue 40 | 41 | 42 | xyz, rgb, labels = DP.read_ply_data(pc_path, with_rgb=True) 43 | 44 | sub_ply_file = join(out_folder, 
cloud_name + '.ply') 45 | if sample_type == 'grid': 46 | sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size) 47 | else: 48 | sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio) 49 | 50 | sub_rgb = sub_rgb / 255.0 51 | sub_labels = np.squeeze(sub_labels) 52 | write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) 53 | 54 | search_tree = KDTree(sub_xyz, leaf_size=50) 55 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 56 | with open(kd_tree_file, 'wb') as f: 57 | pickle.dump(search_tree, f) 58 | 59 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 60 | proj_idx = proj_idx.astype(np.int32) 61 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 62 | with open(proj_save, 'wb') as f: 63 | pickle.dump([proj_idx, labels], f) 64 | -------------------------------------------------------------------------------- /data_processing/input_preparation_Toronto3D.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | if __name__ == '__main__': 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/data/Toronto3D/original_data', help='') 17 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/data/Toronto3D', help='original dataset path') 18 | parser.add_argument('--grid_size', type=float, default=0.04, help='sampling grid size default=0.2') 19 | FLAGS = parser.parse_args() 20 | dataset_path = FLAGS.dataset_path 21 | preparation_types = ['grid'] # Grid sampling & Random sampling 22 | grid_size = FLAGS.grid_size 23 | random_sample_ratio = 10 24 | files = np.sort([join(dataset_path, i) for i in os.listdir(dataset_path)]) 25 | 26 | for sample_type in preparation_types: 27 | for pc_path in files: 28 | cloud_name = pc_path.split('/')[-1][:-4] 29 | print('start to process:', cloud_name) 30 | 31 | # create output directory 32 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 33 | os.makedirs(out_folder) if not exists(out_folder) else None 34 | 35 | # check if it has already calculated 36 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 37 | print(cloud_name, 'already exists, skipped') 38 | continue 39 | 40 | xyz, rgb, labels = DP.read_ply_data_toronto_3D(pc_path, with_rgb=True) 41 | 42 | 43 | sub_ply_file = join(out_folder, cloud_name + '.ply') 44 | if sample_type == 'grid': 45 | sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size) 46 | else: 47 | sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio) 48 | 49 | sub_rgb = sub_rgb / 255.0 50 | sub_labels = np.squeeze(sub_labels) 51 | write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) 52 | 53 | search_tree = KDTree(sub_xyz, leaf_size=50) 54 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 55 | with open(kd_tree_file, 'wb') as f: 56 | pickle.dump(search_tree, f) 57 | 58 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 59 | 
proj_idx = proj_idx.astype(np.int32) 60 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 61 | with open(proj_save, 'wb') as f: 62 | pickle.dump([proj_idx, labels], f) -------------------------------------------------------------------------------- /data_processing/.ipynb_checkpoints/input_preparation_S3DIS-checkpoint.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from tool import DataProcessing as DP 12 | from helper_ply import write_ply 13 | 14 | 15 | if __name__ == '__main__': 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/S3DIS_merged/original_ply', help='original dataset path') 18 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/S3DIS_merged', help='original dataset path') 19 | parser.add_argument('--grid_size', type=float, default=0.04, help='sampling grid size default=0.2') 20 | FLAGS = parser.parse_args() 21 | dataset_path = FLAGS.dataset_path 22 | preparation_types = ['grid'] # Grid sampling & Random sampling 23 | grid_size = FLAGS.grid_size 24 | random_sample_ratio = 10 25 | files =np.sort([join(dataset_path, i) for i in os.listdir(dataset_path)]) 26 | 27 | for sample_type in preparation_types: 28 | for pc_path in files: 29 | cloud_name = pc_path.split('/')[-1][:-4] 30 | print('start to process:', cloud_name) 31 | 32 | # create output directory 33 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 34 | os.makedirs(out_folder) if not exists(out_folder) else None 35 | 36 | # check if it has already calculated 37 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 38 | print(cloud_name, 'already exists, skipped') 39 | continue 40 | 41 | 42 | xyz, rgb, labels = DP.read_ply_data(pc_path, with_rgb=True) 43 | 44 | sub_ply_file = join(out_folder, cloud_name + '.ply') 45 | if sample_type == 'grid': 46 | sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size) 47 | else: 48 | sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio) 49 | 50 | sub_rgb = sub_rgb / 255.0 51 | sub_labels = np.squeeze(sub_labels) 52 | write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) 53 | 54 | search_tree = KDTree(sub_xyz, leaf_size=50) 55 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 56 | with open(kd_tree_file, 'wb') as f: 57 | pickle.dump(search_tree, f) 58 | 59 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 60 | proj_idx = proj_idx.astype(np.int32) 61 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 62 | with open(proj_save, 'wb') as f: 63 | pickle.dump([proj_idx, labels], f) 64 | -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_utils/cloud/cloud.h: -------------------------------------------------------------------------------- 1 | // 2 | // 3 | // 0==========================0 4 | // | Local feature test | 5 | // 0==========================0 6 | // 7 | // version 1.0 : 8 | // > 9 | // 10 | //--------------------------------------------------- 11 | // 12 | // Cloud header 13 | // 14 | 
//---------------------------------------------------- 15 | // 16 | // Hugues THOMAS - 10/02/2017 17 | // 18 | 19 | 20 | # pragma once 21 | 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include 29 | #include 30 | 31 | #include 32 | 33 | 34 | 35 | 36 | // Point class 37 | // *********** 38 | 39 | 40 | class PointXYZ 41 | { 42 | public: 43 | 44 | // Elements 45 | // ******** 46 | 47 | float x, y, z; 48 | 49 | 50 | // Methods 51 | // ******* 52 | 53 | // Constructor 54 | PointXYZ() { x = 0; y = 0; z = 0; } 55 | PointXYZ(float x0, float y0, float z0) { x = x0; y = y0; z = z0; } 56 | 57 | // array type accessor 58 | float operator [] (int i) const 59 | { 60 | if (i == 0) return x; 61 | else if (i == 1) return y; 62 | else return z; 63 | } 64 | 65 | // opperations 66 | float dot(const PointXYZ P) const 67 | { 68 | return x * P.x + y * P.y + z * P.z; 69 | } 70 | 71 | float sq_norm() 72 | { 73 | return x*x + y*y + z*z; 74 | } 75 | 76 | PointXYZ cross(const PointXYZ P) const 77 | { 78 | return PointXYZ(y*P.z - z*P.y, z*P.x - x*P.z, x*P.y - y*P.x); 79 | } 80 | 81 | PointXYZ& operator+=(const PointXYZ& P) 82 | { 83 | x += P.x; 84 | y += P.y; 85 | z += P.z; 86 | return *this; 87 | } 88 | 89 | PointXYZ& operator-=(const PointXYZ& P) 90 | { 91 | x -= P.x; 92 | y -= P.y; 93 | z -= P.z; 94 | return *this; 95 | } 96 | 97 | PointXYZ& operator*=(const float& a) 98 | { 99 | x *= a; 100 | y *= a; 101 | z *= a; 102 | return *this; 103 | } 104 | }; 105 | 106 | 107 | // Point Opperations 108 | // ***************** 109 | 110 | inline PointXYZ operator + (const PointXYZ A, const PointXYZ B) 111 | { 112 | return PointXYZ(A.x + B.x, A.y + B.y, A.z + B.z); 113 | } 114 | 115 | inline PointXYZ operator - (const PointXYZ A, const PointXYZ B) 116 | { 117 | return PointXYZ(A.x - B.x, A.y - B.y, A.z - B.z); 118 | } 119 | 120 | inline PointXYZ operator * (const PointXYZ P, const float a) 121 | { 122 | return PointXYZ(P.x * a, P.y * a, P.z * a); 123 | } 124 | 125 | inline PointXYZ operator * (const float a, const PointXYZ P) 126 | { 127 | return PointXYZ(P.x * a, P.y * a, P.z * a); 128 | } 129 | 130 | inline std::ostream& operator << (std::ostream& os, const PointXYZ P) 131 | { 132 | return os << "[" << P.x << ", " << P.y << ", " << P.z << "]"; 133 | } 134 | 135 | inline bool operator == (const PointXYZ A, const PointXYZ B) 136 | { 137 | return A.x == B.x && A.y == B.y && A.z == B.z; 138 | } 139 | 140 | inline PointXYZ floor(const PointXYZ P) 141 | { 142 | return PointXYZ(std::floor(P.x), std::floor(P.y), std::floor(P.z)); 143 | } 144 | 145 | 146 | PointXYZ max_point(std::vector points); 147 | PointXYZ min_point(std::vector points); 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | -------------------------------------------------------------------------------- /data_processing/input_preparation_Sensat.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | 15 | if __name__ == '__main__': 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/SensatUrban_Data/ply', help='original dataset 
path') 18 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/SensatUrban_Data', help='original dataset path') 19 | parser.add_argument('--grid_size', type=float, default=0.2, help='sampling grid size default=0.2') 20 | FLAGS = parser.parse_args() 21 | dataset_name = 'SensatUrban' 22 | dataset_path = FLAGS.dataset_path 23 | preparation_types = ['grid'] # Grid sampling & Random sampling 24 | grid_size = FLAGS.grid_size 25 | random_sample_ratio = 10 26 | train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))]) 27 | test_files = np.sort([join(dataset_path, 'test', i) for i in os.listdir(join(dataset_path, 'test'))]) 28 | files = np.sort(np.hstack((train_files, test_files))) 29 | 30 | for sample_type in preparation_types: 31 | for pc_path in files: 32 | cloud_name = pc_path.split('/')[-1][:-4] 33 | print('start to process:', cloud_name) 34 | 35 | # create output directory 36 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 37 | os.makedirs(out_folder) if not exists(out_folder) else None 38 | 39 | # check if it has already calculated 40 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 41 | print(cloud_name, 'already exists, skipped') 42 | continue 43 | 44 | if pc_path in train_files: 45 | xyz, rgb, labels = DP.read_ply_data(pc_path, with_rgb=True) 46 | else: 47 | xyz, rgb = DP.read_ply_data(pc_path, with_rgb=True, with_label=False) 48 | labels = np.zeros(len(xyz), dtype=np.uint8) 49 | 50 | sub_ply_file = join(out_folder, cloud_name + '.ply') 51 | if sample_type == 'grid': 52 | sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size) 53 | else: 54 | sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio) 55 | 56 | sub_rgb = sub_rgb / 255.0 57 | sub_labels = np.squeeze(sub_labels) 58 | write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) 59 | 60 | search_tree = KDTree(sub_xyz, leaf_size=50) 61 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 62 | with open(kd_tree_file, 'wb') as f: 63 | pickle.dump(search_tree, f) 64 | 65 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 66 | proj_idx = proj_idx.astype(np.int32) 67 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 68 | with open(proj_save, 'wb') as f: 69 | pickle.dump([proj_idx, labels], f) -------------------------------------------------------------------------------- /data_processing/.ipynb_checkpoints/input_preparation_Sensat-checkpoint.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | 15 | if __name__ == '__main__': 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/SensatUrban_Data/ply', help='original dataset path') 18 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/SensatUrban_Data', help='original dataset path') 19 | parser.add_argument('--grid_size', type=float, default=0.2, help='sampling grid size default=0.2') 20 | FLAGS = parser.parse_args() 21 | dataset_name = 
'SensatUrban' 22 | dataset_path = FLAGS.dataset_path 23 | preparation_types = ['grid'] # Grid sampling & Random sampling 24 | grid_size = FLAGS.grid_size 25 | random_sample_ratio = 10 26 | train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))]) 27 | test_files = np.sort([join(dataset_path, 'test', i) for i in os.listdir(join(dataset_path, 'test'))]) 28 | files = np.sort(np.hstack((train_files, test_files))) 29 | 30 | for sample_type in preparation_types: 31 | for pc_path in files: 32 | cloud_name = pc_path.split('/')[-1][:-4] 33 | print('start to process:', cloud_name) 34 | 35 | # create output directory 36 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 37 | print(out_folder) 38 | os.makedirs(out_folder) if not exists(out_folder) else None 39 | 40 | # check if it has already calculated 41 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 42 | print(cloud_name, 'already exists, skipped') 43 | continue 44 | 45 | if pc_path in train_files: 46 | xyz, rgb, labels = DP.read_ply_data(pc_path, with_rgb=True) 47 | else: 48 | xyz, rgb = DP.read_ply_data(pc_path, with_rgb=True, with_label=False) 49 | labels = np.zeros(len(xyz), dtype=np.uint8) 50 | 51 | sub_ply_file = join(out_folder, cloud_name + '.ply') 52 | if sample_type == 'grid': 53 | sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size) 54 | else: 55 | sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio) 56 | 57 | sub_rgb = sub_rgb / 255.0 58 | sub_labels = np.squeeze(sub_labels) 59 | write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class']) 60 | 61 | search_tree = KDTree(sub_xyz, leaf_size=50) 62 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 63 | with open(kd_tree_file, 'wb') as f: 64 | pickle.dump(search_tree, f) 65 | 66 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 67 | proj_idx = proj_idx.astype(np.int32) 68 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 69 | with open(proj_save, 'wb') as f: 70 | pickle.dump([proj_idx, labels], f) -------------------------------------------------------------------------------- /data_processing/input_preparation_DALES.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | if __name__ == '__main__': 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/DALES/dales_ply_to_ply', help='') 17 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/DALES', help='original dataset path') 18 | parser.add_argument('--grid_size', type=float, default=0.250, help='sampling grid size default=0.2') 19 | FLAGS = parser.parse_args() 20 | dataset_path = FLAGS.dataset_path 21 | preparation_types = ['grid'] # Grid sampling & Random sampling 22 | grid_size = FLAGS.grid_size 23 | random_sample_ratio = 10 24 | train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))]) 25 | test_files = np.sort([join(dataset_path, 'test', i) for i in 
os.listdir(join(dataset_path, 'test'))]) 26 | files = np.sort(np.hstack((train_files, test_files))) 27 | 28 | for sample_type in preparation_types: 29 | for pc_path in files: 30 | cloud_name = pc_path.split('/')[-1][:-4] 31 | print('start to process:', cloud_name) 32 | 33 | # create output directory 34 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 35 | os.makedirs(out_folder) if not exists(out_folder) else None 36 | 37 | # check if it has already calculated 38 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 39 | print(cloud_name, 'already exists, skipped') 40 | continue 41 | 42 | xyz, labels = DP.read_ply_data(pc_path, with_rgb=False) 43 | 44 | #os.makedirs(join(out_folder, 'pt_clouds')) if not exists(join(out_folder, 'pt_clouds')) else None 45 | sub_ply_file = join(out_folder, cloud_name + '.ply') 46 | #if sample_type == 'grid': 47 | sub_xyz, sub_labels = DP.grid_sub_sampling(xyz, None, labels, grid_size) 48 | #elif sample_type == 'raw': 49 | # print('processing raw files') 50 | # sub_xyz, sub_i, sub_labels = xyz, i, labels 51 | #else: 52 | # sub_xyz, sub_i, sub_labels = DP.random_sub_sampling(xyz, i, labels, random_sample_ratio) 53 | 54 | #sub_rgb = sub_rgb / 255.0 55 | sub_labels = np.squeeze(sub_labels) 56 | #write_ply(sub_ply_file, [sub_xyz, sub_i, sub_labels], ['x', 'y', 'z', 'num_return', 'class']) 57 | write_ply(sub_ply_file, [sub_xyz, sub_labels], ['x', 'y', 'z', 'class']) 58 | 59 | search_tree = KDTree(sub_xyz, leaf_size=50) 60 | #os.makedirs(join(out_folder, 'KDTree')) if not exists(join(out_folder, 'KDTree')) else None 61 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 62 | with open(kd_tree_file, 'wb') as f: 63 | pickle.dump(search_tree, f) 64 | 65 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 66 | proj_idx = proj_idx.astype(np.int32) 67 | #os.makedirs(join(out_folder, 'proj')) if not exists(join(out_folder, 'proj')) else None 68 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 69 | with open(proj_save, 'wb') as f: 70 | pickle.dump([proj_idx, labels], f) -------------------------------------------------------------------------------- /data_processing/input_preparation_Teledyne_Test.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | if __name__ == '__main__': 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/data/Teledyne_Test/test/original_las', help='') 17 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/data/Teledyne_Test/test', help='original dataset path') 18 | parser.add_argument('--grid_size', type=float, default=0.06, help='sampling grid size default=0.2') 19 | FLAGS = parser.parse_args() 20 | dataset_path = FLAGS.dataset_path 21 | preparation_types = ['grid'] # Grid sampling & Random sampling 22 | grid_size = FLAGS.grid_size 23 | random_sample_ratio = 10 24 | files = np.sort([join(dataset_path, i) for i in os.listdir(dataset_path)]) 25 | length = files.shape[0] 26 | total_lenght = files.shape[0] 27 | c = 0 28 | total_org=0 29 | total_sub=0 30 | print('grid_size: ' , grid_size) 
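    # The loop below grid-subsamples every tile, prints per-cloud original
    # vs. subsampled point counts ('name | original | subsampled'), and
    # reports the accumulated totals once the final tile has been processed.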
31 | for sample_type in preparation_types: 32 | for pc_path in files: 33 | cloud_name = pc_path.split('/')[-1][:-4] 34 | #print('start to process:', cloud_name) 35 | 36 | # create output directory 37 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 38 | os.makedirs(out_folder) if not exists(out_folder) else None 39 | 40 | # check if it has already calculated 41 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 42 | print(cloud_name, 'already exists, skipped') 43 | continue 44 | 45 | xyz, i = DP.read_las_no_label_no_norm(pc_path) 46 | labels = None 47 | org_point_number = i.shape[0] 48 | #os.makedirs(join(out_folder, 'pt_clouds')) if not exists(join(out_folder, 'pt_clouds')) else None 49 | sub_ply_file = join(out_folder, cloud_name + '.ply') 50 | sub_xyz, sub_i = DP.grid_sub_sampling(xyz, i, labels, grid_size) 51 | 52 | 53 | sub_point_number = sub_i.shape[0] 54 | total_org += org_point_number 55 | total_sub += sub_point_number 56 | print(cloud_name + '| ' + str(org_point_number) + '| ' + str(sub_point_number)) 57 | c += 1 58 | if c == total_lenght: 59 | print('total points' + '| ' + str(total_org) + '| ' + str(total_sub)) 60 | 61 | 62 | #sub_rgb = sub_rgb / 255.0 63 | write_ply(sub_ply_file, [sub_xyz, sub_i], ['x', 'y', 'z', 'intensity', 'num_return']) 64 | 65 | search_tree = KDTree(sub_xyz, leaf_size=50) 66 | #os.makedirs(join(out_folder, 'KDTree')) if not exists(join(out_folder, 'KDTree')) else None 67 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 68 | with open(kd_tree_file, 'wb') as f: 69 | pickle.dump(search_tree, f) 70 | 71 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 72 | proj_idx = proj_idx.astype(np.int32) 73 | #os.makedirs(join(out_folder, 'proj')) if not exists(join(out_folder, 'proj')) else None 74 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 75 | with open(proj_save, 'wb') as f: 76 | pickle.dump([proj_idx, labels], f) 77 | -------------------------------------------------------------------------------- /data_processing/.ipynb_checkpoints/input_preparation_DALES-checkpoint.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from sklearn.neighbors import KDTree 3 | import numpy as np 4 | import os, pickle, argparse, sys 5 | 6 | BASE_DIR = dirname(abspath(__file__)) 7 | ROOT_DIR = dirname(BASE_DIR) 8 | sys.path.append(BASE_DIR) 9 | sys.path.append(ROOT_DIR) 10 | 11 | from helper_ply import write_ply 12 | from tool import DataProcessing as DP 13 | 14 | if __name__ == '__main__': 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/DALES/dales_ply_to_ply', help='') 17 | parser.add_argument('--output_path', type=str, default='/nas2/jacob/DALES', help='original dataset path') 18 | parser.add_argument('--grid_size', type=float, default=0.250, help='sampling grid size default=0.2') 19 | FLAGS = parser.parse_args() 20 | dataset_path = FLAGS.dataset_path 21 | preparation_types = ['grid'] # Grid sampling & Random sampling 22 | grid_size = FLAGS.grid_size 23 | random_sample_ratio = 10 24 | train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))]) 25 | test_files = np.sort([join(dataset_path, 'test', i) for i in os.listdir(join(dataset_path, 'test'))]) 26 | files = np.sort(np.hstack((train_files, test_files))) 27 | 28 | for sample_type in preparation_types: 29 | for pc_path in files: 30 | cloud_name = 
pc_path.split('/')[-1][:-4] 31 | print('start to process:', cloud_name) 32 | 33 | # create output directory 34 | out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size)) 35 | os.makedirs(out_folder) if not exists(out_folder) else None 36 | 37 | # check if it has already calculated 38 | if exists(join(out_folder, cloud_name + '_KDTree.pkl')): 39 | print(cloud_name, 'already exists, skipped') 40 | continue 41 | 42 | xyz, labels = DP.read_ply_data(pc_path, with_rgb=False) 43 | 44 | #os.makedirs(join(out_folder, 'pt_clouds')) if not exists(join(out_folder, 'pt_clouds')) else None 45 | sub_ply_file = join(out_folder, cloud_name + '.ply') 46 | #if sample_type == 'grid': 47 | sub_xyz, sub_labels = DP.grid_sub_sampling(xyz, None, labels, grid_size) 48 | #elif sample_type == 'raw': 49 | # print('processing raw files') 50 | # sub_xyz, sub_i, sub_labels = xyz, i, labels 51 | #else: 52 | # sub_xyz, sub_i, sub_labels = DP.random_sub_sampling(xyz, i, labels, random_sample_ratio) 53 | 54 | #sub_rgb = sub_rgb / 255.0 55 | sub_labels = np.squeeze(sub_labels) 56 | #write_ply(sub_ply_file, [sub_xyz, sub_i, sub_labels], ['x', 'y', 'z', 'num_return', 'class']) 57 | write_ply(sub_ply_file, [sub_xyz, sub_labels], ['x', 'y', 'z', 'class']) 58 | 59 | search_tree = KDTree(sub_xyz, leaf_size=50) 60 | #os.makedirs(join(out_folder, 'KDTree')) if not exists(join(out_folder, 'KDTree')) else None 61 | kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl') 62 | with open(kd_tree_file, 'wb') as f: 63 | pickle.dump(search_tree, f) 64 | 65 | proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False)) 66 | proj_idx = proj_idx.astype(np.int32) 67 | #os.makedirs(join(out_folder, 'proj')) if not exists(join(out_folder, 'proj')) else None 68 | proj_save = join(out_folder, cloud_name + '_proj.pkl') 69 | with open(proj_save, 'wb') as f: 70 | pickle.dump([proj_idx, labels], f) -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling/grid_subsampling.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "grid_subsampling.h" 3 | 4 | 5 | void grid_subsampling(vector& original_points, 6 | vector& subsampled_points, 7 | vector& original_features, 8 | vector& subsampled_features, 9 | vector& original_classes, 10 | vector& subsampled_classes, 11 | float sampleDl, 12 | int verbose) { 13 | 14 | // Initiate variables 15 | // ****************** 16 | 17 | // Number of points in the cloud 18 | size_t N = original_points.size(); 19 | 20 | // Dimension of the features 21 | size_t fdim = original_features.size() / N; 22 | size_t ldim = original_classes.size() / N; 23 | 24 | // Limits of the cloud 25 | PointXYZ minCorner = min_point(original_points); 26 | PointXYZ maxCorner = max_point(original_points); 27 | PointXYZ originCorner = floor(minCorner * (1/sampleDl)) * sampleDl; 28 | 29 | // Dimensions of the grid 30 | size_t sampleNX = (size_t)floor((maxCorner.x - originCorner.x) / sampleDl) + 1; 31 | size_t sampleNY = (size_t)floor((maxCorner.y - originCorner.y) / sampleDl) + 1; 32 | //size_t sampleNZ = (size_t)floor((maxCorner.z - originCorner.z) / sampleDl) + 1; 33 | 34 | // Check if features and classes need to be processed 35 | bool use_feature = original_features.size() > 0; 36 | bool use_classes = original_classes.size() > 0; 37 | 38 | 39 | // Create the sampled map 40 | // ********************** 41 | 42 | // Verbose parameters 43 | int i = 0; 44 | int nDisp = N / 100; 45 | 46 | // 
-------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/grid_subsampling/grid_subsampling.cpp: --------------------------------------------------------------------------------
1 | 
2 | #include "grid_subsampling.h"
3 | 
4 | 
5 | void grid_subsampling(vector<PointXYZ>& original_points,
6 |                       vector<PointXYZ>& subsampled_points,
7 |                       vector<float>& original_features,
8 |                       vector<float>& subsampled_features,
9 |                       vector<int>& original_classes,
10 |                      vector<int>& subsampled_classes,
11 |                      float sampleDl,
12 |                      int verbose) {
13 | 
14 |     // Initiate variables
15 |     // ******************
16 | 
17 |     // Number of points in the cloud
18 |     size_t N = original_points.size();
19 | 
20 |     // Dimension of the features
21 |     size_t fdim = original_features.size() / N;
22 |     size_t ldim = original_classes.size() / N;
23 | 
24 |     // Limits of the cloud
25 |     PointXYZ minCorner = min_point(original_points);
26 |     PointXYZ maxCorner = max_point(original_points);
27 |     PointXYZ originCorner = floor(minCorner * (1/sampleDl)) * sampleDl;
28 | 
29 |     // Dimensions of the grid
30 |     size_t sampleNX = (size_t)floor((maxCorner.x - originCorner.x) / sampleDl) + 1;
31 |     size_t sampleNY = (size_t)floor((maxCorner.y - originCorner.y) / sampleDl) + 1;
32 |     //size_t sampleNZ = (size_t)floor((maxCorner.z - originCorner.z) / sampleDl) + 1;
33 | 
34 |     // Check if features and classes need to be processed
35 |     bool use_feature = original_features.size() > 0;
36 |     bool use_classes = original_classes.size() > 0;
37 | 
38 | 
39 |     // Create the sampled map
40 |     // **********************
41 | 
42 |     // Verbose parameters
43 |     int i = 0;
44 |     int nDisp = N / 100;
45 | 
46 |     // Initiate variables
47 |     size_t iX, iY, iZ, mapIdx;
48 |     unordered_map<size_t, SampledData> data;
49 | 
50 |     for (auto& p : original_points)
51 |     {
52 |         // Position of point in sample map
53 |         iX = (size_t)floor((p.x - originCorner.x) / sampleDl);
54 |         iY = (size_t)floor((p.y - originCorner.y) / sampleDl);
55 |         iZ = (size_t)floor((p.z - originCorner.z) / sampleDl);
56 |         mapIdx = iX + sampleNX*iY + sampleNX*sampleNY*iZ;
57 | 
58 |         // If not already created, create key
59 |         if (data.count(mapIdx) < 1)
60 |             data.emplace(mapIdx, SampledData(fdim, ldim));
61 | 
62 |         // Fill the sample map
63 |         if (use_feature && use_classes)
64 |             data[mapIdx].update_all(p, original_features.begin() + i * fdim, original_classes.begin() + i * ldim);
65 |         else if (use_feature)
66 |             data[mapIdx].update_features(p, original_features.begin() + i * fdim);
67 |         else if (use_classes)
68 |             data[mapIdx].update_classes(p, original_classes.begin() + i * ldim);
69 |         else
70 |             data[mapIdx].update_points(p);
71 | 
72 |         // Display
73 |         i++;
74 |         if (verbose > 1 && i%nDisp == 0)
75 |             std::cout << "\rSampled Map : " << std::setw(3) << i / nDisp << "%";
76 | 
77 |     }
78 | 
79 |     // Divide for barycentre and transfer to a vector
80 |     subsampled_points.reserve(data.size());
81 |     if (use_feature)
82 |         subsampled_features.reserve(data.size() * fdim);
83 |     if (use_classes)
84 |         subsampled_classes.reserve(data.size() * ldim);
85 |     for (auto& v : data)
86 |     {
87 |         subsampled_points.push_back(v.second.point * (1.0 / v.second.count));
88 |         if (use_feature)
89 |         {
90 |             float count = (float)v.second.count;
91 |             transform(v.second.features.begin(),
92 |                       v.second.features.end(),
93 |                       v.second.features.begin(),
94 |                       [count](float f) { return f / count;});
95 |             subsampled_features.insert(subsampled_features.end(),v.second.features.begin(),v.second.features.end());
96 |         }
97 |         if (use_classes)
98 |         {
99 |             for (int i = 0; i < ldim; i++)
100 |                subsampled_classes.push_back(max_element(v.second.labels[i].begin(), v.second.labels[i].end(),
101 |                    [](const pair<int, int>&a, const pair<int, int>&b){return a.second < b.second;})->first);
102 |        }
103 |    }
104 | 
105 |    return;
106 | }
107 | 
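For reference, the C++ above buckets every point into a voxel of edge length `sampleDl` (`mapIdx` is a flattened voxel key), averages the member points per voxel, and takes a majority vote over the labels. A rough, unoptimized numpy equivalent of the barycenter variant, for illustration only (not part of the pipeline):

```
import numpy as np

def grid_subsample_barycenter(points, labels, sample_dl):
    """Voxel-grid subsampling: mean xyz per voxel, majority label per voxel."""
    voxels = np.floor(points / sample_dl).astype(np.int64)
    # one group id per occupied voxel (same role as mapIdx in the C++)
    _, inverse = np.unique(voxels, axis=0, return_inverse=True)
    inverse = inverse.reshape(-1)
    n_voxels = inverse.max() + 1
    sub_xyz = np.zeros((n_voxels, 3))
    np.add.at(sub_xyz, inverse, points)          # accumulate xyz per voxel
    sub_xyz /= np.bincount(inverse)[:, None]     # divide for the barycenter
    # majority vote per voxel (labels assumed to be non-negative ints)
    sub_labels = np.array([np.bincount(labels[inverse == v]).argmax()
                           for v in range(n_voxels)])
    return sub_xyz, sub_labels
```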
-------------------------------------------------------------------------------- /data_processing/input_preparation_Galaxy.py: --------------------------------------------------------------------------------
1 | from os.path import join, exists, dirname, abspath
2 | from sklearn.neighbors import KDTree
3 | import numpy as np
4 | import os, pickle, argparse, sys
5 | 
6 | BASE_DIR = dirname(abspath(__file__))
7 | ROOT_DIR = dirname(BASE_DIR)
8 | sys.path.append(BASE_DIR)
9 | sys.path.append(ROOT_DIR)
10 | 
11 | from helper_ply import write_ply
12 | from tool import DataProcessing as DP
13 | 
14 | if __name__ == '__main__':
15 |     parser = argparse.ArgumentParser()
16 |     parser.add_argument('--dataset_path', type=str, default='/nas2/jacob/data/Galaxy_Data', help='original dataset path')
17 |     parser.add_argument('--output_path', type=str, default='/nas2/jacob/data/Galaxy_Data', help='output path')
18 |     parser.add_argument('--grid_size', type=float, default=0.2, help='sampling grid size, default=0.2')
19 |     FLAGS = parser.parse_args()
20 |     dataset_path = FLAGS.dataset_path
21 |     preparation_types = ['grid']  # Grid sampling & Random sampling
22 |     grid_size = FLAGS.grid_size
23 |     random_sample_ratio = 10
24 |     train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))])
25 |     train_length = train_files.shape[0]
26 |     test_files = np.sort([join(dataset_path, 'val', i) for i in os.listdir(join(dataset_path, 'val'))])
27 |     files = np.sort(np.hstack((train_files, test_files)))
28 |     total_length = files.shape[0]
29 |     c = 0
30 |     total_org = 0
31 |     total_sub = 0
32 |     print('grid_size:', grid_size)
33 |     print('training files')
34 |     for sample_type in preparation_types:
35 |         for pc_path in files:
36 |             if c == train_length:
37 |                 print('total points' + '| ' + str(total_org) + '| ' + str(total_sub))
38 |                 print('testing files')
39 |                 total_org = 0
40 |                 total_sub = 0
41 |             cloud_name = pc_path.split('/')[-1][:-4]
42 |             #print('start to process:', cloud_name)
43 | 
44 |             # create output directory
45 |             out_folder = join(FLAGS.output_path, sample_type + '_{:.3f}'.format(grid_size))
46 |             os.makedirs(out_folder) if not exists(out_folder) else None
47 | 
48 |             # check whether this cloud has already been processed
49 |             if exists(join(out_folder, cloud_name + '_KDTree.pkl')):
50 |                 print(cloud_name, 'already exists, skipped')
51 |                 continue
52 | 
53 |             xyz, i, labels = DP.read_las_data(pc_path)
54 | 
55 |             org_point_number = labels.shape[0]
56 |             #os.makedirs(join(out_folder, 'pt_clouds')) if not exists(join(out_folder, 'pt_clouds')) else None
57 |             sub_ply_file = join(out_folder, cloud_name + '.ply')
58 |             sub_xyz, sub_i, sub_labels = DP.grid_sub_sampling(xyz, i, labels, grid_size)
59 | 
60 | 
61 |             sub_point_number = sub_labels.shape[0]
62 |             total_org += org_point_number
63 |             total_sub += sub_point_number
64 |             print(cloud_name + '| ' + str(org_point_number) + '| ' + str(sub_point_number))
65 |             c += 1
66 |             if c == total_length:
67 |                 print('total points' + '| ' + str(total_org) + '| ' + str(total_sub))
68 | 
69 | 
70 |             #sub_rgb = sub_rgb / 255.0
71 |             sub_labels = np.squeeze(sub_labels)
72 |             write_ply(sub_ply_file, [sub_xyz, sub_i, sub_labels], ['x', 'y', 'z', 'intensity', 'num_return', 'class'])
73 | 
74 |             search_tree = KDTree(sub_xyz, leaf_size=50)
75 |             #os.makedirs(join(out_folder, 'KDTree')) if not exists(join(out_folder, 'KDTree')) else None
76 |             kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl')
77 |             with open(kd_tree_file, 'wb') as f:
78 |                 pickle.dump(search_tree, f)
79 | 
80 |             proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
81 |             proj_idx = proj_idx.astype(np.int32)
82 |             #os.makedirs(join(out_folder, 'proj')) if not exists(join(out_folder, 'proj')) else None
83 |             proj_save = join(out_folder, cloud_name + '_proj.pkl')
84 |             with open(proj_save, 'wb') as f:
85 |                 pickle.dump([proj_idx, labels], f)
86 | 
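To inspect what this script wrote, the subsampled cloud can be read back field by field. A small sketch, assuming `read_ply` in helper_ply.py is the reader counterpart of the `write_ply` used above, and returning the fields written by this script; the path is an example:

```
import numpy as np
from helper_ply import read_ply  # assumed counterpart of write_ply

data = read_ply('grid_0.200/some_cloud.ply')              # example output path
sub_xyz = np.vstack((data['x'], data['y'], data['z'])).T  # (N, 3) coordinates
sub_i = np.vstack((data['intensity'], data['num_return'])).T
sub_labels = data['class']
```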
-------------------------------------------------------------------------------- /utils/nearest_neighbors/knn.pyx: --------------------------------------------------------------------------------
1 | # distutils: language = c++
2 | # distutils: sources = knn.cxx
3 | 
4 | import numpy as np
5 | cimport numpy as np
6 | import cython
7 | 
8 | cdef extern from "knn_.h":
9 |     void cpp_knn(const float* points, const size_t npts, const size_t dim,
10 |                  const float* queries, const size_t nqueries,
11 |                  const size_t K, long* indices)
12 | 
13 |     void cpp_knn_omp(const float* points, const size_t npts, const size_t dim,
14 |                      const float* queries, const size_t nqueries,
15 |                      const size_t K, long* indices)
16 | 
17 |     void cpp_knn_batch(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim,
18 |                        const float* queries, const size_t nqueries,
19 |                        const size_t K, long* batch_indices)
20 | 
21 |     void cpp_knn_batch_omp(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim,
22 |                            const float* queries, const size_t nqueries,
23 |                            const size_t K, long* batch_indices)
24 | 
25 |     void cpp_knn_batch_distance_pick(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim,
26 |                                      float* queries, const size_t nqueries,
27 |                                      const size_t K, long* batch_indices)
28 | 
29 |     void cpp_knn_batch_distance_pick_omp(const float* batch_data, const size_t batch_size, const size_t npts, const size_t dim,
30 |                                          float* batch_queries, const size_t nqueries,
31 |                                          const size_t K, long* batch_indices)
32 | 
33 | def knn(pts, queries, K, omp=False):
34 | 
35 |     # define shape parameters
36 |     cdef int npts
37 |     cdef int dim
38 |     cdef int K_cpp
39 |     cdef int nqueries
40 | 
41 |     # define tables
42 |     cdef np.ndarray[np.float32_t, ndim=2] pts_cpp
43 |     cdef np.ndarray[np.float32_t, ndim=2] queries_cpp
44 |     cdef np.ndarray[np.int64_t, ndim=2] indices_cpp
45 | 
46 |     # set shape values
47 |     npts = pts.shape[0]
48 |     nqueries = queries.shape[0]
49 |     dim = pts.shape[1]
50 |     K_cpp = K
51 | 
52 |     # create indices tensor
53 |     indices = np.zeros((queries.shape[0], K), dtype=np.int64)
54 | 
55 |     pts_cpp = np.ascontiguousarray(pts, dtype=np.float32)
56 |     queries_cpp = np.ascontiguousarray(queries, dtype=np.float32)
57 |     indices_cpp = indices
58 | 
59 |     # run the knn search
60 |     if omp:
61 |         cpp_knn_omp(<float*> pts_cpp.data, npts, dim,
62 |                     <float*> queries_cpp.data, nqueries,
63 |                     K_cpp, <long*> indices_cpp.data)
64 |     else:
65 |         cpp_knn(<float*> pts_cpp.data, npts, dim,
66 |                 <float*> queries_cpp.data, nqueries,
67 |                 K_cpp, <long*> indices_cpp.data)
68 | 
69 |     return indices
70 | 
71 | def knn_batch(pts, queries, K, omp=False):
72 | 
73 |     # define shape parameters
74 |     cdef int batch_size
75 |     cdef int npts
76 |     cdef int nqueries
77 |     cdef int K_cpp
78 |     cdef int dim
79 | 
80 |     # define tables
81 |     cdef np.ndarray[np.float32_t, ndim=3] pts_cpp
82 |     cdef np.ndarray[np.float32_t, ndim=3] queries_cpp
83 |     cdef np.ndarray[np.int64_t, ndim=3] indices_cpp
84 | 
85 |     # set shape values
86 |     batch_size = pts.shape[0]
87 |     npts = pts.shape[1]
88 |     dim = pts.shape[2]
89 |     nqueries = queries.shape[1]
90 |     K_cpp = K
91 | 
92 |     # create indices tensor
93 |     indices = np.zeros((pts.shape[0], queries.shape[1], K), dtype=np.int64)
94 | 
95 |     pts_cpp = np.ascontiguousarray(pts, dtype=np.float32)
96 |     queries_cpp = np.ascontiguousarray(queries, dtype=np.float32)
97 |     indices_cpp = indices
98 | 
99 |     # run the knn search
100 |    if omp:
101 |        cpp_knn_batch_omp(<float*> pts_cpp.data, batch_size, npts, dim,
102 |                          <float*> queries_cpp.data, nqueries,
103 |                          K_cpp, <long*> indices_cpp.data)
104 |    else:
105 |        cpp_knn_batch(<float*> pts_cpp.data, batch_size, npts, dim,
106 |                      <float*> queries_cpp.data, nqueries,
107 |                      K_cpp, <long*> indices_cpp.data)
108 | 
109 |    return indices
110 | 
111 | def knn_batch_distance_pick(pts, nqueries, K, omp=False):
112 | 
113 |     # define shape parameters
114 |     cdef int batch_size
115 |     cdef int npts
116 |     cdef int nqueries_cpp
117 |     cdef int K_cpp
118 |     cdef int dim
119 | 
120 |     # define tables
121 |     cdef np.ndarray[np.float32_t, ndim=3] pts_cpp
122 |     cdef np.ndarray[np.float32_t, ndim=3] queries_cpp
123 |     cdef np.ndarray[np.int64_t, ndim=3] indices_cpp
124 | 
125 |     # set shape values
126 |     batch_size = pts.shape[0]
127 |     npts = pts.shape[1]
128 |     dim = pts.shape[2]
129 |     nqueries_cpp = nqueries
130 |     K_cpp = K
131 | 
132 |     # create indices tensor
133 |     indices = np.zeros((pts.shape[0], nqueries, K), dtype=np.int64)
134 |     queries = np.zeros((pts.shape[0], nqueries, dim), dtype=np.float32)
135 | 
136 |     pts_cpp = np.ascontiguousarray(pts, dtype=np.float32)
137 |     queries_cpp = np.ascontiguousarray(queries, dtype=np.float32)
138 |     indices_cpp = indices
139 | 
140 |     if omp:
141 |         cpp_knn_batch_distance_pick_omp(<float*> pts_cpp.data, batch_size, npts, dim,
142 |                                         <float*> queries_cpp.data, nqueries,
143 |                                         K_cpp, <long*> indices_cpp.data)
144 |     else:
145 |         cpp_knn_batch_distance_pick(<float*> pts_cpp.data, batch_size, npts, dim,
146 |                                     <float*> queries_cpp.data, nqueries,
147 |                                     K_cpp, <long*> indices_cpp.data)
148 | 
149 |     return indices, queries
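A quick usage sketch for the wrappers defined above. The module name `nearest_neighbors` comes from the build configuration, but the exact import path after `setup.py install --home="."` is assumed; the shapes follow the definitions in this file:

```
import numpy as np
import nearest_neighbors  # assumed importable from utils/nearest_neighbors/lib/python

pts = np.random.rand(1000, 3).astype(np.float32)
queries = np.random.rand(100, 3).astype(np.float32)
idx = nearest_neighbors.knn(pts, queries, 16, omp=True)        # (100, 16) int64

batch_pts = np.random.rand(4, 1000, 3).astype(np.float32)
batch_queries = np.random.rand(4, 100, 3).astype(np.float32)
bidx = nearest_neighbors.knn_batch(batch_pts, batch_queries, 16, omp=True)  # (4, 100, 16)
```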
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Human Vision Based 3D Point Cloud Semantic Segmentation of Large-Scale Outdoor Scenes (CVPR PCV Workshop 2023)
2 | 
3 | This is the official GitHub page of **EyeNet** (CVPR PCV Workshop 2023, Oral and Poster presentation), an efficient and effective human vision-inspired 3D semantic segmentation network for point clouds. For more details, please refer to our paper ([CVPR PCV Workshop 2023](https://openaccess.thecvf.com/content/CVPR2023W/PCV/html/Yoo_Human_Vision_Based_3D_Point_Cloud_Semantic_Segmentation_of_Large-Scale_CVPRW_2023_paper.html)).
4 | 
5 | ### Preparation
6 | 
7 | - Clone this repository.
8 | 
9 | Set up the environment on your own as follows.
10 | 
11 | The code has been tested with Python 3.7, TensorFlow 1.11, CUDA 10.2 and cuDNN 7.4.1 on Ubuntu 16.04.
12 | 
13 | 
14 | 
15 | - Create the conda environment:
16 | 
17 | ```
18 | conda create -n eyenet python=3.5
19 | source activate eyenet
20 | ```
21 | 
22 | - Update pip:
23 | 
24 | ```
25 | curl https://bootstrap.pypa.io/pip/3.5/get-pip.py -o get-pip.py
26 | python get-pip.py
27 | ```
28 | 
29 | - Install the required libraries and compile the custom ops:
30 | 
31 | ```
32 | pip install -r helper_requirements.txt
33 | conda install -c conda-forge zip
34 | sh compile_op.sh
35 | conda install cudatoolkit=9.0
36 | ```
37 | 
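- Optionally, verify that the custom ops are importable before moving on. A sketch; the paths below assume the default build layout produced by compile_op.sh:

```
import sys
sys.path.append('utils/nearest_neighbors/lib/python')
sys.path.append('utils/cpp_wrappers/cpp_subsampling')

import nearest_neighbors   # KNN_NanoFLANN extension
import grid_subsampling    # grid subsampling extension
print('custom ops imported OK')
```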
38 | ### SensatUrban
39 | 
40 | - Download the SensatUrban dataset from the official website (https://github.com/QingyongHu/SensatUrban).
41 | 
42 | - cambridge_block_0.ply and cambridge_block_1.ply contain less than 4 MB of data, so they have to be removed before processing.
43 | 
44 | - Pre-process the dataset (grid sampling) by running:
45 | ```
46 | python data_processing/input_preparation_Sensat.py --dataset_path "YOUR_DATA_PATH" --output_path "YOUR_OUTPUT_PATH"
47 | ```
48 | Note: the grid size can also be adjusted; for details, please refer to the code.
49 | 
50 | - Start Training:
51 | 
52 | ```
53 | python main_Sensat.py
54 | ```
55 | 
56 | Note: before training, please set data_set_dir (line 21 of tool.py) to your sampled data directory.
57 | 
58 | 
59 | - Start Evaluation on the validation set (for visualization):
60 | 
61 | ```
62 | python main_Sensat.py --mode val --model_path "YOUR_SAVED_MODEL"
63 | ```
64 | 
65 | Note: saved models are located in the "trained_weights/Sensat" folder.
66 | 
67 | Note: the "YOUR_SAVED_MODEL" path has to include snap-NumberOfSteps, e.g. trained_weights/Sensat/First_Train/snapshots/snap-17001.
68 | 
69 | 
70 | - Start Evaluation on the test set:
71 | 
72 | ```
73 | python main_Sensat.py --mode test --model_path "YOUR_SAVED_MODEL"
74 | ```
75 | 
76 | - The folder containing the submission file will be saved in the "test" folder.
77 | - This command generates the submission file for the online evaluation server (https://codalab.lisn.upsaclay.fr/competitions/7113).
78 | 
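- The reported scores are mean IoU over classes, computed from a class confusion matrix. The repository's own implementation lives in tool.py (DP.IoU_from_confusions, used by the tester scripts); the standard computation it corresponds to is, as a sketch:

```
import numpy as np

def iou_from_confusion(C):
    """Per-class IoU from confusion matrix C (rows: ground truth, cols: predictions)."""
    TP = np.diag(C).astype(np.float32)
    FP = C.sum(axis=0) - TP
    FN = C.sum(axis=1) - TP
    return TP / (TP + FP + FN + 1e-6)  # the mean of this vector is the mIoU
```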
79 | 
80 | 
81 | ### DALES
82 | - Download the LAS version of the DALES dataset from https://udayton.edu/engineering/research/centers/vision_lab/research/was_data_analysis_and_processing/dale.php. The PLY version does not include the number-of-returns feature.
83 | 
84 | - Pre-process the dataset (grid sampling) by running:
85 | 
86 | ```
87 | python data_processing/input_preparation_DALES.py --dataset_path "YOUR_DATA_PATH" --output_path "YOUR_OUTPUT_PATH"
88 | ```
89 | 
90 | 
91 | - Start Training:
92 | 
93 | ```
94 | python main_DALES.py
95 | ```
96 | Note: before training, please set data_set_dir (line 73 of tool.py) to your sampled data directory.
97 | 
98 | 
99 | - Start Evaluation:
100 | ```
101 | python main_DALES.py --mode test --model_path "YOUR_SAVED_MODEL"
102 | ```
103 | Note: saved models are located in the "trained_weights/DALES" folder.
104 | Note: the "YOUR_SAVED_MODEL" path has to include snap-NumberOfSteps, e.g. trained_weights/DALES/First_Train/snapshots/snap-17001.
105 | 
106 | - The evaluation results will be saved in the "test" folder.
107 | 
108 | 
109 | ### Toronto3D
110 | - If you have access to our Nas2 server, you can download the full dataset from NAS2/VM/jacob/data/Toronto3D.
111 | - Pre-process the dataset (grid sampling) by running:
112 | ```
113 | python data_processing/input_preparation_Toronto3D.py --dataset_path "YOUR_DATA_PATH" --output_path "YOUR_OUTPUT_PATH"
114 | ```
115 | 
116 | - Start Training:
117 | 
118 | ```
119 | python main_Toronto3D.py
120 | ```
121 | Note: before training, please set data_set_dir (line 125 of tool.py) to your sampled data directory.
122 | 
123 | 
124 | - Start Evaluation:
125 | ```
126 | python main_Toronto3D.py --mode test --model_path "YOUR_SAVED_MODEL"
127 | ```
128 | Note: saved models are located in the "trained_weights/Toronto3D" folder.
129 | Note: the "YOUR_SAVED_MODEL" path has to include snap-NumberOfSteps, e.g. trained_weights/Toronto3D/First_Train/snapshots/snap-17001.
130 | 
131 | - The evaluation results will be saved in the "test" folder.
132 | 
133 | ### Citation
134 | Thank you for showing interest in our work. Please consider citing:
135 | ```
136 | @InProceedings{Yoo_2023_CVPR,
137 |     author    = {Yoo, Sunghwan and Jeong, Yeonjeong and Jameela, Maryam and Sohn, Gunho},
138 |     title     = {Human Vision Based 3D Point Cloud Semantic Segmentation of Large-Scale Outdoor Scenes},
139 |     booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
140 |     month     = {June},
141 |     year      = {2023},
142 |     pages     = {6576-6585}
143 | }
144 | ```
145 | 
146 | 
147 | ### Acknowledgement
148 | Part of our work builds on [nanoflann](https://github.com/jlblancoc/nanoflann) and [RandLA-Net](https://github.com/QingyongHu/RandLA-Net).
149 | 
150 | ### Updates
151 | * 9/17/2023: Code uploaded!
152 | 
153 | 
154 | ### License
155 | Shield: [![CC BY-NC-SA 4.0][cc-by-nc-sa-shield]][cc-by-nc-sa]
156 | 
157 | This work is licensed under a
158 | [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License][cc-by-nc-sa].
159 | 160 | [![CC BY-NC-SA 4.0][cc-by-nc-sa-image]][cc-by-nc-sa] 161 | 162 | [cc-by-nc-sa]: http://creativecommons.org/licenses/by-nc-sa/4.0/ 163 | [cc-by-nc-sa-image]: https://licensebuttons.net/l/by-nc-sa/4.0/88x31.png 164 | [cc-by-nc-sa-shield]: https://img.shields.io/badge/License-CC%20BY--NC--SA%204.0-lightgrey.svg 165 | 166 | -------------------------------------------------------------------------------- /Lovasz_losses_tf.py: -------------------------------------------------------------------------------- 1 | """ 2 | Lovasz-Softmax and Jaccard hinge loss in Tensorflow 3 | Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License) 4 | """ 5 | 6 | from __future__ import print_function, division 7 | 8 | import tensorflow as tf 9 | import numpy as np 10 | 11 | 12 | def lovasz_grad(gt_sorted): 13 | """ 14 | Computes gradient of the Lovasz extension w.r.t sorted errors 15 | See Alg. 1 in paper 16 | """ 17 | gts = tf.reduce_sum(gt_sorted) 18 | intersection = gts - tf.cumsum(gt_sorted) 19 | union = gts + tf.cumsum(1. - gt_sorted) 20 | jaccard = 1. - intersection / union 21 | jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0) 22 | return jaccard 23 | 24 | 25 | # --------------------------- BINARY LOSSES --------------------------- 26 | 27 | 28 | def lovasz_hinge(logits, labels, per_image=True, ignore=None): 29 | """ 30 | Binary Lovasz hinge loss 31 | logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) 32 | labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) 33 | per_image: compute the loss per image instead of per batch 34 | ignore: void class id 35 | """ 36 | if per_image: 37 | def treat_image(log_lab): 38 | log, lab = log_lab 39 | log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0) 40 | log, lab = flatten_binary_scores(log, lab, ignore) 41 | return lovasz_hinge_flat(log, lab) 42 | 43 | losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32) 44 | loss = tf.reduce_mean(losses) 45 | else: 46 | loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) 47 | return loss 48 | 49 | 50 | def lovasz_hinge_flat(logits, labels): 51 | """ 52 | Binary Lovasz hinge loss 53 | logits: [P] Variable, logits at each prediction (between -\infty and +\infty) 54 | labels: [P] Tensor, binary ground truth labels (0 or 1) 55 | ignore: label to ignore 56 | """ 57 | 58 | def compute_loss(): 59 | labelsf = tf.cast(labels, logits.dtype) 60 | signs = 2. * labelsf - 1. 61 | errors = 1. 
- logits * tf.stop_gradient(signs) 62 | errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort") 63 | gt_sorted = tf.gather(labelsf, perm) 64 | grad = lovasz_grad(gt_sorted) 65 | loss = tf.tensordot(tf.nn.relu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void") 66 | return loss 67 | 68 | # deal with the void prediction case (only void pixels) 69 | loss = tf.cond(tf.equal(tf.shape(logits)[0], 0), 70 | lambda: tf.reduce_sum(logits) * 0., 71 | compute_loss, 72 | strict=True, 73 | name="loss" 74 | ) 75 | return loss 76 | 77 | 78 | def flatten_binary_scores(scores, labels, ignore=None): 79 | """ 80 | Flattens predictions in the batch (binary case) 81 | Remove labels equal to 'ignore' 82 | """ 83 | scores = tf.reshape(scores, (-1,)) 84 | labels = tf.reshape(labels, (-1,)) 85 | if ignore is None: 86 | return scores, labels 87 | valid = tf.not_equal(labels, ignore) 88 | vscores = tf.boolean_mask(scores, valid, name='valid_scores') 89 | vlabels = tf.boolean_mask(labels, valid, name='valid_labels') 90 | return vscores, vlabels 91 | 92 | 93 | # --------------------------- MULTICLASS LOSSES --------------------------- 94 | 95 | 96 | def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None, order='BHWC'): 97 | """ 98 | Multi-class Lovasz-Softmax loss 99 | probas: [B, H, W, C] or [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1) 100 | Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. 101 | labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) 102 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 103 | per_image: compute the loss per image instead of per batch 104 | ignore: void class labels 105 | order: use BHWC or BCHW 106 | """ 107 | if per_image: 108 | def treat_image(prob_lab): 109 | prob, lab = prob_lab 110 | prob, lab = tf.expand_dims(prob, 0), tf.expand_dims(lab, 0) 111 | prob, lab = flatten_probas(prob, lab, ignore, order) 112 | return lovasz_softmax_flat(prob, lab, classes=classes) 113 | 114 | losses = tf.map_fn(treat_image, (probas, labels), dtype=tf.float32) 115 | loss = tf.reduce_mean(losses) 116 | else: 117 | loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore, order), classes=classes) 118 | return loss 119 | 120 | 121 | def lovasz_softmax_flat(probas, labels, classes='present'): 122 | """ 123 | Multi-class Lovasz-Softmax loss 124 | probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) 125 | labels: [P] Tensor, ground truth labels (between 0 and C - 1) 126 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 
127 |     """
128 |     C = probas.shape[1]
129 |     losses = []
130 |     present = []
131 |     class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
132 |     for c in class_to_sum:
133 |         fg = tf.cast(tf.equal(labels, c), probas.dtype)  # foreground for class c
134 |         if classes == 'present':
135 |             present.append(tf.reduce_sum(fg) > 0)
136 |         if C == 1:
137 |             if len(classes) > 1:
138 |                 raise ValueError('Sigmoid output possible only with 1 class')
139 |             class_pred = probas[:, 0]
140 |         else:
141 |             class_pred = probas[:, c]
142 |         errors = tf.abs(fg - class_pred)
143 |         errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort_{}".format(c))
144 |         fg_sorted = tf.gather(fg, perm)
145 |         grad = lovasz_grad(fg_sorted)
146 |         losses.append(
147 |             tf.tensordot(errors_sorted, tf.stop_gradient(grad), 1, name="loss_class_{}".format(c))
148 |         )
149 |     if len(class_to_sum) == 1:  # short-circuit mean when only one class
150 |         return losses[0]
151 |     losses_tensor = tf.stack(losses)
152 |     if classes == 'present':
153 |         present = tf.stack(present)
154 |         losses_tensor = tf.boolean_mask(losses_tensor, present)
155 |     loss = tf.reduce_mean(losses_tensor)
156 |     return loss
157 | 
158 | 
159 | def flatten_probas(probas, labels, ignore=None, order='BHWC'):
160 |     """
161 |     Flattens predictions in the batch
162 |     """
163 |     # if len(probas.shape) == 3:
164 |     #     probas, order = tf.expand_dims(probas, 3), 'BHWC'
165 |     if order == 'BCHW':
166 |         probas = tf.transpose(probas, (0, 2, 3, 1), name="BCHW_to_BHWC")
167 |         order = 'BHWC'
168 |     if order != 'BHWC':
169 |         raise NotImplementedError('Order {} unknown'.format(order))
170 |     C = probas.get_shape()[-1].value
171 |     probas = tf.reshape(probas, (-1, C))
172 |     labels = tf.reshape(labels, (-1,))
173 |     if ignore is None:
174 |         return probas, labels
175 |     valid = tf.not_equal(labels, ignore)
176 |     vprobas = tf.boolean_mask(probas, valid, name='valid_probas')
177 |     vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
178 |     return vprobas, vlabels
-------------------------------------------------------------------------------- /utils/nearest_neighbors/knn_.cxx: --------------------------------------------------------------------------------
1 | 
2 | #include "knn_.h"
3 | #include "nanoflann.hpp"
4 | using namespace nanoflann;
5 | 
6 | #include "KDTreeTableAdaptor.h"
7 | 
8 | #include <cstdio>
9 | #include <cstdlib>
10 | #include <cstring>
11 | #include <cmath>
12 | 
13 | #include <vector>
14 | #include <map>
15 | #include <algorithm>
16 | #include <iostream>
17 | 
18 | using namespace std;
19 | 
20 | 
21 | 
22 | void cpp_knn(const float* points, const size_t npts, const size_t dim,
23 |              const float* queries, const size_t nqueries,
24 |              const size_t K, long* indices){
25 | 
26 |     // create the kdtree
27 |     typedef KDTreeTableAdaptor< float, float> KDTree;
28 |     KDTree mat_index(npts, dim, points, 10);
29 |     mat_index.index->buildIndex();
30 | 
31 |     std::vector<float> out_dists_sqr(K);
32 |     std::vector<size_t> out_ids(K);
33 | 
34 |     // iterate over the points
35 |     for(size_t i=0; i<nqueries; i++){
36 | 
37 |         nanoflann::KNNResultSet<float> resultSet(K);
38 |         resultSet.init(&out_ids[0], &out_dists_sqr[0] );
39 |         mat_index.index->findNeighbors(resultSet, &queries[i*dim], nanoflann::SearchParams(10));
40 |         for(size_t j=0; j<K; j++){
41 |             indices[i*K+j] = long(out_ids[j]);
42 |         }
43 |     }
44 | 
45 | }
46 | 
47 | void cpp_knn_omp(const float* points, const size_t npts, const size_t dim,
48 |                  const float* queries, const size_t nqueries,
49 |                  const size_t K, long* indices){
50 |     // create the kdtree
51 |     typedef KDTreeTableAdaptor< float, float> KDTree;
52 |     KDTree mat_index(npts, dim, points, 10);
53 |     mat_index.index->buildIndex();
54 | 
55 | 
56 |     // iterate over the points
57 |     # pragma omp parallel for
58 |     for(size_t i=0; i<nqueries; i++){
59 |         std::vector<size_t> out_ids(K);
60 |         std::vector<float> out_dists_sqr(K);
61 | 
62 |         nanoflann::KNNResultSet<float> resultSet(K);
63 |         resultSet.init(&out_ids[0], &out_dists_sqr[0] );
64 |         mat_index.index->findNeighbors(resultSet, &queries[i*dim], 
nanoflann::SearchParams(10)); 65 | for(size_t j=0; j KDTree; 83 | KDTree mat_index(npts, dim, points, 10); 84 | 85 | mat_index.index->buildIndex(); 86 | 87 | std::vector out_dists_sqr(K); 88 | std::vector out_ids(K); 89 | 90 | // iterate over the points 91 | for(size_t i=0; i resultSet(K); 93 | resultSet.init(&out_ids[0], &out_dists_sqr[0] ); 94 | mat_index.index->findNeighbors(resultSet, &queries[bid*nqueries*dim + i*dim], nanoflann::SearchParams(10)); 95 | for(size_t j=0; j KDTree; 116 | KDTree mat_index(npts, dim, points, 10); 117 | 118 | mat_index.index->buildIndex(); 119 | 120 | std::vector out_dists_sqr(K); 121 | std::vector out_ids(K); 122 | 123 | // iterate over the points 124 | for(size_t i=0; i resultSet(K); 126 | resultSet.init(&out_ids[0], &out_dists_sqr[0] ); 127 | mat_index.index->findNeighbors(resultSet, &queries[bid*nqueries*dim + i*dim], nanoflann::SearchParams(10)); 128 | for(size_t j=0; j KDTree; 153 | KDTree tree(npts, dim, points, 10); 154 | tree.index->buildIndex(); 155 | 156 | vector used(npts, 0); 157 | int current_id = 0; 158 | for(size_t ptid=0; ptid possible_ids; 162 | while(possible_ids.size() == 0){ 163 | for(size_t i=0; i query(3); 178 | for(size_t i=0; i dists(K); 183 | std::vector ids(K); 184 | nanoflann::KNNResultSet resultSet(K); 185 | resultSet.init(&ids[0], &dists[0] ); 186 | tree.index->findNeighbors(resultSet, &query[0], nanoflann::SearchParams(10)); 187 | 188 | for(size_t i=0; i KDTree; 221 | KDTree tree(npts, dim, points, 10); 222 | tree.index->buildIndex(); 223 | 224 | vector used(npts, 0); 225 | int current_id = 0; 226 | for(size_t ptid=0; ptid possible_ids; 230 | while(possible_ids.size() == 0){ 231 | for(size_t i=0; i query(3); 246 | for(size_t i=0; i dists(K); 251 | std::vector ids(K); 252 | nanoflann::KNNResultSet resultSet(K); 253 | resultSet.init(&ids[0], &dists[0] ); 254 | tree.index->findNeighbors(resultSet, &query[0], nanoflann::SearchParams(10)); 255 | 256 | for(size_t i=0; i 34 | 35 | // ===== This example shows how to use nanoflann with these types of containers: ======= 36 | //typedef std::vector > my_vector_of_vectors_t; 37 | //typedef std::vector my_vector_of_vectors_t; // This requires #include 38 | // ===================================================================================== 39 | 40 | 41 | /** A simple vector-of-vectors adaptor for nanoflann, without duplicating the storage. 42 | * The i'th vector represents a point in the state space. 43 | * 44 | * \tparam DIM If set to >0, it specifies a compile-time fixed dimensionality for the points in the data set, allowing more compiler optimizations. 45 | * \tparam num_t The type of the point coordinates (typically, double or float). 46 | * \tparam Distance The distance metric to use: nanoflann::metric_L1, nanoflann::metric_L2, nanoflann::metric_L2_Simple, etc. 47 | * \tparam IndexType The type for indices in the KD-tree index (typically, size_t of int) 48 | */ 49 | // template 50 | // struct KDTreeVectorAdaptor 51 | // { 52 | // typedef KDTreeVectorAdaptor self_t; 53 | // typedef typename Distance::template traits::distance_t metric_t; 54 | // typedef nanoflann::KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t; 55 | 56 | // index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index. 
57 | // size_t dims; 58 | 59 | // /// Constructor: takes a const ref to the vector of vectors object with the data points 60 | // KDTreeVectorAdaptor(const size_t dims /* dimensionality */, const VectorType &mat, const int leaf_max_size = 10) : m_data(mat) 61 | // { 62 | // assert(mat.size() != 0); 63 | // this->dims= dims; 64 | // index = new index_t( static_cast(dims), *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); 65 | // index->buildIndex(); 66 | // } 67 | 68 | // ~KDTreeVectorAdaptor() { 69 | // delete index; 70 | // } 71 | 72 | // const VectorType &m_data; 73 | 74 | // /** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]). 75 | // * Note that this is a short-cut method for index->findNeighbors(). 76 | // * The user can also call index->... methods as desired. 77 | // * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. 78 | // */ 79 | // inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const 80 | // { 81 | // nanoflann::KNNResultSet resultSet(num_closest); 82 | // resultSet.init(out_indices, out_distances_sq); 83 | // index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); 84 | // } 85 | 86 | // /** @name Interface expected by KDTreeSingleIndexAdaptor 87 | // * @{ */ 88 | 89 | // const self_t & derived() const { 90 | // return *this; 91 | // } 92 | // self_t & derived() { 93 | // return *this; 94 | // } 95 | 96 | // // Must return the number of data points 97 | // inline size_t kdtree_get_point_count() const { 98 | // return m_data.size()/this->dims; 99 | // } 100 | 101 | // // Returns the dim'th component of the idx'th point in the class: 102 | // inline num_t kdtree_get_pt(const size_t idx, const size_t dim) const { 103 | // return m_data[idx*this->dims + dim]; 104 | // } 105 | 106 | // // Optional bounding-box computation: return false to default to a standard bbox computation loop. 107 | // // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. 108 | // // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) 109 | // template 110 | // bool kdtree_get_bbox(BBOX & /*bb*/) const { 111 | // return false; 112 | // } 113 | 114 | // /** @} */ 115 | 116 | // }; // end of KDTreeVectorOfVectorsAdaptor 117 | 118 | 119 | 120 | 121 | template 122 | struct KDTreeTableAdaptor 123 | { 124 | typedef KDTreeTableAdaptor self_t; 125 | typedef typename Distance::template traits::distance_t metric_t; 126 | typedef nanoflann::KDTreeSingleIndexAdaptor< metric_t,self_t,DIM,IndexType> index_t; 127 | 128 | index_t* index; //! The kd-tree index for the user to call its methods as usual with any other FLANN index. 
129 | size_t dim; 130 | size_t npts; 131 | const TableType* m_data; 132 | 133 | /// Constructor: takes a const ref to the vector of vectors object with the data points 134 | KDTreeTableAdaptor(const size_t npts, const size_t dim, const TableType* mat, const int leaf_max_size = 10) : m_data(mat), dim(dim), npts(npts) 135 | { 136 | assert(npts != 0); 137 | index = new index_t( static_cast(dim), *this /* adaptor */, nanoflann::KDTreeSingleIndexAdaptorParams(leaf_max_size ) ); 138 | index->buildIndex(); 139 | } 140 | 141 | ~KDTreeTableAdaptor() { 142 | delete index; 143 | } 144 | 145 | 146 | /** Query for the \a num_closest closest points to a given point (entered as query_point[0:dim-1]). 147 | * Note that this is a short-cut method for index->findNeighbors(). 148 | * The user can also call index->... methods as desired. 149 | * \note nChecks_IGNORED is ignored but kept for compatibility with the original FLANN interface. 150 | */ 151 | inline void query(const num_t *query_point, const size_t num_closest, IndexType *out_indices, num_t *out_distances_sq, const int nChecks_IGNORED = 10) const 152 | { 153 | nanoflann::KNNResultSet resultSet(num_closest); 154 | resultSet.init(out_indices, out_distances_sq); 155 | index->findNeighbors(resultSet, query_point, nanoflann::SearchParams()); 156 | } 157 | 158 | /** @name Interface expected by KDTreeSingleIndexAdaptor 159 | * @{ */ 160 | 161 | const self_t & derived() const { 162 | return *this; 163 | } 164 | self_t & derived() { 165 | return *this; 166 | } 167 | 168 | // Must return the number of data points 169 | inline size_t kdtree_get_point_count() const { 170 | return this->npts; 171 | } 172 | 173 | // Returns the dim'th component of the idx'th point in the class: 174 | inline num_t kdtree_get_pt(const size_t pts_id, const size_t coord_id) const { 175 | return m_data[pts_id*this->dim + coord_id]; 176 | } 177 | 178 | // Optional bounding-box computation: return false to default to a standard bbox computation loop. 179 | // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. 180 | // Look at bb.size() to find out the expected dimensionality (e.g. 
2 or 3 for point clouds) 181 | template 182 | bool kdtree_get_bbox(BBOX & /*bb*/) const { 183 | return false; 184 | } 185 | 186 | /** @} */ 187 | 188 | }; // end of KDTreeVectorOfVectorsAdaptor 189 | 190 | -------------------------------------------------------------------------------- /utils/cpp_wrappers/cpp_subsampling/wrapper.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "grid_subsampling/grid_subsampling.h" 4 | #include 5 | 6 | 7 | 8 | // docstrings for our module 9 | // ************************* 10 | 11 | static char module_docstring[] = "This module provides an interface for the subsampling of a pointcloud"; 12 | 13 | static char compute_docstring[] = "function subsampling a pointcloud"; 14 | 15 | 16 | // Declare the functions 17 | // ********************* 18 | 19 | static PyObject *grid_subsampling_compute(PyObject *self, PyObject *args, PyObject *keywds); 20 | 21 | 22 | // Specify the members of the module 23 | // ********************************* 24 | 25 | static PyMethodDef module_methods[] = 26 | { 27 | { "compute", (PyCFunction)grid_subsampling_compute, METH_VARARGS | METH_KEYWORDS, compute_docstring }, 28 | {NULL, NULL, 0, NULL} 29 | }; 30 | 31 | 32 | // Initialize the module 33 | // ********************* 34 | 35 | static struct PyModuleDef moduledef = 36 | { 37 | PyModuleDef_HEAD_INIT, 38 | "grid_subsampling", // m_name 39 | module_docstring, // m_doc 40 | -1, // m_size 41 | module_methods, // m_methods 42 | NULL, // m_reload 43 | NULL, // m_traverse 44 | NULL, // m_clear 45 | NULL, // m_free 46 | }; 47 | 48 | PyMODINIT_FUNC PyInit_grid_subsampling(void) 49 | { 50 | import_array(); 51 | return PyModule_Create(&moduledef); 52 | } 53 | 54 | 55 | // Actual wrapper 56 | // ************** 57 | 58 | static PyObject *grid_subsampling_compute(PyObject *self, PyObject *args, PyObject *keywds) 59 | { 60 | 61 | // Manage inputs 62 | // ************* 63 | 64 | // Args containers 65 | PyObject *points_obj = NULL; 66 | PyObject *features_obj = NULL; 67 | PyObject *classes_obj = NULL; 68 | 69 | // Keywords containers 70 | static char *kwlist[] = {"points", "features", "classes", "sampleDl", "method", "verbose", NULL }; 71 | float sampleDl = 0.1; 72 | const char *method_buffer = "barycenters"; 73 | int verbose = 0; 74 | 75 | // Parse the input 76 | if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|$OOfsi", kwlist, &points_obj, &features_obj, &classes_obj, &sampleDl, &method_buffer, &verbose)) 77 | { 78 | PyErr_SetString(PyExc_RuntimeError, "Error parsing arguments"); 79 | return NULL; 80 | } 81 | 82 | // Get the method argument 83 | string method(method_buffer); 84 | 85 | // Interpret method 86 | if (method.compare("barycenters") && method.compare("voxelcenters")) 87 | { 88 | PyErr_SetString(PyExc_RuntimeError, "Error parsing method. Valid method names are \"barycenters\" and \"voxelcenters\" "); 89 | return NULL; 90 | } 91 | 92 | // Check if using features or classes 93 | bool use_feature = true, use_classes = true; 94 | if (features_obj == NULL) 95 | use_feature = false; 96 | if (classes_obj == NULL) 97 | use_classes = false; 98 | 99 | // Interpret the input objects as numpy arrays. 
100 | PyObject *points_array = PyArray_FROM_OTF(points_obj, NPY_FLOAT, NPY_IN_ARRAY); 101 | PyObject *features_array = NULL; 102 | PyObject *classes_array = NULL; 103 | if (use_feature) 104 | features_array = PyArray_FROM_OTF(features_obj, NPY_FLOAT, NPY_IN_ARRAY); 105 | if (use_classes) 106 | classes_array = PyArray_FROM_OTF(classes_obj, NPY_INT, NPY_IN_ARRAY); 107 | 108 | // Verify data was load correctly. 109 | if (points_array == NULL) 110 | { 111 | Py_XDECREF(points_array); 112 | Py_XDECREF(classes_array); 113 | Py_XDECREF(features_array); 114 | PyErr_SetString(PyExc_RuntimeError, "Error converting input points to numpy arrays of type float32"); 115 | return NULL; 116 | } 117 | if (use_feature && features_array == NULL) 118 | { 119 | Py_XDECREF(points_array); 120 | Py_XDECREF(classes_array); 121 | Py_XDECREF(features_array); 122 | PyErr_SetString(PyExc_RuntimeError, "Error converting input features to numpy arrays of type float32"); 123 | return NULL; 124 | } 125 | if (use_classes && classes_array == NULL) 126 | { 127 | Py_XDECREF(points_array); 128 | Py_XDECREF(classes_array); 129 | Py_XDECREF(features_array); 130 | PyErr_SetString(PyExc_RuntimeError, "Error converting input classes to numpy arrays of type int32"); 131 | return NULL; 132 | } 133 | 134 | // Check that the input array respect the dims 135 | if ((int)PyArray_NDIM(points_array) != 2 || (int)PyArray_DIM(points_array, 1) != 3) 136 | { 137 | Py_XDECREF(points_array); 138 | Py_XDECREF(classes_array); 139 | Py_XDECREF(features_array); 140 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : points.shape is not (N, 3)"); 141 | return NULL; 142 | } 143 | if (use_feature && ((int)PyArray_NDIM(features_array) != 2)) 144 | { 145 | Py_XDECREF(points_array); 146 | Py_XDECREF(classes_array); 147 | Py_XDECREF(features_array); 148 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : features.shape is not (N, d)"); 149 | return NULL; 150 | } 151 | 152 | if (use_classes && (int)PyArray_NDIM(classes_array) > 2) 153 | { 154 | Py_XDECREF(points_array); 155 | Py_XDECREF(classes_array); 156 | Py_XDECREF(features_array); 157 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : classes.shape is not (N,) or (N, d)"); 158 | return NULL; 159 | } 160 | 161 | // Number of points 162 | int N = (int)PyArray_DIM(points_array, 0); 163 | 164 | // Dimension of the features 165 | int fdim = 0; 166 | if (use_feature) 167 | fdim = (int)PyArray_DIM(features_array, 1); 168 | 169 | //Dimension of labels 170 | int ldim = 1; 171 | if (use_classes && (int)PyArray_NDIM(classes_array) == 2) 172 | ldim = (int)PyArray_DIM(classes_array, 1); 173 | 174 | // Check that the input array respect the number of points 175 | if (use_feature && (int)PyArray_DIM(features_array, 0) != N) 176 | { 177 | Py_XDECREF(points_array); 178 | Py_XDECREF(classes_array); 179 | Py_XDECREF(features_array); 180 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : features.shape is not (N, d)"); 181 | return NULL; 182 | } 183 | if (use_classes && (int)PyArray_DIM(classes_array, 0) != N) 184 | { 185 | Py_XDECREF(points_array); 186 | Py_XDECREF(classes_array); 187 | Py_XDECREF(features_array); 188 | PyErr_SetString(PyExc_RuntimeError, "Wrong dimensions : classes.shape is not (N,) or (N, d)"); 189 | return NULL; 190 | } 191 | 192 | 193 | // Call the C++ function 194 | // ********************* 195 | 196 | // Create pyramid 197 | if (verbose > 0) 198 | cout << "Computing cloud pyramid with support points: " << endl; 199 | 200 | 201 | // Convert PyArray to Cloud C++ class 202 | 
vector original_points; 203 | vector original_features; 204 | vector original_classes; 205 | original_points = vector((PointXYZ*)PyArray_DATA(points_array), (PointXYZ*)PyArray_DATA(points_array) + N); 206 | if (use_feature) 207 | original_features = vector((float*)PyArray_DATA(features_array), (float*)PyArray_DATA(features_array) + N*fdim); 208 | if (use_classes) 209 | original_classes = vector((int*)PyArray_DATA(classes_array), (int*)PyArray_DATA(classes_array) + N*ldim); 210 | 211 | // Subsample 212 | vector subsampled_points; 213 | vector subsampled_features; 214 | vector subsampled_classes; 215 | grid_subsampling(original_points, 216 | subsampled_points, 217 | original_features, 218 | subsampled_features, 219 | original_classes, 220 | subsampled_classes, 221 | sampleDl, 222 | verbose); 223 | 224 | // Check result 225 | if (subsampled_points.size() < 1) 226 | { 227 | PyErr_SetString(PyExc_RuntimeError, "Error"); 228 | return NULL; 229 | } 230 | 231 | // Manage outputs 232 | // ************** 233 | 234 | // Dimension of input containers 235 | npy_intp* point_dims = new npy_intp[2]; 236 | point_dims[0] = subsampled_points.size(); 237 | point_dims[1] = 3; 238 | npy_intp* feature_dims = new npy_intp[2]; 239 | feature_dims[0] = subsampled_points.size(); 240 | feature_dims[1] = fdim; 241 | npy_intp* classes_dims = new npy_intp[2]; 242 | classes_dims[0] = subsampled_points.size(); 243 | classes_dims[1] = ldim; 244 | 245 | // Create output array 246 | PyObject *res_points_obj = PyArray_SimpleNew(2, point_dims, NPY_FLOAT); 247 | PyObject *res_features_obj = NULL; 248 | PyObject *res_classes_obj = NULL; 249 | PyObject *ret = NULL; 250 | 251 | // Fill output array with values 252 | size_t size_in_bytes = subsampled_points.size() * 3 * sizeof(float); 253 | memcpy(PyArray_DATA(res_points_obj), subsampled_points.data(), size_in_bytes); 254 | if (use_feature) 255 | { 256 | size_in_bytes = subsampled_points.size() * fdim * sizeof(float); 257 | res_features_obj = PyArray_SimpleNew(2, feature_dims, NPY_FLOAT); 258 | memcpy(PyArray_DATA(res_features_obj), subsampled_features.data(), size_in_bytes); 259 | } 260 | if (use_classes) 261 | { 262 | size_in_bytes = subsampled_points.size() * ldim * sizeof(int); 263 | res_classes_obj = PyArray_SimpleNew(2, classes_dims, NPY_INT); 264 | memcpy(PyArray_DATA(res_classes_obj), subsampled_classes.data(), size_in_bytes); 265 | } 266 | 267 | 268 | // Merge results 269 | if (use_feature && use_classes) 270 | ret = Py_BuildValue("NNN", res_points_obj, res_features_obj, res_classes_obj); 271 | else if (use_feature) 272 | ret = Py_BuildValue("NN", res_points_obj, res_features_obj); 273 | else if (use_classes) 274 | ret = Py_BuildValue("NN", res_points_obj, res_classes_obj); 275 | else 276 | ret = Py_BuildValue("N", res_points_obj); 277 | 278 | // Clean up 279 | // ******** 280 | 281 | Py_DECREF(points_array); 282 | Py_XDECREF(features_array); 283 | Py_XDECREF(classes_array); 284 | 285 | return ret; 286 | } -------------------------------------------------------------------------------- /tester_Toronto3D.py: -------------------------------------------------------------------------------- 1 | from os import makedirs 2 | from os.path import exists, join 3 | from helper_ply import write_ply 4 | from sklearn.metrics import confusion_matrix 5 | from tool import DataProcessing as DP 6 | import tensorflow as tf 7 | import numpy as np 8 | import time 9 | 10 | 11 | def log_out(out_str, log_f_out): 12 | log_f_out.write(out_str + '\n') 13 | log_f_out.flush() 14 | print(out_str) 15 | 16 
| 17 | class ModelTester:
18 |     def __init__(self, model, dataset, restore_snap=None, base_only=False):
19 |         my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
20 |         self.saver = tf.train.Saver(my_vars, max_to_keep=100)
21 |         makedirs('test_log') if not exists('test_log') else None
22 |         self.Log_file = open('test_log/log_test_Toronto3D.txt', 'a')
23 |         self.base_only = base_only
24 | 
25 |         # Create a session for running Ops on the Graph.
26 |         on_cpu = False
27 |         if on_cpu:
28 |             c_proto = tf.ConfigProto(device_count={'GPU': 0})
29 |         else:
30 |             c_proto = tf.ConfigProto()
31 |             c_proto.gpu_options.allow_growth = True
32 |         self.sess = tf.Session(config=c_proto)
33 |         self.sess.run(tf.global_variables_initializer())
34 | 
35 |         # Load trained model
36 |         if restore_snap is not None:
37 |             self.saver.restore(self.sess, restore_snap)
38 |             print("Model restored from " + restore_snap)
39 | 
40 |         self.prob_logits = tf.nn.softmax(model.logits)
41 | 
42 |         # Initiate global prediction over all test clouds
43 |         self.test_probs = [np.zeros(shape=[l.shape[0], model.config.num_classes], dtype=np.float32)
44 |                            for l in dataset.input_labels['test']]
45 | 
46 |     def test(self, model, dataset, num_votes=30):
47 | 
48 |         # Smoothing parameter for votes
49 |         test_smooth = 0.95
50 | 
51 |         # Initialise iterator with validation/test data
52 |         self.sess.run(dataset.test_init_op)
53 | 
54 | 
55 |         # Number of points per class in test set
56 |         proportions = np.zeros(model.config.num_classes, dtype=np.float32)
57 |         i = 0
58 |         for label_val in dataset.label_values:
59 |             if label_val not in dataset.ignored_labels:
60 |                 proportions[i] = np.sum([np.sum(labels == label_val) for labels in dataset.test_labels])
61 |                 i += 1
62 | 
63 |         # Test saving path
64 |         saving_path = time.strftime('results/Log_Toronto_3D_%Y-%m-%d_%H-%M-%S', time.gmtime())
65 |         test_path = join('test', saving_path.split('/')[-1])
66 |         makedirs(test_path) if not exists(test_path) else None
67 |         makedirs(join(test_path, 'test_preds')) if not exists(join(test_path, 'test_preds')) else None
68 | 
69 |         step_id = 0
70 |         epoch_id = 0
71 |         last_min = -0.5
72 |         t0 = time.time()
73 |         while last_min < num_votes:
74 |             try:
75 |                 ops = (self.prob_logits,
76 |                        model.labels,
77 |                        model.inputs['input_inds'],
78 |                        model.inputs['cloud_inds'],
79 |                        model.accuracy)
80 | 
81 |                 stacked_probs, stacked_labels, point_idx, cloud_idx, acc = self.sess.run(ops, {model.is_training: False})
82 |                 print('step' + str(step_id) + ' acc:' + str(acc))
83 |                 stacked_probs = np.reshape(stacked_probs, [model.config.val_batch_size, model.config.num_points,
84 |                                                            model.config.num_classes])
85 |                 if self.base_only:
86 |                     stacked_probs = stacked_probs[:,:model.config.num_points*4//7,:]
87 |                     point_idx = point_idx[:,:model.config.num_points*4//7]
88 | 
89 | 
90 |                 for j in range(np.shape(stacked_probs)[0]):
91 |                     probs = stacked_probs[j, :, :]
92 |                     p_idx = point_idx[j, :]
93 |                     c_i = cloud_idx[j][0]
94 |                     self.test_probs[c_i][p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (1 - test_smooth) * probs
95 |                 step_id += 1
96 | 
97 |             except tf.errors.OutOfRangeError:
98 | 
99 |                 new_min = np.min(dataset.min_possibility['test'])
100 |                log_out('Epoch {:3d}, end. 
Min possibility = {:.1f}'.format(epoch_id, new_min), self.Log_file) 101 | 102 | if last_min + 1 < new_min: 103 | print('Prediction done in {:.1f} s\n'.format(time.time() - t0)) 104 | # Update last_min 105 | last_min += 1 106 | 107 | # Show vote results (On subcloud so it is not the good values here) 108 | log_out('\nConfusion on sub clouds', self.Log_file) 109 | confusion_list = [] 110 | 111 | num_test = len(dataset.input_labels['test']) 112 | 113 | for i_test in range(num_test): 114 | probs = self.test_probs[i_test] 115 | for l_ind, label_value in enumerate(dataset.label_values): 116 | if label_value in dataset.ignored_labels: 117 | probs = np.insert(probs, l_ind, 0, axis=1) 118 | preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32) 119 | labels = dataset.input_labels['test'][i_test] 120 | 121 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 122 | 123 | # Regroup confusions 124 | C = np.sum(np.stack(confusion_list), axis=0).astype(np.float32) 125 | 126 | for l_ind, label_value in reversed(list(enumerate(dataset.label_values))): 127 | if label_value in dataset.ignored_labels: 128 | C = np.delete(C, l_ind, axis=0) 129 | C = np.delete(C, l_ind, axis=1) 130 | 131 | # Rescale with the right number of point per class 132 | C *= np.expand_dims(proportions / (np.sum(C, axis=1) + 1e-6), 1) 133 | 134 | # Compute IoUs 135 | IoUs = DP.IoU_from_confusions(C) 136 | m_IoU = np.mean(IoUs) 137 | s = '{:5.2f} | '.format(100 * m_IoU) 138 | for IoU in IoUs: 139 | s += '{:5.2f} '.format(100 * IoU) 140 | log_out(s + '\n', self.Log_file) 141 | 142 | if int(np.ceil(new_min)) % 1 == 0: 143 | 144 | # Project predictions 145 | log_out('\nReproject Vote #{:d}'.format(int(np.floor(new_min))), self.Log_file) 146 | proj_probs_list = [] 147 | 148 | for i_test in range(num_test): 149 | # Reproject probs back to the evaluations points 150 | proj_idx = dataset.test_proj[i_test] 151 | probs = self.test_probs[i_test][proj_idx, :] 152 | # Insert false columns for ignored labels 153 | probs2 = probs 154 | for l_ind, label_value in enumerate(dataset.label_values): 155 | if label_value in dataset.ignored_labels: 156 | probs2 = np.insert(probs2, l_ind, 0, axis=1) 157 | proj_probs_list += [probs2] 158 | 159 | # Show vote results 160 | log_out('Confusion on full clouds', self.Log_file) 161 | confusion_list = [] 162 | for i_test in range(num_test): 163 | # Get the predicted labels 164 | preds = dataset.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8) 165 | 166 | # Confusion 167 | labels = dataset.test_labels[i_test] 168 | acc = np.sum(preds == labels) / len(labels) 169 | log_out(dataset.input_names['test'][i_test] + ' Acc:' + str(acc), self.Log_file) 170 | 171 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 172 | name = dataset.input_names['test'][i_test] + '.ply' 173 | pc_path = dataset.path + '/original_files/' + name[:-4] + '.ply' 174 | print(pc_path) 175 | xyz, i, _ = DP.read_ply_data_toronto_3D(pc_path, with_rgb=True) 176 | write_ply(join(test_path, 'test_preds', name), [xyz, i, preds, labels], ['x','y','z', 'red', 'green','blue','pred', 'label']) 177 | 178 | 179 | # Regroup confusions 180 | C = np.sum(np.stack(confusion_list), axis=0) 181 | 182 | for l_ind, label_value in reversed(list(enumerate(dataset.label_values))): 183 | if label_value in dataset.ignored_labels: 184 | C = np.delete(C, l_ind, axis=0) 185 | C = np.delete(C, l_ind, axis=1) 186 | IoUs = DP.IoU_from_confusions(C) 187 | m_IoU = np.mean(IoUs) 188 | s = '{:5.2f} | 
'.format(100 * m_IoU) 189 | for IoU in IoUs: 190 | s += '{:5.2f} '.format(100 * IoU) 191 | log_out('-' * len(s), self.Log_file) 192 | log_out(s, self.Log_file) 193 | log_out('-' * len(s) + '\n', self.Log_file) 194 | print('finished \n') 195 | self.sess.close() 196 | return 197 | 198 | self.sess.run(dataset.test_init_op) 199 | epoch_id += 1 200 | step_id = 0 201 | continue 202 | print('processing time: ',time.time() - t0) 203 | return -------------------------------------------------------------------------------- /tester_DALES.py: -------------------------------------------------------------------------------- 1 | from os import makedirs 2 | from os.path import exists, join 3 | from helper_ply import write_ply 4 | from sklearn.metrics import confusion_matrix 5 | from tool import DataProcessing as DP 6 | import tensorflow as tf 7 | import numpy as np 8 | import time 9 | 10 | 11 | def log_out(out_str, log_f_out): 12 | log_f_out.write(out_str + '\n') 13 | log_f_out.flush() 14 | print(out_str) 15 | 16 | 17 | class ModelTester: 18 | def __init__(self, model, dataset, restore_snap=None, base_only=False): 19 | my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) 20 | self.saver = tf.train.Saver(my_vars, max_to_keep=100) 21 | makedirs('test_log') if not exists('test_log') else None 22 | self.Log_file = open('test_log/log_test_DALES.txt', 'a') 23 | self.base_only = base_only 24 | 25 | # Create a session for running Ops on the Graph. 26 | on_cpu = False 27 | if on_cpu: 28 | c_proto = tf.ConfigProto(device_count={'GPU': 0}) 29 | else: 30 | c_proto = tf.ConfigProto() 31 | c_proto.gpu_options.allow_growth = True 32 | self.sess = tf.Session(config=c_proto) 33 | self.sess.run(tf.global_variables_initializer()) 34 | 35 | # Load trained model 36 | if restore_snap is not None: 37 | self.saver.restore(self.sess, restore_snap) 38 | print("Model restored from " + restore_snap) 39 | 40 | self.prob_logits = tf.nn.softmax(model.logits) 41 | 42 | # Initiate global prediction over all test clouds 43 | self.test_probs = [np.zeros(shape=[l.shape[0], model.config.num_classes], dtype=np.float32) 44 | for l in dataset.input_labels['test']] 45 | 46 | def test(self, model, dataset, num_votes=30): 47 | 48 | # Smoothing parameter for votes 49 | test_smooth = 0.95 50 | 51 | # Initialise iterator with validation/test data 52 | self.sess.run(dataset.test_init_op) 53 | 54 | 55 | # Number of points per class in test set 56 | proportions = np.zeros(model.config.num_classes, dtype=np.float32) 57 | i = 0 58 | for label_val in dataset.label_values: 59 | if label_val not in dataset.ignored_labels: 60 | proportions[i] = np.sum([np.sum(labels == label_val) for labels in dataset.test_labels]) 61 | i += 1 62 | 63 | # Test saving path 64 | saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime()) 65 | test_path = join('test', saving_path.split('/')[-1]) 66 | makedirs(test_path) if not exists(test_path) else None 67 | makedirs(join(test_path, 'test_preds')) if not exists(join(test_path, 'test_preds')) else None 68 | 69 | step_id = 0 70 | epoch_id = 0 71 | last_min = -0.5 72 | t0 = time.time() 73 | while last_min < num_votes: 74 | try: 75 | ops = (self.prob_logits, 76 | model.labels, 77 | model.inputs['input_inds'], 78 | model.inputs['cloud_inds'], 79 | model.accuracy) 80 | 81 | stacked_probs, stacked_labels, point_idx, cloud_idx, acc = self.sess.run(ops, {model.is_training: False}) 82 | print('step' + str(step_id) + ' acc:' + str(acc)) 83 | stacked_probs = np.reshape(stacked_probs, [model.config.val_batch_size, 
model.config.num_points, 84 | model.config.num_classes]) 85 | if self.base_only: 86 | stacked_probs = stacked_probs[:,:model.config.num_points*4//7,:] 87 | point_idx = point_idx[:,:model.config.num_points*4//7] 88 | 89 | 90 | for j in range(np.shape(stacked_probs)[0]): 91 | probs = stacked_probs[j, :, :] 92 | p_idx = point_idx[j, :] 93 | c_i = cloud_idx[j][0] 94 | self.test_probs[c_i][p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (1 - test_smooth) * probs 95 | step_id += 1 96 | 97 | except tf.errors.OutOfRangeError: 98 | 99 | new_min = np.min(dataset.min_possibility['test']) 100 | log_out('Epoch {:3d}, end. Min possibility = {:.1f}'.format(epoch_id, new_min), self.Log_file) 101 | 102 | if last_min + 1 < new_min: 103 | print('Prediction done in {:.1f} s\n'.format(time.time() - t0)) 104 | # Update last_min 105 | last_min += 1 106 | 107 | # Show vote results (On subcloud so it is not the good values here) 108 | log_out('\nConfusion on sub clouds', self.Log_file) 109 | confusion_list = [] 110 | 111 | num_test = len(dataset.input_labels['test']) 112 | 113 | for i_test in range(num_test): 114 | probs = self.test_probs[i_test] 115 | for l_ind, label_value in enumerate(dataset.label_values): 116 | if label_value in dataset.ignored_labels: 117 | probs = np.insert(probs, l_ind, 0, axis=1) 118 | preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32) 119 | labels = dataset.input_labels['test'][i_test] 120 | 121 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 122 | 123 | # Regroup confusions 124 | C = np.sum(np.stack(confusion_list), axis=0).astype(np.float32) 125 | 126 | for l_ind, label_value in reversed(list(enumerate(dataset.label_values))): 127 | if label_value in dataset.ignored_labels: 128 | C = np.delete(C, l_ind, axis=0) 129 | C = np.delete(C, l_ind, axis=1) 130 | 131 | # Rescale with the right number of point per class 132 | C *= np.expand_dims(proportions / (np.sum(C, axis=1) + 1e-6), 1) 133 | 134 | # Compute IoUs 135 | IoUs = DP.IoU_from_confusions(C) 136 | m_IoU = np.mean(IoUs) 137 | s = '{:5.2f} | '.format(100 * m_IoU) 138 | for IoU in IoUs: 139 | s += '{:5.2f} '.format(100 * IoU) 140 | log_out(s + '\n', self.Log_file) 141 | 142 | if int(np.ceil(new_min)) % 1 == 0: 143 | 144 | # Project predictions 145 | log_out('\nReproject Vote #{:d}'.format(int(np.floor(new_min))), self.Log_file) 146 | proj_probs_list = [] 147 | 148 | for i_test in range(num_test): 149 | print(num_test) 150 | print(len(dataset.test_proj)) 151 | # Reproject probs back to the evaluations points 152 | proj_idx = dataset.test_proj[i_test] 153 | probs = self.test_probs[i_test][proj_idx, :] 154 | # Insert false columns for ignored labels 155 | probs2 = probs 156 | for l_ind, label_value in enumerate(dataset.label_values): 157 | if label_value in dataset.ignored_labels: 158 | probs2 = np.insert(probs2, l_ind, 0, axis=1) 159 | proj_probs_list += [probs2] 160 | 161 | # Show vote results 162 | log_out('Confusion on full clouds', self.Log_file) 163 | confusion_list = [] 164 | for i_test in range(num_test): 165 | # Get the predicted labels 166 | preds = dataset.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8) 167 | 168 | # Confusion 169 | labels = dataset.test_labels[i_test] 170 | acc = np.sum(preds == labels) / len(labels) 171 | log_out(dataset.input_names['test'][i_test] + ' Acc:' + str(acc), self.Log_file) 172 | 173 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 174 | name = dataset.input_names['test'][i_test] + '.ply' 175 
| pc_path = dataset.path + '/dales_ply_to_ply/test/' + name[:-4] + '.ply' 176 | print(pc_path) 177 | xyz, _ = DP.read_ply_data(pc_path, with_rgb = False, with_label = True, with_i = False) 178 | write_ply(join(test_path, 'test_preds', name), [xyz, preds, labels], ['x','y','z','pred', 'label']) 179 | 180 | 181 | # Regroup confusions 182 | C = np.sum(np.stack(confusion_list), axis=0) 183 | 184 | for l_ind, label_value in reversed(list(enumerate(dataset.label_values))): 185 | if label_value in dataset.ignored_labels: 186 | C = np.delete(C, l_ind, axis=0) 187 | C = np.delete(C, l_ind, axis=1) 188 | IoUs = DP.IoU_from_confusions(C) 189 | m_IoU = np.mean(IoUs) 190 | s = '{:5.2f} | '.format(100 * m_IoU) 191 | for IoU in IoUs: 192 | s += '{:5.2f} '.format(100 * IoU) 193 | log_out('-' * len(s), self.Log_file) 194 | log_out(s, self.Log_file) 195 | log_out('-' * len(s) + '\n', self.Log_file) 196 | print('finished \n') 197 | self.sess.close() 198 | return 199 | 200 | self.sess.run(dataset.test_init_op) 201 | epoch_id += 1 202 | step_id = 0 203 | continue 204 | print('processing time: ',time.time() - t0) 205 | return -------------------------------------------------------------------------------- /helper_ply.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # 0===============================0 4 | # | PLY files reader/writer | 5 | # 0===============================0 6 | # 7 | # 8 | # ---------------------------------------------------------------------------------------------------------------------- 9 | # 10 | # function to read/write .ply files 11 | # 12 | # ---------------------------------------------------------------------------------------------------------------------- 13 | # 14 | # Hugues THOMAS - 10/02/2017 15 | # 16 | 17 | 18 | # ---------------------------------------------------------------------------------------------------------------------- 19 | # 20 | # Imports and global variables 21 | # \**********************************/ 22 | # 23 | 24 | 25 | # Basic libs 26 | import numpy as np 27 | import sys 28 | 29 | 30 | # Define PLY types 31 | ply_dtypes = dict([ 32 | (b'int8', 'i1'), 33 | (b'char', 'i1'), 34 | (b'uint8', 'u1'), 35 | (b'uchar', 'u1'), 36 | (b'int16', 'i2'), 37 | (b'short', 'i2'), 38 | (b'uint16', 'u2'), 39 | (b'ushort', 'u2'), 40 | (b'int32', 'i4'), 41 | (b'int', 'i4'), 42 | (b'uint32', 'u4'), 43 | (b'uint', 'u4'), 44 | (b'float32', 'f4'), 45 | (b'float', 'f4'), 46 | (b'float64', 'f8'), 47 | (b'double', 'f8') 48 | ]) 49 | 50 | # Numpy reader format 51 | valid_formats = {'ascii': '', 'binary_big_endian': '>', 52 | 'binary_little_endian': '<'} 53 | 54 | 55 | # ---------------------------------------------------------------------------------------------------------------------- 56 | # 57 | # Functions 58 | # \***************/ 59 | # 60 | 61 | 62 | def parse_header(plyfile, ext): 63 | # Variables 64 | line = [] 65 | properties = [] 66 | num_points = None 67 | 68 | while b'end_header' not in line and line != b'': 69 | line = plyfile.readline() 70 | 71 | if b'element' in line: 72 | line = line.split() 73 | num_points = int(line[2]) 74 | 75 | elif b'property' in line: 76 | line = line.split() 77 | properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) 78 | 79 | return num_points, properties 80 | 81 | 82 | def parse_mesh_header(plyfile, ext): 83 | # Variables 84 | line = [] 85 | vertex_properties = [] 86 | num_points = None 87 | num_faces = None 88 | current_element = None 89 | 90 | 91 | while b'end_header' not in line and 
line != b'': 92 | line = plyfile.readline() 93 | 94 | # Find point element 95 | if b'element vertex' in line: 96 | current_element = 'vertex' 97 | line = line.split() 98 | num_points = int(line[2]) 99 | 100 | elif b'element face' in line: 101 | current_element = 'face' 102 | line = line.split() 103 | num_faces = int(line[2]) 104 | 105 | elif b'property' in line: 106 | if current_element == 'vertex': 107 | line = line.split() 108 | vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]])) 109 | elif current_element == 'face': 110 | if not line.startswith(b'property list uchar int'): 111 | raise ValueError('Unsupported faces property : ' + line.decode()) 112 | 113 | return num_points, num_faces, vertex_properties 114 | 115 | 116 | def read_ply(filename, triangular_mesh=False): 117 | """ 118 | Read ".ply" files 119 | 120 | Parameters 121 | ---------- 122 | filename : string 123 | the name of the file to read. 124 | 125 | Returns 126 | ------- 127 | result : array 128 | data stored in the file 129 | 130 | Examples 131 | -------- 132 | Store data in file 133 | 134 | >>> points = np.random.rand(5, 3) 135 | >>> values = np.random.randint(2, size=5) 136 | >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values']) 137 | 138 | Read the file 139 | 140 | >>> data = read_ply('example.ply') 141 | >>> values = data['values'] 142 | array([0, 0, 1, 1, 0]) 143 | 144 | >>> points = np.vstack((data['x'], data['y'], data['z'])).T 145 | array([[ 0.466 0.595 0.324] 146 | [ 0.538 0.407 0.654] 147 | [ 0.850 0.018 0.988] 148 | [ 0.395 0.394 0.363] 149 | [ 0.873 0.996 0.092]]) 150 | 151 | """ 152 | 153 | with open(filename, 'rb') as plyfile: 154 | 155 | 156 | # Check if the file starts with ply 157 | if b'ply' not in plyfile.readline(): 158 | raise ValueError('The file does not start with the word ply') 159 | 160 | # get binary_little/big or ascii 161 | fmt = plyfile.readline().split()[1].decode() 162 | if fmt == "ascii": 163 | raise ValueError('The file is not binary') 164 | 165 | # get extension for building the numpy dtypes 166 | ext = valid_formats[fmt] 167 | 168 | # PointCloud reader vs mesh reader 169 | if triangular_mesh: 170 | 171 | # Parse header 172 | num_points, num_faces, properties = parse_mesh_header(plyfile, ext) 173 | 174 | # Get point data 175 | vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points) 176 | 177 | # Get face data 178 | face_properties = [('k', ext + 'u1'), 179 | ('v1', ext + 'i4'), 180 | ('v2', ext + 'i4'), 181 | ('v3', ext + 'i4')] 182 | faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces) 183 | 184 | # Return vertex data and concatenated faces 185 | faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T 186 | data = [vertex_data, faces] 187 | 188 | else: 189 | 190 | # Parse header 191 | num_points, properties = parse_header(plyfile, ext) 192 | 193 | # Get data 194 | data = np.fromfile(plyfile, dtype=properties, count=num_points) 195 | 196 | return data 197 | 198 | 199 | def header_properties(field_list, field_names): 200 | 201 | # List of lines to write 202 | lines = [] 203 | 204 | # First line describing element vertex 205 | lines.append('element vertex %d' % field_list[0].shape[0]) 206 | 207 | # Properties lines 208 | i = 0 209 | for fields in field_list: 210 | for field in fields.T: 211 | lines.append('property %s %s' % (field.dtype.name, field_names[i])) 212 | i += 1 213 | 214 | return lines 215 | 216 | 217 | def write_ply(filename, field_list, field_names, triangular_faces=None): 218 | """ 219 | 
Write ".ply" files 220 | 221 | Parameters 222 | ---------- 223 | filename : string 224 | the name of the file to which the data is saved. A '.ply' extension will be appended to the 225 | file name if it does no already have one. 226 | 227 | field_list : list, tuple, numpy array 228 | the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a 229 | tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered 230 | as one field. 231 | 232 | field_names : list 233 | the name of each fields as a list of strings. Has to be the same length as the number of 234 | fields. 235 | 236 | Examples 237 | -------- 238 | >>> points = np.random.rand(10, 3) 239 | >>> write_ply('example1.ply', points, ['x', 'y', 'z']) 240 | 241 | >>> values = np.random.randint(2, size=10) 242 | >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values']) 243 | 244 | >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8) 245 | >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', values'] 246 | >>> write_ply('example3.ply', [points, colors, values], field_names) 247 | 248 | """ 249 | 250 | # Format list input to the right form 251 | field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,)) 252 | for i, field in enumerate(field_list): 253 | if field.ndim < 2: 254 | field_list[i] = field.reshape(-1, 1) 255 | if field.ndim > 2: 256 | print('fields have more than 2 dimensions') 257 | return False 258 | 259 | # check all fields have the same number of data 260 | n_points = [field.shape[0] for field in field_list] 261 | if not np.all(np.equal(n_points, n_points[0])): 262 | print('wrong field dimensions') 263 | return False 264 | 265 | # Check if field_names and field_list have same nb of column 266 | n_fields = np.sum([field.shape[1] for field in field_list]) 267 | if (n_fields != len(field_names)): 268 | print('wrong number of field names') 269 | return False 270 | 271 | # Add extension if not there 272 | if not filename.endswith('.ply'): 273 | filename += '.ply' 274 | 275 | # open in text mode to write the header 276 | with open(filename, 'w') as plyfile: 277 | 278 | # First magical word 279 | header = ['ply'] 280 | 281 | # Encoding format 282 | header.append('format binary_' + sys.byteorder + '_endian 1.0') 283 | 284 | # Points properties description 285 | header.extend(header_properties(field_list, field_names)) 286 | 287 | # Add faces if needded 288 | if triangular_faces is not None: 289 | header.append('element face {:d}'.format(triangular_faces.shape[0])) 290 | header.append('property list uchar int vertex_indices') 291 | 292 | # End of header 293 | header.append('end_header') 294 | 295 | # Write all lines 296 | for line in header: 297 | plyfile.write("%s\n" % line) 298 | 299 | # open in binary/append to use tofile 300 | with open(filename, 'ab') as plyfile: 301 | 302 | # Create a structured array 303 | i = 0 304 | type_list = [] 305 | for fields in field_list: 306 | for field in fields.T: 307 | type_list += [(field_names[i], field.dtype.str)] 308 | i += 1 309 | data = np.empty(field_list[0].shape[0], dtype=type_list) 310 | i = 0 311 | for fields in field_list: 312 | for field in fields.T: 313 | data[field_names[i]] = field 314 | i += 1 315 | 316 | data.tofile(plyfile) 317 | 318 | if triangular_faces is not None: 319 | triangular_faces = triangular_faces.astype(np.int32) 320 | type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)] 321 | data = 
np.empty(triangular_faces.shape[0], dtype=type_list) 322 | data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8) 323 | data['0'] = triangular_faces[:, 0] 324 | data['1'] = triangular_faces[:, 1] 325 | data['2'] = triangular_faces[:, 2] 326 | data.tofile(plyfile) 327 | 328 | return True 329 | 330 | 331 | def describe_element(name, df): 332 | """ Takes the columns of the dataframe and builds a ply-like description 333 | 334 | Parameters 335 | ---------- 336 | name: str 337 | df: pandas DataFrame 338 | 339 | Returns 340 | ------- 341 | element: list[str] 342 | """ 343 | property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'} 344 | element = ['element ' + name + ' ' + str(len(df))] 345 | 346 | if name == 'face': 347 | element.append("property list uchar int points_indices") 348 | 349 | else: 350 | for i in range(len(df.columns)): 351 | # get first letter of dtype to infer format 352 | f = property_formats[str(df.dtypes[i])[0]] 353 | element.append('property ' + f + ' ' + df.columns.values[i]) 354 | 355 | return element 356 | 357 | -------------------------------------------------------------------------------- /tester_SensatUrban.py: -------------------------------------------------------------------------------- 1 | from os import makedirs, system 2 | from os.path import exists, join, dirname, abspath 3 | from helper_ply import read_ply, write_ply 4 | from sklearn.metrics import confusion_matrix 5 | from tool import DataProcessing as DP 6 | import tensorflow as tf 7 | import numpy as np 8 | import time 9 | 10 | def log_out(out_str, log_f_out): 11 | log_f_out.write(out_str + '\n') 12 | log_f_out.flush() 13 | print(out_str) 14 | 15 | class ModelTester: 16 | def __init__(self, model, dataset, restore_snap=None, base_only=False): 17 | my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) 18 | self.saver = tf.train.Saver(my_vars, max_to_keep=100) 19 | makedirs('test_log') if not exists('test_log') else None 20 | self.Log_file = open('test_log/log_test_' + dataset.name + '.txt', 'a') 21 | self.base_only = base_only 22 | # Create a session for running Ops on the Graph. 
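# Setting on_cpu = True below builds a session with the GPU hidden
# (device_count={'GPU': 0}); otherwise allow_growth makes TensorFlow claim
# GPU memory on demand instead of pre-allocating the whole device.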
23 | on_cpu = False 24 | if on_cpu: 25 | c_proto = tf.ConfigProto(device_count={'GPU': 0}) 26 | else: 27 | c_proto = tf.ConfigProto() 28 | c_proto.gpu_options.allow_growth = True 29 | self.sess = tf.Session(config=c_proto) 30 | self.sess.run(tf.global_variables_initializer()) 31 | 32 | # Load trained model 33 | if restore_snap is not None: 34 | self.saver.restore(self.sess, restore_snap) 35 | print("Model restored from " + restore_snap) 36 | 37 | self.prob_logits = tf.nn.softmax(model.logits) 38 | 39 | # Initiate global prediction over all test clouds 40 | self.test_probs = [np.zeros(shape=[l.shape[0], model.config.num_classes], dtype=np.float32) 41 | for l in dataset.input_labels['test']] 42 | 43 | def test(self, model, dataset, num_votes=100): 44 | 45 | # Smoothing parameter for votes 46 | test_smooth = 0.95 47 | 48 | # Initialise iterator with validation/test data 49 | self.sess.run(dataset.test_init_op) 50 | 51 | # Test saving path 52 | saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime()) 53 | test_path = join('test', saving_path.split('/')[-1]) 54 | makedirs(test_path) if not exists(test_path) else None 55 | makedirs(join(test_path, 'test_preds')) if not exists(join(test_path, 'test_preds')) else None 56 | 57 | step_id = 0 58 | epoch_id = 0 59 | last_min = -0.5 60 | 61 | while last_min < num_votes: 62 | try: 63 | ops = (self.prob_logits, 64 | model.labels, 65 | model.inputs['input_inds'], 66 | model.inputs['cloud_inds']) 67 | 68 | stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(ops, {model.is_training: False}) 69 | stacked_probs = np.reshape(stacked_probs, [model.config.val_batch_size, model.config.num_points, 70 | model.config.num_classes]) 71 | if self.base_only: 72 | stacked_probs = stacked_probs[:,:model.config.num_points*4//7,:] 73 | point_idx = point_idx[:,:model.config.num_points*4//7] 74 | 75 | 76 | for j in range(np.shape(stacked_probs)[0]): 77 | probs = stacked_probs[j, :, :] 78 | p_idx = point_idx[j, :] 79 | c_i = cloud_idx[j][0] 80 | self.test_probs[c_i][p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (1 - test_smooth) * probs 81 | step_id += 1 82 | 83 | except tf.errors.OutOfRangeError: 84 | 85 | new_min = np.min(dataset.min_possibility['test']) 86 | log_out('Epoch {:3d}, end. 
Min possibility = {:.1f}'.format(epoch_id, new_min), self.Log_file) 87 | 88 | if last_min + 30 < new_min: 89 | 90 | # Update last_min 91 | last_min += 1 92 | 93 | # Show vote results (on sub-clouds, so these are not the final values) 94 | log_out('\nConfusion on sub clouds', self.Log_file) 95 | num_test = len(dataset.input_labels['test']) 96 | 97 | # Project predictions 98 | log_out('\nReproject Vote #{:d}'.format(int(np.floor(new_min))), self.Log_file) 99 | proj_probs_list = [] 100 | 101 | for i_test in range(num_test): 102 | # Reproject probs back to the evaluation points 103 | proj_idx = dataset.test_proj[i_test] 104 | probs = self.test_probs[i_test][proj_idx, :] 105 | proj_probs_list += [probs] 106 | 107 | # Show vote results 108 | log_out('Confusion on full clouds', self.Log_file) 109 | for i_test in range(num_test): 110 | # Get the predicted labels 111 | preds = dataset.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8) 112 | save_name = join(test_path, 'test_preds', dataset.input_names['test'][i_test] + '.label') 113 | preds = preds.astype(np.uint8) 114 | preds.tofile(save_name) 115 | 116 | # create submission files 117 | base_dir = dirname(abspath(__file__)) 118 | results_path = join(base_dir, test_path, 'test_preds') 119 | system('cd %s && zip -r %s/submission.zip *.label' % (results_path, results_path)) 120 | return 121 | 122 | self.sess.run(dataset.test_init_op) 123 | epoch_id += 1 124 | step_id = 0 125 | continue 126 | 127 | return 128 | 129 | class ModelTester_validation: 130 | def __init__(self, model, dataset, restore_snap=None, base_only=False): 131 | my_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) 132 | self.saver = tf.train.Saver(my_vars, max_to_keep=100) 133 | makedirs('test_log') if not exists('test_log') else None 134 | self.Log_file = open('test_log/log_test_' + dataset.name + '.txt', 'a') 135 | 136 | # Create a session for running Ops on the Graph. 
137 | on_cpu = False 138 | if on_cpu: 139 | c_proto = tf.ConfigProto(device_count={'GPU': 0}) 140 | else: 141 | c_proto = tf.ConfigProto() 142 | c_proto.gpu_options.allow_growth = True 143 | self.sess = tf.Session(config=c_proto) 144 | self.sess.run(tf.global_variables_initializer()) 145 | 146 | # Load trained model 147 | if restore_snap is not None: 148 | self.saver.restore(self.sess, restore_snap) 149 | print("Model restored from " + restore_snap) 150 | 151 | self.prob_logits = tf.nn.softmax(model.logits) 152 | 153 | # Initiate global prediction over all test clouds 154 | self.test_probs = [np.zeros(shape=[l.shape[0], model.config.num_classes], dtype=np.float32) 155 | for l in dataset.input_labels['validation']] 156 | 157 | def test(self, model, dataset, num_votes=100): 158 | 159 | # Smoothing parameter for votes 160 | test_smooth = 0.95 161 | 162 | # Initialise iterator with validation/test data 163 | self.sess.run(dataset.val_init_op) 164 | 165 | # Number of points per class in validation set 166 | val_proportions = np.zeros(model.config.num_classes, dtype=np.float32) 167 | i = 0 168 | for label_val in dataset.label_values: 169 | if label_val not in dataset.ignored_labels: 170 | val_proportions[i] = np.sum([np.sum(labels == label_val) for labels in dataset.val_labels]) 171 | i += 1 172 | 173 | # Test saving path 174 | saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime()) 175 | test_path = join('test', saving_path.split('/')[-1]) 176 | makedirs(test_path) if not exists(test_path) else None 177 | makedirs(join(test_path, 'val_preds')) if not exists(join(test_path, 'val_preds')) else None 178 | 179 | step_id = 0 180 | epoch_id = 0 181 | last_min = -0.5 182 | 183 | while last_min < num_votes: 184 | try: 185 | ops = (self.prob_logits, 186 | model.labels, 187 | model.inputs['input_inds'], 188 | model.inputs['cloud_inds'], 189 | ) 190 | 191 | stacked_probs, stacked_labels, point_idx, cloud_idx = self.sess.run(ops, {model.is_training: False}) 192 | correct = np.sum(np.argmax(stacked_probs, axis=1) == stacked_labels) 193 | acc = correct / float(np.prod(np.shape(stacked_labels))) 194 | print('step' + str(step_id) + ' acc:' + str(acc)) 195 | stacked_probs = np.reshape(stacked_probs, [model.config.val_batch_size, model.config.num_points, 196 | model.config.num_classes]) 197 | 198 | for j in range(np.shape(stacked_probs)[0]): 199 | probs = stacked_probs[j, :, :] 200 | p_idx = point_idx[j, :] 201 | c_i = cloud_idx[j][0] 202 | self.test_probs[c_i][p_idx] = test_smooth * self.test_probs[c_i][p_idx] + (1 - test_smooth) * probs 203 | step_id += 1 204 | 205 | except tf.errors.OutOfRangeError: 206 | 207 | new_min = np.min(dataset.min_possibility['validation']) 208 | log_out('Epoch {:3d}, end. 
Min possibility = {:.1f}'.format(epoch_id, new_min), self.Log_file) 209 | 210 | if last_min + 1 < new_min: 211 | 212 | # Update last_min 213 | last_min += 1 214 | 215 | # Show vote results (On subcloud so it is not the good values here) 216 | log_out('\nConfusion on sub clouds', self.Log_file) 217 | confusion_list = [] 218 | 219 | num_val = len(dataset.input_labels['validation']) 220 | 221 | for i_test in range(num_val): 222 | probs = self.test_probs[i_test] 223 | preds = dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32) 224 | labels = dataset.input_labels['validation'][i_test] 225 | 226 | # Confs 227 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 228 | 229 | # Regroup confusions 230 | C = np.sum(np.stack(confusion_list), axis=0).astype(np.float32) 231 | 232 | # Rescale with the right number of point per class 233 | C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1) 234 | 235 | # Compute IoUs 236 | IoUs = DP.IoU_from_confusions(C) 237 | m_IoU = np.mean(IoUs) 238 | s = '{:5.2f} | '.format(100 * m_IoU) 239 | for IoU in IoUs: 240 | s += '{:5.2f} '.format(100 * IoU) 241 | log_out(s + '\n', self.Log_file) 242 | 243 | if int(np.ceil(new_min)) % 1 == 0: 244 | 245 | # Project predictions 246 | log_out('\nReproject Vote #{:d}'.format(int(np.floor(new_min))), self.Log_file) 247 | proj_probs_list = [] 248 | 249 | for i_val in range(num_val): 250 | # Reproject probs back to the evaluations points 251 | proj_idx = dataset.val_proj[i_val] 252 | probs = self.test_probs[i_val][proj_idx, :] 253 | proj_probs_list += [probs] 254 | 255 | # Show vote results 256 | log_out('Confusion on full clouds', self.Log_file) 257 | preds_list = [] 258 | labels_list = [] 259 | confusion_list = [] 260 | for i_test in range(num_val): 261 | # Get the predicted labels 262 | preds = dataset.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8) 263 | # Confusion 264 | labels = dataset.val_labels[i_test] 265 | preds_list += [preds] 266 | labels_list += [labels] 267 | 268 | acc = np.sum(preds == labels) / len(labels) 269 | log_out(dataset.input_names['validation'][i_test] + ' Acc:' + str(acc), self.Log_file) 270 | 271 | confusion_list += [confusion_matrix(labels, preds, dataset.label_values)] 272 | name = dataset.input_names['validation'][i_test] + '.ply' 273 | original_data_path = '/nas2/jacob/SensatUrban_Data/original_files/' + dataset.input_names['validation'][i_test] + '.ply' 274 | data = read_ply(original_data_path) 275 | print(data.shape) 276 | write_ply(join(test_path, 'val_preds', name), [data['x'], data['y'], data['z'], data['red'], data['green'], data['blue'], preds, labels], ['x','y','z','red','green','blue','pred', 'label']) 277 | 278 | # Regroup confusions 279 | preds = np.concatenate(preds_list) 280 | labels = np.concatenate(labels_list) 281 | acc = np.sum(preds == labels) / (labels.shape[0]) 282 | log_out('Overall Acc:' + str(acc), self.Log_file) 283 | C = np.sum(np.stack(confusion_list), axis=0) 284 | 285 | IoUs = DP.IoU_from_confusions(C) 286 | m_IoU = np.mean(IoUs) 287 | s = '{:5.2f} | '.format(100 * m_IoU) 288 | for IoU in IoUs: 289 | s += '{:5.2f} '.format(100 * IoU) 290 | log_out('-' * len(s), self.Log_file) 291 | log_out(s, self.Log_file) 292 | log_out('-' * len(s) + '\n', self.Log_file) 293 | print('finished \n') 294 | self.sess.close() 295 | return 296 | 297 | self.sess.run(dataset.val_init_op) 298 | epoch_id += 1 299 | step_id = 0 300 | continue 301 | 302 | return 
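All three testers share this vote-accumulation scheme: each batch's softmax output is blended into a persistent per-cloud probability buffer with an exponential moving average (test_smooth = 0.95), while the per-point "possibility" scores of the sampled points are raised so that later queries favour the least-visited regions. Below is a minimal NumPy sketch of that update; the names (n_points, accumulate_votes, and the random stand-in data) are illustrative only and not part of this repository's API.

import numpy as np

test_smooth = 0.95
n_points, n_classes = 100000, 9                          # illustrative sizes (one cloud, DALES classes)
test_probs = np.zeros((n_points, n_classes), dtype=np.float32)
possibility = np.random.rand(n_points) * 1e-3            # small random init, as in the batch generator

def accumulate_votes(p_idx, probs, dists):
    # Exponential moving average: earlier votes decay by test_smooth on every pass.
    test_probs[p_idx] = test_smooth * test_probs[p_idx] + (1 - test_smooth) * probs
    # Raise possibility most for points near the query centre, delta = (1 - d/d_max)^2,
    # so argmin(possibility) steers the next query towards under-visited regions.
    possibility[p_idx] += np.square(1 - dists / np.max(dists))

p_idx = np.random.choice(n_points, 4096, replace=False)  # indices returned for one batch
probs = np.random.rand(4096, n_classes).astype(np.float32)
dists = np.random.rand(4096).astype(np.float32)          # squared distances to the pick point
accumulate_votes(p_idx, probs, dists)
print('min possibility:', float(np.min(possibility)))    # the value logged at each epoch end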
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. 
More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. 
Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. 
Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. 
You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. 
Nothing in this Public License constitutes or may be interpreted 414 | as a limitation upon, or waiver of, any privileges and immunities 415 | that apply to the Licensor or You, including from the legal 416 | processes of any jurisdiction or authority. 417 | 418 | ======================================================================= 419 | 420 | Creative Commons is not a party to its public 421 | licenses. Notwithstanding, Creative Commons may elect to apply one of 422 | its public licenses to material it publishes and in those instances 423 | will be considered the “Licensor.” The text of the Creative Commons 424 | public licenses is dedicated to the public domain under the CC0 Public 425 | Domain Dedication. Except for the limited purpose of indicating that 426 | material is shared under a Creative Commons public license or as 427 | otherwise permitted by the Creative Commons policies published at 428 | creativecommons.org/policies, Creative Commons does not authorize the 429 | use of the trademark "Creative Commons" or any other trademark or logo 430 | of Creative Commons without its prior written consent including, 431 | without limitation, in connection with any unauthorized modifications 432 | to any of its public licenses or any other arrangements, 433 | understandings, or agreements concerning use of licensed material. For 434 | the avoidance of doubt, this paragraph does not form part of the 435 | public licenses. 436 | 437 | Creative Commons may be contacted at creativecommons.org. 438 | -------------------------------------------------------------------------------- /main_DALES.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, dirname, abspath 2 | from EyeNet import Network 3 | from tester_DALES import ModelTester 4 | from helper_ply import read_ply 5 | from tool import ConfigDALES as cfg 6 | from tool import DataProcessing as DP 7 | import tensorflow as tf 8 | import numpy as np 9 | import time, pickle, argparse, glob, os, shutil 10 | 11 | 12 | class Dataset: 13 | def __init__(self, mode='train'): 14 | self.name = 'DALES' 15 | self.path = cfg.data_set_dir 16 | self.label_to_names = {0: 'Unknown', 1: 'Ground', 2: 'Vegetation', 3: 'Cars', 17 | 4: 'Trucks', 5: 'Power lines', 6: 'Poles', 7: 'Fences', 8: 'Buildings'} 18 | self.num_classes = len(self.label_to_names) 19 | self.label_values = np.sort([k for k, v in self.label_to_names.items()]) 20 | self.label_to_idx = {l: i for i, l in enumerate(self.label_values)} 21 | self.ignored_labels = np.array([0]) 22 | self.val_file_name = [] 23 | 24 | self.test_file_name = ["5080_54400","5080_54470","5100_54440", 25 | "5100_54490","5120_54445","5135_54430", 26 | "5135_54435","5140_54390","5150_54325", 27 | "5155_54335","5175_54395"] 28 | 29 | self.all_files = np.sort(glob.glob(join(self.path, 'original_files', '*.ply'))) 30 | #self.val_file_name = [join(self.path, 'original_files', files + '.ply') for files in self.val_file_name] 31 | self.test_file_name = [join(self.path, 'original_files', files + '.ply') for files in self.test_file_name] 32 | 33 | self.use_val = cfg.use_val_data # whether use validation set or not 34 | 35 | 36 | # initialize 37 | self.num_per_class = np.zeros(self.num_classes) 38 | self.val_proj = [] 39 | self.val_labels = [] 40 | self.test_proj = [] 41 | self.test_labels = [] 42 | self.possibility = {} 43 | self.min_possibility = {} 44 | self.class_weight = {}#class weight testing 45 | self.input_trees = {'training': [], 'validation': [], 'test': []} 
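# one list per split; each list later receives one entry per loaded cloud, indexed by cloud_idx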
46 | #self.input_colors = {'training': [], 'validation': [], 'test': []} 47 | self.input_labels = {'training': [], 'validation': [], 'test': []} 48 | self.input_names = {'training': [], 'validation': [], 'test': []} 49 | self.load_sub_sampled_clouds(cfg.sub_grid_size, mode) 50 | for ignore_label in self.ignored_labels: 51 | self.num_per_class = np.delete(self.num_per_class, ignore_label) 52 | 53 | def load_sub_sampled_clouds(self, sub_grid_size, mode): 54 | tree_path = join(self.path, 'grid_{:.3f}'.format(sub_grid_size)) 55 | if mode == 'test': 56 | files = self.test_file_name 57 | else: 58 | files = self.all_files 59 | 60 | for i, file_path in enumerate(files): 61 | t0 = time.time() 62 | cloud_name = file_path.split('/')[-1][:-4] 63 | if mode == 'test': 64 | cloud_split = 'test' 65 | else: 66 | if file_path in self.val_file_name: 67 | cloud_split = 'validation' 68 | else: 69 | cloud_split = 'training' 70 | 71 | # Name of the input files 72 | kd_tree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name)) 73 | sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name)) 74 | 75 | data = read_ply(sub_ply_file) 76 | #sub_colors = data['num_return'].reshape(-1, 1) 77 | sub_labels = data['class'] 78 | 79 | # compute num_per_class in training set 80 | if cloud_split == 'training': 81 | self.num_per_class += DP.get_num_class_from_label(sub_labels, self.num_classes) 82 | 83 | # Read pkl with search tree 84 | with open(kd_tree_file, 'rb') as f: 85 | search_tree = pickle.load(f) 86 | 87 | self.input_trees[cloud_split] += [search_tree] 88 | #self.input_colors[cloud_split] += [sub_colors] 89 | self.input_labels[cloud_split] += [sub_labels] 90 | self.input_names[cloud_split] += [cloud_name] 91 | 92 | size = sub_labels.shape[0] * 4 * 7 93 | print('{:s} {:.1f} MB loaded in {:.1f}s'.format(kd_tree_file.split('/')[-1], size * 1e-6, time.time() - t0)) 94 | 95 | if cloud_split == 'test': 96 | print('\nPreparing reprojection indices for {}'.format(cloud_name)) 97 | proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) 98 | with open(proj_file, 'rb') as f: 99 | proj_idx, labels = pickle.load(f) 100 | self.test_proj += [proj_idx] 101 | self.test_labels += [labels] 102 | #print('\nPreparing reprojected indices for testing') 103 | 104 | # Get validation and test reprojected indices 105 | """ 106 | for i, file_path in enumerate(files): 107 | t0 = time.time() 108 | cloud_name = file_path.split('/')[-1][:-4] 109 | print(cloud_name) 110 | 111 | # val projection and labels 112 | if cloud_name in self.val_file_name: 113 | proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) 114 | with open(proj_file, 'rb') as f: 115 | proj_idx, labels = pickle.load(f) 116 | self.val_proj += [proj_idx] 117 | self.val_labels += [labels] 118 | print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0)) 119 | 120 | # test projection and labels 121 | if cloud_name in self.test_file_name: 122 | proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name)) 123 | with open(proj_file, 'rb') as f: 124 | proj_idx, labels = pickle.load(f) 125 | self.test_proj += [proj_idx] 126 | self.test_labels += [labels] 127 | print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0)) 128 | """ 129 | def get_batch_gen(self, split): 130 | if split == 'training': 131 | num_per_epoch = cfg.train_steps * cfg.batch_size 132 | elif split == 'validation': 133 | num_per_epoch = cfg.val_steps * cfg.val_batch_size 134 | else: 135 | num_per_epoch = cfg.val_steps * cfg.val_batch_size 136 | 137 | # Reset possibility 138 | 
self.possibility[split] = [] 139 | self.min_possibility[split] = [] 140 | #self.class_weight[split] = [] 141 | for i, tree in enumerate(self.input_labels[split]): 142 | self.possibility[split] += [np.random.rand(tree.data.shape[0]) * 1e-3] 143 | self.min_possibility[split] += [float(np.min(self.possibility[split][-1]))] 144 | 145 | ######################################class weight testing##################################### 146 | #if split != 'test': 147 | # _, num_class_total = np.unique(np.hstack(self.input_labels[split]), return_counts=True) 148 | # self.class_weight[split] += [np.squeeze([num_class_total / np.sum(num_class_total)], axis=0)] 149 | ################################################################################################ 150 | 151 | def spatially_regular_gen(): 152 | 153 | # Generator loop 154 | for i in range(num_per_epoch): # num_per_epoch 155 | 156 | # Choose the cloud with the lowest probability 157 | cloud_idx = int(np.argmin(self.min_possibility[split])) 158 | 159 | # choose the point with the minimum of possibility in the cloud as query point 160 | point_ind = np.argmin(self.possibility[split][cloud_idx]) 161 | 162 | # Get all points within the cloud from tree structure 163 | points = np.array(self.input_trees[split][cloud_idx].data, copy=False) 164 | 165 | # Center point of input region 166 | center_point = points[point_ind, :].reshape(1, -1) 167 | 168 | # Add noise to the center point 169 | noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape) 170 | pick_point = center_point + noise.astype(center_point.dtype) 171 | 172 | #collect points for base receptive field 173 | if len(points) < cfg.num_points * 4 //7: 174 | base_queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=len(points))[1][0] 175 | else: 176 | base_queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=cfg.num_points * 4 //7)[1][0] 177 | 178 | # Shuffle index for base 179 | base_queried_idx = DP.shuffle_idx(base_queried_idx) 180 | 181 | # Get corresponding points and colors based on the index 182 | base_queried_pc_xyz = points[base_queried_idx] 183 | base_queried_pc_xyz[:, 0:2] = base_queried_pc_xyz[:, 0:2] - pick_point[:, 0:2] 184 | #base_queried_pc_colors = self.input_colors[split][cloud_idx][base_queried_idx] 185 | base_queried_pc_labels = self.input_labels[split][cloud_idx][base_queried_idx] 186 | base_queried_pc_labels = np.array([self.label_to_idx[l] for l in base_queried_pc_labels]) 187 | 188 | # Collect points and colors for medium receptive field 189 | base_dists = np.sum(np.square((points[base_queried_idx] - pick_point).astype(np.float32)), axis=1) 190 | base_r = np.sqrt(np.max(base_dists)) 191 | 192 | medium_r = base_r*2 193 | 194 | ind = self.input_trees[split][cloud_idx].query_radius(pick_point, r=medium_r)[0] 195 | medium_queried_idx = np.setdiff1d(ind, base_queried_idx,assume_unique=True) 196 | 197 | medium_queried_idx = DP.shuffle_idx(medium_queried_idx)[:cfg.num_points * 3 //7] 198 | 199 | medium_queried_pc_xyz = points[medium_queried_idx] 200 | medium_queried_pc_xyz[:, 0:2] = medium_queried_pc_xyz[:, 0:2] - pick_point[:, 0:2] 201 | 202 | #medium_queried_pc_colors = self.input_colors[split][cloud_idx][medium_queried_idx] 203 | medium_queried_pc_labels = self.input_labels[split][cloud_idx][medium_queried_idx] 204 | medium_queried_pc_labels = np.array([self.label_to_idx[l] for l in medium_queried_pc_labels]) 205 | 206 | 207 | if len(points) < cfg.num_points * 4 //7: 208 | base_queried_pc_xyz, base_queried_idx, 
base_queried_pc_labels = \ 209 | DP.data_aug_xyz(base_queried_pc_xyz, 210 | base_queried_pc_labels, 211 | base_queried_idx, 212 | cfg.num_points * 4 // 7) 213 | 214 | if len(medium_queried_pc_xyz) < cfg.num_points * 3 // 7: 215 | medium_queried_pc_xyz, medium_queried_idx, medium_queried_pc_labels = \ 216 | DP.data_aug_xyz(medium_queried_pc_xyz, 217 | medium_queried_pc_labels, 218 | medium_queried_idx, 219 | cfg.num_points * 3 // 7) 220 | 221 | # Concatenate base and medium indices 222 | query_idx = np.concatenate((base_queried_idx, medium_queried_idx)) 223 | queried_pc_labels = np.concatenate((base_queried_pc_labels, medium_queried_pc_labels), axis=0) 224 | #if split != 'test': 225 | # queried_pt_weight = np.array([self.class_weight[split][0][n] for n in queried_pc_labels]) 226 | #else: 227 | # queried_pt_weight = 1 228 | 229 | # Update the possibility of the selected points 230 | dists = np.sum(np.square((points[query_idx] - pick_point).astype(np.float32)), axis=1) 231 | #delta = np.square(1 - dists / np.max(dists)) * queried_pt_weight 232 | delta = np.square(1 - dists / np.max(dists)) 233 | self.possibility[split][cloud_idx][query_idx] += delta 234 | self.min_possibility[split][cloud_idx] = float(np.min(self.possibility[split][cloud_idx])) 235 | 236 | # Combine base and medium points 237 | queried_pc_xyz = np.concatenate((base_queried_pc_xyz, medium_queried_pc_xyz), axis=0) 238 | #queried_pc_colors = np.concatenate((base_queried_pc_colors, medium_queried_pc_colors), axis=0) 239 | 240 | 241 | # TODO: add functions that do not take the number of returns as a feature 242 | if True: 243 | yield (queried_pc_xyz, 244 | queried_pc_labels, 245 | query_idx.astype(np.int32), 246 | np.array([cloud_idx], dtype=np.int32)) 247 | 248 | gen_func = spatially_regular_gen 249 | gen_types = (tf.float32, tf.int32, tf.int32, tf.int32) 250 | gen_shapes = ([None, 3], [None], [None], [None]) 251 | return gen_func, gen_types, gen_shapes 252 | 253 | #@staticmethod 254 | def get_tf_mapping2(self): 255 | 256 | def tf_map(batch_xyz, batch_labels, batch_pc_idx, batch_cloud_idx): 257 | if cfg.data_augmentation: 258 | batch_features = tf.map_fn(self.tf_augment_input, batch_xyz, dtype=tf.float32) 259 | else: 260 | batch_features = batch_xyz 261 | # Separate points into base and medium receptive fields 262 | b_batch_xyz, m_batch_xyz = batch_xyz[:, :cfg.num_points * 4 // 7, :], batch_xyz[:, cfg.num_points * 4 // 7:, :] 263 | b_batch_features, m_batch_features = batch_features[:, :cfg.num_points * 4 // 7, :], batch_features[:, cfg.num_points * 4 // 7:, :] 264 | b_batch_xyz_opp = b_batch_xyz 265 | m_batch_xyz_opp = m_batch_xyz 266 | b_input_points = [] 267 | b_input_neighbors = [] 268 | b_input_pools = [] 269 | b_input_up_samples = [] 270 | 271 | 272 | m_input_points = [] 273 | m_input_neighbors = [] 274 | m_input_pools = [] 275 | m_input_up_samples = [] 276 | 277 | # Currently this always assumes the last subsampling ratio to be 4. Is it even possible to use 2 like the original RandLA-Net implementation?
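# Per layer i, the loop below builds four index sets for the base points:
# KNN neighbour indices within the current base set; a pooled subset taken as
# the first 1/cfg.sub_sampling_ratio[i] slice (valid because the query indices
# were shuffled beforehand); pooling indices restricted to that subset; and
# 1-NN up-sampling indices mapping each point to its nearest pooled point.
# The same bookkeeping is then repeated for the combined base + medium set.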
278 | for i in range(cfg.num_layers): 279 | # Process base points 280 | neigh_idx = tf.py_func(DP.knn_search, [b_batch_xyz, b_batch_xyz, cfg.k_n[i]], tf.int32) 281 | sub_points = b_batch_xyz[:, :tf.shape(b_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 282 | if i == 0: 283 | sub_features = b_batch_features[:, :tf.shape(b_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 284 | m_batch_features = tf.concat((sub_features, m_batch_features), 1) 285 | pool_i = neigh_idx[:, :tf.shape(b_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 286 | up_i = tf.py_func(DP.knn_search, [sub_points, b_batch_xyz, 1], tf.int32) 287 | 288 | b_input_points.append(b_batch_xyz) 289 | b_input_neighbors.append(neigh_idx) 290 | b_input_pools.append(pool_i) 291 | b_input_up_samples.append(up_i) 292 | 293 | 294 | if cfg.sub_sampling_ratio[i] == cfg.connection_ratio: 295 | b_batch_xyz = sub_points 296 | else: 297 | additional_sampling_ratio = cfg.connection_ratio // cfg.sub_sampling_ratio[i] 298 | b_batch_xyz = sub_points[:, :tf.shape(sub_points)[1] // additional_sampling_ratio, :] 299 | 300 | # Process medium points 301 | m_input_data = tf.concat((b_batch_xyz, m_batch_xyz), 1) 302 | m_neigh_idx = tf.py_func(DP.knn_search, [m_input_data, m_input_data, cfg.k_n[i]], tf.int32) 303 | 304 | m_b_neigh_idx = m_neigh_idx[:, :tf.shape(b_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 305 | m_m_neigh_idx = m_neigh_idx[:, tf.shape(b_batch_xyz)[1]: tf.shape(b_batch_xyz)[1] + tf.shape(m_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 306 | m_pool_i = tf.concat((m_b_neigh_idx, m_m_neigh_idx), 1) 307 | 308 | b_sub_points = b_batch_xyz[:, :tf.shape(b_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 309 | m_batch_xyz = m_batch_xyz[:, :tf.shape(m_batch_xyz)[1] // cfg.sub_sampling_ratio[i], :] 310 | m_sub_points = tf.concat((b_sub_points, m_batch_xyz), 1) 311 | 312 | m_up_i = tf.py_func(DP.knn_search, [m_sub_points, m_input_data, 1], tf.int32) 313 | 314 | m_input_points.append(m_input_data)  # [12288, 3072, 768, 192, 48] 315 | m_input_neighbors.append(m_neigh_idx) 316 | m_input_pools.append(m_pool_i) 317 | m_input_up_samples.append(m_up_i) 318 | ########################################################################################## 319 | 320 | opp = b_batch_xyz_opp[:, tf.shape(b_batch_xyz_opp)[1] // cfg.sub_sampling_ratio[0]:, :] 321 | cat_xyz = tf.concat((b_input_points[1], opp, m_batch_xyz_opp), axis=1) 322 | reorder_idx = tf.py_func(DP.knn_search, [cat_xyz, batch_xyz, 1], tf.int32) 323 | 324 | 325 | input_list = b_input_points + b_input_neighbors + b_input_pools + b_input_up_samples + m_input_points + m_input_neighbors + m_input_pools + m_input_up_samples 326 | input_list += [b_batch_features, m_batch_features, batch_labels, batch_pc_idx, batch_cloud_idx, reorder_idx] 327 | 328 | return input_list 329 | 330 | return tf_map 331 | 332 | # Data augmentation 333 | @staticmethod 334 | def tf_augment_input(inputs): 335 | xyz = inputs 336 | theta = tf.random_uniform((1,), minval=0, maxval=2 * np.pi) 337 | # Rotation matrices 338 | c, s = tf.cos(theta), tf.sin(theta) 339 | cs0 = tf.zeros_like(c) 340 | cs1 = tf.ones_like(c) 341 | R = tf.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1) 342 | stacked_rots = tf.reshape(R, (3, 3)) 343 | 344 | # Apply rotations 345 | transformed_xyz = tf.reshape(tf.matmul(xyz, stacked_rots), [-1, 3]) 346 | # Choose random scales for each example 347 | min_s = cfg.augment_scale_min 348 | max_s = cfg.augment_scale_max 349 | if cfg.augment_scale_anisotropic: 350 | s = tf.random_uniform((1, 3), minval=min_s, maxval=max_s) 351 | 
else: 352 | s = tf.random_uniform((1, 1), minval=min_s, maxval=max_s) 353 | 354 | symmetries = [] 355 | for i in range(3): 356 | if cfg.augment_symmetries[i]: 357 | symmetries.append(tf.round(tf.random_uniform((1, 1))) * 2 - 1) 358 | else: 359 | symmetries.append(tf.ones([1, 1], dtype=tf.float32)) 360 | s *= tf.concat(symmetries, 1) 361 | 362 | # Create an N x 3 vector of scales to multiply with transformed_xyz 363 | stacked_scales = tf.tile(s, [tf.shape(transformed_xyz)[0], 1]) 364 | 365 | # Apply scales 366 | transformed_xyz = transformed_xyz * stacked_scales 367 | 368 | noise = tf.random_normal(tf.shape(transformed_xyz), stddev=cfg.augment_noise) 369 | transformed_xyz = transformed_xyz + noise 370 | return transformed_xyz 371 | 372 | def init_input_pipeline(self): 373 | print('Initiating input pipelines') 374 | cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels] 375 | gen_function, gen_types, gen_shapes = self.get_batch_gen('training') 376 | gen_function_val, _, _ = self.get_batch_gen('validation') 377 | gen_function_test, _, _ = self.get_batch_gen('test') 378 | self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes) 379 | self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes) 380 | self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes) 381 | 382 | self.batch_train_data = self.train_data.batch(cfg.batch_size) 383 | self.batch_val_data = self.val_data.batch(cfg.val_batch_size) 384 | self.batch_test_data = self.test_data.batch(cfg.val_batch_size) 385 | map_func = self.get_tf_mapping2() 386 | 387 | self.batch_train_data = self.batch_train_data.map(map_func=map_func) 388 | self.batch_val_data = self.batch_val_data.map(map_func=map_func) 389 | self.batch_test_data = self.batch_test_data.map(map_func=map_func) 390 | 391 | self.batch_train_data = self.batch_train_data.prefetch(cfg.batch_size) 392 | self.batch_val_data = self.batch_val_data.prefetch(cfg.val_batch_size) 393 | self.batch_test_data = self.batch_test_data.prefetch(cfg.val_batch_size) 394 | 395 | data_iter = tf.data.Iterator.from_structure(self.batch_train_data.output_types, self.batch_train_data.output_shapes) 396 | self.flat_inputs = data_iter.get_next() 397 | self.train_init_op = data_iter.make_initializer(self.batch_train_data) 398 | self.val_init_op = data_iter.make_initializer(self.batch_val_data) 399 | self.test_init_op = data_iter.make_initializer(self.batch_test_data) 400 | 401 | 402 | if __name__ == '__main__': 403 | parser = argparse.ArgumentParser() 404 | parser.add_argument('--gpu', type=int, default=0, help='which GPU to use [default: 0]') 405 | parser.add_argument('--mode', type=str, default='train', help='options: train, val, test, vis') 406 | parser.add_argument('--model_path', type=str, default='None', help='pretrained model path') 407 | FLAGS = parser.parse_args() 408 | 409 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 410 | os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu) 411 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 412 | Mode = FLAGS.mode 413 | 414 | dataset = Dataset(mode=Mode) 415 | dataset.init_input_pipeline() 416 | 417 | if Mode == 'train': 418 | model = Network(dataset, cfg) 419 | model.train(dataset) 420 | elif Mode == 'test': 421 | cfg.saving = False 422 | model = Network(dataset, cfg) 423 | chosen_snap = FLAGS.model_path 424 | tester = ModelTester(model, dataset, restore_snap=chosen_snap) 425 | tester.test(model, dataset) 426 | 427 | else: 428 | raise ValueError('mode not 
supported') 429 | -------------------------------------------------------------------------------- /tf_util.py: -------------------------------------------------------------------------------- 1 | """ Wrapper functions for TensorFlow layers. 2 | 3 | Author: Charles R. Qi 4 | Date: November 2016 5 | """ 6 | 7 | import numpy as np 8 | import tensorflow as tf 9 | 10 | 11 | def _variable_on_cpu(name, shape, initializer, use_fp16=False): 12 | """Helper to create a Variable stored on CPU memory. 13 | Args: 14 | name: name of the variable 15 | shape: list of ints 16 | initializer: initializer for Variable 17 | Returns: 18 | Variable Tensor 19 | """ 20 | with tf.device('/cpu:0'): 21 | dtype = tf.float16 if use_fp16 else tf.float32 22 | var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) 23 | return var 24 | 25 | 26 | def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True): 27 | """Helper to create an initialized Variable with weight decay. 28 | 29 | Note that the Variable is initialized with a truncated normal distribution. 30 | A weight decay is added only if one is specified. 31 | 32 | Args: 33 | name: name of the variable 34 | shape: list of ints 35 | stddev: standard deviation of a truncated Gaussian 36 | wd: add L2Loss weight decay multiplied by this float. If None, weight 37 | decay is not added for this Variable. 38 | use_xavier: bool, whether to use xavier initializer 39 | 40 | Returns: 41 | Variable Tensor 42 | """ 43 | if use_xavier: 44 | initializer = tf.contrib.layers.xavier_initializer() 45 | var = _variable_on_cpu(name, shape, initializer) 46 | else: 47 | # initializer = tf.truncated_normal_initializer(stddev=stddev) 48 | with tf.device('/cpu:0'): 49 | var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1])) 50 | var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32) 51 | var = tf.Variable(var, name='weights') 52 | if wd is not None: 53 | weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') 54 | tf.add_to_collection('losses', weight_decay) 55 | return var 56 | 57 | 58 | def conv1d(inputs, 59 | num_output_channels, 60 | kernel_size, 61 | scope, 62 | stride=1, 63 | padding='SAME', 64 | use_xavier=True, 65 | stddev=1e-3, 66 | weight_decay=0.0, 67 | activation_fn=tf.nn.relu, 68 | bn=False, 69 | bn_decay=None, 70 | is_training=None): 71 | """ 1D convolution with non-linear operation. 
72 | 73 | Args: 74 | inputs: 3-D tensor variable BxLxC 75 | num_output_channels: int 76 | kernel_size: int 77 | scope: string 78 | stride: int 79 | padding: 'SAME' or 'VALID' 80 | use_xavier: bool, use xavier_initializer if true 81 | stddev: float, stddev for truncated_normal init 82 | weight_decay: float 83 | activation_fn: function 84 | bn: bool, whether to use batch norm 85 | bn_decay: float or float tensor variable in [0,1] 86 | is_training: bool Tensor variable 87 | 88 | Returns: 89 | Variable tensor 90 | """ 91 | with tf.variable_scope(scope) as sc: 92 | num_in_channels = inputs.get_shape()[-1].value 93 | kernel_shape = [kernel_size, 94 | num_in_channels, num_output_channels] 95 | kernel = _variable_with_weight_decay('weights', 96 | shape=kernel_shape, 97 | use_xavier=use_xavier, 98 | stddev=stddev, 99 | wd=weight_decay) 100 | outputs = tf.nn.conv1d(inputs, kernel, 101 | stride=stride, 102 | padding=padding) 103 | biases = _variable_on_cpu('biases', [num_output_channels], 104 | tf.constant_initializer(0.0)) 105 | outputs = tf.nn.bias_add(outputs, biases) 106 | 107 | if bn: 108 | outputs = batch_norm_for_conv1d(outputs, is_training, 109 | bn_decay=bn_decay, scope='bn') 110 | if activation_fn is not None: 111 | outputs = activation_fn(outputs) 112 | return outputs 113 | 114 | 115 | def conv2d(inputs, 116 | num_output_channels, 117 | kernel_size, 118 | scope, 119 | stride=[1, 1], 120 | padding='SAME', 121 | bn=False, 122 | is_training=None, 123 | use_xavier=False, 124 | stddev=1e-3, 125 | weight_decay=0.0, 126 | activation_fn=tf.nn.relu, 127 | bn_decay=None): 128 | """ 2D convolution with non-linear operation. 129 | 130 | Args: 131 | inputs: 4-D tensor variable BxHxWxC 132 | num_output_channels: int 133 | kernel_size: a list of 2 ints 134 | scope: string 135 | stride: a list of 2 ints 136 | padding: 'SAME' or 'VALID' 137 | use_xavier: bool, use xavier_initializer if true 138 | stddev: float, stddev for truncated_normal init 139 | weight_decay: float 140 | activation_fn: function 141 | bn: bool, whether to use batch norm 142 | bn_decay: float or float tensor variable in [0,1] 143 | is_training: bool Tensor variable 144 | 145 | Returns: 146 | Variable tensor 147 | """ 148 | with tf.variable_scope(scope) as sc: 149 | kernel_h, kernel_w = kernel_size 150 | num_in_channels = inputs.get_shape()[-1].value 151 | kernel_shape = [kernel_h, kernel_w, 152 | num_in_channels, num_output_channels] 153 | kernel = _variable_with_weight_decay('weights', 154 | shape=kernel_shape, 155 | use_xavier=use_xavier, 156 | stddev=stddev, 157 | wd=weight_decay) 158 | stride_h, stride_w = stride 159 | outputs = tf.nn.conv2d(inputs, kernel, 160 | [1, stride_h, stride_w, 1], 161 | padding=padding) 162 | biases = _variable_on_cpu('biases', [num_output_channels], 163 | tf.constant_initializer(0.0)) 164 | outputs = tf.nn.bias_add(outputs, biases) 165 | 166 | if bn: 167 | outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) 168 | if activation_fn is not None: 169 | outputs = tf.nn.leaky_relu(outputs, alpha=0.2) 170 | return outputs 171 | 172 | 173 | def conv2d_transpose(inputs, 174 | num_output_channels, 175 | kernel_size, 176 | scope, 177 | stride=[1, 1], 178 | padding='SAME', 179 | use_xavier=False, 180 | stddev=1e-3, 181 | weight_decay=0.0, 182 | activation_fn=tf.nn.relu, 183 | bn=False, 184 | bn_decay=None, 185 | is_training=None): 186 | """ 2D convolution transpose with non-linear operation. 
187 | 188 | Args: 189 | inputs: 4-D tensor variable BxHxWxC 190 | num_output_channels: int 191 | kernel_size: a list of 2 ints 192 | scope: string 193 | stride: a list of 2 ints 194 | padding: 'SAME' or 'VALID' 195 | use_xavier: bool, use xavier_initializer if true 196 | stddev: float, stddev for truncated_normal init 197 | weight_decay: float 198 | activation_fn: function 199 | bn: bool, whether to use batch norm 200 | bn_decay: float or float tensor variable in [0,1] 201 | is_training: bool Tensor variable 202 | 203 | Returns: 204 | Variable tensor 205 | 206 | Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a 207 | """ 208 | with tf.variable_scope(scope) as sc: 209 | kernel_h, kernel_w = kernel_size 210 | num_in_channels = inputs.get_shape()[-1].value 211 | kernel_shape = [kernel_h, kernel_w, 212 | num_output_channels, num_in_channels] # reversed relative to conv2d 213 | kernel = _variable_with_weight_decay('weights', 214 | shape=kernel_shape, 215 | use_xavier=use_xavier, 216 | stddev=stddev, 217 | wd=weight_decay) 218 | stride_h, stride_w = stride 219 | 220 | # from slim.convolution2d_transpose 221 | def get_deconv_dim(dim_size, stride_size, kernel_size, padding): 222 | dim_size *= stride_size 223 | 224 | if padding == 'VALID' and dim_size is not None: 225 | dim_size += max(kernel_size - stride_size, 0) 226 | return dim_size 227 | 228 | # calculate output shape 229 | batch_size = tf.shape(inputs)[0] 230 | height = tf.shape(inputs)[1] 231 | width = tf.shape(inputs)[2] 232 | out_height = get_deconv_dim(height, stride_h, kernel_h, padding) 233 | out_width = get_deconv_dim(width, stride_w, kernel_w, padding) 234 | output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0) 235 | 236 | outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, 237 | [1, stride_h, stride_w, 1], 238 | padding=padding) 239 | biases = _variable_on_cpu('biases', [num_output_channels], 240 | tf.constant_initializer(0.0)) 241 | outputs = tf.nn.bias_add(outputs, biases) 242 | 243 | if bn: 244 | # outputs = batch_norm_for_conv2d(outputs, is_training, 245 | # bn_decay=bn_decay, scope='bn') 246 | outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training) 247 | if activation_fn is not None: 248 | # outputs = activation_fn(outputs) 249 | outputs = tf.nn.leaky_relu(outputs, alpha=0.2) 250 | return outputs 251 | 252 | 253 | def conv3d(inputs, 254 | num_output_channels, 255 | kernel_size, 256 | scope, 257 | stride=[1, 1, 1], 258 | padding='SAME', 259 | use_xavier=True, 260 | stddev=1e-3, 261 | weight_decay=0.0, 262 | activation_fn=tf.nn.relu, 263 | bn=False, 264 | bn_decay=None, 265 | is_training=None): 266 | """ 3D convolution with non-linear operation. 
267 | 268 | Args: 269 | inputs: 5-D tensor variable BxDxHxWxC 270 | num_output_channels: int 271 | kernel_size: a list of 3 ints 272 | scope: string 273 | stride: a list of 3 ints 274 | padding: 'SAME' or 'VALID' 275 | use_xavier: bool, use xavier_initializer if true 276 | stddev: float, stddev for truncated_normal init 277 | weight_decay: float 278 | activation_fn: function 279 | bn: bool, whether to use batch norm 280 | bn_decay: float or float tensor variable in [0,1] 281 | is_training: bool Tensor variable 282 | 283 | Returns: 284 | Variable tensor 285 | """ 286 | with tf.variable_scope(scope) as sc: 287 | kernel_d, kernel_h, kernel_w = kernel_size 288 | num_in_channels = inputs.get_shape()[-1].value 289 | kernel_shape = [kernel_d, kernel_h, kernel_w, 290 | num_in_channels, num_output_channels] 291 | kernel = _variable_with_weight_decay('weights', 292 | shape=kernel_shape, 293 | use_xavier=use_xavier, 294 | stddev=stddev, 295 | wd=weight_decay) 296 | stride_d, stride_h, stride_w = stride 297 | outputs = tf.nn.conv3d(inputs, kernel, 298 | [1, stride_d, stride_h, stride_w, 1], 299 | padding=padding) 300 | biases = _variable_on_cpu('biases', [num_output_channels], 301 | tf.constant_initializer(0.0)) 302 | outputs = tf.nn.bias_add(outputs, biases) 303 | 304 | if bn: 305 | outputs = batch_norm_for_conv3d(outputs, is_training, 306 | bn_decay=bn_decay, scope='bn') 307 | 308 | if activation_fn is not None: 309 | outputs = activation_fn(outputs) 310 | return outputs 311 | 312 | 313 | def fully_connected(inputs, 314 | num_outputs, 315 | scope, 316 | use_xavier=True, 317 | stddev=1e-3, 318 | weight_decay=0.0, 319 | activation_fn=tf.nn.relu, 320 | bn=False, 321 | bn_decay=None, 322 | is_training=None): 323 | """ Fully connected layer with non-linear operation. 324 | 325 | Args: 326 | inputs: 2-D tensor BxN 327 | num_outputs: int 328 | 329 | Returns: 330 | Variable tensor of size B x num_outputs. 331 | """ 332 | with tf.variable_scope(scope) as sc: 333 | num_input_units = inputs.get_shape()[-1].value 334 | weights = _variable_with_weight_decay('weights', 335 | shape=[num_input_units, num_outputs], 336 | use_xavier=use_xavier, 337 | stddev=stddev, 338 | wd=weight_decay) 339 | outputs = tf.matmul(inputs, weights) 340 | biases = _variable_on_cpu('biases', [num_outputs], 341 | tf.constant_initializer(0.0)) 342 | outputs = tf.nn.bias_add(outputs, biases) 343 | 344 | if bn: 345 | outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn') 346 | 347 | if activation_fn is not None: 348 | # outputs = activation_fn(outputs) 349 | outputs = tf.nn.leaky_relu(outputs, alpha=0.2) 350 | return outputs 351 | 352 | 353 | def max_pool2d(inputs, 354 | kernel_size, 355 | scope, 356 | stride=[2, 2], 357 | padding='VALID'): 358 | """ 2D max pooling. 359 | 360 | Args: 361 | inputs: 4-D tensor BxHxWxC 362 | kernel_size: a list of 2 ints 363 | stride: a list of 2 ints 364 | 365 | Returns: 366 | Variable tensor 367 | """ 368 | with tf.variable_scope(scope) as sc: 369 | kernel_h, kernel_w = kernel_size 370 | stride_h, stride_w = stride 371 | outputs = tf.nn.max_pool(inputs, 372 | ksize=[1, kernel_h, kernel_w, 1], 373 | strides=[1, stride_h, stride_w, 1], 374 | padding=padding, 375 | name=sc.name) 376 | return outputs 377 | 378 | 379 | def avg_pool2d(inputs, 380 | kernel_size, 381 | scope, 382 | stride=[2, 2], 383 | padding='VALID'): 384 | """ 2D avg pooling. 
385 | 386 | Args: 387 | inputs: 4-D tensor BxHxWxC 388 | kernel_size: a list of 2 ints 389 | stride: a list of 2 ints 390 | 391 | Returns: 392 | Variable tensor 393 | """ 394 | with tf.variable_scope(scope) as sc: 395 | kernel_h, kernel_w = kernel_size 396 | stride_h, stride_w = stride 397 | outputs = tf.nn.avg_pool(inputs, 398 | ksize=[1, kernel_h, kernel_w, 1], 399 | strides=[1, stride_h, stride_w, 1], 400 | padding=padding, 401 | name=sc.name) 402 | return outputs 403 | 404 | 405 | def max_pool3d(inputs, 406 | kernel_size, 407 | scope, 408 | stride=[2, 2, 2], 409 | padding='VALID'): 410 | """ 3D max pooling. 411 | 412 | Args: 413 | inputs: 5-D tensor BxDxHxWxC 414 | kernel_size: a list of 3 ints 415 | stride: a list of 3 ints 416 | 417 | Returns: 418 | Variable tensor 419 | """ 420 | with tf.variable_scope(scope) as sc: 421 | kernel_d, kernel_h, kernel_w = kernel_size 422 | stride_d, stride_h, stride_w = stride 423 | outputs = tf.nn.max_pool3d(inputs, 424 | ksize=[1, kernel_d, kernel_h, kernel_w, 1], 425 | strides=[1, stride_d, stride_h, stride_w, 1], 426 | padding=padding, 427 | name=sc.name) 428 | return outputs 429 | 430 | 431 | def avg_pool3d(inputs, 432 | kernel_size, 433 | scope, 434 | stride=[2, 2, 2], 435 | padding='VALID'): 436 | """ 3D avg pooling. 437 | 438 | Args: 439 | inputs: 5-D tensor BxDxHxWxC 440 | kernel_size: a list of 3 ints 441 | stride: a list of 3 ints 442 | 443 | Returns: 444 | Variable tensor 445 | """ 446 | with tf.variable_scope(scope) as sc: 447 | kernel_d, kernel_h, kernel_w = kernel_size 448 | stride_d, stride_h, stride_w = stride 449 | outputs = tf.nn.avg_pool3d(inputs, 450 | ksize=[1, kernel_d, kernel_h, kernel_w, 1], 451 | strides=[1, stride_d, stride_h, stride_w, 1], 452 | padding=padding, 453 | name=sc.name) 454 | return outputs 455 | 456 | 457 | def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay): 458 | """ Batch normalization on convolutional maps and beyond... 459 | Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow 460 | 461 | Args: 462 | inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC 463 | is_training: boolean tf.Variable, true indicates training phase 464 | scope: string, variable scope 465 | moments_dims: a list of ints, indicating dimensions for moments calculation 466 | bn_decay: float or float tensor variable, controlling moving average weight 467 | Return: 468 | normed: batch-normalized maps 469 | """ 470 | with tf.variable_scope(scope) as sc: 471 | num_channels = inputs.get_shape()[-1].value 472 | beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), 473 | name='beta', trainable=True) 474 | gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]), 475 | name='gamma', trainable=True) 476 | batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments') 477 | decay = bn_decay if bn_decay is not None else 0.9 478 | ema = tf.train.ExponentialMovingAverage(decay=decay) 479 | # Operator that maintains moving averages of variables. 480 | ema_apply_op = tf.cond(is_training, 481 | lambda: ema.apply([batch_mean, batch_var]), 482 | lambda: tf.no_op()) 483 | 484 | # Update moving average and return current batch's avg and var. 485 | def mean_var_with_update(): 486 | with tf.control_dependencies([ema_apply_op]): 487 | return tf.identity(batch_mean), tf.identity(batch_var) 488 | 489 | # ema.average returns the Variable holding the average of var. 
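# During training, the tf.cond below takes the first branch: the EMA update
# op runs and the current batch statistics are returned. At inference time it
# takes the second branch and returns the accumulated moving averages instead.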
490 | mean, var = tf.cond(is_training, 491 | mean_var_with_update, 492 | lambda: (ema.average(batch_mean), ema.average(batch_var))) 493 | normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3) 494 | return normed 495 | 496 | 497 | def batch_norm_for_fc(inputs, is_training, bn_decay, scope): 498 | """ Batch normalization on FC data. 499 | 500 | Args: 501 | inputs: Tensor, 2D BxC input 502 | is_training: boolean tf.Variable, true indicates training phase 503 | bn_decay: float or float tensor variable, controlling moving average weight 504 | scope: string, variable scope 505 | Return: 506 | normed: batch-normalized maps 507 | """ 508 | return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay) 509 | 510 | 511 | def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope): 512 | """ Batch normalization on 1D convolutional maps. 513 | 514 | Args: 515 | inputs: Tensor, 3D BLC input maps 516 | is_training: boolean tf.Variable, true indicates training phase 517 | bn_decay: float or float tensor variable, controlling moving average weight 518 | scope: string, variable scope 519 | Return: 520 | normed: batch-normalized maps 521 | """ 522 | return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay) 523 | 524 | 525 | def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope): 526 | """ Batch normalization on 2D convolutional maps. 527 | 528 | Args: 529 | inputs: Tensor, 4D BHWC input maps 530 | is_training: boolean tf.Variable, true indicates training phase 531 | bn_decay: float or float tensor variable, controlling moving average weight 532 | scope: string, variable scope 533 | Return: 534 | normed: batch-normalized maps 535 | """ 536 | return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay) 537 | 538 | 539 | def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope): 540 | """ Batch normalization on 3D convolutional maps. 541 | 542 | Args: 543 | inputs: Tensor, 5D BDHWC input maps 544 | is_training: boolean tf.Variable, true indicates training phase 545 | bn_decay: float or float tensor variable, controlling moving average weight 546 | scope: string, variable scope 547 | Return: 548 | normed: batch-normalized maps 549 | """ 550 | return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay) 551 | 552 | 553 | def dropout(inputs, 554 | is_training, 555 | scope, 556 | keep_prob=0.5, 557 | noise_shape=None): 558 | """ Dropout layer. 559 | 560 | Args: 561 | inputs: tensor 562 | is_training: boolean tf.Variable 563 | scope: string 564 | keep_prob: float in [0,1] 565 | noise_shape: list of ints 566 | 567 | Returns: 568 | tensor variable 569 | """ 570 | with tf.variable_scope(scope) as sc: 571 | outputs = tf.cond(is_training, 572 | lambda: tf.nn.dropout(inputs, keep_prob, noise_shape), 573 | lambda: inputs) 574 | return outputs 575 | --------------------------------------------------------------------------------
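Usage note: the wrappers above compose into a graph in a few lines. The snippet below is a minimal sketch assuming TensorFlow 1.x; the shapes, scope names, and the small classification head are illustrative only and not part of this repository. `is_training` must be a scalar boolean tensor because `dropout` (and `batch_norm_template`) branch on it with tf.cond, and `tf.layers.batch_normalization`, used inside `conv2d` when `bn=True`, updates its moving averages through the UPDATE_OPS collection.

import tensorflow as tf
import tf_util

# Hypothetical input: 8 clouds of 1024 points, shaped BxHxWxC so that a
# 1x1 conv2d acts as a shared per-point MLP.
points = tf.placeholder(tf.float32, shape=(8, 1024, 1, 3))
is_training = tf.placeholder(tf.bool, shape=())

net = tf_util.conv2d(points, 64, [1, 1], scope='mlp1', padding='VALID',
                     bn=True, is_training=is_training)
net = tf_util.conv2d(net, 128, [1, 1], scope='mlp2', padding='VALID',
                     bn=True, is_training=is_training)
net = tf_util.max_pool2d(net, [1024, 1], scope='maxpool')  # global feature
net = tf.reshape(net, [8, 128])
net = tf_util.fully_connected(net, 64, scope='fc1')
net = tf_util.dropout(net, is_training, scope='dp1', keep_prob=0.5)
logits = tf_util.fully_connected(net, 13, scope='fc2', activation_fn=None)

# Run these alongside the train op so batch-norm statistics are updated.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)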