├── display ├── ReadMe.md ├── car.gif ├── chair.gif ├── plane.gif ├── table.gif └── Qualitative.jpg ├── dmifnet ├── ReadMe.md ├── utils │ ├── libkdtree │ │ ├── README │ │ ├── setup.cfg │ │ ├── MANIFEST.in │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ └── pykdtree │ │ │ ├── kdtree.cpython-36m-x86_64-linux-gnu.so │ │ │ └── render_template.py │ ├── libvoxelize │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ ├── voxelize.cpython-36m-x86_64-linux-gnu.so │ │ └── voxelize.pyx │ ├── libmcubes │ │ ├── pyarray_symbol.h │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── exporter.cpython-36.pyc │ │ ├── mcubes.cpython-36m-x86_64-linux-gnu.so │ │ ├── __init__.py │ │ ├── pywrapper.h │ │ ├── LICENSE │ │ ├── mcubes.pyx │ │ ├── exporter.py │ │ └── README.rst │ ├── libmise │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ ├── mise.cpython-36m-x86_64-linux-gnu.so │ │ └── test.py │ ├── __pycache__ │ │ ├── io.cpython-36.pyc │ │ ├── voxels.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── binvox_rw.cpython-36.pyc │ │ └── visualize.cpython-36.pyc │ ├── libmesh │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── inside_mesh.cpython-36.pyc │ │ ├── triangle_hash.cpython-36m-x86_64-linux-gnu.so │ │ ├── __init__.py │ │ └── triangle_hash.pyx │ ├── libsimplify │ │ ├── __pycache__ │ │ │ └── __init__.cpython-36.pyc │ │ ├── simplify_mesh.cpython-36m-x86_64-linux-gnu.so │ │ ├── test.py │ │ ├── __init__.py │ │ └── simplify_mesh.pyx │ ├── io.py │ └── icp.py ├── data │ ├── __pycache__ │ │ ├── core.cpython-36.pyc │ │ ├── fields.cpython-36.pyc │ │ ├── real.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ └── transforms.cpython-36.pyc │ ├── __init__.py │ └── transforms.py ├── pix2mesh │ ├── ellipsoid │ │ └── info_ellipsoid.dat │ ├── __pycache__ │ │ ├── config.cpython-36.pyc │ │ ├── layers.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── training.cpython-36.pyc │ │ └── generation.cpython-36.pyc │ ├── models │ │ ├── __pycache__ │ │ │ ├── decoder.cpython-36.pyc │ │ │ └── __init__.cpython-36.pyc │ │ └── __init__.py │ ├── __init__.py │ ├── generation.py │ └── config.py ├── psgn │ ├── __pycache__ │ │ ├── config.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── training.cpython-36.pyc │ │ └── generation.cpython-36.pyc │ ├── models │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── decoder.cpython-36.pyc │ │ │ └── psgn_2branch.cpython-36.pyc │ │ ├── __init__.py │ │ ├── decoder.py │ │ └── psgn_2branch.py │ ├── __init__.py │ └── config.py ├── r2n2 │ ├── __pycache__ │ │ ├── config.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── training.cpython-36.pyc │ │ └── generation.cpython-36.pyc │ ├── models │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ └── decoder.cpython-36.pyc │ │ ├── __init__.py │ │ └── decoder.py │ ├── __init__.py │ ├── generation.py │ └── config.py ├── encoder │ ├── __pycache__ │ │ ├── conv.cpython-36.pyc │ │ ├── r2n2.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── batchnet.cpython-36.pyc │ │ ├── pointnet.cpython-36.pyc │ │ ├── voxels.cpython-36.pyc │ │ ├── psgn_cond.cpython-36.pyc │ │ ├── gaussian_conv.cpython-36.pyc │ │ ├── pix2mesh_cond.cpython-36.pyc │ │ └── channel_attention_1d.cpython-36.pyc │ ├── __init__.py │ ├── gaussian_conv.py │ ├── channel_attention_1d.py │ ├── pix2mesh_cond.py │ ├── voxels.py │ ├── psgn_cond.py │ ├── r2n2.py │ └── pointnet.py ├── dmc │ ├── models │ │ ├── __pycache__ │ │ │ ├── decoder.cpython-36.pyc │ │ │ ├── 
encoder.cpython-36.pyc │ │ │ └── __init__.cpython-36.pyc │ │ ├── __init__.py │ │ └── encoder.py │ ├── ops │ │ ├── _cuda_ext.cpython-36m-x86_64-linux-gnu.so │ │ ├── cpp_modules │ │ │ ├── pred2mesh.cpython-36m-x86_64-linux-gnu.so │ │ │ ├── old │ │ │ │ ├── pred_to_mesh.h │ │ │ │ ├── pred_to_mesh.cpp │ │ │ │ └── commons.cpp │ │ │ └── setup.py │ │ ├── setup.py │ │ ├── occupancy_connectivity.py │ │ ├── occupancy_to_topology.py │ │ ├── src │ │ │ └── kernels.h │ │ ├── point_triangle_distance.py │ │ ├── grid_pooling.py │ │ ├── curvature_constraint.py │ │ └── tests │ │ │ ├── test_distance.py │ │ │ ├── test_occupancy_connectivity_yiyi.py │ │ │ ├── test_curvature.py │ │ │ └── test_occupancy_connectivity.py │ ├── generation.py │ └── config.py ├── dmif_model │ ├── __pycache__ │ │ ├── config.cpython-36.pyc │ │ ├── __init__.cpython-36.pyc │ │ ├── generation.cpython-36.pyc │ │ └── training.cpython-36.pyc │ ├── models │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-36.pyc │ │ │ ├── decoder.cpython-36.pyc │ │ │ └── encoder_latent.cpython-36.pyc │ │ └── encoder_latent.py │ └── __init__.py ├── training.py ├── preprocess.py ├── checkpoints.py ├── io.py └── icp.py ├── external └── mesh-fusion │ ├── libmcubes │ ├── pyarray_symbol.h │ ├── __init__.py │ ├── pywrapper.h │ ├── LICENSE │ ├── mcubes.pyx │ ├── setup.py │ ├── exporter.py │ └── README.rst │ ├── screenshot.jpg │ ├── librender │ ├── __init__.py │ ├── offscreen.h │ ├── setup.py │ ├── pyrender.pyx │ └── test.py │ ├── libfusioncpu │ ├── __init__.py │ ├── README.md │ ├── LICENSE │ ├── setup.py │ ├── CMakeLists.txt │ └── fusion.cpp │ ├── libfusiongpu │ ├── __init__.py │ ├── README.md │ ├── LICENSE │ ├── setup.py │ ├── CMakeLists.txt │ └── fusion.cu │ ├── simplification.mlx │ └── 3_simplify.py ├── dmif_model ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── config.cpython-36.pyc │ ├── training.cpython-36.pyc │ └── generation.cpython-36.pyc ├── models │ ├── __pycache__ │ │ ├── __init__.cpython-36.pyc │ │ ├── decoder.cpython-36.pyc │ │ └── encoder_latent.cpython-36.pyc │ └── encoder_latent.py └── __init__.py ├── configs ├── demo.yaml ├── img │ └── dmifnet.yaml └── default.yaml ├── dmif_env.yaml ├── .github └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── LICENSE ├── set_env_up.py ├── README.md └── visualize.py /display/ReadMe.md: -------------------------------------------------------------------------------- 1 | a 2 | -------------------------------------------------------------------------------- /dmifnet/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/README: -------------------------------------------------------------------------------- 1 | README.rst -------------------------------------------------------------------------------- /dmifnet/utils/libvoxelize/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /display/car.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/display/car.gif -------------------------------------------------------------------------------- /display/chair.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/display/chair.gif 
-------------------------------------------------------------------------------- /display/plane.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/display/plane.gif -------------------------------------------------------------------------------- /display/table.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/display/table.gif -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_rpm] 2 | requires=numpy 3 | release=1 4 | 5 | 6 | -------------------------------------------------------------------------------- /display/Qualitative.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/display/Qualitative.jpg -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude pykdtree/render_template.py 2 | include LICENSE.txt 3 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/pyarray_symbol.h: -------------------------------------------------------------------------------- 1 | 2 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 3 | -------------------------------------------------------------------------------- /dmifnet/utils/libmise/__init__.py: -------------------------------------------------------------------------------- 1 | from .mise import MISE 2 | 3 | 4 | __all__ = [ 5 | MISE 6 | ] 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/pyarray_symbol.h: -------------------------------------------------------------------------------- 1 | 2 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 3 | -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/__init__.py: -------------------------------------------------------------------------------- 1 | from .pykdtree.kdtree import KDTree 2 | 3 | 4 | __all__ = [ 5 | KDTree 6 | ] 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/external/mesh-fusion/screenshot.jpg -------------------------------------------------------------------------------- /dmifnet/utils/__pycache__/io.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/__pycache__/io.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/__pycache__/training.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/__pycache__/training.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/data/__pycache__/core.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/data/__pycache__/core.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/data/__pycache__/fields.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/data/__pycache__/fields.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/data/__pycache__/real.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/data/__pycache__/real.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/ellipsoid/info_ellipsoid.dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/ellipsoid/info_ellipsoid.dat -------------------------------------------------------------------------------- /dmifnet/psgn/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/__pycache__/generation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/__pycache__/generation.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/data/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/data/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/conv.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/conv.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/r2n2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/r2n2.cpython-36.pyc 
-------------------------------------------------------------------------------- /dmifnet/psgn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/__pycache__/training.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/__pycache__/training.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/__pycache__/training.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/__pycache__/training.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/__pycache__/voxels.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/__pycache__/voxels.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/data/__pycache__/transforms.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/data/__pycache__/transforms.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/batchnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/batchnet.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/pointnet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/pointnet.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/voxels.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/voxels.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/__pycache__/config.cpython-36.pyc 
-------------------------------------------------------------------------------- /dmifnet/pix2mesh/__pycache__/layers.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/__pycache__/layers.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/__pycache__/generation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/__pycache__/generation.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/__pycache__/generation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/__pycache__/generation.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/__pycache__/binvox_rw.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/__pycache__/binvox_rw.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/__pycache__/visualize.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/__pycache__/visualize.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmif_model/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmc/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmc/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmc/models/__pycache__/encoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmc/models/__pycache__/encoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmif_model/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/__pycache__/config.cpython-36.pyc 
-------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/psgn_cond.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/psgn_cond.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/__pycache__/training.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/__pycache__/training.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmc/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmc/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmif_model/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmif_model/__pycache__/generation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/__pycache__/generation.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmif_model/__pycache__/training.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/__pycache__/training.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/gaussian_conv.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/gaussian_conv.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/pix2mesh_cond.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/pix2mesh_cond.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/__pycache__/generation.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/__pycache__/generation.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/r2n2/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/r2n2/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmc/ops/_cuda_ext.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmc/ops/_cuda_ext.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmifnet/pix2mesh/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmesh/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmesh/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmise/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmise/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmise/mise.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmise/mise.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmif_model/models/__pycache__/encoder_latent.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmif_model/models/__pycache__/encoder_latent.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/dmif_model/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- 
/dmifnet/dmif_model/models/__pycache__/decoder.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/models/__pycache__/decoder.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/pix2mesh/models/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/pix2mesh/models/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/models/__pycache__/psgn_2branch.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/psgn/models/__pycache__/psgn_2branch.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libkdtree/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmcubes/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/__pycache__/exporter.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmcubes/__pycache__/exporter.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmesh/__pycache__/inside_mesh.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmesh/__pycache__/inside_mesh.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libsimplify/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libsimplify/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libvoxelize/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libvoxelize/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/encoder/__pycache__/channel_attention_1d.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/encoder/__pycache__/channel_attention_1d.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/psgn/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.psgn import ( 2 | 
config, generation, training, models 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models 7 | ] 8 | -------------------------------------------------------------------------------- /dmifnet/r2n2/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.r2n2 import ( 2 | config, generation, training, models 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models 7 | ] 8 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/mcubes.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmcubes/mcubes.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmif_model/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.dmif_model import ( 2 | config, generation, training, models 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models 7 | ] 8 | -------------------------------------------------------------------------------- /dmifnet/utils/libvoxelize/voxelize.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libvoxelize/voxelize.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/__init__.py: -------------------------------------------------------------------------------- 1 | from libmcubes.mcubes import marching_cubes, marching_cubes_func 2 | from libmcubes.exporter import export_mesh, export_obj, export_off 3 | -------------------------------------------------------------------------------- /dmifnet/dmif_model/models/__pycache__/encoder_latent.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmif_model/models/__pycache__/encoder_latent.cpython-36.pyc -------------------------------------------------------------------------------- /dmifnet/utils/libmesh/triangle_hash.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libmesh/triangle_hash.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmifnet/dmc/ops/cpp_modules/pred2mesh.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/dmc/ops/cpp_modules/pred2mesh.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmifnet/dmif_model/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.dmif_model import ( 2 | config, generation, training, models 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models 7 | ] 8 | -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/pykdtree/kdtree.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libkdtree/pykdtree/kdtree.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmifnet/utils/libsimplify/simplify_mesh.cpython-36m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ray-tju/DmifNet/HEAD/dmifnet/utils/libsimplify/simplify_mesh.cpython-36m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /dmifnet/pix2mesh/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.pix2mesh import ( 2 | config, generation, training, models, layers 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models, layers 7 | ] 8 | -------------------------------------------------------------------------------- /dmifnet/utils/libsimplify/test.py: -------------------------------------------------------------------------------- 1 | from simplify_mesh import mesh_simplify 2 | import numpy as np 3 | 4 | v = np.random.rand(100, 3) 5 | f = np.random.choice(range(100), (50, 3)) 6 | 7 | mesh_simplify(v, f, 50) -------------------------------------------------------------------------------- /dmifnet/dmc/ops/cpp_modules/old/pred_to_mesh.h: -------------------------------------------------------------------------------- 1 | int pred_to_mesh(THFloatTensor *offset, THLongTensor *topology, THFloatTensor *vertices_all, THFloatTensor *faces_all, THLongTensor *vertice_number, THLongTensor *face_number); 2 | -------------------------------------------------------------------------------- /dmifnet/utils/libmesh/__init__.py: -------------------------------------------------------------------------------- 1 | from .inside_mesh import ( 2 | check_mesh_contains, MeshIntersector, TriangleIntersector2d 3 | ) 4 | 5 | 6 | __all__ = [ 7 | check_mesh_contains, MeshIntersector, TriangleIntersector2d 8 | ] 9 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | #pyrender_dir = os.path.dirname(os.path.realpath(__file__)) 5 | #ctypes.cdll.LoadLibrary(os.path.join(pyrender_dir, 'pyrender.so')) 6 | from librender.pyrender import * 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | pyfusion_dir = os.path.dirname(os.path.realpath(__file__)) 5 | ctypes.cdll.LoadLibrary(os.path.join(pyfusion_dir, 'build', 'libfusion_cpu.so')) 6 | from cyfusion import * 7 | -------------------------------------------------------------------------------- /configs/demo.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/img/onet_pretrained.yaml 2 | data: 3 | dataset: images 4 | path: demo 5 | training: 6 | out_dir: demo 7 | generation: 8 | generation_dir: generation 9 | refinement_step: 30 10 | simplify_nfaces: 5000 11 | -------------------------------------------------------------------------------- /dmifnet/utils/libkdtree/pykdtree/render_template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from mako.template 
import Template 4 | 5 | mytemplate = Template(filename='_kdtree_core.c.mako') 6 | with open('_kdtree_core.c', 'w') as fp: 7 | fp.write(mytemplate.render()) 8 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | pyfusion_dir = os.path.dirname(os.path.realpath(__file__)) 5 | ctypes.cdll.LoadLibrary(os.path.join(pyfusion_dir, 'build', 'libfusion_gpu.so')) 6 | from libfusiongpu.cyfusion import * 7 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.utils.libmcubes.mcubes import ( 2 | marching_cubes, marching_cubes_func 3 | ) 4 | from dmifnet.utils.libmcubes.exporter import ( 5 | export_mesh, export_obj, export_off 6 | ) 7 | 8 | 9 | __all__ = [ 10 | marching_cubes, marching_cubes_func, 11 | export_mesh, export_obj, export_off 12 | ] 13 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/cpp_modules/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import torch 3 | from torch.utils.cpp_extension import BuildExtension, CppExtension 4 | 5 | setup( 6 | name='pred2mesh', 7 | ext_modules=[ 8 | CppExtension('pred2mesh', [ 9 | 'pred_to_mesh_.cpp', 10 | # 'commons.cpp' 11 | ]), 12 | ], 13 | cmdclass={ 14 | 'build_ext': BuildExtension 15 | }) -------------------------------------------------------------------------------- /dmifnet/utils/libsimplify/__init__.py: -------------------------------------------------------------------------------- 1 | from .simplify_mesh import ( 2 | mesh_simplify 3 | ) 4 | import trimesh 5 | 6 | 7 | def simplify_mesh(mesh, f_target=10000, agressiveness=7.): 8 | vertices = mesh.vertices 9 | faces = mesh.faces 10 | 11 | vertices, faces = mesh_simplify(vertices, faces, f_target, agressiveness) 12 | 13 | mesh_simplified = trimesh.Trimesh(vertices, faces, process=False) 14 | 15 | return mesh_simplified 16 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/pywrapper.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _PYWRAPPER_H 3 | #define _PYWRAPPER_H 4 | 5 | #include 6 | #include "pyarraymodule.h" 7 | 8 | #include 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue); 11 | PyObject* marching_cubes2(PyArrayObject* arr, double isovalue); 12 | PyObject* marching_cubes3(PyArrayObject* arr, double isovalue); 13 | PyObject* marching_cubes_func(PyObject* lower, PyObject* upper, 14 | int numx, int numy, int numz, PyObject* f, double isovalue); 15 | 16 | #endif // _PYWRAPPER_H 17 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/pywrapper.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _PYWRAPPER_H 3 | #define _PYWRAPPER_H 4 | 5 | #include 6 | #include "pyarraymodule.h" 7 | 8 | #include 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue); 11 | PyObject* marching_cubes2(PyArrayObject* arr, double isovalue); 12 | PyObject* marching_cubes3(PyArrayObject* arr, double isovalue); 13 | PyObject* marching_cubes_func(PyObject* lower, PyObject* upper, 14 | int 
numx, int numy, int numz, PyObject* f, double isovalue); 15 | 16 | #endif // _PYWRAPPER_H 17 | -------------------------------------------------------------------------------- /dmifnet/utils/libmise/test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from mise import MISE 3 | import time 4 | 5 | t0 = time.time() 6 | extractor = MISE(1, 2, 0.) 7 | 8 | p = extractor.query() 9 | i = 0 10 | 11 | while p.shape[0] != 0: 12 | print(i) 13 | print(p) 14 | v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1 15 | extractor.update(p, v) 16 | p = extractor.query() 17 | i += 1 18 | if (i >= 8): 19 | break 20 | 21 | print(extractor.to_dense()) 22 | # p, v = extractor.get_points() 23 | # print(p) 24 | # print(v) 25 | print('Total time: %f' % (time.time() - t0)) 26 | -------------------------------------------------------------------------------- /dmifnet/dmc/models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from dmifnet.dmc.models import encoder, decoder 3 | 4 | 5 | decoder_dict = { 6 | 'unet': decoder.UNetDecoder 7 | } 8 | 9 | encoder_dict = { 10 | 'pointnet_local': encoder.PointNetLocal, 11 | } 12 | 13 | class DMC(nn.Module): 14 | def __init__(self, decoder, encoder): 15 | super().__init__() 16 | self.decoder = decoder 17 | self.encoder = encoder 18 | 19 | def forward(self, x): 20 | c = self.encoder(x) 21 | offset, topology, occupancy = self.decoder(c) 22 | 23 | return offset, topology, occupancy 24 | -------------------------------------------------------------------------------- /dmif_env.yaml: -------------------------------------------------------------------------------- 1 | name: dmifnet_space 2 | channels: 3 | - conda-forge 4 | - pytorch 5 | - defaults 6 | dependencies: 7 | - cython=0.29.2 8 | - imageio=2.4.1 9 | - numpy=1.15.4 10 | - numpy-base=1.15.4 11 | - matplotlib=3.0.3 12 | - matplotlib-base=3.0.3 13 | - pandas=0.23.4 14 | - pillow=5.3.0 15 | - pyembree=0.1.4 16 | - pytest=4.0.2 17 | - python=3.6.7 18 | - pytorch=1.0.0 19 | - pyyaml=3.13 20 | - scikit-image=0.14.1 21 | - scipy=1.1.0 22 | - tensorboardx=1.4 23 | - torchvision=0.2.1 24 | - tqdm=4.28.1 25 | - trimesh=2.37.7 26 | - pip: 27 | - h5py==2.9.0 28 | - plyfile==0.7 29 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import torch 3 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 4 | 5 | setup( 6 | name='_cuda_ext', 7 | ext_modules=[ 8 | CUDAExtension('_cuda_ext', [ 9 | 'src/extension.cpp', 10 | 'src/curvature_constraint_kernel.cu', 11 | 'src/grid_pooling_kernel.cu', 12 | 'src/occupancy_to_topology_kernel.cu', 13 | 'src/occupancy_connectivity_kernel.cu', 14 | 'src/point_triangle_distance_kernel.cu', 15 | ]), 16 | ], 17 | cmdclass={ 18 | 'build_ext': BuildExtension 19 | }) 20 | -------------------------------------------------------------------------------- /dmifnet/encoder/__init__.py: -------------------------------------------------------------------------------- 1 | from dmifnet.encoder import ( 2 | conv, pix2mesh_cond, pointnet, 3 | psgn_cond, r2n2, voxels, 4 | ) 5 | 6 | 7 | encoder_dict = { 8 | 'simple_conv': conv.ConvEncoder, 9 | 'resnet18': conv.Resnet18, 10 | 'resnet34': conv.Resnet34, 11 | 'resnet50': conv.Resnet50, 12 | 'resnet101': conv.Resnet101, 13 | 'r2n2_simple': r2n2.SimpleConv, 
14 | 'r2n2_resnet': r2n2.Resnet, 15 | 'pointnet_simple': pointnet.SimplePointnet, 16 | 'pointnet_resnet': pointnet.ResnetPointnet, 17 | 'psgn_cond': psgn_cond.PCGN_Cond, 18 | 'voxel_simple': voxels.VoxelEncoder, 19 | 'pixel2mesh_cond': pix2mesh_cond.Pix2mesh_Cond, 20 | } 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /configs/img/dmifnet.yaml: -------------------------------------------------------------------------------- 1 | method: dmif_model 2 | data: 3 | path: ./data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/img/dmif_model 15 | batch_size: 32 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: 20000 19 | validate_every: 20000 20 | test: 21 | threshold: 0.2 22 | eval_mesh: true 23 | eval_pointcloud: false 24 | generation: 25 | batch_size: 100000 26 | refine: false 27 | n_x: 128 28 | n_z: 1 29 | resolution_0: 32 30 | upsampling_steps: 2 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/offscreen.h: -------------------------------------------------------------------------------- 1 | #ifndef LIBRENDER_OFFSCREEN_H 2 | #define LIBRENDER_OFFSCREEN_H 3 | 4 | #include "GL/glew.h" 5 | #include "GL/gl.h" 6 | #include "GL/glu.h" 7 | #include "GL/glut.h" 8 | 9 | class OffscreenGL { 10 | 11 | public: 12 | OffscreenGL(int maxHeight, int maxWidth); 13 | ~OffscreenGL(); 14 | 15 | private: 16 | static int glutWin; 17 | static bool glutInitialized; 18 | GLuint fb; 19 | GLuint renderTex; 20 | GLuint depthTex; 21 | }; 22 | 23 | 24 | void renderDepthMesh(double *FM, int fNum, double *VM, int vNum, double *CM, double *intrinsics, int *imgSizeV, double *zNearFarV, unsigned char * imgBuffer, float *depthBuffer, bool *maskBuffer, double linewidth, bool coloring); 25 | 26 | #endif -------------------------------------------------------------------------------- /external/mesh-fusion/librender/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import numpy as np 5 | 6 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2', '-O4', '-fopenmp'] 7 | extra_link_args = ['-lGLEW', '-lglut', '-lGL', '-lGLU', '-fopenmp'] 8 | 9 | setup( 10 | name="pyrender", 11 | cmdclass= {'build_ext': build_ext}, 12 | ext_modules=[ 
13 | Extension('pyrender', 14 | [ 15 | 'pyrender.pyx', 16 | 'offscreen.cpp', 17 | ], 18 | language='c++', 19 | include_dirs=[np.get_include()], 20 | extra_compile_args=extra_compile_args, 21 | extra_link_args=extra_link_args 22 | ) 23 | ] 24 | ) 25 | 26 | 27 | -------------------------------------------------------------------------------- /dmifnet/r2n2/models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from dmifnet.r2n2.models.decoder import Decoder 3 | 4 | 5 | # Decoder dictionary 6 | decoder_dict = { 7 | 'simple': Decoder, 8 | } 9 | 10 | 11 | class R2N2(nn.Module): 12 | ''' The 3D Recurrent Reconstruction Neural Network (3D-R2N2) model. 13 | 14 | For details regarding the model, please see 15 | https://arxiv.org/abs/1604.00449 16 | 17 | As single-view images are used as input, we do not use the recurrent 18 | module. 19 | 20 | Args: 21 | decoder (nn.Module): decoder network 22 | encoder (nn.Module): encoder network 23 | ''' 24 | 25 | def __init__(self, decoder, encoder): 26 | super().__init__() 27 | self.decoder = decoder 28 | self.encoder = encoder 29 | 30 | def forward(self, x): 31 | c = self.encoder(x) 32 | occ_hat = self.decoder(c) 33 | return occ_hat 34 | -------------------------------------------------------------------------------- /external/mesh-fusion/simplification.mlx: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /dmifnet/data/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from dmifnet.data.core import ( 3 | Shapes3dDataset, collate_remove_none, worker_init_fn 4 | ) 5 | from dmifnet.data.fields import ( 6 | IndexField, CategoryField, ImagesField, PointsField, 7 | VoxelsField, PointCloudField, MeshField, 8 | ) 9 | from dmifnet.data.transforms import ( 10 | PointcloudNoise, SubsamplePointcloud, 11 | SubsamplePoints 12 | ) 13 | from dmifnet.data.real import ( 14 | KittiDataset, OnlineProductDataset, 15 | ImageDataset, 16 | ) 17 | 18 | 19 | __all__ = [ 20 | # Core 21 | Shapes3dDataset, 22 | collate_remove_none, 23 | worker_init_fn, 24 | # Fields 25 | IndexField, 26 | CategoryField, 27 | ImagesField, 28 | PointsField, 29 | VoxelsField, 30 | PointCloudField, 31 | MeshField, 32 | # Transforms 33 | PointcloudNoise, 34 | SubsamplePointcloud, 35 | SubsamplePoints, 36 | # Real Data 37 | KittiDataset, 38 | OnlineProductDataset, 39 | ImageDataset, 40 | ] 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 
22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /dmifnet/encoder/gaussian_conv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.autograd import Variable 5 | import numpy as np 6 | import cv2 7 | 8 | class GaussianBlurConv(nn.Module): 9 | def __init__(self, channels=3, k=0.3): 10 | super(GaussianBlurConv, self).__init__() 11 | self.channels = channels 12 | kernel = self.gauss(3, k) 13 | kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0) 14 | kernel = np.repeat(kernel, self.channels, axis=0) 15 | self.weight = nn.Parameter(data=kernel, requires_grad=False).cuda() 16 | 17 | def __call__(self, x): 18 | x = F.conv2d(x, self.weight, padding=1, groups=self.channels).to('cuda') 19 | return x 20 | 21 | def gauss(self, kernel_size, sigma): 22 | kernel1 = cv2.getGaussianKernel(kernel_size, sigma) 23 | kernel2 = cv2.getGaussianKernel(kernel_size, sigma) 24 | kernel3 = np.multiply(kernel1, np.transpose(kernel2)) 25 | 26 | return kernel3 -------------------------------------------------------------------------------- /dmifnet/psgn/models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from dmifnet.psgn.models.decoder import Decoder 3 | from dmifnet.psgn.models.psgn_2branch import PCGN_2Branch 4 | 5 | decoder_dict = { 6 | 'simple': Decoder, 7 | 'psgn_2branch': PCGN_2Branch 8 | } 9 | 10 | 11 | class PCGN(nn.Module): 12 | r''' The Point Set Generation Network. 13 | 14 | For the PSGN, the input image is first passed to a encoder network, 15 | e.g. restnet-18 or the CNN proposed in the original publication. Next, 16 | this latent code is then used as the input for the decoder network, e.g. 17 | the 2-Branch model from the PSGN paper. 18 | 19 | Args: 20 | decoder (nn.Module): The decoder network 21 | encoder (nn.Module): The encoder network 22 | ''' 23 | 24 | def __init__(self, decoder, encoder): 25 | super().__init__() 26 | self.decoder = decoder 27 | self.encoder = encoder 28 | 29 | def forward(self, x): 30 | c = self.encoder(x) 31 | points = self.decoder(c) 32 | return points 33 | -------------------------------------------------------------------------------- /dmifnet/pix2mesh/models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from dmifnet.pix2mesh.models.decoder import Decoder 3 | 4 | 5 | decoder_dict = { 6 | 'simple': Decoder, 7 | } 8 | 9 | 10 | class Pix2Mesh(nn.Module): 11 | ''' Pixel2Mesh model. 12 | 13 | First, the input image is passed through a CNN to extract several feature 14 | maps. These feature maps as well as camera matrices are passed to the 15 | decoder to predict respective vertex locations of the output mesh 16 | 17 | ''' 18 | def __init__(self, decoder, encoder): 19 | ''' Initialisation. 
20 | 21 | Args: 22 | encoder (PyTorch model): The conditional network to obtain 23 | feature maps 24 | decoder (PyTorch model): The decoder network 25 | ''' 26 | super().__init__() 27 | self.decoder = decoder 28 | self.encoder = encoder 29 | 30 | def forward(self, x, camera_mat): 31 | fm = self.encoder(x) 32 | pred = self.decoder(x, fm, camera_mat) 33 | return pred 34 | -------------------------------------------------------------------------------- /dmifnet/encoder/channel_attention_1d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn.parameter import Parameter 4 | 5 | 6 | class attention_layer(nn.Module): 7 | """Constructs a ECA module. 8 | 9 | Args: 10 | channel: Number of channels of the input feature map 11 | k_size: Adaptive selection of kernel size 12 | """ 13 | 14 | def __init__(self, channel, k_size=3): 15 | super(attention_layer, self).__init__() 16 | self.avg_pool = nn.AdaptiveAvgPool1d(1) 17 | self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False) 18 | self.sigmoid = nn.Sigmoid() 19 | 20 | def forward(self, x): 21 | # x: input features with shape [b, c, h, w] 22 | # b, c, h, w = x.size() 23 | 24 | # feature descriptor on the global spatial information 25 | y = self.avg_pool(x) 26 | 27 | # Two different branches of ECA module 28 | y = self.conv(y.transpose(-1, -2)).transpose(-1, -2) 29 | 30 | # Multi-scale information fusion 31 | y = self.sigmoid(y) 32 | 33 | return x * y.expand_as(x) 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 leilimaster 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /dmifnet/training.py: -------------------------------------------------------------------------------- 1 | # from im2mesh import icp 2 | import numpy as np 3 | from collections import defaultdict 4 | from tqdm import tqdm 5 | 6 | 7 | class BaseTrainer(object): 8 | ''' Base trainer class. 9 | ''' 10 | 11 | def evaluate(self, val_loader): 12 | ''' Performs an evaluation. 
13 | Args: 14 | val_loader (dataloader): pytorch dataloader 15 | ''' 16 | eval_list = defaultdict(list) 17 | 18 | for data in tqdm(val_loader): 19 | eval_step_dict = self.eval_step(data) 20 | 21 | for k, v in eval_step_dict.items(): 22 | eval_list[k].append(v) 23 | 24 | eval_dict = {k: np.mean(v) for k, v in eval_list.items()} 25 | return eval_dict 26 | 27 | def train_step(self, *args, **kwargs): 28 | ''' Performs a training step. 29 | ''' 30 | raise NotImplementedError 31 | 32 | def eval_step(self, *args, **kwargs): 33 | ''' Performs an evaluation step. 34 | ''' 35 | raise NotImplementedError 36 | 37 | def visualize(self, *args, **kwargs): 38 | ''' Performs visualization. 39 | ''' 40 | raise NotImplementedError 41 | -------------------------------------------------------------------------------- /dmifnet/dmc/generation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import trimesh 4 | from dmifnet.dmc.utils.pred2mesh import pred_to_mesh_max 5 | from dmifnet.dmc.ops.occupancy_to_topology import OccupancyToTopology 6 | from dmifnet.dmc.ops.table import get_accept_topology 7 | 8 | 9 | class Generator3D(object): 10 | def __init__(self, model, device=None, num_voxels=32): 11 | self.model = model.to(device) 12 | self.device = device 13 | self.num_voxels = num_voxels 14 | self.vis_topology = torch.LongTensor(get_accept_topology(4)) 15 | 16 | def generate_mesh(self, data): 17 | self.model.eval() 18 | device = self.device 19 | 20 | inputs = data.get('inputs', torch.empty(1, 0)).to(device) 21 | 22 | inputs = self.num_voxels * (inputs / 1.2 + 0.5) 23 | 24 | with torch.no_grad(): 25 | offset, topology, occupancy = self.model(inputs) 26 | 27 | offset = offset.squeeze() 28 | topology = topology.squeeze() 29 | topology = topology[:, self.vis_topology] 30 | 31 | vertices, faces = pred_to_mesh_max(offset, topology) 32 | faces = faces.astype(np.int64) 33 | 34 | vertices = 1.2 * (vertices / self.num_voxels - 0.5) 35 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False) 36 | return mesh 37 | 38 | 39 | -------------------------------------------------------------------------------- /dmifnet/psgn/models/decoder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class Decoder(nn.Module): 6 | r''' Simple decoder for the Point Set Generation Network. 7 | 8 | The simple decoder consists of 4 fully-connected layers, resulting in an 9 | output of 3D coordinates for a fixed number of points. 10 | 11 | Args: 12 | dim (int): The output dimension of the points (e.g. 
3) 13 | c_dim (int): dimension of the input vector 14 | n_points (int): number of output points 15 | ''' 16 | def __init__(self, dim=3, c_dim=128, n_points=1024): 17 | super().__init__() 18 | # Attributes 19 | self.dim = dim 20 | self.c_dim = c_dim 21 | self.n_points = n_points 22 | 23 | # Submodules 24 | self.actvn = F.relu 25 | self.fc_0 = nn.Linear(c_dim, 512) 26 | self.fc_1 = nn.Linear(512, 512) 27 | self.fc_2 = nn.Linear(512, 512) 28 | self.fc_out = nn.Linear(512, dim*n_points) 29 | 30 | def forward(self, c): 31 | batch_size = c.size(0) 32 | 33 | net = self.fc_0(c) 34 | net = self.fc_1(self.actvn(net)) 35 | net = self.fc_2(self.actvn(net)) 36 | points = self.fc_out(self.actvn(net)) 37 | points = points.view(batch_size, self.n_points, self.dim) 38 | 39 | return points 40 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/README.md: -------------------------------------------------------------------------------- 1 | # PyFusion 2 | 3 | PyFusion is a Python framework for volumetric depth fusion. 4 | It contains simple occupancy and TSDF fusion methods that can be executed on a CPU as well as on a GPU. 5 | 6 | To use the code, first compile the native code via 7 | 8 | ```bash 9 | cd build 10 | cmake .. 11 | make 12 | ``` 13 | Afterwards you can compile the Cython code via 14 | 15 | ```bash 16 | python setup.py build_ext --inplace 17 | ``` 18 | 19 | You can then use the fusion functions 20 | 21 | ```python 22 | import pyfusion 23 | 24 | # create a views object 25 | # depthmaps: a NxHxW numpy float tensor of N depthmaps, invalid depth values are marked by negative numbers 26 | # Ks: the camera intric matrices, Nx3x3 float tensor 27 | # Rs: the camera rotation matrices, Nx3x3 float tensor 28 | # Ts: the camera translation vectors, Nx3 float tensor 29 | views = pyfusion.PyViews(depthmaps, Ks,Rs,Ts) 30 | 31 | # afterwards you can fuse the depth maps for example by 32 | # depth,height,width: number of voxels in each dimension 33 | # truncation: TSDF truncation value 34 | tsdf = pyfusion.tsdf_gpu(views, depth,height,width, vx_size, truncation, False) 35 | 36 | # the same code can also be run on the CPU 37 | tsdf = pyfusion.tsdf_cpu(views, depth,height,width, vx_size, truncation, False, n_threads=8) 38 | ``` 39 | 40 | Make sure `pyfusion` is in your `$PYTHONPATH`. 41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/README.md: -------------------------------------------------------------------------------- 1 | # PyFusion 2 | 3 | PyFusion is a Python framework for volumetric depth fusion. 4 | It contains simple occupancy and TSDF fusion methods that can be executed on a CPU as well as on a GPU. 5 | 6 | To use the code, first compile the native code via 7 | 8 | ```bash 9 | cd build 10 | cmake .. 
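# note: the build directory is assumed to exist; if it does not, create it first (mkdir build) before running cmake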
11 | make 12 | ``` 13 | Afterwards you can compile the Cython code via 14 | 15 | ```bash 16 | python setup.py build_ext --inplace 17 | ``` 18 | 19 | You can then use the fusion functions 20 | 21 | ```python 22 | import pyfusion 23 | 24 | # create a views object 25 | # depthmaps: a NxHxW numpy float tensor of N depthmaps, invalid depth values are marked by negative numbers 26 | # Ks: the camera intric matrices, Nx3x3 float tensor 27 | # Rs: the camera rotation matrices, Nx3x3 float tensor 28 | # Ts: the camera translation vectors, Nx3 float tensor 29 | views = pyfusion.PyViews(depthmaps, Ks,Rs,Ts) 30 | 31 | # afterwards you can fuse the depth maps for example by 32 | # depth,height,width: number of voxels in each dimension 33 | # truncation: TSDF truncation value 34 | tsdf = pyfusion.tsdf_gpu(views, depth,height,width, vx_size, truncation, False) 35 | 36 | # the same code can also be run on the CPU 37 | tsdf = pyfusion.tsdf_cpu(views, depth,height,width, vx_size, truncation, False, n_threads=8) 38 | ``` 39 | 40 | Make sure `pyfusion` is in your `$PYTHONPATH`. 41 | -------------------------------------------------------------------------------- /dmifnet/r2n2/models/decoder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | 5 | class Decoder(nn.Module): 6 | ''' Decoder network class for the R2N2 model. 7 | 8 | It consists of 4 transposed 3D-convolutional layers. 9 | 10 | Args: 11 | dim (int): input dimension 12 | c_dim (int): dimension of latent conditioned code c 13 | ''' 14 | 15 | def __init__(self, dim=3, c_dim=128): 16 | super().__init__() 17 | self.actvn = F.relu 18 | self.fc_in = nn.Linear(c_dim, 256*4*4*4) 19 | self.convtrp_0 = nn.ConvTranspose3d(256, 128, 3, stride=2, 20 | padding=1, output_padding=1) 21 | self.convtrp_1 = nn.ConvTranspose3d(128, 64, 3, stride=2, 22 | padding=1, output_padding=1) 23 | self.convtrp_2 = nn.ConvTranspose3d(64, 32, 3, stride=2, 24 | padding=1, output_padding=1) 25 | self.conv_out = nn.Conv3d(32, 1, 1) 26 | 27 | def forward(self, c): 28 | batch_size = c.size(0) 29 | 30 | net = self.fc_in(c) 31 | net = net.view(batch_size, 256, 4, 4, 4) 32 | net = self.convtrp_0(self.actvn(net)) 33 | net = self.convtrp_1(self.actvn(net)) 34 | net = self.convtrp_2(self.actvn(net)) 35 | 36 | occ_hat = self.conv_out(self.actvn(net)) 37 | 38 | return occ_hat 39 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/occupancy_connectivity.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | from torch import nn 4 | from torch.autograd import Function 5 | from torch.autograd import Variable 6 | from ._cuda_ext import occupancy_connectivity_forward, occupancy_connectivity_backward 7 | 8 | 9 | 10 | class OccupancyConnectivityFunction(Function): 11 | @staticmethod 12 | def forward(ctx, occ): 13 | loss = occupancy_connectivity_forward(occ) 14 | ctx.save_for_backward(occ) 15 | return loss 16 | 17 | @staticmethod 18 | def backward(ctx, grad_output): 19 | occ, = ctx.saved_tensors 20 | grad_occupancy = torch.zeros(occ.size(), dtype=torch.float32, device='cuda') 21 | occupancy_connectivity_backward( 22 | grad_output, 23 | occ, 24 | grad_occupancy) 25 | # Multiply with incoming gradient 26 | grad_occupancy = grad_occupancy * grad_output 27 | return grad_occupancy 28 | 29 | 30 | class OccupancyConnectivity(nn.Module): 31 | 32 | """ 33 | Module for deriving the Occupancy 
connectivity loss 34 | 35 | Forward 36 | ---------- 37 | arg1 : tensor 38 | occupancy probabilities [W+1 x H+1 x D+1] 39 | 40 | Returns 41 | ------- 42 | tensor 43 | Occupancy connectivity loss 44 | 45 | """ 46 | 47 | def __init__(self): 48 | super(OccupancyConnectivity, self).__init__() 49 | def forward(self, occ): 50 | return OccupancyConnectivityFunction.apply(occ) 51 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2015, P. M. Neila 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the copyright holder nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2015, P. M. Neila 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the copyright holder nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Gernot 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/setup.py: -------------------------------------------------------------------------------- 1 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 2 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 3 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 4 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 5 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 6 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 7 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 8 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 9 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
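# Note: this Cython extension links against the fusion_cpu shared library in ./build, so the native code must be compiled with CMake first (see README.md).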
10 | 11 | from distutils.core import setup 12 | from Cython.Build import cythonize 13 | from distutils.extension import Extension 14 | from Cython.Distutils import build_ext 15 | import numpy as np 16 | import platform 17 | 18 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2'] 19 | extra_link_args = [] 20 | if 'Linux' in platform.system(): 21 | print('Added OpenMP') 22 | extra_compile_args.append('-fopenmp') 23 | extra_link_args.append('-fopenmp') 24 | 25 | 26 | setup( 27 | name="cyfusion", 28 | cmdclass= {'build_ext': build_ext}, 29 | ext_modules=[ 30 | Extension('cyfusion', 31 | ['cyfusion.pyx'], 32 | language='c++', 33 | library_dirs=['./build/'], 34 | libraries=['m', "fusion_cpu"], 35 | include_dirs=[np.get_include()], 36 | extra_compile_args=extra_compile_args, 37 | extra_link_args=extra_link_args 38 | ) 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Gernot 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/setup.py: -------------------------------------------------------------------------------- 1 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 2 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 3 | # DISCLAIMED. 
IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 4 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 5 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 6 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 7 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 8 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 9 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 10 | 11 | from distutils.core import setup 12 | from Cython.Build import cythonize 13 | from distutils.extension import Extension 14 | from Cython.Distutils import build_ext 15 | import numpy as np 16 | import platform 17 | 18 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2'] 19 | extra_link_args = [] 20 | if 'Linux' in platform.system(): 21 | print('Added OpenMP') 22 | extra_compile_args.append('-fopenmp') 23 | extra_link_args.append('-fopenmp') 24 | 25 | 26 | setup( 27 | name="cyfusion", 28 | cmdclass= {'build_ext': build_ext}, 29 | ext_modules=[ 30 | Extension('cyfusion', 31 | ['cyfusion.pyx'], 32 | language='c++', 33 | library_dirs=['./build/'], 34 | libraries=['m', "fusion_gpu"], 35 | include_dirs=[np.get_include()], 36 | extra_compile_args=extra_compile_args, 37 | extra_link_args=extra_link_args 38 | ) 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /dmifnet/r2n2/generation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from dmifnet.utils.voxels import VoxelGrid 4 | 5 | 6 | class VoxelGenerator3D(object): 7 | ''' Generator class for R2N2 model. 8 | 9 | The output of the model is transformed to a voxel grid and returned as a 10 | mesh. 11 | 12 | Args: 13 | model (nn.Module): (trained) R2N2 model 14 | threshold (float): threshold value for deciding whether a voxel is 15 | occupied or not 16 | device (device): pytorch device 17 | ''' 18 | 19 | def __init__(self, model, threshold=0.5, device=None): 20 | self.model = model.to(device) 21 | self.threshold = threshold 22 | self.device = device 23 | 24 | def generate_mesh(self, data): 25 | ''' Generates the output mesh. 26 | 27 | Args: 28 | data (dict): data dictionary 29 | ''' 30 | self.model.eval() 31 | device = self.device 32 | 33 | inputs = data.get('inputs', torch.empty(1, 0)).to(device) 34 | 35 | with torch.no_grad(): 36 | out = self.model(inputs).squeeze(1).squeeze(0) 37 | 38 | out = out.cpu().numpy() 39 | mesh = self.extract_mesh(out) 40 | 41 | return mesh 42 | 43 | def extract_mesh(self, values): 44 | ''' Extracts the mesh. 45 | 46 | Args: 47 | values (numpy array): predicted values 48 | ''' 49 | # Convert threshold to logits 50 | threshold = np.log(self.threshold) - np.log(1. 
- self.threshold) 51 | 52 | # Extract mesh 53 | mesh = VoxelGrid(values >= threshold).to_mesh() 54 | 55 | return mesh 56 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/mcubes.pyx: -------------------------------------------------------------------------------- 1 | 2 | # distutils: language = c++ 3 | # cython: embedsignature = True 4 | 5 | # from libcpp.vector cimport vector 6 | import numpy as np 7 | 8 | # Define PY_ARRAY_UNIQUE_SYMBOL 9 | cdef extern from "pyarray_symbol.h": 10 | pass 11 | 12 | cimport numpy as np 13 | 14 | np.import_array() 15 | 16 | cdef extern from "pywrapper.h": 17 | cdef object c_marching_cubes "marching_cubes"(np.ndarray, double) except + 18 | cdef object c_marching_cubes2 "marching_cubes2"(np.ndarray, double) except + 19 | cdef object c_marching_cubes3 "marching_cubes3"(np.ndarray, double) except + 20 | cdef object c_marching_cubes_func "marching_cubes_func"(tuple, tuple, int, int, int, object, double) except + 21 | 22 | def marching_cubes(np.ndarray volume, float isovalue): 23 | 24 | verts, faces = c_marching_cubes(volume, isovalue) 25 | verts.shape = (-1, 3) 26 | faces.shape = (-1, 3) 27 | return verts, faces 28 | 29 | def marching_cubes2(np.ndarray volume, float isovalue): 30 | 31 | verts, faces = c_marching_cubes2(volume, isovalue) 32 | verts.shape = (-1, 3) 33 | faces.shape = (-1, 3) 34 | return verts, faces 35 | 36 | def marching_cubes3(np.ndarray volume, float isovalue): 37 | 38 | verts, faces = c_marching_cubes3(volume, isovalue) 39 | verts.shape = (-1, 3) 40 | faces.shape = (-1, 3) 41 | return verts, faces 42 | 43 | def marching_cubes_func(tuple lower, tuple upper, int numx, int numy, int numz, object f, double isovalue): 44 | 45 | verts, faces = c_marching_cubes_func(lower, upper, numx, numy, numz, f, isovalue) 46 | verts.shape = (-1, 3) 47 | faces.shape = (-1, 3) 48 | return verts, faces 49 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/mcubes.pyx: -------------------------------------------------------------------------------- 1 | 2 | # distutils: language = c++ 3 | # cython: embedsignature = True 4 | 5 | # from libcpp.vector cimport vector 6 | import numpy as np 7 | 8 | # Define PY_ARRAY_UNIQUE_SYMBOL 9 | cdef extern from "pyarray_symbol.h": 10 | pass 11 | 12 | cimport numpy as np 13 | 14 | np.import_array() 15 | 16 | cdef extern from "pywrapper.h": 17 | cdef object c_marching_cubes "marching_cubes"(np.ndarray, double) except + 18 | cdef object c_marching_cubes2 "marching_cubes2"(np.ndarray, double) except + 19 | cdef object c_marching_cubes3 "marching_cubes3"(np.ndarray, double) except + 20 | cdef object c_marching_cubes_func "marching_cubes_func"(tuple, tuple, int, int, int, object, double) except + 21 | 22 | def marching_cubes(np.ndarray volume, float isovalue): 23 | 24 | verts, faces = c_marching_cubes(volume, isovalue) 25 | verts.shape = (-1, 3) 26 | faces.shape = (-1, 3) 27 | return verts, faces 28 | 29 | def marching_cubes2(np.ndarray volume, float isovalue): 30 | 31 | verts, faces = c_marching_cubes2(volume, isovalue) 32 | verts.shape = (-1, 3) 33 | faces.shape = (-1, 3) 34 | return verts, faces 35 | 36 | def marching_cubes3(np.ndarray volume, float isovalue): 37 | 38 | verts, faces = c_marching_cubes3(volume, isovalue) 39 | verts.shape = (-1, 3) 40 | faces.shape = (-1, 3) 41 | return verts, faces 42 | 43 | def marching_cubes_func(tuple lower, tuple upper, int numx, int numy, int numz, object f, double isovalue): 44 | 
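# Evaluates f on a numx x numy x numz grid spanning [lower, upper] and extracts the isosurface at the given isovalue (see README.rst for a usage example).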
45 | verts, faces = c_marching_cubes_func(lower, upper, numx, numy, numz, f, isovalue) 46 | verts.shape = (-1, 3) 47 | faces.shape = (-1, 3) 48 | return verts, faces 49 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/occupancy_to_topology.py: -------------------------------------------------------------------------------- 1 | import math 2 | from torch import nn 3 | from torch.autograd import Function 4 | import torch 5 | from ._cuda_ext import occupancy_to_topology_forward, occupancy_to_topology_backward 6 | 7 | 8 | 9 | class OccupancyToTopologyFunction(Function): 10 | @staticmethod 11 | def forward(ctx, occupancy): 12 | W = occupancy.size()[0] - 1 13 | H = occupancy.size()[1] - 1 14 | D = occupancy.size()[2] - 1 15 | 16 | T = 256 17 | topology = torch.zeros((W*H*D, T), dtype=torch.float32, device='cuda') 18 | occupancy_to_topology_forward(occupancy, topology) 19 | 20 | ctx.save_for_backward(occupancy, topology) 21 | 22 | return topology 23 | 24 | @staticmethod 25 | def backward(ctx, grad_output): 26 | occupancy, topology = ctx.saved_tensors 27 | grad_occupancy = torch.zeros(occupancy.size(), dtype=torch.float32, device='cuda') 28 | occupancy_to_topology_backward(grad_output, occupancy, topology, grad_occupancy) 29 | return grad_occupancy 30 | 31 | 32 | class OccupancyToTopology(nn.Module): 33 | """ 34 | Module for deriving the topology probabilities of each cell given the occupancy probabilities 35 | 36 | Init 37 | ---------- 38 | args1: shape of the topology output [W*H*DxT] 39 | 40 | Forward 41 | ---------- 42 | arg1 : tensor 43 | occupancy probability tensor [W+1xH+1xD+1] 44 | 45 | Returns 46 | ------- 47 | tensor 48 | topology probability tensor [W*H*DxT] 49 | 50 | """ 51 | def __init__(self): 52 | super(OccupancyToTopology, self).__init__() 53 | def forward(self, occupancy): 54 | return OccupancyToTopologyFunction.apply(occupancy) 55 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/setup.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | 3 | try: 4 | from setuptools import setup 5 | except ImportError: 6 | from distutils.core import setup 7 | 8 | from Cython.Build import cythonize 9 | 10 | import numpy 11 | from distutils.extension import Extension 12 | 13 | # Get the version number. 
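# The NumPy include directory below is passed to the extension so the Cython module can compile against NumPy's C headers.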
14 | numpy_include_dir = numpy.get_include() 15 | 16 | mcubes_module = Extension( 17 | "mcubes", 18 | [ 19 | "mcubes.pyx", 20 | "pywrapper.cpp", 21 | "marchingcubes.cpp" 22 | ], 23 | language="c++", 24 | extra_compile_args=['-std=c++11'], 25 | include_dirs=[numpy_include_dir] 26 | ) 27 | 28 | setup(name="PyMCubes", 29 | version="0.0.6", 30 | description="Marching cubes for Python", 31 | author="Pablo Márquez Neila", 32 | author_email="pablo.marquezneila@epfl.ch", 33 | url="https://github.com/pmneila/PyMCubes", 34 | license="BSD 3-clause", 35 | long_description=""" 36 | Marching cubes for Python 37 | """, 38 | classifiers=[ 39 | "Development Status :: 4 - Beta", 40 | "Environment :: Console", 41 | "Intended Audience :: Developers", 42 | "Intended Audience :: Science/Research", 43 | "License :: OSI Approved :: BSD License", 44 | "Natural Language :: English", 45 | "Operating System :: OS Independent", 46 | "Programming Language :: C++", 47 | "Programming Language :: Python", 48 | "Topic :: Multimedia :: Graphics :: 3D Modeling", 49 | "Topic :: Scientific/Engineering :: Image Recognition", 50 | ], 51 | packages=["mcubes"], 52 | ext_modules=cythonize(mcubes_module), 53 | requires=['numpy', 'Cython', 'PyCollada'], 54 | setup_requires=['numpy', 'Cython'] 55 | ) 56 | -------------------------------------------------------------------------------- /dmifnet/dmc/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dmifnet.dmc import models, training, generation 3 | from dmifnet import data 4 | 5 | 6 | def get_model(cfg, device=None, **kwargs): 7 | encoder = cfg['model']['encoder'] 8 | decoder = cfg['model']['decoder'] 9 | c_dim = cfg['model']['c_dim'] 10 | encoder_kwargs = cfg['model']['encoder_kwargs'] 11 | decoder_kwargs = cfg['model']['decoder_kwargs'] 12 | 13 | encoder = models.encoder_dict[encoder]( 14 | **encoder_kwargs 15 | ) 16 | 17 | decoder = models.decoder_dict[decoder]( 18 | **decoder_kwargs 19 | ) 20 | 21 | model = models.DMC(decoder, encoder) 22 | model = model.to(device) 23 | return model 24 | 25 | 26 | def get_trainer(model, optimizer, cfg, device, **kwargs): 27 | input_type = cfg['data']['input_type'] 28 | out_dir = cfg['training']['out_dir'] 29 | vis_dir = os.path.join(out_dir, 'vis') 30 | num_voxels = cfg['model']['num_voxels'] 31 | weight_prior = cfg['model']['dmc_weight_prior'] 32 | 33 | trainer = training.Trainer( 34 | model, optimizer, device=device, input_type=input_type, 35 | vis_dir=vis_dir, num_voxels=num_voxels, 36 | weight_prior=weight_prior, 37 | ) 38 | return trainer 39 | 40 | 41 | def get_generator(model, cfg, device, **kwargs): 42 | num_voxels = cfg['model']['num_voxels'] 43 | 44 | generator = generation.Generator3D( 45 | model, device=device, num_voxels=num_voxels 46 | ) 47 | return generator 48 | 49 | 50 | def get_data_fields(split, cfg, **kwargs): 51 | with_transforms = cfg['data']['with_transforms'] 52 | # TODO: put this into config 53 | pointcloud_n = 3000 54 | pointcloud_transform = data.SubsamplePointcloud(pointcloud_n) 55 | 56 | fields = {} 57 | fields['pointcloud'] = data.PointCloudField( 58 | cfg['data']['pointcloud_file'], pointcloud_transform, 59 | with_transforms=with_transforms 60 | ) 61 | 62 | return fields 63 | -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/exporter.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | 5 | def export_obj(vertices, triangles, 
filename): 6 | """ 7 | Exports a mesh in the (.obj) format. 8 | """ 9 | 10 | with open(filename, 'w') as fh: 11 | 12 | for v in vertices: 13 | fh.write("v {} {} {}\n".format(*v)) 14 | 15 | for f in triangles: 16 | fh.write("f {} {} {}\n".format(*(f + 1))) 17 | 18 | 19 | def export_off(vertices, triangles, filename): 20 | """ 21 | Exports a mesh in the (.off) format. 22 | """ 23 | 24 | with open(filename, 'w') as fh: 25 | fh.write('OFF\n') 26 | fh.write('{} {} 0\n'.format(len(vertices), len(triangles))) 27 | 28 | for v in vertices: 29 | fh.write("{} {} {}\n".format(*v)) 30 | 31 | for f in triangles: 32 | fh.write("3 {} {} {}\n".format(*f)) 33 | 34 | 35 | def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"): 36 | """ 37 | Exports a mesh in the COLLADA (.dae) format. 38 | 39 | Needs PyCollada (https://github.com/pycollada/pycollada). 40 | """ 41 | 42 | import collada 43 | 44 | mesh = collada.Collada() 45 | 46 | vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z')) 47 | geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src]) 48 | 49 | input_list = collada.source.InputList() 50 | input_list.addInput(0, 'VERTEX', "#verts-array") 51 | 52 | triset = geom.createTriangleSet(np.copy(triangles), input_list, "") 53 | geom.primitives.append(triset) 54 | mesh.geometries.append(geom) 55 | 56 | geomnode = collada.scene.GeometryNode(geom, []) 57 | node = collada.scene.Node(mesh_name, children=[geomnode]) 58 | 59 | myscene = collada.scene.Scene("mcubes_scene", [node]) 60 | mesh.scenes.append(myscene) 61 | mesh.scene = myscene 62 | 63 | mesh.write(filename) 64 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/exporter.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | 5 | def export_obj(vertices, triangles, filename): 6 | """ 7 | Exports a mesh in the (.obj) format. 8 | """ 9 | 10 | with open(filename, 'w') as fh: 11 | 12 | for v in vertices: 13 | fh.write("v {} {} {}\n".format(*v)) 14 | 15 | for f in triangles: 16 | fh.write("f {} {} {}\n".format(*(f + 1))) 17 | 18 | 19 | def export_off(vertices, triangles, filename): 20 | """ 21 | Exports a mesh in the (.off) format. 22 | """ 23 | 24 | with open(filename, 'w') as fh: 25 | fh.write('OFF\n') 26 | fh.write('{} {} 0\n'.format(len(vertices), len(triangles))) 27 | 28 | for v in vertices: 29 | fh.write("{} {} {}\n".format(*v)) 30 | 31 | for f in triangles: 32 | fh.write("3 {} {} {}\n".format(*f)) 33 | 34 | 35 | def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"): 36 | """ 37 | Exports a mesh in the COLLADA (.dae) format. 38 | 39 | Needs PyCollada (https://github.com/pycollada/pycollada). 
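Example (assuming vertices and triangles come from mcubes.marching_cubes): export_mesh(vertices, triangles, "mesh.dae", "my_mesh").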
40 | """ 41 | 42 | import collada 43 | 44 | mesh = collada.Collada() 45 | 46 | vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z')) 47 | geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src]) 48 | 49 | input_list = collada.source.InputList() 50 | input_list.addInput(0, 'VERTEX', "#verts-array") 51 | 52 | triset = geom.createTriangleSet(np.copy(triangles), input_list, "") 53 | geom.primitives.append(triset) 54 | mesh.geometries.append(geom) 55 | 56 | geomnode = collada.scene.GeometryNode(geom, []) 57 | node = collada.scene.Node(mesh_name, children=[geomnode]) 58 | 59 | myscene = collada.scene.Scene("mcubes_scene", [node]) 60 | mesh.scenes.append(myscene) 61 | mesh.scene = myscene 62 | 63 | mesh.write(filename) 64 | -------------------------------------------------------------------------------- /configs/default.yaml: -------------------------------------------------------------------------------- 1 | method: dmif_model 2 | data: 3 | dataset: Shapes3D 4 | path: data/ShapeNet 5 | classes: null 6 | input_type: img 7 | train_split: train 8 | val_split: val 9 | test_split: test 10 | dim: 3 11 | points_file: points.npz 12 | points_iou_file: points.npz 13 | points_subsample: 1024 14 | points_unpackbits: true 15 | model_file: model.off 16 | watertight_file: model_watertight.off 17 | img_folder: img 18 | img_size: 224 19 | img_with_camera: false 20 | img_augment: false 21 | n_views: 24 22 | pointcloud_file: pointcloud.npz 23 | pointcloud_chamfer_file: pointcloud.npz 24 | pointcloud_n: 256 25 | pointcloud_target_n: 1024 26 | pointcloud_noise: 0.05 27 | voxels_file: 'model.binvox' 28 | with_transforms: false 29 | model: 30 | decoder: simple 31 | encoder: resnet18 32 | encoder_latent: null 33 | decoder_kwargs: {} 34 | encoder_kwargs: {} 35 | encoder_latent_kwargs: {} 36 | multi_gpu: false 37 | c_dim: 512 38 | z_dim: 64 39 | use_camera: false 40 | dmc_weight_prior: 10. 41 | training: 42 | out_dir: out/default 43 | batch_size: 64 44 | print_every: 10 45 | visualize_every: 2000 46 | checkpoint_every: 1000 47 | validate_every: 2000 48 | backup_every: 100000 49 | eval_sample: false 50 | model_selection_metric: loss 51 | model_selection_mode: minimize 52 | test: 53 | threshold: 0.5 54 | eval_mesh: true 55 | eval_pointcloud: true 56 | model_file: model_best.pt 57 | generation: 58 | batch_size: 100000 59 | refinement_step: 0 60 | vis_n_outputs: 30 61 | generate_mesh: true 62 | generate_pointcloud: true 63 | generation_dir: generation 64 | use_sampling: false 65 | resolution_0: 32 66 | upsampling_steps: 2 67 | simplify_nfaces: null 68 | copy_groundtruth: false 69 | copy_input: true 70 | latent_number: 4 71 | latent_H: 8 72 | latent_W: 8 73 | latent_ny: 2 74 | latent_nx: 2 75 | latent_repeat: true 76 | preprocessor: 77 | type: null 78 | config: "" 79 | model_file: null 80 | -------------------------------------------------------------------------------- /dmifnet/preprocess.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from dmifnet import config 3 | from dmifnet.checkpoints import CheckpointIO 4 | from dmifnet.utils.io import export_pointcloud 5 | 6 | 7 | class PSGNPreprocessor: 8 | ''' Point Set Generation Networks (PSGN) preprocessor class. 
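It runs a pretrained PSGN model on the input images and returns a point cloud that is subsampled to pointcloud_n points.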
9 | 10 | Args: 11 | cfg_path (str): path to config file 12 | pointcloud_n (int): number of output points 13 | dataset (dataset): dataset 14 | device (device): pytorch device 15 | model_file (str): model file 16 | ''' 17 | 18 | def __init__(self, cfg_path, pointcloud_n, dataset=None, device=None, 19 | model_file=None): 20 | self.cfg = config.load_config(cfg_path, 'configs/default.yaml') 21 | self.pointcloud_n = pointcloud_n 22 | self.device = device 23 | self.dataset = dataset 24 | self.model = config.get_model(self.cfg, device, dataset) 25 | 26 | # Output directory of psgn model 27 | out_dir = self.cfg['training']['out_dir'] 28 | # If model_file not specified, use the one from psgn model 29 | if model_file is None: 30 | model_file = self.cfg['test']['model_file'] 31 | # Load model 32 | self.checkpoint_io = CheckpointIO(out_dir, model=self.model) 33 | self.checkpoint_io.load(model_file) 34 | 35 | def __call__(self, inputs): 36 | self.model.eval() 37 | with torch.no_grad(): 38 | points = self.model(inputs) 39 | 40 | batch_size = points.size(0) 41 | T = points.size(1) 42 | 43 | # Subsample points if necessary 44 | if T != self.pointcloud_n: 45 | idx = torch.randint( 46 | low=0, high=T, 47 | size=(batch_size, self.pointcloud_n), 48 | device=self.device 49 | ) 50 | idx = idx[:, :, None].expand(batch_size, self.pointcloud_n, 3) 51 | 52 | points = torch.gather(points, dim=1, index=idx) 53 | 54 | return points 55 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/pyrender.pyx: -------------------------------------------------------------------------------- 1 | cimport cython 2 | import numpy as np 3 | cimport numpy as np 4 | 5 | from libc.stdlib cimport free, malloc 6 | from libcpp cimport bool 7 | from cpython cimport PyObject, Py_INCREF 8 | 9 | CREATE_INIT = True # workaround, so cython builds a init function 10 | 11 | np.import_array() 12 | 13 | 14 | cdef extern from "offscreen.h": 15 | void renderDepthMesh(double *FM, int fNum, double *VM, int vNum, double *CM, double *intrinsics, int *imgSizeV, double *zNearFarV, unsigned char * imgBuffer, float *depthBuffer, bool *maskBuffer, double linewidth, bool coloring); 16 | 17 | 18 | def render(double[:,::1] vertices, double[:,::1] faces, double[::1] cam_intr, double[::1] znf, int[::1] img_size): 19 | if vertices.shape[0] != 3: 20 | raise Exception('vertices must be a 3xM double array') 21 | if faces.shape[0] != 3: 22 | raise Exception('faces must be a 3xM double array') 23 | if cam_intr.shape[0] != 4: 24 | raise Exception('cam_intr must be a 4x1 double vector') 25 | if img_size.shape[0] != 2: 26 | raise Exception('img_size must be a 2x1 int vector') 27 | 28 | cdef double* VM = &(vertices[0,0]) 29 | cdef int vNum = vertices.shape[1] 30 | cdef double* FM = &(faces[0,0]) 31 | cdef int fNum = faces.shape[1] 32 | cdef double* intrinsics = &(cam_intr[0]) 33 | cdef double* zNearVarV = &(znf[0]) 34 | cdef int* imgSize = &(img_size[0]) 35 | 36 | cdef bool coloring = False 37 | cdef double* CM = NULL 38 | 39 | depth = np.empty((img_size[1], img_size[0]), dtype=np.float32) 40 | mask = np.empty((img_size[1], img_size[0]), dtype=np.uint8) 41 | img = np.empty((3, img_size[1], img_size[0]), dtype=np.uint8) 42 | cdef float[:,::1] depth_view = depth 43 | cdef unsigned char[:,::1] mask_view = mask 44 | cdef unsigned char[:,:,::1] img_view = img 45 | cdef float* depthBuffer = &(depth_view[0,0]) 46 | cdef bool* maskBuffer = &(mask_view[0,0]) 47 | cdef unsigned char* imgBuffer = &(img_view[0,0,0]) 48 | 49 | 
renderDepthMesh(FM, fNum, VM, vNum, CM, intrinsics, imgSize, zNearVarV, imgBuffer, depthBuffer, maskBuffer, 0, coloring); 50 | 51 | return depth.T, mask.T, img.transpose((2,1,0)) 52 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/src/kernels.h: -------------------------------------------------------------------------------- 1 | #ifndef __DMC_CUDA_KERNELS__ 2 | #define __DMC_CUDA_KERNELS__ 3 | 4 | #include 5 | #include 6 | 7 | 8 | // Curvature constraint 9 | void curvature_constraint_kernel_forward( 10 | at::Tensor offset, 11 | at::Tensor topology, 12 | at::Tensor xTable, 13 | at::Tensor yTable, 14 | at::Tensor zTable, 15 | at::Tensor innerTable, 16 | at::Tensor loss_x, 17 | at::Tensor loss_y, 18 | at::Tensor loss_z, 19 | at::Tensor loss_inner); 20 | 21 | 22 | void curvature_constraint_kernel_backward( 23 | at::Tensor grad_output, 24 | at::Tensor offset, 25 | at::Tensor topology, 26 | at::Tensor xTable, 27 | at::Tensor yTable, 28 | at::Tensor zTable, 29 | at::Tensor innerTable, 30 | at::Tensor grad_offset); 31 | 32 | // Grid pooling 33 | void grid_pooling_kernel_forward( 34 | at::Tensor point, 35 | at::Tensor feat_points, 36 | at::Tensor shape, 37 | at::Tensor feat_cell, 38 | at::Tensor indices); 39 | 40 | void grid_pooling_kernel_backward( 41 | at::Tensor grad_output, 42 | at::Tensor shape, 43 | at::Tensor indices, 44 | at::Tensor grad_feat_points); 45 | 46 | // Occ2Topo 47 | void occupancy_to_topology_kernel_forward( 48 | at::Tensor occupancy, 49 | at::Tensor topology ); 50 | 51 | void occupancy_to_topology_kernel_backward( 52 | at::Tensor grad_output, 53 | at::Tensor occupancy, 54 | at::Tensor topology, 55 | at::Tensor grad_occupancy); 56 | 57 | // OccConstraint 58 | void occupancy_connectivity_kernel_forward( 59 | at::Tensor occupancy, 60 | at::Tensor loss); 61 | 62 | void occupancy_connectivity_kernel_backward( 63 | at::Tensor grad_output, 64 | at::Tensor occupancy, 65 | at::Tensor grad_occupancy); 66 | 67 | // Points Triangle distance 68 | void point_topology_distance_kernel_forward( 69 | at::Tensor offset, 70 | at::Tensor points, 71 | at::Tensor distances, 72 | at::Tensor indices_all); 73 | 74 | void point_topology_distance_kernel_backward( 75 | at::Tensor grad_output, 76 | at::Tensor offset, 77 | at::Tensor points, 78 | at::Tensor indices_all, 79 | at::Tensor grad_offset); 80 | 81 | #endif -------------------------------------------------------------------------------- /dmifnet/utils/libmcubes/README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | PyMCubes 3 | ======== 4 | 5 | PyMCubes is an implementation of the marching cubes algorithm to extract 6 | isosurfaces from volumetric data. The volumetric data can be given as a 7 | three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first 8 | option is much faster, but it requires more memory and becomes unfeasible for 9 | very large volumes. 10 | 11 | PyMCubes also provides a function to export the results of the marching cubes as 12 | COLLADA ``(.dae)`` files. This requires the 13 | `PyCollada `_ library. 
14 | 15 | Installation 16 | ============ 17 | 18 | Just as any standard Python package, clone or download the project 19 | and run:: 20 | 21 | $ cd path/to/PyMCubes 22 | $ python setup.py build 23 | $ python setup.py install 24 | 25 | If you do not have write permission on the directory of Python packages, 26 | install with the ``--user`` option:: 27 | 28 | $ python setup.py install --user 29 | 30 | Example 31 | ======= 32 | 33 | The following example creates a data volume with spherical isosurfaces and 34 | extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as 35 | ``sphere.dae``:: 36 | 37 | >>> import numpy as np 38 | >>> import mcubes 39 | 40 | # Create a data volume (30 x 30 x 30) 41 | >>> X, Y, Z = np.mgrid[:30, :30, :30] 42 | >>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2 43 | 44 | # Extract the 0-isosurface 45 | >>> vertices, triangles = mcubes.marching_cubes(u, 0) 46 | 47 | # Export the result to sphere.dae 48 | >>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere") 49 | 50 | The second example is very similar to the first one, but it uses a function 51 | to represent the volume instead of a NumPy array:: 52 | 53 | >>> import numpy as np 54 | >>> import mcubes 55 | 56 | # Create the volume 57 | >>> f = lambda x, y, z: x**2 + y**2 + z**2 58 | 59 | # Extract the 16-isosurface 60 | >>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10), 61 | ... 100, 100, 100, f, 16) 62 | 63 | # Export the result to sphere2.dae 64 | >>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere") 65 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/point_triangle_distance.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | from torch import nn 4 | from torch.autograd import Function 5 | from torch.autograd import Variable 6 | from dmifnet.dmc.ops.table import get_connected_pairs 7 | from ._cuda_ext import point_topology_distance_forward, point_topology_distance_backward 8 | 9 | 10 | class PointTriangleDistanceFunction(Function): 11 | @staticmethod 12 | def forward(ctx, offset, points): 13 | W = offset.size()[1] 14 | H = offset.size()[2] 15 | D = offset.size()[3] 16 | 17 | # we only considered topologies with up to 3 triangles for calculating 18 | # the distance loss function, the distance can be calculated in regardless 19 | # of the normal vectors, therefore there are only 48 topologies to be 20 | # considered 21 | T = 48 22 | 23 | distances_full = torch.zeros((W-1)*(H-1)*(D-1), T).cuda() 24 | indices = -1 * torch.ones((points.size(0), T), dtype=torch.int32, device='cuda') 25 | point_topology_distance_forward( 26 | offset, points, distances_full, indices) 27 | ctx.save_for_backward(offset, points, indices) 28 | return distances_full 29 | 30 | @staticmethod 31 | def backward(ctx, grad_output): 32 | offset, points, indices = ctx.saved_tensors 33 | 34 | grad_offset = torch.zeros(offset.size(), device='cuda') 35 | point_topology_distance_backward( 36 | grad_output, offset, points, indices, grad_offset) 37 | return grad_offset, None 38 | 39 | 40 | class PointTriangleDistance(nn.Module): 41 | 42 | """ 43 | Module for deriving the Point to Triangle distance 44 | (for each topology with up to 3 triangles) 45 | 46 | Forward 47 | ---------- 48 | arg1 : tensor 49 | offset variable [3 x W+1 x H+1 x D+1] 50 | 51 | arg1 : tensor 52 | points [N x 3] 53 | 54 | Returns 55 | ------- 56 | tensor 57 | distance [W*H*D x T] 58 | 59 | 
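Note: only the 48 topologies with at most three triangles are evaluated, and the distance is computed irrespective of the triangle normals (see the comment in the forward pass above).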
""" 60 | 61 | def __init__(self): 62 | super(PointTriangleDistance, self).__init__() 63 | def forward(self, offset, points): 64 | return PointTriangleDistanceFunction.apply(offset, points) 65 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | PyMCubes 3 | ======== 4 | 5 | PyMCubes is an implementation of the marching cubes algorithm to extract 6 | isosurfaces from volumetric data. The volumetric data can be given as a 7 | three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first 8 | option is much faster, but it requires more memory and becomes unfeasible for 9 | very large volumes. 10 | 11 | PyMCubes also provides a function to export the results of the marching cubes as 12 | COLLADA ``(.dae)`` files. This requires the 13 | `PyCollada `_ library. 14 | 15 | Installation 16 | ============ 17 | 18 | Just as any standard Python package, clone or download the project 19 | and run:: 20 | 21 | $ cd path/to/PyMCubes 22 | $ python setup.py build 23 | $ python setup.py install 24 | 25 | If you do not have write permission on the directory of Python packages, 26 | install with the ``--user`` option:: 27 | 28 | $ python setup.py install --user 29 | 30 | Example 31 | ======= 32 | 33 | The following example creates a data volume with spherical isosurfaces and 34 | extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as 35 | ``sphere.dae``:: 36 | 37 | >>> import numpy as np 38 | >>> import mcubes 39 | 40 | # Create a data volume (30 x 30 x 30) 41 | >>> X, Y, Z = np.mgrid[:30, :30, :30] 42 | >>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2 43 | 44 | # Extract the 0-isosurface 45 | >>> vertices, triangles = mcubes.marching_cubes(u, 0) 46 | 47 | # Export the result to sphere.dae 48 | >>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere") 49 | 50 | The second example is very similar to the first one, but it uses a function 51 | to represent the volume instead of a NumPy array:: 52 | 53 | >>> import numpy as np 54 | >>> import mcubes 55 | 56 | # Create the volume 57 | >>> f = lambda x, y, z: x**2 + y**2 + z**2 58 | 59 | # Extract the 16-isosurface 60 | >>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10), 61 | ... 100, 100, 100, f, 16) 62 | 63 | # Export the result to sphere2.dae 64 | >>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere") 65 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017, The OctNet authors 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # * Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # * Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # * Neither the name of the nor the 12 | # names of its contributors may be used to endorse or promote products 13 | # derived from this software without specific prior written permission. 
14 | # 15 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 19 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | cmake_minimum_required(VERSION 2.8) 27 | set(CMAKE_MACOSX_RPATH 1) 28 | 29 | set(CMAKE_CXX_STANDARD 11) 30 | 31 | # set(CMAKE_BUILD_TYPE Debug) 32 | set(CMAKE_BUILD_TYPE Release) 33 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 34 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 35 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_FORCE_INLINES -Wall") 36 | 37 | find_package(OpenMP) 38 | if (OPENMP_FOUND) 39 | set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 40 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 41 | endif() 42 | 43 | add_library(fusion_cpu SHARED fusion.cpp) 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /dmif_model/models/encoder_latent.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | # Max Pooling operation 7 | def maxpool(x, dim=-1, keepdim=False): 8 | out, _ = x.max(dim=dim, keepdim=keepdim) 9 | return out 10 | 11 | 12 | class Encoder(nn.Module): 13 | ''' Latent encoder class. 14 | 15 | It encodes the input points and returns mean and standard deviation for the 16 | posterior Gaussian distribution. 
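The returned mean and log standard deviation are typically used to sample the latent code z with the reparameterization trick, z = mean + eps * exp(logstd).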
17 | 18 | Args: 19 | z_dim (int): dimension if output code z 20 | c_dim (int): dimension of latent conditioned code c 21 | dim (int): input dimension 22 | leaky (bool): whether to use leaky ReLUs 23 | ''' 24 | def __init__(self, z_dim=128, c_dim=128, dim=3, leaky=False): 25 | super().__init__() 26 | self.z_dim = z_dim 27 | self.c_dim = c_dim 28 | 29 | # Submodules 30 | self.fc_pos = nn.Linear(dim, 128) 31 | 32 | if c_dim != 0: 33 | self.fc_c = nn.Linear(c_dim, 128) 34 | 35 | self.fc_0 = nn.Linear(1, 128) 36 | self.fc_1 = nn.Linear(128, 128) 37 | self.fc_2 = nn.Linear(256, 128) 38 | self.fc_3 = nn.Linear(256, 128) 39 | self.fc_mean = nn.Linear(128, z_dim) 40 | self.fc_logstd = nn.Linear(128, z_dim) 41 | 42 | if not leaky: 43 | self.actvn = F.relu 44 | self.pool = maxpool 45 | else: 46 | self.actvn = lambda x: F.leaky_relu(x, 0.2) 47 | self.pool = torch.mean 48 | 49 | def forward(self, p, x, c=None, **kwargs): 50 | batch_size, T, D = p.size() 51 | 52 | # output size: B x T X F 53 | net = self.fc_0(x.unsqueeze(-1)) 54 | net = net + self.fc_pos(p) 55 | 56 | if self.c_dim != 0: 57 | net = net + self.fc_c(c).unsqueeze(1) 58 | 59 | net = self.fc_1(self.actvn(net)) 60 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 61 | net = torch.cat([net, pooled], dim=2) 62 | 63 | net = self.fc_2(self.actvn(net)) 64 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 65 | net = torch.cat([net, pooled], dim=2) 66 | 67 | net = self.fc_3(self.actvn(net)) 68 | # Reduce 69 | # to B x F 70 | net = self.pool(net, dim=1) 71 | 72 | mean = self.fc_mean(net) 73 | logstd = self.fc_logstd(net) 74 | 75 | return mean, logstd 76 | -------------------------------------------------------------------------------- /dmifnet/dmif_model/models/encoder_latent.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | # Max Pooling operation 7 | def maxpool(x, dim=-1, keepdim=False): 8 | out, _ = x.max(dim=dim, keepdim=keepdim) 9 | return out 10 | 11 | 12 | class Encoder(nn.Module): 13 | ''' Latent encoder class. 14 | 15 | It encodes the input points and returns mean and standard deviation for the 16 | posterior Gaussian distribution. 
17 | 18 | Args: 19 | z_dim (int): dimension if output code z 20 | c_dim (int): dimension of latent conditioned code c 21 | dim (int): input dimension 22 | leaky (bool): whether to use leaky ReLUs 23 | ''' 24 | def __init__(self, z_dim=128, c_dim=128, dim=3, leaky=False): 25 | super().__init__() 26 | self.z_dim = z_dim 27 | self.c_dim = c_dim 28 | 29 | # Submodules 30 | self.fc_pos = nn.Linear(dim, 128) 31 | 32 | if c_dim != 0: 33 | self.fc_c = nn.Linear(c_dim, 128) 34 | 35 | self.fc_0 = nn.Linear(1, 128) 36 | self.fc_1 = nn.Linear(128, 128) 37 | self.fc_2 = nn.Linear(256, 128) 38 | self.fc_3 = nn.Linear(256, 128) 39 | self.fc_mean = nn.Linear(128, z_dim) 40 | self.fc_logstd = nn.Linear(128, z_dim) 41 | 42 | if not leaky: 43 | self.actvn = F.relu 44 | self.pool = maxpool 45 | else: 46 | self.actvn = lambda x: F.leaky_relu(x, 0.2) 47 | self.pool = torch.mean 48 | 49 | def forward(self, p, x, c=None, **kwargs): 50 | batch_size, T, D = p.size() 51 | 52 | # output size: B x T X F 53 | net = self.fc_0(x.unsqueeze(-1)) 54 | net = net + self.fc_pos(p) 55 | 56 | if self.c_dim != 0: 57 | net = net + self.fc_c(c).unsqueeze(1) 58 | 59 | net = self.fc_1(self.actvn(net)) 60 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 61 | net = torch.cat([net, pooled], dim=2) 62 | 63 | net = self.fc_2(self.actvn(net)) 64 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 65 | net = torch.cat([net, pooled], dim=2) 66 | 67 | net = self.fc_3(self.actvn(net)) 68 | # Reduce 69 | # to B x F 70 | net = self.pool(net, dim=1) 71 | 72 | mean = self.fc_mean(net) 73 | logstd = self.fc_logstd(net) 74 | 75 | return mean, logstd 76 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017, The OctNet authors 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # * Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # * Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # * Neither the name of the nor the 12 | # names of its contributors may be used to endorse or promote products 13 | # derived from this software without specific prior written permission. 14 | # 15 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 19 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
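# NOTE: the -gencode flags below target compute_30/sm_30 (Kepler); CUDA toolkits from version 11 onwards no longer support sm_30, so these architecture settings may need to be raised on newer setups.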
25 | 26 | cmake_minimum_required(VERSION 2.8) 27 | set(CMAKE_CXX_STANDARD 11) 28 | 29 | # set(CMAKE_BUILD_TYPE Debug) 30 | set(CMAKE_BUILD_TYPE Release) 31 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 32 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 33 | 34 | find_package(CUDA 6.5 REQUIRED) 35 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-std=c++11") 36 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_FORCE_INLINES -Wall") 37 | 38 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}; -gencode=arch=compute_30,code=sm_30") 39 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}; -gencode=arch=compute_30,code=compute_30") 40 | 41 | set(FUSION_GPU_SRC 42 | fusion.cu 43 | fusion_zach_tvl1.cu 44 | ) 45 | 46 | cuda_add_library(fusion_gpu SHARED ${FUSION_GPU_SRC}) 47 | target_link_libraries(fusion_gpu ${CUDA_LIBRARIES}) 48 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/grid_pooling.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 | from torch import nn 4 | from torch.autograd import Function 5 | from torch.autograd import Variable 6 | from ._cuda_ext import grid_pooling_forward, grid_pooling_backward 7 | 8 | 9 | class GridPoolingFunction(Function): 10 | """ Perform max-pooling in every cell over the point features 11 | see ../src/extension.cpp 12 | ../src/grid_pooling_kernel.cu 13 | for more details 14 | """ 15 | @staticmethod 16 | def forward(ctx, feat_points, points, grid_shape): 17 | feat_points = feat_points.contiguous() 18 | points = points.contiguous() 19 | W = grid_shape[0] 20 | H = grid_shape[1] 21 | D = grid_shape[2] 22 | C = feat_points.size()[1] 23 | grid_shape = grid_shape.cpu().contiguous() 24 | feat_cells = torch.zeros((W*H*D, C), dtype=torch.float32, device='cuda') 25 | indices = -1 * torch.ones((W*H*D, C), dtype=torch.int32, device='cuda') 26 | grid_pooling_forward(points, feat_points, grid_shape, feat_cells, indices) 27 | 28 | # save for back-propagation 29 | ctx.save_for_backward(indices, grid_shape) 30 | # save number of points and feature dimension for back-propagation 31 | ctx.N = points.size()[0] 32 | ctx.C = C 33 | 34 | return feat_cells 35 | 36 | @staticmethod 37 | def backward(ctx, grad_output): 38 | grad_output = grad_output.contiguous() 39 | indices, grid_shape = ctx.saved_tensors 40 | N, C = ctx.N, ctx.C 41 | grad_points = torch.zeros((N, C), dtype=torch.float32, device='cuda') 42 | grid_pooling_backward(grad_output, grid_shape, indices, grad_points) 43 | # we only need gradient on feat_points 44 | return grad_points, None, None 45 | 46 | 47 | class GridPooling(nn.Module): 48 | 49 | """ 50 | Module for Grid Pooling from Points with features to grid cells with features 51 | 52 | Init 53 | ---------- 54 | arg1: gridshape [3] 55 | 56 | 57 | Forward 58 | ---------- 59 | arg1 : tensor 60 | point features [N x F] 61 | 62 | arg2 : tensor 63 | point locations [N x 3] 64 | 65 | Returns 66 | ------- 67 | tensor 68 | Feature grid [W*H*D x F] 69 | 70 | """ 71 | 72 | def __init__(self, gridshape): 73 | super(GridPooling, self).__init__() 74 | self.gridshape = gridshape 75 | 76 | def forward(self, features, points): 77 | return GridPoolingFunction.apply(features, points, self.gridshape) 78 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/curvature_constraint.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import math 3 |
from torch import nn 4 | from torch.autograd import Function 5 | from torch.autograd import Variable 6 | from dmifnet.dmc.ops.table import get_connected_pairs 7 | from ._cuda_ext import curvature_constraint_forward, curvature_constraint_backward 8 | 9 | 10 | ######### TEST FAILS ######### 11 | 12 | # return connected pairs in x, y, z directions, inner cell pairs as well as a topology to triangles table 13 | x, y, z, inner, topology_to_triangles = get_connected_pairs() 14 | 15 | 16 | class CurvatureConstraintFunction(Function): 17 | @staticmethod 18 | def forward(ctx, offset, topology): 19 | loss = torch.zeros(1, dtype=torch.float32, device='cuda') 20 | loss = curvature_constraint_forward( 21 | offset, 22 | topology[:, torch.LongTensor(topology_to_triangles).cuda()], 23 | torch.FloatTensor(x).cuda(), 24 | torch.FloatTensor(y).cuda(), 25 | torch.FloatTensor(z).cuda(), 26 | torch.FloatTensor(inner).cuda()) 27 | ctx.save_for_backward(offset, topology) 28 | return loss 29 | 30 | @staticmethod 31 | def backward(ctx, grad_output): 32 | offset, topology = ctx.saved_tensors 33 | 34 | grad_offset = torch.zeros(offset.size()).cuda() 35 | curvature_constraint_backward( 36 | grad_output, 37 | offset, 38 | topology[:, torch.LongTensor(topology_to_triangles).cuda()], 39 | torch.FloatTensor(x).cuda(), 40 | torch.FloatTensor(y).cuda(), 41 | torch.FloatTensor(z).cuda(), 42 | torch.FloatTensor(inner).cuda(), 43 | grad_offset) 44 | 45 | # Multiply with incoming gradient 46 | grad_offset = grad_offset * grad_output 47 | grad_topology = torch.zeros(topology.size()).cuda() 48 | return grad_offset, grad_topology 49 | 50 | 51 | class CurvatureConstraint(nn.Module): 52 | 53 | """ 54 | ######### TEST FAILS ######### 55 | Module for deriving the curvature loss of each cell given the offset variables 56 | 57 | Forward 58 | ---------- 59 | arg1 : tensor 60 | offset variables [3 x W+1 x H+1 x D+1] 61 | arg2 : tensor 62 | topology probabilities [W*H*D x T] 63 | 64 | Returns 65 | ------- 66 | tensor 67 | curvature loss 1 68 | 69 | """ 70 | def __init__(self): 71 | super(CurvatureConstraint, self).__init__() 72 | def forward(self, off, topo): 73 | return CurvatureConstraintFunction.apply(off, topo) 74 | -------------------------------------------------------------------------------- /external/mesh-fusion/3_simplify.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import ntpath 4 | import common 5 | 6 | 7 | class Simplification: 8 | """ 9 | Perform simplification of watertight meshes. 10 | """ 11 | 12 | def __init__(self): 13 | """ 14 | Constructor. 15 | """ 16 | 17 | parser = self.get_parser() 18 | self.options = parser.parse_args() 19 | self.simplification_script = os.path.join( 20 | os.path.dirname(os.path.realpath(__file__)), 'simplification.mlx') 21 | 22 | def get_parser(self): 23 | """ 24 | Get parser of tool. 25 | 26 | :return: parser 27 | """ 28 | 29 | parser = argparse.ArgumentParser(description='Simplify a set of meshes stored as OFF files.') 30 | input_group = parser.add_mutually_exclusive_group(required=True) 31 | input_group.add_argument('--in_dir', type=str, 32 | help='Path to input directory.') 33 | input_group.add_argument('--in_file', type=str, 34 | help='Path to input file.') 35 | parser.add_argument('--out_dir', type=str, 36 | help='Path to output directory; files within are overwritten!') 37 | 38 | return parser 39 | 40 | def read_directory(self, directory): 41 | """ 42 | Read directory.
43 | 44 | :param directory: path to directory 45 | :return: list of files 46 | """ 47 | 48 | files = [] 49 | for filename in os.listdir(directory): 50 | files.append(os.path.normpath(os.path.join(directory, filename))) 51 | 52 | return files 53 | 54 | def get_in_files(self): 55 | if self.options.in_dir is not None: 56 | assert os.path.exists(self.options.in_dir) 57 | common.makedir(self.options.out_dir) 58 | files = self.read_directory(self.options.in_dir) 59 | else: 60 | files = [self.options.in_file] 61 | 62 | return files 63 | 64 | def run(self): 65 | """ 66 | Run simplification. 67 | """ 68 | 69 | common.makedir(self.options.out_dir) 70 | files = self.get_in_files() 71 | 72 | for filepath in files: 73 | os.system('meshlabserver -i %s -o %s -s %s' % ( 74 | filepath, 75 | os.path.join(self.options.out_dir, ntpath.basename(filepath)), 76 | self.simplification_script 77 | )) 78 | 79 | 80 | if __name__ == '__main__': 81 | app = Simplification() 82 | app.run() 83 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/test.py: -------------------------------------------------------------------------------- 1 | import pyrender 2 | import numpy as np 3 | from matplotlib import pyplot 4 | import math 5 | 6 | # render settings 7 | img_h = 480 8 | img_w = 480 9 | fx = 480. 10 | fy = 480. 11 | cx = 240 12 | cy = 240 13 | 14 | def model(): 15 | 16 | # note that xx is height here! 17 | xx = -0.2 18 | yy = -0.2 19 | zz = -0.2 20 | 21 | v000 = (xx, yy, zz) # 0 22 | v001 = (xx, yy, zz + 0.4) # 1 23 | v010 = (xx, yy + 0.4, zz) # 2 24 | v011 = (xx, yy + 0.4, zz + 0.4) # 3 25 | v100 = (xx + 0.4, yy, zz) # 4 26 | v101 = (xx + 0.4, yy, zz + 0.4) # 5 27 | v110 = (xx + 0.4, yy + 0.4, zz) # 6 28 | v111 = (xx + 0.4, yy + 0.4, zz + 0.4) # 7 29 | 30 | f1 = [0, 2, 4] 31 | f2 = [4, 2, 6] 32 | f3 = [1, 3, 5] 33 | f4 = [5, 3, 7] 34 | f5 = [0, 1, 2] 35 | f6 = [1, 3, 2] 36 | f7 = [4, 5, 7] 37 | f8 = [4, 7, 6] 38 | f9 = [4, 0, 1] 39 | f10 = [4, 5, 1] 40 | f11 = [2, 3, 6] 41 | f12 = [3, 7, 6] 42 | 43 | vertices = [] 44 | vertices.append(v000) 45 | vertices.append(v001) 46 | vertices.append(v010) 47 | vertices.append(v011) 48 | vertices.append(v100) 49 | vertices.append(v101) 50 | vertices.append(v110) 51 | vertices.append(v111) 52 | 53 | faces = [] 54 | faces.append(f1) 55 | faces.append(f2) 56 | faces.append(f3) 57 | faces.append(f4) 58 | faces.append(f5) 59 | faces.append(f6) 60 | faces.append(f7) 61 | faces.append(f8) 62 | faces.append(f9) 63 | faces.append(f10) 64 | faces.append(f11) 65 | faces.append(f12) 66 | 67 | return vertices, faces 68 | 69 | def render(vertices, faces): 70 | 71 | x = 0 72 | y = math.pi/4 73 | z = 0 74 | R_x = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]]) 75 | R_y = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]]) 76 | R_z = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]]) 77 | R = R_z.dot(R_y.dot(R_x)) 78 | 79 | np_vertices = np.array(vertices).astype(np.float64) 80 | np_vertices = R.dot(np_vertices.T).T 81 | np_vertices[:, 2] += 1.5 82 | np_faces = np.array(faces).astype(np.float64) 83 | np_faces += 1 84 | 85 | depthmap, mask, img = pyrender.render(np_vertices.T.copy(), np_faces.T.copy(), np.array([fx, fy, cx, cy]), np.array([1., 2.]), np.array([img_h, img_w], dtype=np.int32)) 86 | pyplot.imshow(depthmap) 87 | pyplot.show() 88 | pyplot.imshow(img) 89 | pyplot.show() 90 | 91 | if __name__ == '__main__': 92 | vertices, faces = 
model() 93 | render(vertices, faces) 94 | -------------------------------------------------------------------------------- /dmifnet/utils/libvoxelize/voxelize.pyx: -------------------------------------------------------------------------------- 1 | cimport cython 2 | from libc.math cimport floor, ceil 3 | from cython.view cimport array as cvarray 4 | 5 | cdef extern from "tribox2.h": 6 | int triBoxOverlap(float boxcenter[3], float boxhalfsize[3], 7 | float tri0[3], float tri1[3], float tri2[3]) 8 | 9 | 10 | @cython.boundscheck(False) # Deactivate bounds checking 11 | @cython.wraparound(False) # Deactivate negative indexing. 12 | cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces): 13 | assert(faces.shape[1] == 3) 14 | assert(faces.shape[2] == 3) 15 | 16 | n_faces = faces.shape[0] 17 | cdef int i 18 | for i in range(n_faces): 19 | voxelize_triangle_(occ, faces[i]) 20 | 21 | 22 | @cython.boundscheck(False) # Deactivate bounds checking 23 | @cython.wraparound(False) # Deactivate negative indexing. 24 | cpdef int voxelize_triangle_(bint[:, :, :] occupancies, float[:, ::1] triverts): 25 | cdef int bbox_min[3] 26 | cdef int bbox_max[3] 27 | cdef int i, j, k 28 | cdef float boxhalfsize[3] 29 | cdef float boxcenter[3] 30 | cdef bint intersection 31 | 32 | boxhalfsize[:] = (0.5, 0.5, 0.5) 33 | 34 | for i in range(3): 35 | bbox_min[i] = ( 36 | min(triverts[0, i], triverts[1, i], triverts[2, i]) 37 | ) 38 | bbox_min[i] = min(max(bbox_min[i], 0), occupancies.shape[i] - 1) 39 | 40 | for i in range(3): 41 | bbox_max[i] = ( 42 | max(triverts[0, i], triverts[1, i], triverts[2, i]) 43 | ) 44 | bbox_max[i] = min(max(bbox_max[i], 0), occupancies.shape[i] - 1) 45 | 46 | for i in range(bbox_min[0], bbox_max[0] + 1): 47 | for j in range(bbox_min[1], bbox_max[1] + 1): 48 | for k in range(bbox_min[2], bbox_max[2] + 1): 49 | boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5) 50 | intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], 51 | &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) 52 | occupancies[i, j, k] |= intersection 53 | 54 | 55 | @cython.boundscheck(False) # Deactivate bounds checking 56 | @cython.wraparound(False) # Deactivate negative indexing. 
57 | cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts): 58 | assert(boxcenter.shape[0] == 3) 59 | assert(boxhalfsize.shape[0] == 3) 60 | assert(triverts.shape[0] == triverts.shape[1] == 3) 61 | 62 | # print(triverts) 63 | # Call functions 64 | cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], 65 | &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) 66 | return result 67 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/tests/test_distance.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.append('../../../..') 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.autograd import Variable 8 | import time 9 | import numpy as np 10 | import resource 11 | 12 | from dmifnet.dmc.ops.tests.loss_autograd import LossAutoGrad 13 | from dmifnet.dmc.ops.point_triangle_distance import PointTriangleDistance 14 | 15 | 16 | print("Testing CUDA extension...") 17 | dtype = torch.cuda.FloatTensor 18 | dtype_long = torch.cuda.LongTensor 19 | num_cells = 4 20 | # autograd loss 21 | loss_autograd = LossAutoGrad(num_cells, 1.0) 22 | 23 | multiGrids = PointTriangleDistance() 24 | 25 | 26 | if __name__ == '__main__': 27 | 28 | print("=========== Input =============") 29 | point = Variable(torch.rand(10, 3).view(-1,3).type(dtype) * 0.9) * num_cells 30 | offset = Variable(torch.zeros(3, num_cells+1, num_cells+1, num_cells+1).type(dtype)*0.5, requires_grad=True) 31 | print(point.shape) 32 | print(offset.shape) 33 | 34 | print("============= cuda extension ============") 35 | # forward 36 | tf_c = time.time() 37 | distance = multiGrids.forward(offset, point) 38 | tf_c = time.time() - tf_c 39 | distance_np = distance.data.cpu().numpy() 40 | print("cffi distance:") 41 | print(distance_np.shape) 42 | 43 | weight_rnd = Variable(torch.rand(distance.size()).type(dtype), requires_grad=False) 44 | distance_sum = torch.sum(torch.mul(distance, weight_rnd)) 45 | 46 | # backward 47 | tb_c = time.time() 48 | grad = distance_sum.backward() 49 | tb_c = time.time() - tb_c 50 | offset_np = np.copy(offset.grad.data.cpu().numpy()) 51 | 52 | print("cffi grad:") 53 | print(offset_np.shape) 54 | 55 | print("============= auto ============") 56 | # forward 57 | tf_py = time.time() 58 | distance_auto = loss_autograd.loss_point_to_mesh_distance_autograd(offset, point) 59 | tf_py = time.time()-tf_py 60 | distance_auto_np = distance_auto.data.cpu().numpy() 61 | print("auto distance:") 62 | print(distance_auto_np.shape) 63 | weight_rnd = Variable(weight_rnd.data) 64 | distance_sum_auto = torch.sum(torch.mul(distance_auto, weight_rnd)) 65 | 66 | # backward 67 | offset.grad.data.zero_() 68 | 69 | tb_py = time.time() 70 | distance_sum_auto.backward() 71 | tb_py = time.time() - tb_py 72 | print("auto grad: ") 73 | offset_auto_np = np.copy(offset.grad.data.cpu().numpy()) 74 | print(offset_auto_np.shape) 75 | 76 | print("========== summary ===========") 77 | print("Forward difference between cffi and auto: "+str(np.sum(np.abs(distance_np[:,:-1]-distance_auto_np[:,:-1])))) 78 | print("Backward difference between cffi and auto: "+str(np.sum(np.abs(offset_np-offset_auto_np)))) -------------------------------------------------------------------------------- /dmifnet/dmc/models/encoder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | 4 | from dmifnet.dmc.ops.grid_pooling import GridPooling 5 | 
6 | 7 | class PointNetLocal(nn.Module): 8 | ''' Point Net Local Conditional Network from the Deep Marching Cubes paper. 9 | 10 | It applies two fully connected layers to the input points (dim 3) in a 11 | 1D Convolutional Layer fashion to avoid having to specify the number of 12 | incoming points 13 | ''' 14 | def __init__(self, c_dim=256, out_dim=16, cell_W=16, cell_H=16, cell_D=16): 15 | super().__init__() 16 | self.cell_W = cell_W 17 | self.cell_H = cell_H 18 | self.cell_D = cell_D 19 | 20 | # TODO change gridpooling input to be compatible to single values of W H D 21 | self.gridshape = torch.cuda.LongTensor([cell_W, cell_H, cell_D]) 22 | actvn = nn.ReLU() 23 | self.grid_pool = GridPooling(self.gridshape) 24 | self.conv1 = nn.Sequential( 25 | nn.Conv1d(3, c_dim, 1), actvn 26 | ) 27 | #self.conv2 = nn.Sequential( 28 | # nn.Conv1d(c_dim, out_dim, 1), actvn 29 | #) 30 | self.conv2 = nn.Conv1d(c_dim, out_dim, 1) 31 | 32 | def forward(self, x): 33 | pts = x 34 | feats = x.transpose(1, 2) # b_size x 3 x num_points 35 | feats = self.conv1(feats) # b_size x c_dim x num_points 36 | feats = self.conv2(feats) # b_size x out_dim x num_points 37 | feats = feats.transpose(1, 2) # b_size x num_points x out_dim 38 | 39 | out = self.point_to_cell(pts, feats, self.cell_W, self.cell_H, self.cell_D) 40 | return out 41 | 42 | def point_to_cell(self, pts, feat, W, H, D, expand=1): 43 | """ perform maxpool on points in every cell; set a zero vector if the cell is 44 | empty. If expand=1, return (N+1)x(N+1)x(N+1) (for DMC); if expand=0, 45 | return NxNxN (for occupancy/sdf baselines) 46 | """ 47 | batchsize = feat.size()[0] 48 | C = feat.size()[2] 49 | 50 | feat_cell = [] 51 | # grid_shape = torch.LongTensor([W, H, D]) 52 | for k in range(batchsize): 53 | feat_cell.append(self.grid_pool(feat[k, :, :], pts[k, :, :])) 54 | 55 | feat_cell = torch.stack(feat_cell, dim=0) 56 | 57 | # TODO check if this view is compatible to output of grid pool 58 | feat_cell = torch.transpose(feat_cell, 1, 2).contiguous().view( 59 | -1, C, W, H, D) 60 | if expand == 0: 61 | return feat_cell 62 | 63 | # expand to (W+1)x(H+1)x(D+1) 64 | curr_size = feat_cell.size() 65 | feat_cell_exp = torch.zeros( 66 | curr_size[0], curr_size[1], curr_size[2]+1, curr_size[3]+1, 67 | curr_size[4]+1).to(pts.device) 68 | feat_cell_exp[:, :, :-1, :-1, :-1] = feat_cell 69 | return feat_cell_exp 70 | -------------------------------------------------------------------------------- /dmifnet/utils/libsimplify/simplify_mesh.pyx: -------------------------------------------------------------------------------- 1 | # distutils: language = c++ 2 | from libcpp.vector cimport vector 3 | import numpy as np 4 | cimport numpy as np 5 | 6 | 7 | cdef extern from "Simplify.h": 8 | cdef struct vec3f: 9 | double x, y, z 10 | 11 | cdef cppclass SymetricMatrix: 12 | SymetricMatrix() except + 13 | 14 | 15 | cdef extern from "Simplify.h" namespace "Simplify": 16 | cdef struct Triangle: 17 | int v[3] 18 | double err[4] 19 | int deleted, dirty, attr 20 | vec3f uvs[3] 21 | int material 22 | 23 | cdef struct Vertex: 24 | vec3f p 25 | int tstart, tcount 26 | SymetricMatrix q 27 | int border 28 | 29 | cdef vector[Triangle] triangles 30 | cdef vector[Vertex] vertices 31 | cdef void simplify_mesh(int, double) 32 | 33 | 34 | cpdef mesh_simplify(double[:, ::1] vertices_in, long[:, ::1] triangles_in, 35 | int f_target, double agressiveness=7.)
except +: 36 | vertices.clear() 37 | triangles.clear() 38 | 39 | # Read in vertices and triangles 40 | cdef Vertex v 41 | for iv in range(vertices_in.shape[0]): 42 | v = Vertex() 43 | v.p.x = vertices_in[iv, 0] 44 | v.p.y = vertices_in[iv, 1] 45 | v.p.z = vertices_in[iv, 2] 46 | vertices.push_back(v) 47 | 48 | cdef Triangle t 49 | for it in range(triangles_in.shape[0]): 50 | t = Triangle() 51 | t.v[0] = triangles_in[it, 0] 52 | t.v[1] = triangles_in[it, 1] 53 | t.v[2] = triangles_in[it, 2] 54 | triangles.push_back(t) 55 | 56 | # Simplify 57 | # print('Simplify...') 58 | simplify_mesh(f_target, agressiveness) 59 | 60 | # Only use triangles that are not deleted 61 | cdef vector[Triangle] triangles_notdel 62 | triangles_notdel.reserve(triangles.size()) 63 | 64 | for t in triangles: 65 | if not t.deleted: 66 | triangles_notdel.push_back(t) 67 | 68 | # Read out triangles 69 | vertices_out = np.empty((vertices.size(), 3), dtype=np.float64) 70 | triangles_out = np.empty((triangles_notdel.size(), 3), dtype=np.int64) 71 | 72 | cdef double[:, :] vertices_out_view = vertices_out 73 | cdef long[:, :] triangles_out_view = triangles_out 74 | 75 | for iv in range(vertices.size()): 76 | vertices_out_view[iv, 0] = vertices[iv].p.x 77 | vertices_out_view[iv, 1] = vertices[iv].p.y 78 | vertices_out_view[iv, 2] = vertices[iv].p.z 79 | 80 | for it in range(triangles_notdel.size()): 81 | triangles_out_view[it, 0] = triangles_notdel[it].v[0] 82 | triangles_out_view[it, 1] = triangles_notdel[it].v[1] 83 | triangles_out_view[it, 2] = triangles_notdel[it].v[2] 84 | 85 | # Clear vertices and triangles 86 | vertices.clear() 87 | triangles.clear() 88 | 89 | return vertices_out, triangles_out -------------------------------------------------------------------------------- /dmifnet/pix2mesh/generation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import trimesh 3 | import dmifnet.common as common 4 | 5 | 6 | class Generator3D(object): 7 | ''' Mesh Generator Class for the Pixel2Mesh model. 8 | 9 | A forward pass is made with the image and camera matrices to obtain the 10 | predicted vertex locations for the mesh. Subsequently, the faces of the 11 | base mesh of an ellipsoid are used together with the predicted vertices to 12 | obtain the final mesh 13 | ''' 14 | 15 | def __init__(self, model, base_mesh, device=None): 16 | ''' Initialisation 17 | 18 | Args: 19 | model (PyTorch model): the Pixel2Mesh model 20 | base_mesh (tensor): the base ellipsoid provided by the authors 21 | device (PyTorch device): the PyTorch device 22 | ''' 23 | self.model = model.to(device) 24 | self.device = device 25 | self.base_mesh = base_mesh 26 | 27 | def generate_mesh(self, data, fix_normals=False): 28 | ''' Generates a mesh. 
29 | 30 | Arguments: 31 | data (tensor): input data 32 | fix_normals (boolean): if normals should be fixed 33 | ''' 34 | 35 | img = data.get('inputs').to(self.device) 36 | camera_args = common.get_camera_args( 37 | data, 'pointcloud.loc', 'pointcloud.scale', device=self.device) 38 | world_mat, camera_mat = camera_args['Rt'], camera_args['K'] 39 | with torch.no_grad(): 40 | outputs1, outputs2 = self.model(img, camera_mat) 41 | out_1, out_2, out_3 = outputs1 42 | 43 | transformed_pred = common.transform_points_back(out_3, world_mat) 44 | vertices = transformed_pred.squeeze().cpu().numpy() 45 | 46 | faces = self.base_mesh[:, 1:] # remove the f's in the first column 47 | faces = faces.astype(int) - 1 # To adjust indices to trimesh notation 48 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False) 49 | if fix_normals: 50 | # Fix normals due to wrong base ellipsoid 51 | trimesh.repair.fix_normals(mesh) 52 | return mesh 53 | 54 | def generate_pointcloud(self, data): 55 | ''' Generates a pointcloud by only returning the vertices 56 | 57 | Arguments: 58 | data (tensor): input data 59 | ''' 60 | 61 | img = data.get('inputs').to(self.device) 62 | camera_args = common.get_camera_args( 63 | data, 'pointcloud.loc', 'pointcloud.scale', device=self.device) 64 | world_mat, camera_mat = camera_args['Rt'], camera_args['K'] 65 | 66 | with torch.no_grad(): 67 | outputs1, _ = self.model(img, camera_mat) 68 | _, _, out_3 = outputs1 69 | transformed_pred = common.transform_points_back(out_3, world_mat) 70 | pc_out = transformed_pred.squeeze().cpu().numpy() 71 | return pc_out 72 | -------------------------------------------------------------------------------- /dmifnet/r2n2/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dmifnet.encoder import encoder_dict 3 | from dmifnet.r2n2 import models, training, generation 4 | from dmifnet import data 5 | 6 | 7 | def get_model(cfg, device=None, **kwargs): 8 | ''' Return the model. 9 | 10 | Args: 11 | cfg (dict): loaded yaml config 12 | device (device): pytorch device 13 | ''' 14 | decoder = cfg['model']['decoder'] 15 | encoder = cfg['model']['encoder'] 16 | dim = cfg['data']['dim'] 17 | # z_dim = cfg['model']['z_dim'] 18 | c_dim = cfg['model']['c_dim'] 19 | # encoder_kwargs = cfg['model']['encoder_kwargs'] 20 | decoder_kwargs = cfg['model']['decoder_kwargs'] 21 | encoder_kwargs = cfg['model']['encoder_kwargs'] 22 | 23 | decoder = models.decoder_dict[decoder]( 24 | dim=dim, c_dim=c_dim, 25 | **decoder_kwargs 26 | ) 27 | 28 | encoder = encoder_dict[encoder]( 29 | c_dim=c_dim, 30 | **encoder_kwargs 31 | ) 32 | 33 | model = models.R2N2(decoder, encoder) 34 | model = model.to(device) 35 | 36 | return model 37 | 38 | 39 | def get_trainer(model, optimizer, cfg, device, **kwargs): 40 | ''' Returns the trainer object. 41 | 42 | Args: 43 | model (nn.Module): R2N2 model 44 | optimizer (optimizer): pytorch optimizer 45 | cfg (dict): loaded yaml config 46 | device (device): pytorch device 47 | ''' 48 | threshold = cfg['test']['threshold'] 49 | out_dir = cfg['training']['out_dir'] 50 | vis_dir = os.path.join(out_dir, 'vis') 51 | input_type = cfg['data']['input_type'] 52 | 53 | trainer = training.Trainer( 54 | model, optimizer, device=device, 55 | input_type=input_type, vis_dir=vis_dir, 56 | threshold=threshold 57 | ) 58 | return trainer 59 | 60 | 61 | def get_generator(model, cfg, device, **kwargs): 62 | ''' Returns the generator object. 
63 | 64 | Args: 65 | model (nn.Module): R2N2 model 66 | cfg (dict): loaded yaml config 67 | device (device): pytorch device 68 | ''' 69 | generator = generation.VoxelGenerator3D( 70 | model, device=device 71 | ) 72 | return generator 73 | 74 | 75 | def get_data_fields(split, cfg, **kwargs): 76 | ''' Returns the data fields. 77 | 78 | Args: 79 | split (str): the split which should be used 80 | cfg (dict): loaded yaml config 81 | ''' 82 | with_transforms = cfg['data']['with_transforms'] 83 | 84 | fields = {} 85 | 86 | if split == 'train': 87 | fields['voxels'] = data.VoxelsField( 88 | cfg['data']['voxels_file'] 89 | ) 90 | elif split in ('val', 'test'): 91 | fields['points_iou'] = data.PointsField( 92 | cfg['data']['points_iou_file'], 93 | with_transforms=with_transforms, 94 | unpackbits=cfg['data']['points_unpackbits'], 95 | ) 96 | 97 | return fields 98 | -------------------------------------------------------------------------------- /dmifnet/psgn/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dmifnet.encoder import encoder_dict 3 | from dmifnet.psgn import models, training, generation 4 | from dmifnet import data 5 | 6 | 7 | def get_model(cfg, device=None, **kwargs): 8 | r''' Returns the model instance. 9 | 10 | Args: 11 | cfg (yaml object): the config file 12 | device (PyTorch device): the PyTorch device 13 | ''' 14 | decoder = cfg['model']['decoder'] 15 | encoder = cfg['model']['encoder'] 16 | dim = cfg['data']['dim'] 17 | c_dim = cfg['model']['c_dim'] 18 | decoder_kwargs = cfg['model']['decoder_kwargs'] 19 | encoder_kwargs = cfg['model']['encoder_kwargs'] 20 | 21 | decoder = models.decoder_dict[decoder]( 22 | dim=dim, c_dim=c_dim, 23 | **decoder_kwargs 24 | ) 25 | 26 | encoder = encoder_dict[encoder]( 27 | c_dim=c_dim, 28 | **encoder_kwargs 29 | ) 30 | 31 | model = models.PCGN(decoder, encoder) 32 | model = model.to(device) 33 | return model 34 | 35 | 36 | def get_trainer(model, optimizer, cfg, device, **kwargs): 37 | r''' Returns the trainer instance. 38 | 39 | Args: 40 | model (nn.Module): PSGN model 41 | optimizer (PyTorch optimizer): The optimizer that should be used 42 | cfg (yaml object): the config file 43 | device (PyTorch device): the PyTorch device 44 | ''' 45 | input_type = cfg['data']['input_type'] 46 | out_dir = cfg['training']['out_dir'] 47 | vis_dir = os.path.join(out_dir, 'vis') 48 | 49 | trainer = training.Trainer( 50 | model, optimizer, device=device, input_type=input_type, 51 | vis_dir=vis_dir 52 | ) 53 | return trainer 54 | 55 | 56 | def get_generator(model, cfg, device, **kwargs): 57 | r''' Returns the generator instance. 58 | 59 | Args: 60 | cfg (yaml object): the config file 61 | device (PyTorch device): the PyTorch device 62 | ''' 63 | generator = generation.Generator3D(model, device=device) 64 | return generator 65 | 66 | 67 | def get_data_fields(mode, cfg, **kwargs): 68 | r''' Returns the data fields. 
69 | 70 | Args: 71 | mode (string): The split that is used (train/val/test) 72 | cfg (yaml object): the config file 73 | ''' 74 | with_transforms = cfg['data']['with_transforms'] 75 | pointcloud_transform = data.SubsamplePointcloud( 76 | cfg['data']['pointcloud_target_n']) 77 | 78 | fields = {} 79 | fields['pointcloud'] = data.PointCloudField( 80 | cfg['data']['pointcloud_file'], pointcloud_transform, 81 | with_transforms=with_transforms 82 | ) 83 | 84 | if mode in ('val', 'test'): 85 | pointcloud_chamfer_file = cfg['data']['pointcloud_chamfer_file'] 86 | if pointcloud_chamfer_file is not None: 87 | fields['pointcloud_chamfer'] = data.PointCloudField( 88 | pointcloud_chamfer_file 89 | ) 90 | 91 | return fields 92 | -------------------------------------------------------------------------------- /dmifnet/encoder/pix2mesh_cond.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class Pix2mesh_Cond(nn.Module): 5 | r''' Conditioning Network proposed in the authors' Pixel2Mesh implementation. 6 | 7 | The network consists of several 2D convolution layers, and several of the 8 | intermediate feature maps are returned as features for the image 9 | projection layer of the encoder network. 10 | ''' 11 | def __init__(self, c_dim=512, return_feature_maps=True): 12 | r''' Initialisation. 13 | 14 | Args: 15 | c_dim (int): channels of the final output 16 | return_feature_maps (bool): whether intermediate feature maps 17 | should be returned 18 | ''' 19 | super().__init__() 20 | actvn = nn.ReLU() 21 | self.return_feature_maps = return_feature_maps 22 | num_fm = int(c_dim/32) 23 | if num_fm != 16: 24 | raise ValueError('Pixel2Mesh requires a fixed c_dim of 512!') 25 | 26 | self.block_1 = nn.Sequential( 27 | nn.Conv2d(3, num_fm, 3, stride=1, padding=1), actvn, 28 | nn.Conv2d(num_fm, num_fm, 3, stride=1, padding=1), actvn, 29 | nn.Conv2d(num_fm, num_fm*2, 3, stride=2, padding=1), actvn, 30 | nn.Conv2d(num_fm*2, num_fm*2, 3, stride=1, padding=1), actvn, 31 | nn.Conv2d(num_fm*2, num_fm*2, 3, stride=1, padding=1), actvn, 32 | nn.Conv2d(num_fm*2, num_fm*4, 3, stride=2, padding=1), actvn, 33 | nn.Conv2d(num_fm*4, num_fm*4, 3, stride=1, padding=1), actvn, 34 | nn.Conv2d(num_fm*4, num_fm*4, 3, stride=1, padding=1), actvn) 35 | 36 | self.block_2 = nn.Sequential( 37 | nn.Conv2d(num_fm*4, num_fm*8, 3, stride=2, padding=1), actvn, 38 | nn.Conv2d(num_fm*8, num_fm*8, 3, stride=1, padding=1), actvn, 39 | nn.Conv2d(num_fm*8, num_fm*8, 3, stride=1, padding=1), actvn) 40 | 41 | self.block_3 = nn.Sequential( 42 | nn.Conv2d(num_fm*8, num_fm*16, 5, stride=2, padding=2), actvn, 43 | nn.Conv2d(num_fm*16, num_fm*16, 3, stride=1, padding=1), actvn, 44 | nn.Conv2d(num_fm*16, num_fm*16, 3, stride=1, padding=1), actvn) 45 | 46 | self.block_4 = nn.Sequential( 47 | nn.Conv2d(num_fm*16, num_fm*32, 5, stride=2, padding=2), actvn, 48 | nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn, 49 | nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn, 50 | nn.Conv2d(num_fm*32, num_fm*32, 3, stride=1, padding=1), actvn, 51 | ) 52 | 53 | def forward(self, x): 54 | # x has size 224 x 224 55 | x_0 = self.block_1(x) # 64 x 56 x 56 56 | x_1 = self.block_2(x_0) # 128 x 28 x 28 57 | x_2 = self.block_3(x_1) # 256 x 14 x 14 58 | x_3 = self.block_4(x_2) # 512 x 7 x 7 59 | 60 | if self.return_feature_maps: 61 | return x_0, x_1, x_2, x_3 62 | return x_3 63 | --------------------------------------------------------------------------------
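A minimal usage sketch (not part of the repository) for the conditioning network above; the random input, the batch size of 2, and the 224 x 224 resolution are assumptions for illustration, while the expected output shapes follow the comments in the forward pass:

import torch
from dmifnet.encoder.pix2mesh_cond import Pix2mesh_Cond

# instantiate the encoder; c_dim must be 512 for Pixel2Mesh
encoder = Pix2mesh_Cond(c_dim=512, return_feature_maps=True)
# a hypothetical batch of two 224 x 224 RGB images
images = torch.randn(2, 3, 224, 224)
# the four returned feature maps shrink spatially while channels grow:
# (2, 64, 56, 56), (2, 128, 28, 28), (2, 256, 14, 14), (2, 512, 7, 7)
x_0, x_1, x_2, x_3 = encoder(images)
print(x_0.shape, x_1.shape, x_2.shape, x_3.shape)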
/dmifnet/dmc/ops/tests/test_occupancy_connectivity_yiyi.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | import sys 5 | sys.path.append('../../../..') 6 | import time 7 | import numpy as np 8 | 9 | from dmifnet.dmc.ops.tests.loss_autograd import LossAutoGrad 10 | # NOTE: assumes occupancy_connectivity.py exposes an OccupancyConnectivity module wrapping the CUDA op 11 | from dmifnet.dmc.ops.occupancy_connectivity import OccupancyConnectivity 12 | 13 | # check the cuda extension or c extension 14 | print("Testing CUDA extension...") 15 | dtype = torch.cuda.FloatTensor 16 | 17 | num_cells = 4 18 | weight_smoothness = 3.0 19 | 20 | # auto loss 21 | loss_autograd = LossAutoGrad(num_cells, 1.0) 22 | # cuda extension loss 23 | occupancy_connectivity = OccupancyConnectivity() 24 | 25 | 26 | def loss_on_smoothness(occupancy): 27 | """Compute the smoothness loss defined between neighboring occupancy 28 | variables 29 | """ 30 | loss = ( 31 | occupancy_connectivity(occupancy) / (num_cells**3) 32 | * weight_smoothness 33 | ) 34 | return loss 35 | 36 | 37 | if __name__ == '__main__': 38 | 39 | W = H = D = num_cells 40 | occupancy = Variable(torch.rand(W+1, H+1, D+1).type(dtype), requires_grad=True) 41 | rnd_weights = Variable(torch.rand(W*H*D, 48).type(dtype)) 42 | 43 | print("=========== Input =============") 44 | print(occupancy) 45 | 46 | print("============= cffi ============") 47 | # forward (run once to warm up, then time) 48 | loss = 0.1*loss_on_smoothness(occupancy)*num_cells**3 49 | tf_c = time.time() 50 | loss = 0.1*loss_on_smoothness(occupancy)*num_cells**3 51 | tf_c = time.time() - tf_c 52 | print("cffi forward time: ", tf_c) 53 | print(loss) 54 | 55 | # backward 56 | tb_c = time.time() 57 | loss.backward() 58 | tb_c = time.time() - tb_c 59 | print("cffi backward time: ", tb_c) 60 | 61 | grad_np = np.copy(occupancy.grad.data.cpu().numpy()) 62 | print(grad_np) 63 | 64 | print("============= auto ============") 65 | occupancy = Variable(occupancy.data.cpu(), requires_grad=True) 66 | rnd_weights = Variable(rnd_weights.data.cpu()) 67 | 68 | # forward 69 | tf_py = time.time() 70 | loss_auto = 0.1*loss_autograd.loss_on_smoothness_autograd(occupancy) 71 | tf_py = time.time()-tf_py 72 | print("auto forward time: ", tf_py) 73 | print(loss_auto) 74 | 75 | # backward 76 | #occupancy.grad.data.zero_() 77 | tb_py = time.time() 78 | loss_auto.backward() 79 | tb_py = time.time()-tb_py 80 | print("auto backward time: ", tb_py) 81 | 82 | grad_auto_np = np.copy(occupancy.grad.data.cpu().numpy()) 83 | print(grad_auto_np) 84 | assert np.sum(np.abs(grad_np)) != 0.0 and np.sum(np.abs(grad_auto_np)) != 0.0 85 | print("========== summary ===========") 86 | print("Forward difference between cffi and auto: ", np.sum(np.abs(loss.data.cpu().numpy()-loss_auto.data.cpu().numpy()))) 87 | print("Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np))) 88 | 89 | print("cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c)) 90 | print("auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py)) 91 | print("ratio: ", (tf_py+tb_py)/(tf_c + tb_c)) -------------------------------------------------------------------------------- /dmifnet/psgn/models/psgn_2branch.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | 4 | 5 | class PCGN_2Branch(nn.Module): 6 | r''' The 2-Branch decoder of the Point Set Generation Network. 7 | 8 | The latent embedding of the image is passed through a fully-connected 9 | branch as well as a convolution-based branch which receives additional 10 | input from the conditioning network. 11 | ''' 12 | def __init__(self, dim=3, c_dim=512, n_points=1024): 13 | r''' Initialisation.
14 | 15 | Args: 16 | dim (int): dimension of the output points (e.g. 3) 17 | c_dim (int): dimension of the output of the conditioning network 18 | n_points (int): number of points to predict 19 | 20 | ''' 21 | super().__init__() 22 | # Attributes 23 | actvn = nn.ReLU() 24 | self.actvn = actvn 25 | self.dim = dim 26 | num_fm = int(c_dim/32) 27 | conv_c_in = 32 * num_fm 28 | fc_dim_in = 3*4*conv_c_in # input image is downsampled to 3x4 29 | fc_pts = n_points - 768 # conv branch has a fixed output of 768 points 30 | 31 | # Submodules 32 | self.fc_branch = nn.Sequential(nn.Linear(fc_dim_in, fc_pts*dim), actvn) 33 | self.deconv_1 = nn.ConvTranspose2d(c_dim, num_fm*16, 5, 2, 2, 1) 34 | self.deconv_2 = nn.ConvTranspose2d(num_fm*16, num_fm*8, 5, 2, 2, 1) 35 | self.deconv_3 = nn.ConvTranspose2d(num_fm*8, num_fm*4, 5, 2, 2, 1) 36 | # TODO: unused, remove? (keep it for now to load old checkpoints) 37 | self.deconv_4 = nn.ConvTranspose2d(num_fm*4, 3, 5, 2, 2, 1) 38 | 39 | self.conv_1 = nn.Sequential( 40 | nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn) 41 | self.conv_2 = nn.Sequential( 42 | nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn) 43 | self.conv_3 = nn.Sequential( 44 | nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn) 45 | self.conv_4 = nn.Conv2d(num_fm*4, dim, 3, 1, 1) 46 | 47 | def forward(self, c): 48 | x, feature_maps = c 49 | batch_size = x.shape[0] 50 | 51 | fc_branch = self.fc_branch(x.view(batch_size, -1)) 52 | fc_branch = fc_branch.view(batch_size, -1, 3) 53 | 54 | conv_branch = self.deconv_1(x) 55 | conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-1])) 56 | 57 | conv_branch = self.conv_1(conv_branch) 58 | conv_branch = self.deconv_2(conv_branch) 59 | conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-2])) 60 | 61 | conv_branch = self.conv_2(conv_branch) 62 | conv_branch = self.deconv_3(conv_branch) 63 | conv_branch = self.actvn(torch.add(conv_branch, feature_maps[-3])) 64 | 65 | conv_branch = self.conv_3(conv_branch) 66 | conv_branch = self.conv_4(conv_branch) 67 | conv_branch = conv_branch.view(batch_size, -1, self.dim) 68 | 69 | output = torch.cat([fc_branch, conv_branch], dim=1) 70 | return output 71 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/fusion.cpp: -------------------------------------------------------------------------------- 1 | #include "fusion.h" 2 | 3 | #include 4 | #include 5 | 6 | #if defined(_OPENMP) 7 | #include 8 | #endif 9 | 10 | 11 | template 12 | void fusion_cpu(const Views& views, const FusionFunctorT functor, float vx_size, int n_threads, Volume& vol) { 13 | int vx_res3 = vol.depth_ * vol.height_ * vol.width_; 14 | 15 | #if defined(_OPENMP) 16 | omp_set_num_threads(n_threads); 17 | #endif 18 | #pragma omp parallel for 19 | for(int idx = 0; idx < vx_res3; ++idx) { 20 | int d,h,w; 21 | fusion_idx2dhw(idx, vol.width_,vol.height_, d,h,w); 22 | float x,y,z; 23 | fusion_dhw2xyz(d,h,w, vx_size, x,y,z); 24 | 25 | functor.before_sample(&vol, d,h,w); 26 | bool run = true; 27 | int n_valid_views = 0; 28 | for(int vidx = 0; vidx < views.n_views_ && run; ++vidx) { 29 | float ur, vr, vx_d; 30 | fusion_project(&views, vidx, x,y,z, ur,vr,vx_d); 31 | 32 | int u = int(ur + 0.5f); 33 | int v = int(vr + 0.5f); 34 | // printf(" vx %d,%d,%d has center %f,%f,%f and projects to uvd=%f,%f,%f\n", w,h,d, x,y,z, ur,vr,vx_d); 35 | 36 | if(u >= 0 && v >= 0 && u < views.cols_ && v < views.rows_) { 37 | int dm_idx = (vidx * views.rows_ + v) * views.cols_ + u; 38 | float dm_d = 
views.depthmaps_[dm_idx]; 39 | // printf(" is on depthmap[%d,%d] with depth=%f, diff=%f\n", views.cols_,views.rows_, dm_d, dm_d - vx_d); 40 | run = functor.new_sample(&vol, vx_d, dm_d, d,h,w, &n_valid_views); 41 | } 42 | } // for vidx 43 | functor.after_sample(&vol, d,h,w, n_valid_views); 44 | } 45 | } 46 | 47 | void fusion_projectionmask_cpu(const Views& views, float vx_size, bool unknown_is_free, int n_threads, Volume& vol) { 48 | ProjectionMaskFusionFunctor functor(unknown_is_free); 49 | fusion_cpu(views, functor, vx_size, n_threads, vol); 50 | } 51 | 52 | void fusion_occupancy_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, Volume& vol) { 53 | OccupancyFusionFunctor functor(truncation, unknown_is_free); 54 | fusion_cpu(views, functor, vx_size, n_threads, vol); 55 | } 56 | 57 | void fusion_tsdfmask_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, Volume& vol) { 58 | TsdfMaskFusionFunctor functor(truncation, unknown_is_free); 59 | fusion_cpu(views, functor, vx_size, n_threads, vol); 60 | } 61 | 62 | void fusion_tsdf_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, Volume& vol) { 63 | TsdfFusionFunctor functor(truncation, unknown_is_free); 64 | fusion_cpu(views, functor, vx_size, n_threads, vol); 65 | } 66 | 67 | void fusion_tsdf_hist_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, float* bin_centers, int n_bins, bool unobserved_is_occupied, int n_threads, Volume& vol) { 68 | TsdfHistFusionFunctor functor(truncation, unknown_is_free, bin_centers, n_bins, unobserved_is_occupied); 69 | fusion_cpu(views, functor, vx_size, n_threads, vol); 70 | } 71 | -------------------------------------------------------------------------------- /dmifnet/encoder/voxels.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | 6 | class VoxelEncoder(nn.Module): 7 | ''' 3D-convolutional encoder network for voxel input. 8 | 9 | Args: 10 | dim (int): input dimension 11 | c_dim (int): output dimension 12 | ''' 13 | 14 | def __init__(self, dim=3, c_dim=128): 15 | super().__init__() 16 | self.actvn = F.relu 17 | 18 | self.conv_in = nn.Conv3d(1, 32, 3, padding=1) 19 | 20 | self.conv_0 = nn.Conv3d(32, 64, 3, padding=1, stride=2) 21 | self.conv_1 = nn.Conv3d(64, 128, 3, padding=1, stride=2) 22 | self.conv_2 = nn.Conv3d(128, 256, 3, padding=1, stride=2) 23 | self.conv_3 = nn.Conv3d(256, 512, 3, padding=1, stride=2) 24 | self.fc = nn.Linear(512 * 2 * 2 * 2, c_dim) 25 | 26 | def forward(self, x): 27 | batch_size = x.size(0) 28 | 29 | x = x.unsqueeze(1) 30 | net = self.conv_in(x) 31 | net = self.conv_0(self.actvn(net)) 32 | net = self.conv_1(self.actvn(net)) 33 | net = self.conv_2(self.actvn(net)) 34 | net = self.conv_3(self.actvn(net)) 35 | 36 | hidden = net.view(batch_size, 512 * 2 * 2 * 2) 37 | c = self.fc(self.actvn(hidden)) 38 | 39 | return c 40 | 41 | 42 | class CoordVoxelEncoder(nn.Module): 43 | ''' 3D-convolutional encoder network for voxel input. 44 | 45 | It additional concatenates the coordinate data. 
46 | 47 | Args: 48 | dim (int): input dimension 49 | c_dim (int): output dimension 50 | ''' 51 | 52 | def __init__(self, dim=3, c_dim=128): 53 | super().__init__() 54 | self.actvn = F.relu 55 | 56 | self.conv_in = nn.Conv3d(4, 32, 3, padding=1) 57 | 58 | self.conv_0 = nn.Conv3d(32, 64, 3, padding=1, stride=2) 59 | self.conv_1 = nn.Conv3d(64, 128, 3, padding=1, stride=2) 60 | self.conv_2 = nn.Conv3d(128, 256, 3, padding=1, stride=2) 61 | self.conv_3 = nn.Conv3d(256, 512, 3, padding=1, stride=2) 62 | self.fc = nn.Linear(512 * 2 * 2 * 2, c_dim) 63 | 64 | def forward(self, x): 65 | batch_size = x.size(0) 66 | device = x.device 67 | 68 | coord1 = torch.linspace(-0.5, 0.5, x.size(1)).to(device) 69 | coord2 = torch.linspace(-0.5, 0.5, x.size(2)).to(device) 70 | coord3 = torch.linspace(-0.5, 0.5, x.size(3)).to(device) 71 | 72 | coord1 = coord1.view(1, -1, 1, 1).expand_as(x) 73 | coord2 = coord2.view(1, 1, -1, 1).expand_as(x) 74 | coord3 = coord3.view(1, 1, 1, -1).expand_as(x) 75 | 76 | coords = torch.stack([coord1, coord2, coord3], dim=1) 77 | 78 | x = x.unsqueeze(1) 79 | net = torch.cat([x, coords], dim=1) 80 | net = self.conv_in(net) 81 | net = self.conv_0(self.actvn(net)) 82 | net = self.conv_1(self.actvn(net)) 83 | net = self.conv_2(self.actvn(net)) 84 | net = self.conv_3(self.actvn(net)) 85 | 86 | hidden = net.view(batch_size, 512 * 2 * 2 * 2) 87 | c = self.fc(self.actvn(hidden)) 88 | 89 | return c 90 | -------------------------------------------------------------------------------- /set_env_up.py: -------------------------------------------------------------------------------- 1 | try: 2 | from setuptools import setup 3 | except ImportError: 4 | from distutils.core import setup 5 | from distutils.extension import Extension 6 | from Cython.Build import cythonize 7 | from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension 8 | import numpy 9 | 10 | 11 | numpy_include_dir = numpy.get_include() 12 | 13 | #kd tree 14 | pykdtree = Extension( 15 | 'dmifnet.utils.libkdtree.pykdtree.kdtree', 16 | sources=[ 17 | 'dmifnet/utils/libkdtree/pykdtree/kdtree.c', 18 | 'dmifnet/utils/libkdtree/pykdtree/_kdtree_core.c' 19 | ], 20 | language='c', 21 | extra_compile_args=['-std=c99', '-O3', '-fopenmp'], 22 | extra_link_args=['-lgomp'], 23 | ) 24 | 25 | # marching cubes algorithm 26 | mcubes_module = Extension( 27 | 'dmifnet.utils.libmcubes.mcubes', 28 | sources=[ 29 | 'dmifnet/utils/libmcubes/mcubes.pyx', 30 | 'dmifnet/utils/libmcubes/pywrapper.cpp', 31 | 'dmifnet/utils/libmcubes/marchingcubes.cpp' 32 | ], 33 | language='c++', 34 | extra_compile_args=['-std=c++11'], 35 | include_dirs=[numpy_include_dir] 36 | ) 37 | 38 | # triangle hash (efficient mesh intersection) 39 | triangle_hash_module = Extension( 40 | 'dmifnet.utils.libmesh.triangle_hash', 41 | sources=[ 42 | 'dmifnet/utils/libmesh/triangle_hash.pyx' 43 | ], 44 | libraries=['m'] # Unix-like specific 45 | ) 46 | 47 | 48 | mise_module = Extension( 49 | 'dmifnet.utils.libmise.mise', 50 | sources=[ 51 | 'dmifnet/utils/libmise/mise.pyx' 52 | ], 53 | ) 54 | 55 | simplify_mesh_module = Extension( 56 | 'dmifnet.utils.libsimplify.simplify_mesh', 57 | sources=[ 58 | 'dmifnet/utils/libsimplify/simplify_mesh.pyx' 59 | ] 60 | ) 61 | 62 | voxelize_module = Extension( 63 | 'dmifnet.utils.libvoxelize.voxelize', 64 | sources=[ 65 | 'dmifnet/utils/libvoxelize/voxelize.pyx' 66 | ], 67 | libraries=['m'] # Unix-like specific 68 | ) 69 | 70 | 71 | dmc_pred2mesh_module = CppExtension( 72 | 'dmifnet.dmc.ops.cpp_modules.pred2mesh', 73 | sources=[ 74 | 
'dmifnet/dmc/ops/cpp_modules/pred_to_mesh_.cpp', 75 | ] 76 | ) 77 | 78 | dmc_cuda_module = CUDAExtension( 79 | 'dmifnet.dmc.ops._cuda_ext', 80 | sources=[ 81 | 'dmifnet/dmc/ops/src/extension.cpp', 82 | 'dmifnet/dmc/ops/src/curvature_constraint_kernel.cu', 83 | 'dmifnet/dmc/ops/src/grid_pooling_kernel.cu', 84 | 'dmifnet/dmc/ops/src/occupancy_to_topology_kernel.cu', 85 | 'dmifnet/dmc/ops/src/occupancy_connectivity_kernel.cu', 86 | 'dmifnet/dmc/ops/src/point_triangle_distance_kernel.cu', 87 | ] 88 | ) 89 | 90 | 91 | ext_modules = [ 92 | pykdtree, 93 | mcubes_module, 94 | triangle_hash_module, 95 | mise_module, 96 | simplify_mesh_module, 97 | voxelize_module, 98 | dmc_pred2mesh_module, 99 | dmc_cuda_module, 100 | ] 101 | 102 | setup( 103 | ext_modules=cythonize(ext_modules), 104 | include_dirs=[numpy.get_include()], 105 | cmdclass={ 106 | 'build_ext': BuildExtension 107 | } 108 | ) 109 | -------------------------------------------------------------------------------- /dmifnet/encoder/psgn_cond.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class PCGN_Cond(nn.Module): 5 | r''' Point Set Generation Network encoding network. 6 | 7 | The PSGN conditioning network from the original publication consists of 8 | several 2D convolution layers. The intermediate outputs from some layers 9 | are used as additional input to the encoder network, similar to U-Net. 10 | 11 | Args: 12 | c_dim (int): output dimension of the latent embedding 13 | ''' 14 | def __init__(self, c_dim=512): 15 | super().__init__() 16 | actvn = nn.ReLU() 17 | num_fm = int(c_dim/32) 18 | 19 | self.conv_block1 = nn.Sequential( 20 | nn.Conv2d(3, num_fm, 3, 1, 1), actvn, 21 | nn.Conv2d(num_fm, num_fm, 3, 1, 1), actvn) 22 | self.conv_block2 = nn.Sequential( 23 | nn.Conv2d(num_fm, num_fm*2, 3, 2, 1), actvn, 24 | nn.Conv2d(num_fm*2, num_fm*2, 3, 1, 1), actvn, 25 | nn.Conv2d(num_fm*2, num_fm*2, 3, 1, 1), actvn) 26 | self.conv_block3 = nn.Sequential( 27 | nn.Conv2d(num_fm*2, num_fm*4, 3, 2, 1), actvn, 28 | nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn, 29 | nn.Conv2d(num_fm*4, num_fm*4, 3, 1, 1), actvn) 30 | self.conv_block4 = nn.Sequential( 31 | nn.Conv2d(num_fm*4, num_fm*8, 3, 2, 1), actvn, 32 | nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn, 33 | nn.Conv2d(num_fm*8, num_fm*8, 3, 1, 1), actvn) 34 | self.conv_block5 = nn.Sequential( 35 | nn.Conv2d(num_fm*8, num_fm*16, 3, 2, 1), actvn, 36 | nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn, 37 | nn.Conv2d(num_fm*16, num_fm*16, 3, 1, 1), actvn) 38 | self.conv_block6 = nn.Sequential( 39 | nn.Conv2d(num_fm*16, num_fm*32, 3, 2, 1), actvn, 40 | nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn, 41 | nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn, 42 | nn.Conv2d(num_fm*32, num_fm*32, 3, 1, 1), actvn) 43 | self.conv_block7 = nn.Sequential( 44 | nn.Conv2d(num_fm*32, num_fm*32, 5, 2, 2), actvn) 45 | 46 | self.trans_conv1 = nn.Conv2d(num_fm*8, num_fm*4, 3, 1, 1) 47 | self.trans_conv2 = nn.Conv2d(num_fm*16, num_fm*8, 3, 1, 1) 48 | self.trans_conv3 = nn.Conv2d(num_fm*32, num_fm*16, 3, 1, 1) 49 | 50 | def forward(self, x, return_feature_maps=True): 51 | r''' Performs a forward pass through the network. 
52 | 53 | Args: 54 | x (tensor): input data 55 | return_feature_maps (bool): whether intermediate feature maps 56 | should be returned 57 | ''' 58 | feature_maps = [] 59 | 60 | x = self.conv_block1(x) 61 | x = self.conv_block2(x) 62 | x = self.conv_block3(x) 63 | x = self.conv_block4(x) 64 | 65 | feature_maps.append(self.trans_conv1(x)) 66 | 67 | x = self.conv_block5(x) 68 | feature_maps.append(self.trans_conv2(x)) 69 | 70 | x = self.conv_block6(x) 71 | feature_maps.append(self.trans_conv3(x)) 72 | 73 | x = self.conv_block7(x) 74 | 75 | if return_feature_maps: 76 | return x, feature_maps 77 | return x 78 | -------------------------------------------------------------------------------- /dmifnet/pix2mesh/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from dmifnet.encoder import encoder_dict 3 | from dmifnet.pix2mesh import models, training, generation 4 | from dmifnet import data 5 | import pickle 6 | import numpy as np 7 | 8 | 9 | def get_model(cfg, device=None, **kwargs): 10 | ''' Returns the Pixel2Mesh model. 11 | 12 | Args: 13 | cfg (yaml file): config file 14 | device (PyTorch device): PyTorch device 15 | ''' 16 | decoder = cfg['model']['decoder'] 17 | encoder = cfg['model']['encoder'] 18 | feat_dim = cfg['model']['feat_dim'] 19 | hidden_dim = cfg['model']['hidden_dim'] 20 | decoder_kwargs = cfg['model']['decoder_kwargs'] 21 | encoder_kwargs = cfg['model']['encoder_kwargs'] 22 | # Encoding necessary due to python2 pickle to python3 pickle convert 23 | ellipsoid = pickle.load( 24 | open(cfg['data']['ellipsoid'], 'rb'), encoding='latin1') 25 | 26 | decoder = models.decoder_dict[decoder]( 27 | ellipsoid, device=device, hidden_dim=hidden_dim, feat_dim=feat_dim, 28 | **decoder_kwargs 29 | ) 30 | 31 | encoder = encoder_dict[encoder]( 32 | return_feature_maps=True, 33 | **encoder_kwargs 34 | ) 35 | 36 | model = models.Pix2Mesh(decoder, encoder) 37 | model = model.to(device) 38 | return model 39 | 40 | 41 | def get_trainer(model, optimizer, cfg, device): 42 | ''' Return the trainer object for the Pixel2Mesh model. 43 | Args: 44 | model (PyTorch model): Pixel2Mesh model 45 | optimizer( PyTorch optimizer): The optimizer that should be used 46 | cfg (yaml file): config file 47 | device (PyTorch device): The PyTorch device that should be used 48 | ''' 49 | out_dir = cfg['training']['out_dir'] 50 | vis_dir = os.path.join(out_dir, 'vis') 51 | adjust_losses = cfg['model']['adjust_losses'] 52 | # Encoding necessary due to python2 pickle to python3 pickle convert 53 | ellipsoid = pickle.load( 54 | open(cfg['data']['ellipsoid'], 'rb'), encoding='latin1') 55 | trainer = training.Trainer( 56 | model, optimizer, ellipsoid, vis_dir, device=device, adjust_losses=adjust_losses) 57 | return trainer 58 | 59 | 60 | def get_generator(model, cfg, device): 61 | ''' Returns a generator object for the Pixel2Mesh model. 62 | 63 | Args: 64 | model (PyTorch model): Pixel2Mesh model 65 | cfg (yaml file): config file 66 | device (PyTorch device): The PyTorch device that should be used 67 | ''' 68 | base_mesh = np.loadtxt(cfg['data']['base_mesh'], dtype='|S32') 69 | generator = generation.Generator3D( 70 | model, base_mesh, device=device) 71 | return generator 72 | 73 | 74 | def get_data_fields(mode, cfg): 75 | ''' Returns the respective data fields. 
76 | 77 | Args: 78 | mode (string): which split should be performed (train/test) 79 | cfg (yaml file): config file 80 | ''' 81 | with_transforms = cfg['data']['with_transforms'] 82 | pointcloud_transform = data.SubsamplePointcloud( 83 | cfg['data']['pointcloud_n']) 84 | fields = {} 85 | fields['pointcloud'] = data.PointCloudField( 86 | cfg['data']['pointcloud_file'], pointcloud_transform, 87 | with_transforms=with_transforms) 88 | 89 | return fields 90 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/tests/test_curvature.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | import sys 5 | 6 | sys.path.append('../../../..') 7 | from dmifnet.dmc.ops.tests.loss_autograd import LossAutoGrad 8 | from dmifnet.dmc.ops.curvature_constraint import CurvatureConstraint 9 | import torch.nn.functional as F 10 | import numpy as np 11 | import time 12 | 13 | # check the cuda extension or c extension 14 | 15 | print ("Testing CUDA extension...") 16 | dtype = torch.cuda.FloatTensor 17 | 18 | 19 | # autograd loss 20 | num_cells = 4 21 | len_cell = 1.0 22 | W = H = D = num_cells 23 | 24 | loss_autograd = LossAutoGrad(num_cells, len_cell) 25 | 26 | 27 | # cffi loss 28 | class SmoothLoss(nn.Module): 29 | def __init__(self): 30 | super(SmoothLoss, self).__init__() 31 | self.smoothLoss = CurvatureConstraint() 32 | 33 | def forward(self, offset, topology): 34 | return self.smoothLoss(offset, topology) 35 | 36 | 37 | if __name__ == '__main__': 38 | 39 | # generate offset and topology with relatively low-dimension 40 | print ("=========== Input =============") 41 | T = 96 42 | W = num_cells 43 | H = num_cells 44 | D = num_cells 45 | offset = Variable((torch.rand(3, W+1, H+1, D+1)).type(dtype) * 0.1, requires_grad=True) 46 | topology = Variable(torch.rand(W*H*D, T).type(dtype), requires_grad=True) 47 | #print (offset) 48 | #print (topology) 49 | 50 | loss_cffi = SmoothLoss() 51 | l = loss_cffi(offset, F.softmax(topology, dim=1)) 52 | l.backward() 53 | offset.grad.data.zero_() 54 | 55 | # evaluating the running time of the cffi extension 56 | print ("============= cffi ============") 57 | tf_c = time.time() 58 | l = loss_cffi(offset, F.softmax(topology, dim=1)) 59 | print ("cffi loss:") 60 | print (l) 61 | tf_c = time.time()-tf_c 62 | 63 | 64 | tb_c = time.time() 65 | l.backward() 66 | print ("cffi gradient:") 67 | print( offset.grad) 68 | tb_c = time.time()-tb_c 69 | grad_np = np.copy(offset.grad.data.cpu().numpy()) 70 | 71 | 72 | # evaluating the running time of the autograd version 73 | print ("============= auto ============") 74 | tf_py = time.time() 75 | l_auto = loss_autograd.loss_on_curvature_autograd(offset, topology) 76 | print ("auto loss:") 77 | print (l_auto) 78 | tf_py = time.time()-tf_py 79 | 80 | offset.grad.data.zero_() 81 | tb_py = time.time() 82 | l_auto.backward() 83 | print ("auto grad:") 84 | print (offset.grad) 85 | tb_py = time.time()-tb_py 86 | grad_auto_np = np.copy(offset.grad.data.cpu().numpy()) 87 | assert np.sum(np.abs(grad_auto_np)) and np.sum(np.abs(grad_np)) != 0.0 88 | # print the loss and grad difference and the time comparison 89 | print ("========== summary ===========") 90 | print ("Forward difference between cffi and auto: ", (l-l_auto).data.cpu().numpy()) 91 | print ("Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np))) 92 | print ("Backward difference between cffi and auto: ", 
np.mean(np.abs(grad_np-grad_auto_np))) 93 | 94 | print ("cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c)) 95 | print ("auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py)) 96 | print ("ratio: ", (tf_py+tb_py)/(tf_c + tb_c)) -------------------------------------------------------------------------------- /dmifnet/encoder/r2n2.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | # import torch.nn.functional as F 3 | from dmifnet.common import normalize_imagenet 4 | 5 | 6 | class SimpleConv(nn.Module): 7 | ''' 3D Recurrent Reconstruction Neural Network (3D-R2-N2) encoder network. 8 | 9 | Args: 10 | c_dim: output dimension 11 | ''' 12 | 13 | def __init__(self, c_dim=1024): 14 | super().__init__() 15 | actvn = nn.LeakyReLU() 16 | pooling = nn.MaxPool2d(2, padding=1) 17 | self.convnet = nn.Sequential( 18 | nn.Conv2d(3, 96, 7, padding=3), 19 | pooling, actvn, 20 | nn.Conv2d(96, 128, 3, padding=1), 21 | pooling, actvn, 22 | nn.Conv2d(128, 256, 3, padding=1), 23 | pooling, actvn, 24 | nn.Conv2d(256, 256, 3, padding=1), 25 | pooling, actvn, 26 | nn.Conv2d(256, 256, 3, padding=1), 27 | pooling, actvn, 28 | nn.Conv2d(256, 256, 3, padding=1), 29 | pooling, actvn, 30 | ) 31 | self.fc_out = nn.Linear(256*3*3, c_dim) 32 | 33 | def forward(self, x): 34 | batch_size = x.size(0) 35 | 36 | net = normalize_imagenet(x) 37 | net = self.convnet(net) 38 | net = net.view(batch_size, 256*3*3) 39 | out = self.fc_out(net) 40 | 41 | return out 42 | 43 | 44 | class Resnet(nn.Module): 45 | ''' 3D Recurrent Reconstruction Neural Network (3D-R2-N2) ResNet-based 46 | encoder network. 47 | 48 | It is the ResNet variant of the previous encoder.s 49 | 50 | Args: 51 | c_dim: output dimension 52 | ''' 53 | 54 | def __init__(self, c_dim=1024): 55 | super().__init__() 56 | actvn = nn.LeakyReLU() 57 | pooling = nn.MaxPool2d(2, padding=1) 58 | self.convnet = nn.Sequential( 59 | nn.Conv2d(3, 96, 7, padding=3), 60 | actvn, 61 | nn.Conv2d(96, 96, 3, padding=1), 62 | actvn, pooling, 63 | ResnetBlock(96, 128), 64 | pooling, 65 | ResnetBlock(128, 256), 66 | pooling, 67 | ResnetBlock(256, 256), 68 | pooling, 69 | ResnetBlock(256, 256), 70 | pooling, 71 | ResnetBlock(256, 256), 72 | pooling, 73 | ) 74 | self.fc_out = nn.Linear(256*3*3, c_dim) 75 | 76 | def forward(self, x): 77 | batch_size = x.size(0) 78 | 79 | net = normalize_imagenet(x) 80 | net = self.convnet(net) 81 | net = net.view(batch_size, 256*3*3) 82 | out = self.fc_out(net) 83 | 84 | return out 85 | 86 | 87 | class ResnetBlock(nn.Module): 88 | ''' ResNet block class. 
89 | 90 | Args: 91 | f_in (int): input dimension 92 | f_out (int): output dimension 93 | ''' 94 | 95 | def __init__(self, f_in, f_out): 96 | super().__init__() 97 | actvn = nn.LeakyReLU() 98 | self.convnet = nn.Sequential( 99 | nn.Conv2d(f_in, f_out, 3, padding=1), 100 | actvn, 101 | nn.Conv2d(f_out, f_out, 3, padding=1), 102 | actvn, 103 | ) 104 | self.shortcut = nn.Conv2d(f_in, f_out, 1) 105 | 106 | def forward(self, x): 107 | out = self.convnet(x) + self.shortcut(x) 108 | return out 109 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/fusion.cu: -------------------------------------------------------------------------------- 1 | #include "gpu_common.h" 2 | 3 | #include 4 | #include 5 | 6 | 7 | 8 | template <typename FusionFunctorT> 9 | __global__ void kernel_fusion(int vx_res3, const Views views, const FusionFunctorT functor, float vx_size, Volume vol) { 10 | CUDA_KERNEL_LOOP(idx, vx_res3) { 11 | int d,h,w; 12 | fusion_idx2dhw(idx, vol.width_,vol.height_, d,h,w); 13 | float x,y,z; 14 | fusion_dhw2xyz(d,h,w, vx_size, x,y,z); 15 | 16 | functor.before_sample(&vol, d,h,w); 17 | bool run = true; 18 | int n_valid_views = 0; 19 | for(int vidx = 0; vidx < views.n_views_ && run; ++vidx) { 20 | float ur, vr, vx_d; 21 | fusion_project(&views, vidx, x,y,z, ur,vr,vx_d); 22 | //NOTE: ur,vr,vx_d might differ to CPP (subtle differences in precision) 23 | 24 | int u = int(ur + 0.5f); 25 | int v = int(vr + 0.5f); 26 | 27 | if(u >= 0 && v >= 0 && u < views.cols_ && v < views.rows_) { 28 | int dm_idx = (vidx * views.rows_ + v) * views.cols_ + u; 29 | float dm_d = views.depthmaps_[dm_idx]; 30 | // if(d==103 && h==130 && w==153) printf(" dm_d=%f, dm_idx=%d, u=%d, v=%d, ur=%f, vr=%f\n", dm_d, dm_idx, u,v, ur,vr); 31 | run = functor.new_sample(&vol, vx_d, dm_d, d,h,w, &n_valid_views); 32 | } 33 | } // for vidx 34 | functor.after_sample(&vol, d,h,w, n_valid_views); 35 | } 36 | } 37 | 38 | 39 | 40 | template <typename FusionFunctorT> 41 | void fusion_gpu(const Views& views, const FusionFunctorT functor, float vx_size, Volume& vol) { 42 | Views views_gpu; 43 | views_to_gpu(views, views_gpu, true); 44 | Volume vol_gpu; 45 | volume_alloc_like_gpu(vol, vol_gpu); 46 | 47 | int vx_res3 = vol.depth_ * vol.height_ * vol.width_; 48 | kernel_fusion<<<GET_BLOCKS(vx_res3), CUDA_NUM_THREADS>>>( 49 | vx_res3, views_gpu, functor, vx_size, vol_gpu 50 | ); 51 | CUDA_POST_KERNEL_CHECK; 52 | 53 | volume_to_cpu(vol_gpu, vol, false); 54 | 55 | views_free_gpu(views_gpu); 56 | volume_free_gpu(vol_gpu); 57 | } 58 | 59 | void fusion_projectionmask_gpu(const Views& views, float vx_size, bool unknown_is_free, Volume& vol) { 60 | ProjectionMaskFusionFunctor functor(unknown_is_free); 61 | fusion_gpu(views, functor, vx_size, vol); 62 | } 63 | 64 | void fusion_occupancy_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 65 | OccupancyFusionFunctor functor(truncation, unknown_is_free); 66 | fusion_gpu(views, functor, vx_size, vol); 67 | } 68 | 69 | void fusion_tsdfmask_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 70 | TsdfMaskFusionFunctor functor(truncation, unknown_is_free); 71 | fusion_gpu(views, functor, vx_size, vol); 72 | } 73 | 74 | void fusion_tsdf_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 75 | TsdfFusionFunctor functor(truncation, unknown_is_free); 76 | fusion_gpu(views, functor, vx_size, vol); 77 | } 78 | 79 | void fusion_tsdf_hist_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, 
float* bin_centers, int n_bins, bool unobserved_is_occupied, Volume& vol) { 80 | float* bin_centers_gpu = host_to_device_malloc(bin_centers, n_bins); 81 | TsdfHistFusionFunctor functor(truncation, unknown_is_free, bin_centers_gpu, n_bins, unobserved_is_occupied); 82 | fusion_gpu(views, functor, vx_size, vol); 83 | device_free(bin_centers_gpu); 84 | } 85 | -------------------------------------------------------------------------------- /dmifnet/data/transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | # Transforms 5 | class PointcloudNoise(object): 6 | ''' Point cloud noise transformation class. 7 | 8 | It adds noise to point cloud data. 9 | 10 | Args: 11 | stddev (int): standard deviation 12 | ''' 13 | 14 | def __init__(self, stddev): 15 | self.stddev = stddev 16 | 17 | def __call__(self, data): 18 | ''' Calls the transformation. 19 | 20 | Args: 21 | data (dictionary): data dictionary 22 | ''' 23 | data_out = data.copy() 24 | points = data[None] 25 | noise = self.stddev * np.random.randn(*points.shape) 26 | noise = noise.astype(np.float32) 27 | data_out[None] = points + noise 28 | return data_out 29 | 30 | 31 | class SubsamplePointcloud(object): 32 | ''' Point cloud subsampling transformation class. 33 | 34 | It subsamples the point cloud data. 35 | 36 | Args: 37 | N (int): number of points to be subsampled 38 | ''' 39 | def __init__(self, N): 40 | self.N = N 41 | 42 | def __call__(self, data): 43 | ''' Calls the transformation. 44 | 45 | Args: 46 | data (dict): data dictionary 47 | ''' 48 | data_out = data.copy() 49 | points = data[None] 50 | normals = data['normals'] 51 | 52 | indices = np.random.randint(points.shape[0], size=self.N) 53 | data_out[None] = points[indices, :] 54 | data_out['normals'] = normals[indices, :] 55 | 56 | return data_out 57 | 58 | 59 | class SubsamplePoints(object): 60 | ''' Points subsampling transformation class. 61 | 62 | It subsamples the points data. 63 | 64 | Args: 65 | N (int): number of points to be subsampled 66 | ''' 67 | def __init__(self, N): 68 | self.N = N 69 | 70 | def __call__(self, data): 71 | ''' Calls the transformation. 
72 | 73 | Args: 74 | data (dictionary): data dictionary 75 | ''' 76 | points = data[None] 77 | occ = data['occ'] 78 | 79 | data_out = data.copy() 80 | if isinstance(self.N, int): 81 | idx = np.random.randint(points.shape[0], size=self.N) 82 | data_out.update({ 83 | None: points[idx, :], 84 | 'occ': occ[idx], 85 | }) 86 | else: 87 | Nt_out, Nt_in = self.N 88 | occ_binary = (occ >= 0.5) 89 | points0 = points[~occ_binary] 90 | points1 = points[occ_binary] 91 | 92 | idx0 = np.random.randint(points0.shape[0], size=Nt_out) 93 | idx1 = np.random.randint(points1.shape[0], size=Nt_in) 94 | 95 | points0 = points0[idx0, :] 96 | points1 = points1[idx1, :] 97 | points = np.concatenate([points0, points1], axis=0) 98 | 99 | occ0 = np.zeros(Nt_out, dtype=np.float32) 100 | occ1 = np.ones(Nt_in, dtype=np.float32) 101 | occ = np.concatenate([occ0, occ1], axis=0) 102 | 103 | volume = occ_binary.sum() / len(occ_binary) 104 | volume = volume.astype(np.float32) 105 | 106 | data_out.update({ 107 | None: points, 108 | 'occ': occ, 109 | 'volume': volume, 110 | }) 111 | return data_out 112 | -------------------------------------------------------------------------------- /dmifnet/checkpoints.py: -------------------------------------------------------------------------------- 1 | import os 2 | import urllib 3 | import torch 4 | from torch.utils import model_zoo 5 | 6 | 7 | class CheckpointIO(object): 8 | ''' CheckpointIO class. 9 | 10 | It handles saving and loading checkpoints. 11 | 12 | Args: 13 | checkpoint_dir (str): path where checkpoints are saved 14 | ''' 15 | def __init__(self, checkpoint_dir='./chkpts', **kwargs): 16 | self.module_dict = kwargs 17 | self.checkpoint_dir = checkpoint_dir 18 | if not os.path.exists(checkpoint_dir): 19 | os.makedirs(checkpoint_dir) 20 | 21 | def register_modules(self, **kwargs): 22 | ''' Registers modules in current module dictionary. 23 | ''' 24 | self.module_dict.update(kwargs) 25 | 26 | def save(self, filename, **kwargs): 27 | ''' Saves the current module dictionary. 28 | 29 | Args: 30 | filename (str): name of output file 31 | ''' 32 | if not os.path.isabs(filename): 33 | filename = os.path.join(self.checkpoint_dir, filename) 34 | 35 | outdict = kwargs 36 | for k, v in self.module_dict.items(): 37 | outdict[k] = v.state_dict() 38 | torch.save(outdict, filename) 39 | 40 | def load(self, filename): 41 | '''Loads a module dictionary from local file or url. 42 | 43 | Args: 44 | filename (str): name of saved module dictionary 45 | ''' 46 | if is_url(filename): 47 | return self.load_url(filename) 48 | else: 49 | return self.load_file(filename) 50 | 51 | def load_file(self, filename): 52 | '''Loads a module dictionary from file. 53 | 54 | Args: 55 | filename (str): name of saved module dictionary 56 | ''' 57 | 58 | if not os.path.isabs(filename): 59 | filename = os.path.join(self.checkpoint_dir, filename) 60 | 61 | if os.path.exists(filename): 62 | print(filename) 63 | print('=> Loading checkpoint from local file...') 64 | state_dict = torch.load(filename) 65 | scalars = self.parse_state_dict(state_dict) 66 | return scalars 67 | else: 68 | raise FileExistsError 69 | 70 | def load_url(self, url): 71 | '''Load a module dictionary from url. 
72 | 73 | Args: 74 | url (str): url to saved model 75 | ''' 76 | print(url) 77 | print('=> Loading checkpoint from url...') 78 | state_dict = model_zoo.load_url(url, progress=True) 79 | scalars = self.parse_state_dict(state_dict) 80 | return scalars 81 | 82 | def parse_state_dict(self, state_dict): 83 | '''Parse state_dict of model and return scalars. 84 | 85 | Args: 86 | state_dict (dict): State dict of model 87 | ''' 88 | 89 | for k, v in self.module_dict.items(): 90 | if k in state_dict: 91 | v.load_state_dict(state_dict[k]) 92 | else: 93 | print('Warning: Could not find %s in checkpoint!' % k) 94 | scalars = {k: v for k, v in state_dict.items() 95 | if k not in self.module_dict} 96 | return scalars 97 | 98 | def is_url(url): 99 | scheme = urllib.parse.urlparse(url).scheme 100 | return scheme in ('http', 'https') -------------------------------------------------------------------------------- /dmifnet/utils/libmesh/triangle_hash.pyx: -------------------------------------------------------------------------------- 1 | 2 | # distutils: language=c++ 3 | import numpy as np 4 | cimport numpy as np 5 | cimport cython 6 | from libcpp.vector cimport vector 7 | from libc.math cimport floor, ceil 8 | 9 | cdef class TriangleHash: 10 | cdef vector[vector[int]] spatial_hash 11 | cdef int resolution 12 | 13 | def __cinit__(self, double[:, :, :] triangles, int resolution): 14 | self.spatial_hash.resize(resolution * resolution) 15 | self.resolution = resolution 16 | self._build_hash(triangles) 17 | 18 | @cython.boundscheck(False) # Deactivate bounds checking 19 | @cython.wraparound(False) # Deactivate negative indexing. 20 | cdef int _build_hash(self, double[:, :, :] triangles): 21 | assert(triangles.shape[1] == 3) 22 | assert(triangles.shape[2] == 2) 23 | 24 | cdef int n_tri = triangles.shape[0] 25 | cdef int bbox_min[2] 26 | cdef int bbox_max[2] 27 | 28 | cdef int i_tri, j, x, y 29 | cdef int spatial_idx 30 | 31 | for i_tri in range(n_tri): 32 | # Compute bounding box 33 | for j in range(2): 34 | bbox_min[j] = min( 35 | triangles[i_tri, 0, j], triangles[i_tri, 1, j], triangles[i_tri, 2, j] 36 | ) 37 | bbox_max[j] = max( 38 | triangles[i_tri, 0, j], triangles[i_tri, 1, j], triangles[i_tri, 2, j] 39 | ) 40 | bbox_min[j] = min(max(bbox_min[j], 0), self.resolution - 1) 41 | bbox_max[j] = min(max(bbox_max[j], 0), self.resolution - 1) 42 | 43 | # Find all voxels where bounding box intersects 44 | for x in range(bbox_min[0], bbox_max[0] + 1): 45 | for y in range(bbox_min[1], bbox_max[1] + 1): 46 | spatial_idx = self.resolution * x + y 47 | self.spatial_hash[spatial_idx].push_back(i_tri) 48 | 49 | @cython.boundscheck(False) # Deactivate bounds checking 50 | @cython.wraparound(False) # Deactivate negative indexing. 
51 | cpdef query(self, double[:, :] points): 52 | assert(points.shape[1] == 2) 53 | cdef int n_points = points.shape[0] 54 | 55 | cdef vector[int] points_indices 56 | cdef vector[int] tri_indices 57 | # cdef int[:] points_indices_np 58 | # cdef int[:] tri_indices_np 59 | 60 | cdef int i_point, k, x, y 61 | cdef int spatial_idx 62 | 63 | for i_point in range(n_points): 64 | x = int(points[i_point, 0]) 65 | y = int(points[i_point, 1]) 66 | if not (0 <= x < self.resolution and 0 <= y < self.resolution): 67 | continue 68 | 69 | spatial_idx = self.resolution * x + y 70 | for i_tri in self.spatial_hash[spatial_idx]: 71 | points_indices.push_back(i_point) 72 | tri_indices.push_back(i_tri) 73 | 74 | points_indices_np = np.zeros(points_indices.size(), dtype=np.int32) 75 | tri_indices_np = np.zeros(tri_indices.size(), dtype=np.int32) 76 | 77 | cdef int[:] points_indices_view = points_indices_np 78 | cdef int[:] tri_indices_view = tri_indices_np 79 | 80 | for k in range(points_indices.size()): 81 | points_indices_view[k] = points_indices[k] 82 | 83 | for k in range(tri_indices.size()): 84 | tri_indices_view[k] = tri_indices[k] 85 | 86 | return points_indices_np, tri_indices_np 87 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/tests/test_occupancy_connectivity.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.append('../../../..') 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.autograd import Variable 8 | 9 | import time 10 | import numpy as np 11 | from dmifnet.dmc.ops.occupancy_connectivity import OccupancyConnectivity 12 | #from loss import Loss 13 | #from loss_autograd import LossAutoGrad 14 | #from parse_args import parse_args 15 | 16 | # check the cuda extension or c extension 17 | 18 | def loss_on_smoothness_autograd( occupancy): 19 | """ Compute the smoothness loss using pytorch, 20 | implemented for gradient check of the c/c++ extensions 21 | """ 22 | 23 | Wo=occupancy.size()[0] 24 | Ho=occupancy.size()[1] 25 | Do=occupancy.size()[2] 26 | 27 | loss = 0 28 | for x_ in range(Wo): 29 | for y_ in range(Ho): 30 | for z_ in range(Do): 31 | # horizontal direction 32 | if x_ 96 | 97 | # Futher Information 98 | If you have any problems with the code, please list the problems you encountered in the issue area, and I will reply you soon. 99 | Thanks for baseline work [Occupancy Networks - Learning 3D Reconstruction in Function Space](https://avg.is.tuebingen.mpg.de/publications/occupancy-networks). 
100 | 101 | -------------------------------------------------------------------------------- /dmifnet/io.py: -------------------------------------------------------------------------------- 1 | import os 2 | from plyfile import PlyElement, PlyData 3 | import numpy as np 4 | 5 | 6 | def export_pointcloud(vertices, out_file, as_text=True): 7 | assert(vertices.shape[1] == 3) 8 | vertices = vertices.astype(np.float32) 9 | vertices = np.ascontiguousarray(vertices) 10 | vector_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')] 11 | vertices = vertices.view(dtype=vector_dtype).flatten() 12 | plyel = PlyElement.describe(vertices, 'vertex') 13 | plydata = PlyData([plyel], text=as_text) 14 | plydata.write(out_file) 15 | 16 | 17 | def load_pointcloud(in_file): 18 | plydata = PlyData.read(in_file) 19 | vertices = np.stack([ 20 | plydata['vertex']['x'], 21 | plydata['vertex']['y'], 22 | plydata['vertex']['z'] 23 | ], axis=1) 24 | return vertices 25 | 26 | 27 | def read_off(file): 28 | """ 29 | Reads vertices and faces from an off file. 30 | 31 | :param file: path to file to read 32 | :type file: str 33 | :return: vertices and faces as lists of tuples 34 | :rtype: [(float)], [(int)] 35 | """ 36 | 37 | assert os.path.exists(file), 'file %s not found' % file 38 | 39 | with open(file, 'r') as fp: 40 | lines = fp.readlines() 41 | lines = [line.strip() for line in lines] 42 | 43 | # Fix for ModelNet bug were 'OFF' and the number of vertices and faces 44 | # are all in the first line. 45 | if len(lines[0]) > 3: 46 | assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', \ 47 | 'invalid OFF file %s' % file 48 | 49 | parts = lines[0][3:].split(' ') 50 | assert len(parts) == 3 51 | 52 | num_vertices = int(parts[0]) 53 | assert num_vertices > 0 54 | 55 | num_faces = int(parts[1]) 56 | assert num_faces > 0 57 | 58 | start_index = 1 59 | # This is the regular case! 
60 | else: 61 | assert lines[0] == 'OFF' or lines[0] == 'off', \ 62 | 'invalid OFF file %s' % file 63 | 64 | parts = lines[1].split(' ') 65 | assert len(parts) == 3 66 | 67 | num_vertices = int(parts[0]) 68 | assert num_vertices > 0 69 | 70 | num_faces = int(parts[1]) 71 | assert num_faces > 0 72 | 73 | start_index = 2 74 | 75 | vertices = [] 76 | for i in range(num_vertices): 77 | vertex = lines[start_index + i].split(' ') 78 | vertex = [float(point.strip()) for point in vertex if point != ''] 79 | assert len(vertex) == 3 80 | 81 | vertices.append(vertex) 82 | 83 | faces = [] 84 | for i in range(num_faces): 85 | face = lines[start_index + num_vertices + i].split(' ') 86 | face = [index.strip() for index in face if index != ''] 87 | 88 | # check to be sure 89 | for index in face: 90 | assert index != '', \ 91 | 'found empty vertex index: %s (%s)' \ 92 | % (lines[start_index + num_vertices + i], file) 93 | 94 | face = [int(index) for index in face] 95 | 96 | assert face[0] == len(face) - 1, \ 97 | 'face should have %d vertices but as %d (%s)' \ 98 | % (face[0], len(face) - 1, file) 99 | assert face[0] == 3, \ 100 | 'only triangular meshes supported (%s)' % file 101 | for index in face: 102 | assert index >= 0 and index < num_vertices, \ 103 | 'vertex %d (of %d vertices) does not exist (%s)' \ 104 | % (index, num_vertices, file) 105 | 106 | assert len(face) > 1 107 | 108 | faces.append(face) 109 | 110 | return vertices, faces 111 | 112 | assert False, 'could not open %s' % file 113 | -------------------------------------------------------------------------------- /dmifnet/utils/io.py: -------------------------------------------------------------------------------- 1 | import os 2 | from plyfile import PlyElement, PlyData 3 | import numpy as np 4 | 5 | 6 | def export_pointcloud(vertices, out_file, as_text=True): 7 | assert(vertices.shape[1] == 3) 8 | vertices = vertices.astype(np.float32) 9 | vertices = np.ascontiguousarray(vertices) 10 | vector_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')] 11 | vertices = vertices.view(dtype=vector_dtype).flatten() 12 | plyel = PlyElement.describe(vertices, 'vertex') 13 | plydata = PlyData([plyel], text=as_text) 14 | plydata.write(out_file) 15 | 16 | 17 | def load_pointcloud(in_file): 18 | plydata = PlyData.read(in_file) 19 | vertices = np.stack([ 20 | plydata['vertex']['x'], 21 | plydata['vertex']['y'], 22 | plydata['vertex']['z'] 23 | ], axis=1) 24 | return vertices 25 | 26 | 27 | def read_off(file): 28 | """ 29 | Reads vertices and faces from an off file. 30 | 31 | :param file: path to file to read 32 | :type file: str 33 | :return: vertices and faces as lists of tuples 34 | :rtype: [(float)], [(int)] 35 | """ 36 | 37 | assert os.path.exists(file), 'file %s not found' % file 38 | 39 | with open(file, 'r') as fp: 40 | lines = fp.readlines() 41 | lines = [line.strip() for line in lines] 42 | 43 | # Fix for ModelNet bug were 'OFF' and the number of vertices and faces 44 | # are all in the first line. 45 | if len(lines[0]) > 3: 46 | assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', \ 47 | 'invalid OFF file %s' % file 48 | 49 | parts = lines[0][3:].split(' ') 50 | assert len(parts) == 3 51 | 52 | num_vertices = int(parts[0]) 53 | assert num_vertices > 0 54 | 55 | num_faces = int(parts[1]) 56 | assert num_faces > 0 57 | 58 | start_index = 1 59 | # This is the regular case! 
60 | else: 61 | assert lines[0] == 'OFF' or lines[0] == 'off', \ 62 | 'invalid OFF file %s' % file 63 | 64 | parts = lines[1].split(' ') 65 | assert len(parts) == 3 66 | 67 | num_vertices = int(parts[0]) 68 | assert num_vertices > 0 69 | 70 | num_faces = int(parts[1]) 71 | assert num_faces > 0 72 | 73 | start_index = 2 74 | 75 | vertices = [] 76 | for i in range(num_vertices): 77 | vertex = lines[start_index + i].split(' ') 78 | vertex = [float(point.strip()) for point in vertex if point != ''] 79 | assert len(vertex) == 3 80 | 81 | vertices.append(vertex) 82 | 83 | faces = [] 84 | for i in range(num_faces): 85 | face = lines[start_index + num_vertices + i].split(' ') 86 | face = [index.strip() for index in face if index != ''] 87 | 88 | # check to be sure 89 | for index in face: 90 | assert index != '', \ 91 | 'found empty vertex index: %s (%s)' \ 92 | % (lines[start_index + num_vertices + i], file) 93 | 94 | face = [int(index) for index in face] 95 | 96 | assert face[0] == len(face) - 1, \ 97 | 'face should have %d vertices but as %d (%s)' \ 98 | % (face[0], len(face) - 1, file) 99 | assert face[0] == 3, \ 100 | 'only triangular meshes supported (%s)' % file 101 | for index in face: 102 | assert index >= 0 and index < num_vertices, \ 103 | 'vertex %d (of %d vertices) does not exist (%s)' \ 104 | % (index, num_vertices, file) 105 | 106 | assert len(face) > 1 107 | 108 | faces.append(face) 109 | 110 | return vertices, faces 111 | 112 | assert False, 'could not open %s' % file 113 | -------------------------------------------------------------------------------- /dmifnet/encoder/pointnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from dmifnet.layers import ResnetBlockFC 4 | 5 | 6 | def maxpool(x, dim=-1, keepdim=False): 7 | out, _ = x.max(dim=dim, keepdim=keepdim) 8 | return out 9 | 10 | 11 | class SimplePointnet(nn.Module): 12 | ''' PointNet-based encoder network. 13 | 14 | Args: 15 | c_dim (int): dimension of latent code c 16 | dim (int): input points dimension 17 | hidden_dim (int): hidden dimension of the network 18 | ''' 19 | 20 | def __init__(self, c_dim=128, dim=3, hidden_dim=128): 21 | super().__init__() 22 | self.c_dim = c_dim 23 | 24 | self.fc_pos = nn.Linear(dim, 2*hidden_dim) 25 | self.fc_0 = nn.Linear(2*hidden_dim, hidden_dim) 26 | self.fc_1 = nn.Linear(2*hidden_dim, hidden_dim) 27 | self.fc_2 = nn.Linear(2*hidden_dim, hidden_dim) 28 | self.fc_3 = nn.Linear(2*hidden_dim, hidden_dim) 29 | self.fc_c = nn.Linear(hidden_dim, c_dim) 30 | 31 | self.actvn = nn.ReLU() 32 | self.pool = maxpool 33 | 34 | def forward(self, p): 35 | batch_size, T, D = p.size() 36 | 37 | # output size: B x T X F 38 | net = self.fc_pos(p) 39 | net = self.fc_0(self.actvn(net)) 40 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 41 | net = torch.cat([net, pooled], dim=2) 42 | 43 | net = self.fc_1(self.actvn(net)) 44 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 45 | net = torch.cat([net, pooled], dim=2) 46 | 47 | net = self.fc_2(self.actvn(net)) 48 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 49 | net = torch.cat([net, pooled], dim=2) 50 | 51 | net = self.fc_3(self.actvn(net)) 52 | 53 | # Recude to B x F 54 | net = self.pool(net, dim=1) 55 | 56 | c = self.fc_c(self.actvn(net)) 57 | 58 | return c 59 | 60 | 61 | class ResnetPointnet(nn.Module): 62 | ''' PointNet-based encoder network with ResNet blocks. 
63 | 64 | Args: 65 | c_dim (int): dimension of latent code c 66 | dim (int): input points dimension 67 | hidden_dim (int): hidden dimension of the network 68 | ''' 69 | 70 | def __init__(self, c_dim=128, dim=3, hidden_dim=128): 71 | super().__init__() 72 | self.c_dim = c_dim 73 | 74 | self.fc_pos = nn.Linear(dim, 2*hidden_dim) 75 | self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim) 76 | self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim) 77 | self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim) 78 | self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim) 79 | self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim) 80 | self.fc_c = nn.Linear(hidden_dim, c_dim) 81 | 82 | self.actvn = nn.ReLU() 83 | self.pool = maxpool 84 | 85 | def forward(self, p): 86 | batch_size, T, D = p.size() 87 | 88 | # output size: B x T X F 89 | net = self.fc_pos(p) 90 | net = self.block_0(net) 91 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 92 | net = torch.cat([net, pooled], dim=2) 93 | 94 | net = self.block_1(net) 95 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 96 | net = torch.cat([net, pooled], dim=2) 97 | 98 | net = self.block_2(net) 99 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 100 | net = torch.cat([net, pooled], dim=2) 101 | 102 | net = self.block_3(net) 103 | pooled = self.pool(net, dim=1, keepdim=True).expand(net.size()) 104 | net = torch.cat([net, pooled], dim=2) 105 | 106 | net = self.block_4(net) 107 | 108 | # Recude to B x F 109 | net = self.pool(net, dim=1) 110 | 111 | c = self.fc_c(self.actvn(net)) 112 | 113 | return c 114 | -------------------------------------------------------------------------------- /dmifnet/icp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | 4 | 5 | def best_fit_transform(A, B): 6 | ''' 7 | Calculates the least-squares best-fit transform that maps corresponding 8 | points A to B in m spatial dimensions 9 | Input: 10 | A: Nxm numpy array of corresponding points 11 | B: Nxm numpy array of corresponding points 12 | Returns: 13 | T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B 14 | R: mxm rotation matrix 15 | t: mx1 translation vector 16 | ''' 17 | 18 | assert A.shape == B.shape 19 | 20 | # get number of dimensions 21 | m = A.shape[1] 22 | 23 | # translate points to their centroids 24 | centroid_A = np.mean(A, axis=0) 25 | centroid_B = np.mean(B, axis=0) 26 | AA = A - centroid_A 27 | BB = B - centroid_B 28 | 29 | # rotation matrix 30 | H = np.dot(AA.T, BB) 31 | U, S, Vt = np.linalg.svd(H) 32 | R = np.dot(Vt.T, U.T) 33 | 34 | # special reflection case 35 | if np.linalg.det(R) < 0: 36 | Vt[m-1,:] *= -1 37 | R = np.dot(Vt.T, U.T) 38 | 39 | # translation 40 | t = centroid_B.T - np.dot(R,centroid_A.T) 41 | 42 | # homogeneous transformation 43 | T = np.identity(m+1) 44 | T[:m, :m] = R 45 | T[:m, m] = t 46 | 47 | return T, R, t 48 | 49 | 50 | def nearest_neighbor(src, dst): 51 | ''' 52 | Find the nearest (Euclidean) neighbor in dst for each point in src 53 | Input: 54 | src: Nxm array of points 55 | dst: Nxm array of points 56 | Output: 57 | distances: Euclidean distances of the nearest neighbor 58 | indices: dst indices of the nearest neighbor 59 | ''' 60 | 61 | assert src.shape == dst.shape 62 | 63 | neigh = NearestNeighbors(n_neighbors=1) 64 | neigh.fit(dst) 65 | distances, indices = neigh.kneighbors(src, return_distance=True) 66 | return distances.ravel(), indices.ravel() 67 | 68 
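# Minimal usage sketch (illustrative only) for best_fit_transform() and
# nearest_neighbor() defined above: apply a known rigid transform to a random
# point set and check that the recovered (R, t) maps A onto B. Guarded by
# __main__ so importing this module is unaffected.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    A = rng.rand(100, 3)

    # ground-truth rotation about the z-axis plus a translation
    angle = 0.3
    R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                       [np.sin(angle),  np.cos(angle), 0.0],
                       [0.0,            0.0,           1.0]])
    t_true = np.array([0.1, -0.2, 0.05])
    B = A.dot(R_true.T) + t_true

    # recover the transform and verify it reproduces B up to numerical noise
    _, R_est, t_est = best_fit_transform(A, B)
    assert np.allclose(A.dot(R_est.T) + t_est, B, atol=1e-6)

    # after alignment, every source point should find a (near-)zero-distance match
    distances, _ = nearest_neighbor(A.dot(R_est.T) + t_est, B)
    assert distances.max() < 1e-6
    print('best_fit_transform / nearest_neighbor sanity check passed')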
| 69 | def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001): 70 | ''' 71 | The Iterative Closest Point method: finds best-fit transform that maps 72 | points A on to points B 73 | Input: 74 | A: Nxm numpy array of source mD points 75 | B: Nxm numpy array of destination mD point 76 | init_pose: (m+1)x(m+1) homogeneous transformation 77 | max_iterations: exit algorithm after max_iterations 78 | tolerance: convergence criteria 79 | Output: 80 | T: final homogeneous transformation that maps A on to B 81 | distances: Euclidean distances (errors) of the nearest neighbor 82 | i: number of iterations to converge 83 | ''' 84 | 85 | assert A.shape == B.shape 86 | 87 | # get number of dimensions 88 | m = A.shape[1] 89 | 90 | # make points homogeneous, copy them to maintain the originals 91 | src = np.ones((m+1,A.shape[0])) 92 | dst = np.ones((m+1,B.shape[0])) 93 | src[:m,:] = np.copy(A.T) 94 | dst[:m,:] = np.copy(B.T) 95 | 96 | # apply the initial pose estimation 97 | if init_pose is not None: 98 | src = np.dot(init_pose, src) 99 | 100 | prev_error = 0 101 | 102 | for i in range(max_iterations): 103 | # find the nearest neighbors between the current source and destination points 104 | distances, indices = nearest_neighbor(src[:m,:].T, dst[:m,:].T) 105 | 106 | # compute the transformation between the current source and nearest destination points 107 | T,_,_ = best_fit_transform(src[:m,:].T, dst[:m,indices].T) 108 | 109 | # update the current source 110 | src = np.dot(T, src) 111 | 112 | # check error 113 | mean_error = np.mean(distances) 114 | if np.abs(prev_error - mean_error) < tolerance: 115 | break 116 | prev_error = mean_error 117 | 118 | # calculate final transformation 119 | T,_,_ = best_fit_transform(A, src[:m,:].T) 120 | 121 | return T, distances, i 122 | -------------------------------------------------------------------------------- /dmifnet/utils/icp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | 4 | 5 | def best_fit_transform(A, B): 6 | ''' 7 | Calculates the least-squares best-fit transform that maps corresponding 8 | points A to B in m spatial dimensions 9 | Input: 10 | A: Nxm numpy array of corresponding points 11 | B: Nxm numpy array of corresponding points 12 | Returns: 13 | T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B 14 | R: mxm rotation matrix 15 | t: mx1 translation vector 16 | ''' 17 | 18 | assert A.shape == B.shape 19 | 20 | # get number of dimensions 21 | m = A.shape[1] 22 | 23 | # translate points to their centroids 24 | centroid_A = np.mean(A, axis=0) 25 | centroid_B = np.mean(B, axis=0) 26 | AA = A - centroid_A 27 | BB = B - centroid_B 28 | 29 | # rotation matrix 30 | H = np.dot(AA.T, BB) 31 | U, S, Vt = np.linalg.svd(H) 32 | R = np.dot(Vt.T, U.T) 33 | 34 | # special reflection case 35 | if np.linalg.det(R) < 0: 36 | Vt[m-1,:] *= -1 37 | R = np.dot(Vt.T, U.T) 38 | 39 | # translation 40 | t = centroid_B.T - np.dot(R,centroid_A.T) 41 | 42 | # homogeneous transformation 43 | T = np.identity(m+1) 44 | T[:m, :m] = R 45 | T[:m, m] = t 46 | 47 | return T, R, t 48 | 49 | 50 | def nearest_neighbor(src, dst): 51 | ''' 52 | Find the nearest (Euclidean) neighbor in dst for each point in src 53 | Input: 54 | src: Nxm array of points 55 | dst: Nxm array of points 56 | Output: 57 | distances: Euclidean distances of the nearest neighbor 58 | indices: dst indices of the nearest neighbor 59 | ''' 60 | 61 | assert src.shape == dst.shape 62 | 
63 | neigh = NearestNeighbors(n_neighbors=1) 64 | neigh.fit(dst) 65 | distances, indices = neigh.kneighbors(src, return_distance=True) 66 | return distances.ravel(), indices.ravel() 67 | 68 | 69 | def icp(A, B, init_pose=None, max_iterations=20, tolerance=0.001): 70 | ''' 71 | The Iterative Closest Point method: finds best-fit transform that maps 72 | points A on to points B 73 | Input: 74 | A: Nxm numpy array of source mD points 75 | B: Nxm numpy array of destination mD point 76 | init_pose: (m+1)x(m+1) homogeneous transformation 77 | max_iterations: exit algorithm after max_iterations 78 | tolerance: convergence criteria 79 | Output: 80 | T: final homogeneous transformation that maps A on to B 81 | distances: Euclidean distances (errors) of the nearest neighbor 82 | i: number of iterations to converge 83 | ''' 84 | 85 | assert A.shape == B.shape 86 | 87 | # get number of dimensions 88 | m = A.shape[1] 89 | 90 | # make points homogeneous, copy them to maintain the originals 91 | src = np.ones((m+1,A.shape[0])) 92 | dst = np.ones((m+1,B.shape[0])) 93 | src[:m,:] = np.copy(A.T) 94 | dst[:m,:] = np.copy(B.T) 95 | 96 | # apply the initial pose estimation 97 | if init_pose is not None: 98 | src = np.dot(init_pose, src) 99 | 100 | prev_error = 0 101 | 102 | for i in range(max_iterations): 103 | # find the nearest neighbors between the current source and destination points 104 | distances, indices = nearest_neighbor(src[:m,:].T, dst[:m,:].T) 105 | 106 | # compute the transformation between the current source and nearest destination points 107 | T,_,_ = best_fit_transform(src[:m,:].T, dst[:m,indices].T) 108 | 109 | # update the current source 110 | src = np.dot(T, src) 111 | 112 | # check error 113 | mean_error = np.mean(distances) 114 | if np.abs(prev_error - mean_error) < tolerance: 115 | break 116 | prev_error = mean_error 117 | 118 | # calculate final transformation 119 | T,_,_ = best_fit_transform(A, src[:m,:].T) 120 | 121 | return T, distances, i 122 | -------------------------------------------------------------------------------- /dmifnet/dmc/ops/cpp_modules/old/pred_to_mesh.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "commons.h" 5 | 6 | 7 | // considered all topologies with 4 triangles during visualization 8 | static int visTopology[2][140]={{0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 19, 23, 25, 27, 29, 31, 32, 34, 35, 38, 39, 43, 46, 47, 48, 49, 50, 51, 54, 55, 57, 59, 63, 64, 68, 70, 71, 76, 77, 78, 79, 95, 96, 98, 99, 100, 102, 103, 108, 110, 111, 112, 113, 114, 115, 116, 118, 119, 123, 126, 127, 128, 136, 137, 139, 140, 141, 142, 143, 144, 145, 147, 152, 153, 155, 156, 157, 159, 175, 176, 177, 178, 179, 183, 184, 185, 187, 189, 191, 192, 196, 198, 200, 201, 204, 205, 206, 207, 208, 209, 212, 216, 217, 219, 220, 221, 222, 223, 224, 226, 228, 230, 231, 232, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}, 9 | {0, 1, 1, 2, 1, 2, 3, 1, 2, 3, 2, 3, 3, 2, 1, 2, 3, 4, 3, 4, 4, 3, 1, 2, 3, 3, 4, 4, 4, 3, 2, 3, 3, 2, 4, 3, 4, 3, 2, 1, 2, 3, 4, 3, 4, 4, 3, 4, 2, 3, 4, 3, 2, 3, 4, 3, 2, 3, 4, 4, 3, 4, 3, 2, 4, 4, 1, 1, 2, 3, 4, 3, 4, 4, 3, 2, 3, 4, 3, 2, 3, 4, 3, 2, 4, 3, 4, 4, 3, 4, 4, 3, 2, 4, 1, 2, 3, 4, 3, 4, 2, 3, 3, 2, 3, 4, 4, 4, 3, 4, 3, 2, 4, 1, 3, 4, 4, 3, 4, 4, 3, 4, 2, 1, 2, 3, 3, 2, 3, 4, 2, 1, 3, 2, 4, 1, 2, 1, 1, 0}}; 10 | 11 | /** 12 | * convert the topology probability and vertex displacement field to a mesh by 13 | * 
selecting the topology with maximum probability in every cell 14 | * params: 15 | * offset vertex displacement field 16 | * topology topology probabilities 17 | * vertices_all vertices locations for all triangles in topologies with maximum probabilities 18 | * note there might be duplications and the unique vertices will be extracted afterwards 19 | * faces_all faces represented by the indices in vertices_all 20 | * vertice_number record the number of vertices as we initialzed the vertices_all with a fixed length 21 | * face_number record the number of faces as we initialized the faces_all with a fixed length 22 | */ 23 | int pred_to_mesh(THFloatTensor offset, THLongTensor *topology, THFloatTensor *vertices_all, THFloatTensor *faces_all, THLongTensor *vertice_number, THLongTensor *face_number){ 24 | // data format check 25 | if (THFloatTensor_nDimension(offset)!=4 || THLongTensor_nDimension(topology)!=3 ){ 26 | printf("Invalid nDimension!\n"); 27 | printf("Expected 4, 3, received %d, %d \n", THFloatTensor_nDimension(offset), THLongTensor_nDimension(topology)); 28 | return 0; 29 | } 30 | int W,H,D; 31 | W = THFloatTensor_size(offset,1)-1; 32 | H = THFloatTensor_size(offset,2)-1; 33 | D = THFloatTensor_size(offset,3)-1; 34 | 35 | int vertice_cnt=0; 36 | int face_cnt=0; 37 | 38 | for (int i=0; i 2 | #include "commons.h" 3 | 4 | /** 5 | * convert vertex displacement field to vertices locations 6 | * params: 7 | * offset vertex displacement field, 3xWxHxD 8 | * x indice of a cell in the full grid 9 | * y indice of a cell in the full grid 10 | * z indice of a cell in the full grid 11 | * return: 12 | * vertices the location of 12 vertices for the specific cell, 3x12 13 | * 14 | */ 15 | THFloatTensor* offset_to_vertices(THFloatTensor *offset, int x, int y, int z){ 16 | THFloatTensor *vertices = THFloatTensor_newWithSize2d(3, 12); 17 | 18 | // #0 19 | THFloatTensor_set2d(vertices, 0, 0, 0.5-THFloatTensor_get4d(offset, 0, x+1, y+1, z )); 20 | THFloatTensor_set2d(vertices, 1, 0, 1.0); 21 | THFloatTensor_set2d(vertices, 2, 0, 0.0); 22 | // #1 23 | THFloatTensor_set2d(vertices, 0, 1, 1.0); 24 | THFloatTensor_set2d(vertices, 1, 1, 0.5-THFloatTensor_get4d(offset, 1, x+1, y+1, z )); 25 | THFloatTensor_set2d(vertices, 2, 1, 0.0); 26 | // #2 27 | THFloatTensor_set2d(vertices, 0, 2, 0.5-THFloatTensor_get4d(offset, 0, x+1, y , z )); 28 | THFloatTensor_set2d(vertices, 1, 2, 0.0); 29 | THFloatTensor_set2d(vertices, 2, 2, 0.0); 30 | // #3 31 | THFloatTensor_set2d(vertices, 0, 3, 0.0); 32 | THFloatTensor_set2d(vertices, 1, 3, 0.5-THFloatTensor_get4d(offset, 1, x , y+1, z )); 33 | THFloatTensor_set2d(vertices, 2, 3, 0.0); 34 | 35 | // #4 36 | THFloatTensor_set2d(vertices, 0, 4, 0.5-THFloatTensor_get4d(offset, 0, x+1, y+1, z+1)); 37 | THFloatTensor_set2d(vertices, 1, 4, 1.0); 38 | THFloatTensor_set2d(vertices, 2, 4, 1.0); 39 | // #5 40 | THFloatTensor_set2d(vertices, 0, 5, 1.0); 41 | THFloatTensor_set2d(vertices, 1, 5, 0.5-THFloatTensor_get4d(offset, 1, x+1, y+1, z+1)); 42 | THFloatTensor_set2d(vertices, 2, 5, 1.0); 43 | // #6 44 | THFloatTensor_set2d(vertices, 0, 6, 0.5-THFloatTensor_get4d(offset, 0, x+1, y , z+1)); 45 | THFloatTensor_set2d(vertices, 1, 6, 0.0); 46 | THFloatTensor_set2d(vertices, 2, 6, 1.0); 47 | // #7 48 | THFloatTensor_set2d(vertices, 0, 7, 0.0); 49 | THFloatTensor_set2d(vertices, 1, 7, 0.5-THFloatTensor_get4d(offset, 1, x , y+1, z+1)); 50 | THFloatTensor_set2d(vertices, 2, 7, 1.0); 51 | 52 | // #8 53 | THFloatTensor_set2d(vertices, 0, 8, 0.0); 54 | THFloatTensor_set2d(vertices, 1, 8, 1.0); 55 
| THFloatTensor_set2d(vertices, 2, 8, 0.5-THFloatTensor_get4d(offset, 2, x , y+1, z+1)); 56 | // #9 57 | THFloatTensor_set2d(vertices, 0, 9, 1.0); 58 | THFloatTensor_set2d(vertices, 1, 9, 1.0); 59 | THFloatTensor_set2d(vertices, 2, 9, 0.5-THFloatTensor_get4d(offset, 2, x+1, y+1, z+1)); 60 | // #10 61 | THFloatTensor_set2d(vertices, 0, 10, 1.0); 62 | THFloatTensor_set2d(vertices, 1, 10, 0.0); 63 | THFloatTensor_set2d(vertices, 2, 10, 0.5-THFloatTensor_get4d(offset, 2, x+1, y , z+1)); 64 | // #11 65 | THFloatTensor_set2d(vertices, 0, 11, 0.0); 66 | THFloatTensor_set2d(vertices, 1, 11, 0.0); 67 | THFloatTensor_set2d(vertices, 2, 11, 0.5-THFloatTensor_get4d(offset, 2, x , y , z+1)); 68 | return vertices; 69 | } 70 | 71 | /** 72 | * get points in a specific cell 73 | * params: 74 | * points all points in the grid, Nx3 75 | * i the offset of the specific cell 76 | * j the offset of the specific cell 77 | * k the offset of the specific cell 78 | * return: 79 | * indices a binary 1D tensor indicating if a point is in a specific cell or not, N 80 | * 81 | */ 82 | THLongTensor* points_in_grid(THFloatTensor *points, float i, float j, float k){ 83 | int N=THFloatTensor_size(points, 0); 84 | THLongTensor *indices = THLongTensor_new(); 85 | 86 | THByteTensor *mask = THByteTensor_newWithSize1d(N); 87 | THByteTensor_zero(mask); 88 | for (int p=0; p= i && THFloatTensor_get2d(points, p, 0) < i+1 && 90 | THFloatTensor_get2d(points, p, 1) >= j && THFloatTensor_get2d(points, p, 1) < j+1 && 91 | THFloatTensor_get2d(points, p, 2) >= k && THFloatTensor_get2d(points, p, 2) < k+1) 92 | THByteTensor_set1d(mask, p, 1); 93 | } 94 | 95 | THByteTensor_nonzero(indices, mask); 96 | 97 | THLongTensor_squeeze(indices, indices); 98 | THByteTensor_free(mask); 99 | return indices; 100 | } 101 | --------------------------------------------------------------------------------
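For readers less familiar with the TH C API used in commons.cpp above, the short NumPy sketch below re-expresses what offset_to_vertices computes for a single cell: each of the 12 cube-edge vertices sits at the midpoint of its edge (0.5) shifted by the learned displacement field. The function name offset_to_vertices_np and the demo call are illustrative only and do not exist in the repository.

import numpy as np

def offset_to_vertices_np(offset, x, y, z):
    """Sketch of the C routine above: 3 x 12 vertex locations for cell (x, y, z).
    `offset` has shape 3 x (W+1) x (H+1) x (D+1), matching the C version."""
    v = np.empty((3, 12), dtype=np.float32)
    # vertices 0, 2, 4, 6 slide along edges parallel to the x-axis
    v[:, 0] = [0.5 - offset[0, x+1, y+1, z],   1.0, 0.0]
    v[:, 2] = [0.5 - offset[0, x+1, y,   z],   0.0, 0.0]
    v[:, 4] = [0.5 - offset[0, x+1, y+1, z+1], 1.0, 1.0]
    v[:, 6] = [0.5 - offset[0, x+1, y,   z+1], 0.0, 1.0]
    # vertices 1, 3, 5, 7 slide along edges parallel to the y-axis
    v[:, 1] = [1.0, 0.5 - offset[1, x+1, y+1, z],   0.0]
    v[:, 3] = [0.0, 0.5 - offset[1, x,   y+1, z],   0.0]
    v[:, 5] = [1.0, 0.5 - offset[1, x+1, y+1, z+1], 1.0]
    v[:, 7] = [0.0, 0.5 - offset[1, x,   y+1, z+1], 1.0]
    # vertices 8-11 slide along edges parallel to the z-axis
    v[:, 8]  = [0.0, 1.0, 0.5 - offset[2, x,   y+1, z+1]]
    v[:, 9]  = [1.0, 1.0, 0.5 - offset[2, x+1, y+1, z+1]]
    v[:, 10] = [1.0, 0.0, 0.5 - offset[2, x+1, y,   z+1]]
    v[:, 11] = [0.0, 0.0, 0.5 - offset[2, x,   y,   z+1]]
    return v

if __name__ == '__main__':
    W = H = D = 4
    offset = np.zeros((3, W + 1, H + 1, D + 1), dtype=np.float32)
    # with a zero displacement field, every vertex sits at its edge midpoint
    print(offset_to_vertices_np(offset, 0, 0, 0))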