├── .gitignore ├── LICENSE.md ├── README.md ├── configs ├── ablation │ ├── architecture_nocbn.yaml │ ├── architecture_noresnet.yaml │ ├── sampling_equal.yaml │ ├── sampling_few.yaml │ └── sampling_surface.yaml ├── default.yaml ├── demo.yaml ├── img │ ├── onet.yaml │ ├── onet_legacy_pretrained.yaml │ ├── onet_pretrained.yaml │ ├── pixel2mesh.yaml │ ├── psgn.yaml │ └── r2n2.yaml ├── pointcloud │ ├── dmc.yaml │ ├── onet.yaml │ ├── onet_pretrained.yaml │ ├── psgn.yaml │ └── r2n2.yaml ├── repr_power │ └── onet.yaml ├── unconditional │ ├── onet_airplanes.yaml │ ├── onet_airplanes_pretrained.yaml │ ├── onet_cars.yaml │ ├── onet_cars_pretrained.yaml │ ├── onet_chairs.yaml │ ├── onet_chairs_pretrained.yaml │ ├── onet_sofas.yaml │ └── onet_sofas_pretrained.yaml └── voxels │ ├── onet.yaml │ └── onet_pretrained.yaml ├── data ├── .gitkeep └── metadata.yaml ├── demo ├── .gitignore ├── 00.jpg ├── 01.jpg ├── 02.jpg ├── 03.jpg ├── 04.jpg ├── 05.jpg ├── 06.jpg ├── 07.jpg └── 08.jpg ├── environment.yaml ├── eval.py ├── eval_meshes.py ├── external └── mesh-fusion │ ├── 1_scale.py │ ├── 2_fusion.py │ ├── 3_simplify.py │ ├── README.md │ ├── common.py │ ├── examples │ └── 0_in │ │ ├── chair_0890.off │ │ ├── chair_0891.off │ │ ├── chair_0892.off │ │ ├── chair_0893.off │ │ └── chair_0894.off │ ├── libfusioncpu │ ├── .gitignore │ ├── CMakeLists.txt │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── cyfusion.pyx │ ├── fusion.cpp │ ├── fusion.h │ └── setup.py │ ├── libfusiongpu │ ├── CMakeLists.txt │ ├── LICENSE │ ├── README.md │ ├── __init__.py │ ├── cyfusion.cpp │ ├── cyfusion.pyx │ ├── fusion.cu │ ├── fusion.h │ ├── fusion_zach_tvl1.cu │ ├── gpu_common.h │ └── setup.py │ ├── libmcubes │ ├── LICENSE │ ├── README.rst │ ├── __init__.py │ ├── exporter.py │ ├── marchingcubes.cpp │ ├── marchingcubes.h │ ├── mcubes.cpp │ ├── mcubes.pyx │ ├── pyarray_symbol.h │ ├── pyarraymodule.h │ ├── pywrapper.cpp │ ├── pywrapper.h │ └── setup.py │ ├── librender │ ├── __init__.py │ ├── offscreen.cpp │ ├── offscreen.h │ ├── pyrender.cpp │ ├── pyrender.pyx │ ├── setup.py │ └── test.py │ ├── screenshot.jpg │ └── simplification.mlx ├── generate.py ├── im2mesh ├── __init__.py ├── checkpoints.py ├── common.py ├── config.py ├── data │ ├── __init__.py │ ├── core.py │ ├── fields.py │ ├── real.py │ └── transforms.py ├── dmc │ ├── __init__.py │ ├── config.py │ ├── generation.py │ ├── models │ │ ├── __init__.py │ │ ├── decoder.py │ │ └── encoder.py │ ├── ops │ │ ├── __init__.py │ │ ├── cpp_modules │ │ │ ├── old │ │ │ │ ├── commons.cpp │ │ │ │ ├── commons.h │ │ │ │ ├── pred_to_mesh.cpp │ │ │ │ └── pred_to_mesh.h │ │ │ ├── pred_to_mesh_.cpp │ │ │ └── setup.py │ │ ├── curvature_constraint.py │ │ ├── grid_pooling.py │ │ ├── occupancy_connectivity.py │ │ ├── occupancy_to_topology.py │ │ ├── point_triangle_distance.py │ │ ├── setup.py │ │ ├── src │ │ │ ├── curvature_constraint_kernel.cu │ │ │ ├── extension.cpp │ │ │ ├── grid_pooling_kernel.cu │ │ │ ├── kernels.h │ │ │ ├── occupancy_connectivity_kernel.cu │ │ │ ├── occupancy_to_topology_kernel.cu │ │ │ └── point_triangle_distance_kernel.cu │ │ ├── table.py │ │ └── tests │ │ │ ├── loss_autograd.py │ │ │ ├── test_curvature.py │ │ │ ├── test_distance.py │ │ │ ├── test_gridpooling.py │ │ │ ├── test_occupancy_connectivity.py │ │ │ ├── test_occupancy_connectivity_yiyi.py │ │ │ └── test_occupancy_to_topology.py │ ├── training.py │ └── utils │ │ ├── __init__.py │ │ ├── config.py │ │ ├── pointTriangleDistance.py │ │ ├── pred2mesh.py │ │ ├── util.py │ │ └── visualize.py ├── encoder │ ├── __init__.py │ ├── 
conv.py │ ├── pix2mesh_cond.py │ ├── pointnet.py │ ├── psgn_cond.py │ ├── r2n2.py │ └── voxels.py ├── eval.py ├── layers.py ├── onet │ ├── __init__.py │ ├── config.py │ ├── generation.py │ ├── models │ │ ├── __init__.py │ │ ├── decoder.py │ │ ├── encoder_latent.py │ │ └── legacy.py │ └── training.py ├── pix2mesh │ ├── __init__.py │ ├── config.py │ ├── ellipsoid │ │ ├── face1.obj │ │ ├── face2.obj │ │ ├── face3.obj │ │ └── info_ellipsoid.dat │ ├── generation.py │ ├── layers.py │ ├── models │ │ ├── __init__.py │ │ └── decoder.py │ └── training.py ├── preprocess.py ├── psgn │ ├── __init__.py │ ├── config.py │ ├── generation.py │ ├── models │ │ ├── __init__.py │ │ ├── decoder.py │ │ └── psgn_2branch.py │ └── training.py ├── r2n2 │ ├── __init__.py │ ├── config.py │ ├── generation.py │ ├── models │ │ ├── __init__.py │ │ └── decoder.py │ └── training.py ├── training.py └── utils │ ├── __init__.py │ ├── binvox_rw.py │ ├── icp.py │ ├── io.py │ ├── libkdtree │ ├── .gitignore │ ├── LICENSE.txt │ ├── MANIFEST.in │ ├── README │ ├── README.rst │ ├── __init__.py │ ├── pykdtree │ │ ├── __init__.py │ │ ├── _kdtree_core.c │ │ ├── _kdtree_core.c.mako │ │ ├── kdtree.c │ │ ├── kdtree.pyx │ │ ├── render_template.py │ │ └── test_tree.py │ └── setup.cfg │ ├── libmcubes │ ├── .gitignore │ ├── LICENSE │ ├── README.rst │ ├── __init__.py │ ├── exporter.py │ ├── marchingcubes.cpp │ ├── marchingcubes.h │ ├── mcubes.pyx │ ├── pyarray_symbol.h │ ├── pyarraymodule.h │ ├── pywrapper.cpp │ └── pywrapper.h │ ├── libmesh │ ├── .gitignore │ ├── __init__.py │ ├── inside_mesh.py │ └── triangle_hash.pyx │ ├── libmise │ ├── .gitignore │ ├── __init__.py │ ├── mise.pyx │ └── test.py │ ├── libsimplify │ ├── Simplify.h │ ├── __init__.py │ ├── simplify_mesh.pyx │ └── test.py │ ├── libvoxelize │ ├── .gitignore │ ├── __init__.py │ ├── tribox2.h │ └── voxelize.pyx │ ├── mesh.py │ ├── visualize.py │ └── voxels.py ├── img ├── 00.gif ├── 01.gif ├── 02.gif ├── 03.gif ├── example_input.png ├── example_input_raw.png ├── example_output.gif └── table_img2mesh.png ├── scripts ├── create_split.py ├── dataset_shapenet │ ├── build.sh │ ├── config.sh │ ├── get_r2n2_cameras.py │ └── install.sh ├── download_data.sh └── sample_mesh.py ├── setup.py └── train.py /.gitignore: -------------------------------------------------------------------------------- 1 | /output 2 | /out 3 | /data 4 | build 5 | .vscode 6 | .pytest_cache 7 | .cache 8 | *.pyc 9 | *.pt 10 | *.so 11 | *.o 12 | *.prof 13 | .nfs* 14 | /im2mesh/utils/libmcubes/mcubes.cpp 15 | /im2mesh/utils/libsimplify/simplify_mesh.cpp 16 | /im2mesh/utils/libsimplify/build 17 | 18 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2019 Lars Mescheder, Michael Oechsle, Michael Niemeyer, Andreas Geiger, Sebastian Nowozin 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /configs/ablation/architecture_nocbn.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: null 9 | decoder: batchnorm 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/ablation/architecture_nocbn 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: -1 19 | test: 20 | threshold: 0.2 21 | eval_mesh: true 22 | eval_pointcloud: false 23 | generation: 24 | batch_size: 100000 25 | refine: false 26 | n_x: 128 27 | n_z: 1 28 | -------------------------------------------------------------------------------- /configs/ablation/architecture_noresnet.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm_noresnet 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/ablation/architecture_noresnet 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: -1 19 | test: 20 | threshold: 0.2 21 | eval_mesh: true 22 | eval_pointcloud: false 23 | generation: 24 | batch_size: 100000 25 | refine: false 26 | n_x: 128 27 | n_z: 1 28 | -------------------------------------------------------------------------------- /configs/ablation/sampling_equal.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: [1024, 1024] 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/ablation/sampling_equal 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: -1 19 | test: 20 | threshold: 0.2 21 | eval_mesh: true 22 | eval_pointcloud: false 23 | generation: 24 | batch_size: 100000 25 | refine: false 26 | n_x: 128 27 | n_z: 1 28 | -------------------------------------------------------------------------------- /configs/ablation/sampling_few.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 64 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/ablation/sampling_few 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: -1 19 | test: 20 | threshold: 0.2 21 | eval_mesh: true 22 | eval_pointcloud: false 23 | generation: 24 
| batch_size: 100000 25 | refine: false 26 | n_x: 128 27 | n_z: 1 28 | -------------------------------------------------------------------------------- /configs/ablation/sampling_surface.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 2048 7 | points_file: points_surface.npz 8 | model: 9 | encoder_latent: null 10 | decoder: cbatchnorm 11 | encoder: resnet18 12 | c_dim: 256 13 | z_dim: 0 14 | training: 15 | out_dir: out/ablation/sampling_surface 16 | batch_size: 64 17 | model_selection_metric: iou 18 | model_selection_mode: maximize 19 | visualize_every: -1 20 | test: 21 | threshold: 0.2 22 | eval_mesh: true 23 | eval_pointcloud: false 24 | generation: 25 | batch_size: 100000 26 | refine: false 27 | n_x: 128 28 | n_z: 1 29 | -------------------------------------------------------------------------------- /configs/default.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | dataset: Shapes3D 4 | path: data/ShapeNet 5 | classes: null 6 | input_type: img 7 | train_split: train 8 | val_split: val 9 | test_split: test 10 | dim: 3 11 | points_file: points.npz 12 | points_iou_file: points.npz 13 | points_subsample: 1024 14 | points_unpackbits: true 15 | model_file: model.off 16 | watertight_file: model_watertight.off 17 | img_folder: img 18 | img_size: 224 19 | img_with_camera: false 20 | img_augment: false 21 | n_views: 24 22 | pointcloud_file: pointcloud.npz 23 | pointcloud_chamfer_file: pointcloud.npz 24 | pointcloud_n: 256 25 | pointcloud_target_n: 1024 26 | pointcloud_noise: 0.05 27 | voxels_file: 'model.binvox' 28 | with_transforms: false 29 | model: 30 | decoder: simple 31 | encoder: resnet18 32 | encoder_latent: null 33 | decoder_kwargs: {} 34 | encoder_kwargs: {} 35 | encoder_latent_kwargs: {} 36 | multi_gpu: false 37 | c_dim: 512 38 | z_dim: 64 39 | use_camera: false 40 | dmc_weight_prior: 10. 
41 | training: 42 | out_dir: out/default 43 | batch_size: 64 44 | print_every: 10 45 | visualize_every: 2000 46 | checkpoint_every: 1000 47 | validate_every: 2000 48 | backup_every: 100000 49 | eval_sample: false 50 | model_selection_metric: loss 51 | model_selection_mode: minimize 52 | test: 53 | threshold: 0.5 54 | eval_mesh: true 55 | eval_pointcloud: true 56 | model_file: model_best.pt 57 | generation: 58 | batch_size: 100000 59 | refinement_step: 0 60 | vis_n_outputs: 30 61 | generate_mesh: true 62 | generate_pointcloud: true 63 | generation_dir: generation 64 | use_sampling: false 65 | resolution_0: 32 66 | upsampling_steps: 2 67 | simplify_nfaces: null 68 | copy_groundtruth: false 69 | copy_input: true 70 | latent_number: 4 71 | latent_H: 8 72 | latent_W: 8 73 | latent_ny: 2 74 | latent_nx: 2 75 | latent_repeat: true 76 | preprocessor: 77 | type: null 78 | config: "" 79 | model_file: null 80 | -------------------------------------------------------------------------------- /configs/demo.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/img/onet_pretrained.yaml 2 | data: 3 | dataset: images 4 | path: demo 5 | training: 6 | out_dir: demo 7 | generation: 8 | generation_dir: generation 9 | refinement_step: 30 10 | simplify_nfaces: 5000 11 | -------------------------------------------------------------------------------- /configs/img/onet.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm 10 | encoder: resnet18 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/img/onet 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | visualize_every: 20000 19 | validate_every: 20000 20 | test: 21 | threshold: 0.2 22 | eval_mesh: true 23 | eval_pointcloud: false 24 | generation: 25 | batch_size: 100000 26 | refine: false 27 | n_x: 128 28 | n_z: 1 29 | resolution_0: 32 30 | upsampling_steps: 2 31 | 32 | -------------------------------------------------------------------------------- /configs/img/onet_legacy_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/img/onet.yaml 2 | data: 3 | img_augment: true 4 | model: 5 | decoder_kwargs: 6 | legacy: true 7 | training: 8 | out_dir: out/img/onet_legacy 9 | test: 10 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_img2mesh-0c7780d1.pt 11 | generation: 12 | generation_dir: pretrained 13 | -------------------------------------------------------------------------------- /configs/img/onet_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/img/onet.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_img2mesh_3-f786b04a.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/img/pixel2mesh.yaml: -------------------------------------------------------------------------------- 1 | method: pix2mesh 2 | data: 3 | multiclass: True 4 | path: data/ShapeNet 5 | base_mesh: im2mesh/pix2mesh/ellipsoid/face3.obj 6 | ellipsoid: im2mesh/pix2mesh/ellipsoid/info_ellipsoid.dat 7 | img_folder: img_choy2016 8 | 
img_size: 224 9 | img_with_camera: true 10 | with_transforms: true 11 | pointcloud_target_n: 8000 12 | model: 13 | encoder_latent: simple 14 | decoder: simple 15 | encoder: pixel2mesh_cond 16 | encoder_kwargs: {} 17 | decoder_kwargs: 18 | adjust_ellipsoid: True 19 | hidden_dim: 192 20 | feat_dim: 963 21 | c_dim: 512 22 | z_dim: 64 23 | adjust_losses: True 24 | training: 25 | out_dir: out/img/pixel2mesh 26 | batch_size: 12 27 | print_every: 40 28 | visualize_every: 10000 29 | checkpoint_every: 1000 30 | validate_every: 2000 31 | model_selection_metric: chamfer 32 | test: 33 | threshold: 0.9 34 | eval_mesh: true 35 | eval_pointcloud: false 36 | generation: 37 | batch_size: 100000 38 | generate_mesh: true 39 | generate_pointcloud: false 40 | -------------------------------------------------------------------------------- /configs/img/psgn.yaml: -------------------------------------------------------------------------------- 1 | method: psgn 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | pointcloud_target_n: 1024 7 | model: 8 | decoder: simple 9 | encoder: resnet18 10 | c_dim: 256 11 | z_dim: 0 12 | training: 13 | out_dir: out/img/psgn 14 | batch_size: 64 15 | test: 16 | eval_mesh: false 17 | eval_pointcloud: true 18 | generation: 19 | batch_size: 100000 20 | refine: false 21 | n_x: 128 22 | n_z: 1 23 | generate_mesh: false 24 | -------------------------------------------------------------------------------- /configs/img/r2n2.yaml: -------------------------------------------------------------------------------- 1 | method: r2n2 2 | data: 3 | path: data/ShapeNet 4 | img_folder: img_choy2016 5 | img_size: 224 6 | model: 7 | encoder_latent: null 8 | decoder: simple 9 | encoder: resnet18 10 | c_dim: 256 11 | z_dim: 0 12 | training: 13 | out_dir: out/img/r2n2 14 | batch_size: 64 15 | model_selection_metric: iou 16 | model_selection_mode: maximize 17 | test: 18 | threshold: 0.4 19 | eval_mesh: true 20 | eval_pointcloud: false 21 | generation: 22 | batch_size: 100000 23 | refine: false 24 | n_x: 128 25 | n_z: 1 26 | -------------------------------------------------------------------------------- /configs/pointcloud/dmc.yaml: -------------------------------------------------------------------------------- 1 | method: dmc 2 | data: 3 | input_type: pointcloud 4 | path: data/ShapeNet 5 | pointcloud_n: 300 6 | pointcloud_noise: 0.005 7 | model: 8 | encoder_latent: simple 9 | decoder: unet 10 | encoder: pointnet_local 11 | decoder_kwargs: 12 | input_dim: 16 13 | skip_connection: True 14 | T: 256 15 | W: 32 16 | H: 32 17 | D: 32 18 | encoder_kwargs: 19 | out_dim: 16 20 | cell_W: 32 21 | cell_H: 32 22 | cell_D: 32 23 | c_dim: 256 24 | z_dim: 0 25 | num_voxels: 32 26 | dmc_weight_prior: 5. 
27 | training: 28 | out_dir: out/pointcloud/dmc 29 | batch_size: 8 30 | print_every: 10 31 | visualize_every: 2000 32 | checkpoint_every: 500 33 | validate_every: 2000 34 | backup_every: 10000 35 | eval_sample: false 36 | model_selection_metric: loss 37 | model_selection_mode: minimize 38 | test: 39 | threshold: 0.5 40 | eval_mesh: true 41 | eval_pointcloud: false 42 | model_file: model_best.pt 43 | generation: 44 | dataset: same 45 | batch_size: 100000 46 | refinement_step: 0 47 | # vis_n_outputs: 16 48 | generate_mesh: true 49 | generate_pointcloud: true 50 | generation_dir: generation 51 | -------------------------------------------------------------------------------- /configs/pointcloud/onet.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | input_type: pointcloud 4 | path: data/ShapeNet 5 | pointcloud_n: 300 6 | pointcloud_noise: 0.005 7 | points_subsample: 2048 8 | model: 9 | encoder_latent: null 10 | decoder: cbatchnorm 11 | encoder: pointnet_resnet 12 | encoder_kwargs: 13 | hidden_dim: 512 14 | c_dim: 512 15 | z_dim: 0 16 | training: 17 | out_dir: out/pointcloud/onet 18 | batch_size: 64 19 | model_selection_metric: iou 20 | model_selection_mode: maximize 21 | test: 22 | threshold: 0.2 23 | eval_mesh: true 24 | eval_pointcloud: false 25 | generation: 26 | batch_size: 100000 27 | refine: false 28 | n_x: 128 29 | n_z: 1 30 | -------------------------------------------------------------------------------- /configs/pointcloud/onet_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/pointcloud/onet.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_pcl2mesh-5c0be168.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/pointcloud/psgn.yaml: -------------------------------------------------------------------------------- 1 | method: psgn 2 | data: 3 | input_type: pointcloud 4 | path: data/ShapeNet 5 | pointcloud_n: 300 6 | pointcloud_noise: 0.005 7 | pointcloud_target_n: 1024 8 | model: 9 | decoder: simple 10 | encoder: pointnet_resnet 11 | encoder_kwargs: 12 | hidden_dim: 512 13 | c_dim: 512 14 | z_dim: 0 15 | training: 16 | out_dir: out/pointcloud/psgn 17 | batch_size: 64 18 | model_selection_metric: chamfer 19 | test: 20 | eval_mesh: false 21 | eval_pointcloud: true 22 | generation: 23 | batch_size: 100000 24 | refine: false 25 | n_x: 128 26 | n_z: 1 27 | generate_mesh: false 28 | -------------------------------------------------------------------------------- /configs/pointcloud/r2n2.yaml: -------------------------------------------------------------------------------- 1 | method: r2n2 2 | data: 3 | input_type: pointcloud 4 | path: data/ShapeNet 5 | pointcloud_n: 300 6 | pointcloud_noise: 0.005 7 | model: 8 | decoder: simple 9 | encoder: pointnet_resnet 10 | encoder_kwargs: 11 | hidden_dim: 512 12 | c_dim: 512 13 | z_dim: 0 14 | training: 15 | out_dir: out/pointcloud/r2n2 16 | batch_size: 64 17 | model_selection_metric: iou 18 | model_selection_mode: maximize 19 | test: 20 | threshold: 0.4 21 | eval_mesh: true 22 | eval_pointcloud: false 23 | generation: 24 | batch_size: 100000 25 | refine: false 26 | n_x: 128 27 | n_z: 1 28 | -------------------------------------------------------------------------------- /configs/repr_power/onet.yaml: 
-------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | input_type: idx 4 | classes: ['03001627'] 5 | path: data/ShapeNet 6 | train_split: train 7 | test_split: train 8 | val_split: train 9 | points_subsample: 1024 10 | model: 11 | encoder_latent: simple 12 | decoder: cbatchnorm 13 | encoder: idx 14 | decoder_kwargs: 15 | legacy: true 16 | c_dim: 512 17 | z_dim: 0 18 | use_poincloud: false 19 | training: 20 | out_dir: out/repr_power/onet 21 | batch_size: 64 22 | visualize_every: 2000 23 | validate_every: 2000 24 | model_selection_metric: iou 25 | model_selection_mode: maximize 26 | test: 27 | model_file: model.pt 28 | threshold: 0.5 29 | eval_mesh: true 30 | generation: 31 | split: train 32 | batch_size: 100000 33 | refine: false 34 | n_x: 128 35 | n_z: 1 36 | -------------------------------------------------------------------------------- /configs/unconditional/onet_airplanes.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | classes: ['02691156'] 4 | input_type: null 5 | path: data/ShapeNet 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: simple 9 | decoder: simple 10 | encoder: null 11 | c_dim: 0 12 | z_dim: 128 13 | training: 14 | out_dir: out/unconditional/onet_airplanes 15 | batch_size: 64 16 | eval_sample: true 17 | test: 18 | threshold: 0.2 19 | eval_mesh: true 20 | eval_pointcloud: false 21 | generation: 22 | batch_size: 100000 23 | refine: false 24 | n_x: 128 25 | n_z: 1 26 | use_sampling: true 27 | copy_groundtruth: false 28 | copy_input: false 29 | -------------------------------------------------------------------------------- /configs/unconditional/onet_airplanes_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/unconditional/onet_airplanes.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_uncond_airplanes-26c9d089.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/unconditional/onet_cars.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | classes: ['02958343'] 4 | input_type: null 5 | path: data/ShapeNet 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: simple 9 | decoder: simple 10 | encoder: null 11 | c_dim: 0 12 | z_dim: 128 13 | training: 14 | out_dir: out/unconditional/onet_cars 15 | batch_size: 64 16 | eval_sample: true 17 | test: 18 | threshold: 0.2 19 | eval_mesh: true 20 | eval_pointcloud: false 21 | generation: 22 | batch_size: 100000 23 | refine: false 24 | n_x: 128 25 | n_z: 1 26 | use_sampling: true 27 | copy_groundtruth: false 28 | copy_input: false 29 | 30 | -------------------------------------------------------------------------------- /configs/unconditional/onet_cars_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/unconditional/onet_cars.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_uncond_cars-59320701.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/unconditional/onet_chairs.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | 
classes: ['03001627'] 4 | input_type: null 5 | path: data/ShapeNet 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: simple 9 | decoder: simple 10 | encoder: null 11 | c_dim: 0 12 | z_dim: 128 13 | training: 14 | out_dir: out/unconditional/onet_chairs 15 | batch_size: 64 16 | eval_sample: true 17 | test: 18 | threshold: 0.2 19 | eval_mesh: true 20 | eval_pointcloud: false 21 | generation: 22 | batch_size: 100000 23 | refine: false 24 | n_x: 128 25 | n_z: 1 26 | use_sampling: true 27 | copy_groundtruth: false 28 | copy_input: false 29 | -------------------------------------------------------------------------------- /configs/unconditional/onet_chairs_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/unconditional/onet_chairs.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_uncond_chairs-800ea1f3.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/unconditional/onet_sofas.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | classes: ['04256520'] 4 | input_type: null 5 | path: data/ShapeNet 6 | points_subsample: 2048 7 | model: 8 | encoder_latent: simple 9 | decoder: simple 10 | encoder: null 11 | c_dim: 0 12 | z_dim: 128 13 | training: 14 | out_dir: out/unconditional/onet_sofas 15 | batch_size: 64 16 | eval_sample: true 17 | test: 18 | threshold: 0.2 19 | eval_mesh: true 20 | eval_pointcloud: false 21 | generation: 22 | batch_size: 100000 23 | refine: false 24 | n_x: 128 25 | n_z: 1 26 | use_sampling: true 27 | copy_groundtruth: false 28 | copy_input: false 29 | -------------------------------------------------------------------------------- /configs/unconditional/onet_sofas_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/unconditional/onet_sofas.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_uncond_sofas-bf6c039d.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /configs/voxels/onet.yaml: -------------------------------------------------------------------------------- 1 | method: onet 2 | data: 3 | input_type: voxels 4 | path: data/ShapeNet 5 | dim: 3 6 | points_subsample: 1024 7 | model: 8 | encoder_latent: null 9 | decoder: cbatchnorm 10 | encoder: voxel_simple 11 | c_dim: 256 12 | z_dim: 0 13 | training: 14 | out_dir: out/voxels/onet 15 | batch_size: 64 16 | model_selection_metric: iou 17 | model_selection_mode: maximize 18 | test: 19 | threshold: 0.2 20 | eval_mesh: true 21 | eval_pointcloud: false 22 | generation: 23 | batch_size: 100000 24 | refine: false 25 | n_x: 128 26 | n_z: 1 27 | -------------------------------------------------------------------------------- /configs/voxels/onet_pretrained.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/voxels/onet.yaml 2 | test: 3 | model_file: https://s3.eu-central-1.amazonaws.com/avg-projects/occupancy_networks/models/onet_voxel2mesh-52ee34d4.pt 4 | generation: 5 | generation_dir: pretrained 6 | -------------------------------------------------------------------------------- /data/.gitkeep: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/data/.gitkeep -------------------------------------------------------------------------------- /data/metadata.yaml: -------------------------------------------------------------------------------- 1 | { 2 | "04256520": { 3 | "id": "04256520", 4 | "name": "sofa,couch,lounge" 5 | }, 6 | "02691156": { 7 | "id": "02691156", 8 | "name": "airplane,aeroplane,plane" 9 | }, 10 | "03636649": { 11 | "id": "03636649", 12 | "name": "lamp" 13 | }, 14 | "04401088": { 15 | "id": "04401088", 16 | "name": "telephone,phone,telephone set" 17 | }, 18 | "04530566": { 19 | "id": "04530566", 20 | "name": "vessel,watercraft" 21 | }, 22 | "03691459": { 23 | "id": "03691459", 24 | "name": "loudspeaker,speaker,speaker unit,loudspeaker system,speaker system" 25 | }, 26 | "03001627": { 27 | "id": "03001627", 28 | "name": "chair" 29 | }, 30 | "02933112": { 31 | "id": "02933112", 32 | "name": "cabinet" 33 | }, 34 | "04379243": { 35 | "id": "04379243", 36 | "name": "table" 37 | }, 38 | "03211117": { 39 | "id": "03211117", 40 | "name": "display,video display" 41 | }, 42 | "02958343": { 43 | "id": "02958343", 44 | "name": "car,auto,automobile,machine,motorcar" 45 | }, 46 | "02828884": { 47 | "id": "02828884", 48 | "name": "bench" 49 | }, 50 | "04090263": { 51 | "id": "04090263", 52 | "name": "rifle" 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /demo/.gitignore: -------------------------------------------------------------------------------- 1 | generation/ 2 | -------------------------------------------------------------------------------- /demo/00.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/00.jpg -------------------------------------------------------------------------------- /demo/01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/01.jpg -------------------------------------------------------------------------------- /demo/02.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/02.jpg -------------------------------------------------------------------------------- /demo/03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/03.jpg -------------------------------------------------------------------------------- /demo/04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/04.jpg -------------------------------------------------------------------------------- /demo/05.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/05.jpg 
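The data/metadata.yaml file included above maps ShapeNet synset IDs to human-readable class names; the same IDs appear in the `classes:` fields of the unconditional and repr_power configs. A minimal sketch of turning that file into an id-to-name lookup (the helper name is illustrative and not part of the repository):

```python
import yaml  # PyYAML is pinned in environment.yaml

def load_class_names(path="data/metadata.yaml"):
    """Return a dict mapping ShapeNet synset ids to readable class names."""
    with open(path, "r") as f:
        # The file is written in JSON-style flow syntax, which is valid YAML.
        metadata = yaml.safe_load(f)
    return {synset_id: entry["name"] for synset_id, entry in metadata.items()}

# Example: load_class_names()["03001627"] == "chair"
```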
-------------------------------------------------------------------------------- /demo/06.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/06.jpg -------------------------------------------------------------------------------- /demo/07.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/07.jpg -------------------------------------------------------------------------------- /demo/08.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/demo/08.jpg -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | name: mesh_funcspace 2 | channels: 3 | - conda-forge 4 | - pytorch 5 | - defaults 6 | dependencies: 7 | - cython=0.29.2 8 | - imageio=2.4.1 9 | - numpy=1.15.4 10 | - numpy-base=1.15.4 11 | - matplotlib=3.0.3 12 | - matplotlib-base=3.0.3 13 | - pandas=0.23.4 14 | - pillow=5.3.0 15 | - pyembree=0.1.4 16 | - pytest=4.0.2 17 | - python=3.6.7 18 | - pytorch=1.0.0 19 | - pyyaml=3.13 20 | - scikit-image=0.14.1 21 | - scipy=1.1.0 22 | - tensorboardx=1.4 23 | - torchvision=0.2.1 24 | - tqdm=4.28.1 25 | - trimesh=2.37.7 26 | - pip: 27 | - h5py==2.9.0 28 | - plyfile==0.7 29 | 30 | -------------------------------------------------------------------------------- /eval.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import pandas as pd 4 | import torch 5 | import numpy as np 6 | from tqdm import tqdm 7 | from im2mesh import config, data 8 | from im2mesh.checkpoints import CheckpointIO 9 | 10 | 11 | parser = argparse.ArgumentParser( 12 | description='Evaluate mesh algorithms.' 13 | ) 14 | parser.add_argument('config', type=str, help='Path to config file.') 15 | parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.') 16 | 17 | # Get configuration and basic arguments 18 | args = parser.parse_args() 19 | cfg = config.load_config(args.config, 'configs/default.yaml') 20 | is_cuda = (torch.cuda.is_available() and not args.no_cuda) 21 | device = torch.device("cuda" if is_cuda else "cpu") 22 | 23 | # Shorthands 24 | out_dir = cfg['training']['out_dir'] 25 | out_file = os.path.join(out_dir, 'eval_full.pkl') 26 | out_file_class = os.path.join(out_dir, 'eval.csv') 27 | 28 | # Dataset 29 | dataset = config.get_dataset('test', cfg, return_idx=True) 30 | model = config.get_model(cfg, device=device, dataset=dataset) 31 | 32 | checkpoint_io = CheckpointIO(out_dir, model=model) 33 | try: 34 | checkpoint_io.load(cfg['test']['model_file']) 35 | except FileExistsError: 36 | print('Model file does not exist. 
Exiting.') 37 | exit() 38 | 39 | # Trainer 40 | trainer = config.get_trainer(model, None, cfg, device=device) 41 | 42 | # Print model 43 | nparameters = sum(p.numel() for p in model.parameters()) 44 | print(model) 45 | print('Total number of parameters: %d' % nparameters) 46 | 47 | # Evaluate 48 | model.eval() 49 | 50 | eval_dicts = [] 51 | print('Evaluating networks...') 52 | 53 | 54 | test_loader = torch.utils.data.DataLoader( 55 | dataset, batch_size=1, shuffle=False, 56 | collate_fn=data.collate_remove_none, 57 | worker_init_fn=data.worker_init_fn) 58 | 59 | # Handle each dataset separately 60 | for it, data in enumerate(tqdm(test_loader)): 61 | if data is None: 62 | print('Invalid data.') 63 | continue 64 | # Get index etc. 65 | idx = data['idx'].item() 66 | 67 | try: 68 | model_dict = dataset.get_model_dict(idx) 69 | except AttributeError: 70 | model_dict = {'model': str(idx), 'category': 'n/a'} 71 | 72 | modelname = model_dict['model'] 73 | category_id = model_dict['category'] 74 | 75 | try: 76 | category_name = dataset.metadata[category_id].get('name', 'n/a') 77 | except AttributeError: 78 | category_name = 'n/a' 79 | 80 | eval_dict = { 81 | 'idx': idx, 82 | 'class id': category_id, 83 | 'class name': category_name, 84 | 'modelname':modelname, 85 | } 86 | eval_dicts.append(eval_dict) 87 | eval_data = trainer.eval_step(data) 88 | eval_dict.update(eval_data) 89 | 90 | 91 | # Create pandas dataframe and save 92 | eval_df = pd.DataFrame(eval_dicts) 93 | eval_df.set_index(['idx'], inplace=True) 94 | eval_df.to_pickle(out_file) 95 | 96 | # Create CSV file with main statistics 97 | eval_df_class = eval_df.groupby(by=['class name']).mean() 98 | eval_df_class.to_csv(out_file_class) 99 | 100 | # Print results 101 | eval_df_class.loc['mean'] = eval_df_class.mean() 102 | print(eval_df_class) -------------------------------------------------------------------------------- /external/mesh-fusion/3_simplify.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import ntpath 4 | import common 5 | 6 | 7 | class Simplification: 8 | """ 9 | Perform simplification of watertight meshes. 10 | """ 11 | 12 | def __init__(self): 13 | """ 14 | Constructor. 15 | """ 16 | 17 | parser = self.get_parser() 18 | self.options = parser.parse_args() 19 | self.simplification_script = os.path.join( 20 | os.path.dirname(os.path.realpath(__file__)), 'simplification.mlx') 21 | 22 | def get_parser(self): 23 | """ 24 | Get parser of tool. 25 | 26 | :return: parser 27 | """ 28 | 29 | parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.') 30 | input_group = parser.add_mutually_exclusive_group(required=True) 31 | input_group.add_argument('--in_dir', type=str, 32 | help='Path to input directory.') 33 | input_group.add_argument('--in_file', type=str, 34 | help='Path to input directory.') 35 | parser.add_argument('--out_dir', type=str, 36 | help='Path to output directory; files within are overwritten!') 37 | 38 | return parser 39 | 40 | def read_directory(self, directory): 41 | """ 42 | Read directory. 
43 | 44 | :param directory: path to directory 45 | :return: list of files 46 | """ 47 | 48 | files = [] 49 | for filename in os.listdir(directory): 50 | files.append(os.path.normpath(os.path.join(directory, filename))) 51 | 52 | return files 53 | 54 | def get_in_files(self): 55 | if self.options.in_dir is not None: 56 | assert os.path.exists(self.options.in_dir) 57 | common.makedir(self.options.out_dir) 58 | files = self.read_directory(self.options.in_dir) 59 | else: 60 | files = [self.options.in_file] 61 | 62 | return files 63 | 64 | def run(self): 65 | """ 66 | Run simplification. 67 | """ 68 | 69 | common.makedir(self.options.out_dir) 70 | files = self.get_in_files() 71 | 72 | for filepath in files: 73 | os.system('meshlabserver -i %s -o %s -s %s' % ( 74 | filepath, 75 | os.path.join(self.options.out_dir, ntpath.basename(filepath)), 76 | self.simplification_script 77 | )) 78 | 79 | 80 | if __name__ == '__main__': 81 | app = Simplification() 82 | app.run() 83 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/.gitignore: -------------------------------------------------------------------------------- 1 | /cyfusion.cpp -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017, The OctNet authors 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # * Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 8 | # * Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # * Neither the name of the nor the 12 | # names of its contributors may be used to endorse or promote products 13 | # derived from this software without specific prior written permission. 14 | # 15 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 19 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | 26 | cmake_minimum_required(VERSION 2.8) 27 | set(CMAKE_MACOSX_RPATH 1) 28 | 29 | set(CMAKE_CXX_STANDARD 11) 30 | 31 | # set(CMAKE_BUILD_TYPE Debug) 32 | set(CMAKE_BUILD_TYPE Release) 33 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 34 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 35 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_FORCE_INLINES -Wall") 36 | 37 | find_package(OpenMP) 38 | if (OPENMP_FOUND) 39 | set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 40 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 41 | endif() 42 | 43 | add_library(fusion_cpu SHARED fusion.cpp) 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Gernot 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/README.md: -------------------------------------------------------------------------------- 1 | # PyFusion 2 | 3 | PyFusion is a Python framework for volumetric depth fusion. 4 | It contains simple occupancy and TSDF fusion methods that can be executed on a CPU as well as on a GPU. 5 | 6 | To use the code, first compile the native code via 7 | 8 | ```bash 9 | cd build 10 | cmake .. 
11 | make 12 | ``` 13 | Afterwards you can compile the Cython code via 14 | 15 | ```bash 16 | python setup.py build_ext --inplace 17 | ``` 18 | 19 | You can then use the fusion functions 20 | 21 | ```python 22 | import pyfusion 23 | 24 | # create a views object 25 | # depthmaps: a NxHxW numpy float tensor of N depthmaps, invalid depth values are marked by negative numbers 26 | # Ks: the camera intric matrices, Nx3x3 float tensor 27 | # Rs: the camera rotation matrices, Nx3x3 float tensor 28 | # Ts: the camera translation vectors, Nx3 float tensor 29 | views = pyfusion.PyViews(depthmaps, Ks,Rs,Ts) 30 | 31 | # afterwards you can fuse the depth maps for example by 32 | # depth,height,width: number of voxels in each dimension 33 | # truncation: TSDF truncation value 34 | tsdf = pyfusion.tsdf_gpu(views, depth,height,width, vx_size, truncation, False) 35 | 36 | # the same code can also be run on the CPU 37 | tsdf = pyfusion.tsdf_cpu(views, depth,height,width, vx_size, truncation, False, n_threads=8) 38 | ``` 39 | 40 | Make sure `pyfusion` is in your `$PYTHONPATH`. 41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | pyfusion_dir = os.path.dirname(os.path.realpath(__file__)) 5 | ctypes.cdll.LoadLibrary(os.path.join(pyfusion_dir, 'build', 'libfusion_cpu.so')) 6 | from cyfusion import * 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/fusion.cpp: -------------------------------------------------------------------------------- 1 | #include "fusion.h" 2 | 3 | #include 4 | #include 5 | 6 | #if defined(_OPENMP) 7 | #include 8 | #endif 9 | 10 | 11 | template 12 | void fusion_cpu(const Views& views, const FusionFunctorT functor, float vx_size, int n_threads, Volume& vol) { 13 | int vx_res3 = vol.depth_ * vol.height_ * vol.width_; 14 | 15 | #if defined(_OPENMP) 16 | omp_set_num_threads(n_threads); 17 | #endif 18 | #pragma omp parallel for 19 | for(int idx = 0; idx < vx_res3; ++idx) { 20 | int d,h,w; 21 | fusion_idx2dhw(idx, vol.width_,vol.height_, d,h,w); 22 | float x,y,z; 23 | fusion_dhw2xyz(d,h,w, vx_size, x,y,z); 24 | 25 | functor.before_sample(&vol, d,h,w); 26 | bool run = true; 27 | int n_valid_views = 0; 28 | for(int vidx = 0; vidx < views.n_views_ && run; ++vidx) { 29 | float ur, vr, vx_d; 30 | fusion_project(&views, vidx, x,y,z, ur,vr,vx_d); 31 | 32 | int u = int(ur + 0.5f); 33 | int v = int(vr + 0.5f); 34 | // printf(" vx %d,%d,%d has center %f,%f,%f and projects to uvd=%f,%f,%f\n", w,h,d, x,y,z, ur,vr,vx_d); 35 | 36 | if(u >= 0 && v >= 0 && u < views.cols_ && v < views.rows_) { 37 | int dm_idx = (vidx * views.rows_ + v) * views.cols_ + u; 38 | float dm_d = views.depthmaps_[dm_idx]; 39 | // printf(" is on depthmap[%d,%d] with depth=%f, diff=%f\n", views.cols_,views.rows_, dm_d, dm_d - vx_d); 40 | run = functor.new_sample(&vol, vx_d, dm_d, d,h,w, &n_valid_views); 41 | } 42 | } // for vidx 43 | functor.after_sample(&vol, d,h,w, n_valid_views); 44 | } 45 | } 46 | 47 | void fusion_projectionmask_cpu(const Views& views, float vx_size, bool unknown_is_free, int n_threads, Volume& vol) { 48 | ProjectionMaskFusionFunctor functor(unknown_is_free); 49 | fusion_cpu(views, functor, vx_size, n_threads, vol); 50 | } 51 | 52 | void fusion_occupancy_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, 
Volume& vol) { 53 | OccupancyFusionFunctor functor(truncation, unknown_is_free); 54 | fusion_cpu(views, functor, vx_size, n_threads, vol); 55 | } 56 | 57 | void fusion_tsdfmask_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, Volume& vol) { 58 | TsdfMaskFusionFunctor functor(truncation, unknown_is_free); 59 | fusion_cpu(views, functor, vx_size, n_threads, vol); 60 | } 61 | 62 | void fusion_tsdf_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, int n_threads, Volume& vol) { 63 | TsdfFusionFunctor functor(truncation, unknown_is_free); 64 | fusion_cpu(views, functor, vx_size, n_threads, vol); 65 | } 66 | 67 | void fusion_tsdf_hist_cpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, float* bin_centers, int n_bins, bool unobserved_is_occupied, int n_threads, Volume& vol) { 68 | TsdfHistFusionFunctor functor(truncation, unknown_is_free, bin_centers, n_bins, unobserved_is_occupied); 69 | fusion_cpu(views, functor, vx_size, n_threads, vol); 70 | } 71 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusioncpu/setup.py: -------------------------------------------------------------------------------- 1 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 2 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 3 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 4 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 5 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 6 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 7 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 8 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 9 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 10 | 11 | from distutils.core import setup 12 | from Cython.Build import cythonize 13 | from distutils.extension import Extension 14 | from Cython.Distutils import build_ext 15 | import numpy as np 16 | import platform 17 | 18 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2'] 19 | extra_link_args = [] 20 | if 'Linux' in platform.system(): 21 | print('Added OpenMP') 22 | extra_compile_args.append('-fopenmp') 23 | extra_link_args.append('-fopenmp') 24 | 25 | 26 | setup( 27 | name="cyfusion", 28 | cmdclass= {'build_ext': build_ext}, 29 | ext_modules=[ 30 | Extension('cyfusion', 31 | ['cyfusion.pyx'], 32 | language='c++', 33 | library_dirs=['./build/'], 34 | libraries=['m', "fusion_cpu"], 35 | include_dirs=[np.get_include()], 36 | extra_compile_args=extra_compile_args, 37 | extra_link_args=extra_link_args 38 | ) 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017, The OctNet authors 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # * Redistributions of source code must retain the above copyright 7 | # notice, this list of conditions and the following disclaimer. 
8 | # * Redistributions in binary form must reproduce the above copyright 9 | # notice, this list of conditions and the following disclaimer in the 10 | # documentation and/or other materials provided with the distribution. 11 | # * Neither the name of the nor the 12 | # names of its contributors may be used to endorse or promote products 13 | # derived from this software without specific prior written permission. 14 | # 15 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 19 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | cmake_minimum_required(VERSION 2.8) 27 | set(CMAKE_CXX_STANDARD 11) 28 | 29 | # set(CMAKE_BUILD_TYPE Debug) 30 | set(CMAKE_BUILD_TYPE Release) 31 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 32 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse2 -msse3 -msse4.2 -fPIC") 33 | 34 | find_package(CUDA 6.5 REQUIRED) 35 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS};-std=c++11") 36 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_FORCE_INLINES -Wall") 37 | 38 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}; -gencode=arch=compute_30,code=sm_30") 39 | set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}; -gencode=arch=compute_30,code=compute_30") 40 | 41 | set(FUSION_GPU_SRC 42 | fusion.cu 43 | fusion_zach_tvl1.cu 44 | ) 45 | 46 | cuda_add_library(fusion_gpu SHARED ${FUSION_GPU_SRC}) 47 | target_link_libraries(fusion_gpu ${CUDA_LIBRARIES}) 48 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Gernot 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/README.md: -------------------------------------------------------------------------------- 1 | # PyFusion 2 | 3 | PyFusion is a Python framework for volumetric depth fusion. 4 | It contains simple occupancy and TSDF fusion methods that can be executed on a CPU as well as on a GPU. 5 | 6 | To use the code, first compile the native code via 7 | 8 | ```bash 9 | cd build 10 | cmake .. 11 | make 12 | ``` 13 | Afterwards you can compile the Cython code via 14 | 15 | ```bash 16 | python setup.py build_ext --inplace 17 | ``` 18 | 19 | You can then use the fusion functions 20 | 21 | ```python 22 | import pyfusion 23 | 24 | # create a views object 25 | # depthmaps: a NxHxW numpy float tensor of N depthmaps, invalid depth values are marked by negative numbers 26 | # Ks: the camera intrinsic matrices, Nx3x3 float tensor 27 | # Rs: the camera rotation matrices, Nx3x3 float tensor 28 | # Ts: the camera translation vectors, Nx3 float tensor 29 | views = pyfusion.PyViews(depthmaps, Ks,Rs,Ts) 30 | 31 | # afterwards you can fuse the depth maps for example by 32 | # depth,height,width: number of voxels in each dimension 33 | # truncation: TSDF truncation value 34 | tsdf = pyfusion.tsdf_gpu(views, depth,height,width, vx_size, truncation, False) 35 | 36 | # the same code can also be run on the CPU 37 | tsdf = pyfusion.tsdf_cpu(views, depth,height,width, vx_size, truncation, False, n_threads=8) 38 | ``` 39 | 40 | Make sure `pyfusion` is in your `$PYTHONPATH`. 
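The closing note about `$PYTHONPATH` matters because, as the `libfusiongpu/__init__.py` that follows shows, importing the package first loads the compiled `build/libfusion_gpu.so` via ctypes and then re-exports the Cython module. A minimal sketch of making it importable from a script, where the checkout path is a placeholder and not part of the repository:

```python
import sys

# Hypothetical location of the mesh-fusion checkout; adjust to your setup.
sys.path.insert(0, "/path/to/occupancy_networks/external/mesh-fusion")

# Importing the package triggers ctypes.cdll.LoadLibrary on build/libfusion_gpu.so
# (see __init__.py below) and should expose PyViews, tsdf_gpu, etc. as in the README example.
import libfusiongpu as libfusion
```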
41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | pyfusion_dir = os.path.dirname(os.path.realpath(__file__)) 5 | ctypes.cdll.LoadLibrary(os.path.join(pyfusion_dir, 'build', 'libfusion_gpu.so')) 6 | from libfusiongpu.cyfusion import * 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/fusion.cu: -------------------------------------------------------------------------------- 1 | #include "gpu_common.h" 2 | 3 | #include 4 | #include 5 | 6 | 7 | 8 | template 9 | __global__ void kernel_fusion(int vx_res3, const Views views, const FusionFunctorT functor, float vx_size, Volume vol) { 10 | CUDA_KERNEL_LOOP(idx, vx_res3) { 11 | int d,h,w; 12 | fusion_idx2dhw(idx, vol.width_,vol.height_, d,h,w); 13 | float x,y,z; 14 | fusion_dhw2xyz(d,h,w, vx_size, x,y,z); 15 | 16 | functor.before_sample(&vol, d,h,w); 17 | bool run = true; 18 | int n_valid_views = 0; 19 | for(int vidx = 0; vidx < views.n_views_ && run; ++vidx) { 20 | float ur, vr, vx_d; 21 | fusion_project(&views, vidx, x,y,z, ur,vr,vx_d); 22 | //NOTE: ur,vr,vx_d might differ to CPP (subtle differences in precision) 23 | 24 | int u = int(ur + 0.5f); 25 | int v = int(vr + 0.5f); 26 | 27 | if(u >= 0 && v >= 0 && u < views.cols_ && v < views.rows_) { 28 | int dm_idx = (vidx * views.rows_ + v) * views.cols_ + u; 29 | float dm_d = views.depthmaps_[dm_idx]; 30 | // if(d==103 && h==130 && w==153) printf(" dm_d=%f, dm_idx=%d, u=%d, v=%d, ur=%f, vr=%f\n", dm_d, dm_idx, u,v, ur,vr); 31 | run = functor.new_sample(&vol, vx_d, dm_d, d,h,w, &n_valid_views); 32 | } 33 | } // for vidx 34 | functor.after_sample(&vol, d,h,w, n_valid_views); 35 | } 36 | } 37 | 38 | 39 | 40 | template 41 | void fusion_gpu(const Views& views, const FusionFunctorT functor, float vx_size, Volume& vol) { 42 | Views views_gpu; 43 | views_to_gpu(views, views_gpu, true); 44 | Volume vol_gpu; 45 | volume_alloc_like_gpu(vol, vol_gpu); 46 | 47 | int vx_res3 = vol.depth_ * vol.height_ * vol.width_; 48 | kernel_fusion<<>>( 49 | vx_res3, views_gpu, functor, vx_size, vol_gpu 50 | ); 51 | CUDA_POST_KERNEL_CHECK; 52 | 53 | volume_to_cpu(vol_gpu, vol, false); 54 | 55 | views_free_gpu(views_gpu); 56 | volume_free_gpu(vol_gpu); 57 | } 58 | 59 | void fusion_projectionmask_gpu(const Views& views, float vx_size, bool unknown_is_free, Volume& vol) { 60 | ProjectionMaskFusionFunctor functor(unknown_is_free); 61 | fusion_gpu(views, functor, vx_size, vol); 62 | } 63 | 64 | void fusion_occupancy_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 65 | OccupancyFusionFunctor functor(truncation, unknown_is_free); 66 | fusion_gpu(views, functor, vx_size, vol); 67 | } 68 | 69 | void fusion_tsdfmask_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 70 | TsdfMaskFusionFunctor functor(truncation, unknown_is_free); 71 | fusion_gpu(views, functor, vx_size, vol); 72 | } 73 | 74 | void fusion_tsdf_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, Volume& vol) { 75 | TsdfFusionFunctor functor(truncation, unknown_is_free); 76 | fusion_gpu(views, functor, vx_size, vol); 77 | } 78 | 79 | void fusion_tsdf_hist_gpu(const Views& views, float vx_size, float truncation, bool unknown_is_free, float* bin_centers, int n_bins, bool 
unobserved_is_occupied, Volume& vol) { 80 | float* bin_centers_gpu = host_to_device_malloc(bin_centers, n_bins); 81 | TsdfHistFusionFunctor functor(truncation, unknown_is_free, bin_centers_gpu, n_bins, unobserved_is_occupied); 82 | fusion_gpu(views, functor, vx_size, vol); 83 | device_free(bin_centers_gpu); 84 | } 85 | -------------------------------------------------------------------------------- /external/mesh-fusion/libfusiongpu/setup.py: -------------------------------------------------------------------------------- 1 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 2 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 3 | # DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY 4 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 5 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 6 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 7 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 8 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 9 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 10 | 11 | from distutils.core import setup 12 | from Cython.Build import cythonize 13 | from distutils.extension import Extension 14 | from Cython.Distutils import build_ext 15 | import numpy as np 16 | import platform 17 | 18 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2'] 19 | extra_link_args = [] 20 | if 'Linux' in platform.system(): 21 | print('Added OpenMP') 22 | extra_compile_args.append('-fopenmp') 23 | extra_link_args.append('-fopenmp') 24 | 25 | 26 | setup( 27 | name="cyfusion", 28 | cmdclass= {'build_ext': build_ext}, 29 | ext_modules=[ 30 | Extension('cyfusion', 31 | ['cyfusion.pyx'], 32 | language='c++', 33 | library_dirs=['./build/'], 34 | libraries=['m', "fusion_gpu"], 35 | include_dirs=[np.get_include()], 36 | extra_compile_args=extra_compile_args, 37 | extra_link_args=extra_link_args 38 | ) 39 | ] 40 | ) 41 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012-2015, P. M. Neila 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of the copyright holder nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | PyMCubes 3 | ======== 4 | 5 | PyMCubes is an implementation of the marching cubes algorithm to extract 6 | isosurfaces from volumetric data. The volumetric data can be given as a 7 | three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first 8 | option is much faster, but it requires more memory and becomes unfeasible for 9 | very large volumes. 10 | 11 | PyMCubes also provides a function to export the results of the marching cubes as 12 | COLLADA ``(.dae)`` files. This requires the 13 | `PyCollada `_ library. 14 | 15 | Installation 16 | ============ 17 | 18 | Just as any standard Python package, clone or download the project 19 | and run:: 20 | 21 | $ cd path/to/PyMCubes 22 | $ python setup.py build 23 | $ python setup.py install 24 | 25 | If you do not have write permission on the directory of Python packages, 26 | install with the ``--user`` option:: 27 | 28 | $ python setup.py install --user 29 | 30 | Example 31 | ======= 32 | 33 | The following example creates a data volume with spherical isosurfaces and 34 | extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as 35 | ``sphere.dae``:: 36 | 37 | >>> import numpy as np 38 | >>> import mcubes 39 | 40 | # Create a data volume (30 x 30 x 30) 41 | >>> X, Y, Z = np.mgrid[:30, :30, :30] 42 | >>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2 43 | 44 | # Extract the 0-isosurface 45 | >>> vertices, triangles = mcubes.marching_cubes(u, 0) 46 | 47 | # Export the result to sphere.dae 48 | >>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere") 49 | 50 | The second example is very similar to the first one, but it uses a function 51 | to represent the volume instead of a NumPy array:: 52 | 53 | >>> import numpy as np 54 | >>> import mcubes 55 | 56 | # Create the volume 57 | >>> f = lambda x, y, z: x**2 + y**2 + z**2 58 | 59 | # Extract the 16-isosurface 60 | >>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10), 61 | ... 
100, 100, 100, f, 16) 62 | 63 | # Export the result to sphere2.dae 64 | >>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere") 65 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/__init__.py: -------------------------------------------------------------------------------- 1 | from libmcubes.mcubes import marching_cubes, marching_cubes_func 2 | from libmcubes.exporter import export_mesh, export_obj, export_off 3 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/exporter.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | 5 | def export_obj(vertices, triangles, filename): 6 | """ 7 | Exports a mesh in the (.obj) format. 8 | """ 9 | 10 | with open(filename, 'w') as fh: 11 | 12 | for v in vertices: 13 | fh.write("v {} {} {}\n".format(*v)) 14 | 15 | for f in triangles: 16 | fh.write("f {} {} {}\n".format(*(f + 1))) 17 | 18 | 19 | def export_off(vertices, triangles, filename): 20 | """ 21 | Exports a mesh in the (.off) format. 22 | """ 23 | 24 | with open(filename, 'w') as fh: 25 | fh.write('OFF\n') 26 | fh.write('{} {} 0\n'.format(len(vertices), len(triangles))) 27 | 28 | for v in vertices: 29 | fh.write("{} {} {}\n".format(*v)) 30 | 31 | for f in triangles: 32 | fh.write("3 {} {} {}\n".format(*f)) 33 | 34 | 35 | def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"): 36 | """ 37 | Exports a mesh in the COLLADA (.dae) format. 38 | 39 | Needs PyCollada (https://github.com/pycollada/pycollada). 40 | """ 41 | 42 | import collada 43 | 44 | mesh = collada.Collada() 45 | 46 | vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z')) 47 | geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src]) 48 | 49 | input_list = collada.source.InputList() 50 | input_list.addInput(0, 'VERTEX', "#verts-array") 51 | 52 | triset = geom.createTriangleSet(np.copy(triangles), input_list, "") 53 | geom.primitives.append(triset) 54 | mesh.geometries.append(geom) 55 | 56 | geomnode = collada.scene.GeometryNode(geom, []) 57 | node = collada.scene.Node(mesh_name, children=[geomnode]) 58 | 59 | myscene = collada.scene.Scene("mcubes_scene", [node]) 60 | mesh.scenes.append(myscene) 61 | mesh.scene = myscene 62 | 63 | mesh.write(filename) 64 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/mcubes.pyx: -------------------------------------------------------------------------------- 1 | 2 | # distutils: language = c++ 3 | # cython: embedsignature = True 4 | 5 | # from libcpp.vector cimport vector 6 | import numpy as np 7 | 8 | # Define PY_ARRAY_UNIQUE_SYMBOL 9 | cdef extern from "pyarray_symbol.h": 10 | pass 11 | 12 | cimport numpy as np 13 | 14 | np.import_array() 15 | 16 | cdef extern from "pywrapper.h": 17 | cdef object c_marching_cubes "marching_cubes"(np.ndarray, double) except + 18 | cdef object c_marching_cubes2 "marching_cubes2"(np.ndarray, double) except + 19 | cdef object c_marching_cubes3 "marching_cubes3"(np.ndarray, double) except + 20 | cdef object c_marching_cubes_func "marching_cubes_func"(tuple, tuple, int, int, int, object, double) except + 21 | 22 | def marching_cubes(np.ndarray volume, float isovalue): 23 | 24 | verts, faces = c_marching_cubes(volume, isovalue) 25 | verts.shape = (-1, 3) 26 | faces.shape = (-1, 3) 27 | return verts, faces 28 | 29 | def marching_cubes2(np.ndarray 
volume, float isovalue): 30 | 31 | verts, faces = c_marching_cubes2(volume, isovalue) 32 | verts.shape = (-1, 3) 33 | faces.shape = (-1, 3) 34 | return verts, faces 35 | 36 | def marching_cubes3(np.ndarray volume, float isovalue): 37 | 38 | verts, faces = c_marching_cubes3(volume, isovalue) 39 | verts.shape = (-1, 3) 40 | faces.shape = (-1, 3) 41 | return verts, faces 42 | 43 | def marching_cubes_func(tuple lower, tuple upper, int numx, int numy, int numz, object f, double isovalue): 44 | 45 | verts, faces = c_marching_cubes_func(lower, upper, numx, numy, numz, f, isovalue) 46 | verts.shape = (-1, 3) 47 | faces.shape = (-1, 3) 48 | return verts, faces 49 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/pyarray_symbol.h: -------------------------------------------------------------------------------- 1 | 2 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 3 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/pywrapper.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _PYWRAPPER_H 3 | #define _PYWRAPPER_H 4 | 5 | #include 6 | #include "pyarraymodule.h" 7 | 8 | #include 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue); 11 | PyObject* marching_cubes2(PyArrayObject* arr, double isovalue); 12 | PyObject* marching_cubes3(PyArrayObject* arr, double isovalue); 13 | PyObject* marching_cubes_func(PyObject* lower, PyObject* upper, 14 | int numx, int numy, int numz, PyObject* f, double isovalue); 15 | 16 | #endif // _PYWRAPPER_H 17 | -------------------------------------------------------------------------------- /external/mesh-fusion/libmcubes/setup.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | 3 | try: 4 | from setuptools import setup 5 | except ImportError: 6 | from distutils.core import setup 7 | 8 | from Cython.Build import cythonize 9 | 10 | import numpy 11 | from distutils.extension import Extension 12 | 13 | # Get the version number. 
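# NumPy's include directory is needed below so that the Cython extension can be
# compiled against the NumPy C API.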
14 | numpy_include_dir = numpy.get_include() 15 | 16 | mcubes_module = Extension( 17 | "mcubes", 18 | [ 19 | "mcubes.pyx", 20 | "pywrapper.cpp", 21 | "marchingcubes.cpp" 22 | ], 23 | language="c++", 24 | extra_compile_args=['-std=c++11'], 25 | include_dirs=[numpy_include_dir] 26 | ) 27 | 28 | setup(name="PyMCubes", 29 | version="0.0.6", 30 | description="Marching cubes for Python", 31 | author="Pablo Márquez Neila", 32 | author_email="pablo.marquezneila@epfl.ch", 33 | url="https://github.com/pmneila/PyMCubes", 34 | license="BSD 3-clause", 35 | long_description=""" 36 | Marching cubes for Python 37 | """, 38 | classifiers=[ 39 | "Development Status :: 4 - Beta", 40 | "Environment :: Console", 41 | "Intended Audience :: Developers", 42 | "Intended Audience :: Science/Research", 43 | "License :: OSI Approved :: BSD License", 44 | "Natural Language :: English", 45 | "Operating System :: OS Independent", 46 | "Programming Language :: C++", 47 | "Programming Language :: Python", 48 | "Topic :: Multimedia :: Graphics :: 3D Modeling", 49 | "Topic :: Scientific/Engineering :: Image Recognition", 50 | ], 51 | packages=["mcubes"], 52 | ext_modules=cythonize(mcubes_module), 53 | requires=['numpy', 'Cython', 'PyCollada'], 54 | setup_requires=['numpy', 'Cython'] 55 | ) 56 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/__init__.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import os 3 | 4 | #pyrender_dir = os.path.dirname(os.path.realpath(__file__)) 5 | #ctypes.cdll.LoadLibrary(os.path.join(pyrender_dir, 'pyrender.so')) 6 | from librender.pyrender import * 7 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/offscreen.h: -------------------------------------------------------------------------------- 1 | #ifndef LIBRENDER_OFFSCREEN_H 2 | #define LIBRENDER_OFFSCREEN_H 3 | 4 | #include "GL/glew.h" 5 | #include "GL/gl.h" 6 | #include "GL/glu.h" 7 | #include "GL/glut.h" 8 | 9 | class OffscreenGL { 10 | 11 | public: 12 | OffscreenGL(int maxHeight, int maxWidth); 13 | ~OffscreenGL(); 14 | 15 | private: 16 | static int glutWin; 17 | static bool glutInitialized; 18 | GLuint fb; 19 | GLuint renderTex; 20 | GLuint depthTex; 21 | }; 22 | 23 | 24 | void renderDepthMesh(double *FM, int fNum, double *VM, int vNum, double *CM, double *intrinsics, int *imgSizeV, double *zNearFarV, unsigned char * imgBuffer, float *depthBuffer, bool *maskBuffer, double linewidth, bool coloring); 25 | 26 | #endif -------------------------------------------------------------------------------- /external/mesh-fusion/librender/pyrender.pyx: -------------------------------------------------------------------------------- 1 | cimport cython 2 | import numpy as np 3 | cimport numpy as np 4 | 5 | from libc.stdlib cimport free, malloc 6 | from libcpp cimport bool 7 | from cpython cimport PyObject, Py_INCREF 8 | 9 | CREATE_INIT = True # workaround, so cython builds a init function 10 | 11 | np.import_array() 12 | 13 | 14 | cdef extern from "offscreen.h": 15 | void renderDepthMesh(double *FM, int fNum, double *VM, int vNum, double *CM, double *intrinsics, int *imgSizeV, double *zNearFarV, unsigned char * imgBuffer, float *depthBuffer, bool *maskBuffer, double linewidth, bool coloring); 16 | 17 | 18 | def render(double[:,::1] vertices, double[:,::1] faces, double[::1] cam_intr, double[::1] znf, int[::1] img_size): 19 | if vertices.shape[0] != 3: 20 | raise 
Exception('vertices must be a 3xM double array') 21 | if faces.shape[0] != 3: 22 | raise Exception('faces must be a 3xM double array') 23 | if cam_intr.shape[0] != 4: 24 | raise Exception('cam_intr must be a 4x1 double vector') 25 | if img_size.shape[0] != 2: 26 | raise Exception('img_size must be a 2x1 int vector') 27 | 28 | cdef double* VM = &(vertices[0,0]) 29 | cdef int vNum = vertices.shape[1] 30 | cdef double* FM = &(faces[0,0]) 31 | cdef int fNum = faces.shape[1] 32 | cdef double* intrinsics = &(cam_intr[0]) 33 | cdef double* zNearVarV = &(znf[0]) 34 | cdef int* imgSize = &(img_size[0]) 35 | 36 | cdef bool coloring = False 37 | cdef double* CM = NULL 38 | 39 | depth = np.empty((img_size[1], img_size[0]), dtype=np.float32) 40 | mask = np.empty((img_size[1], img_size[0]), dtype=np.uint8) 41 | img = np.empty((3, img_size[1], img_size[0]), dtype=np.uint8) 42 | cdef float[:,::1] depth_view = depth 43 | cdef unsigned char[:,::1] mask_view = mask 44 | cdef unsigned char[:,:,::1] img_view = img 45 | cdef float* depthBuffer = &(depth_view[0,0]) 46 | cdef bool* maskBuffer = &(mask_view[0,0]) 47 | cdef unsigned char* imgBuffer = &(img_view[0,0,0]) 48 | 49 | renderDepthMesh(FM, fNum, VM, vNum, CM, intrinsics, imgSize, zNearVarV, imgBuffer, depthBuffer, maskBuffer, 0, coloring); 50 | 51 | return depth.T, mask.T, img.transpose((2,1,0)) 52 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | from distutils.extension import Extension 3 | from Cython.Distutils import build_ext 4 | import numpy as np 5 | 6 | extra_compile_args = ["-ffast-math", '-msse', '-msse2', '-msse3', '-msse4.2', '-O4', '-fopenmp'] 7 | extra_link_args = ['-lGLEW', '-lglut', '-lGL', '-lGLU', '-fopenmp'] 8 | 9 | setup( 10 | name="pyrender", 11 | cmdclass= {'build_ext': build_ext}, 12 | ext_modules=[ 13 | Extension('pyrender', 14 | [ 15 | 'pyrender.pyx', 16 | 'offscreen.cpp', 17 | ], 18 | language='c++', 19 | include_dirs=[np.get_include()], 20 | extra_compile_args=extra_compile_args, 21 | extra_link_args=extra_link_args 22 | ) 23 | ] 24 | ) 25 | 26 | 27 | -------------------------------------------------------------------------------- /external/mesh-fusion/librender/test.py: -------------------------------------------------------------------------------- 1 | import pyrender 2 | import numpy as np 3 | from matplotlib import pyplot 4 | import math 5 | 6 | # render settings 7 | img_h = 480 8 | img_w = 480 9 | fx = 480. 10 | fy = 480. 11 | cx = 240 12 | cy = 240 13 | 14 | def model(): 15 | 16 | # note that xx is height here! 
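    # a minimal test model: an axis-aligned cube with side length 0.4 centred
    # at the origin, made of 8 vertices and 12 triangles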
17 | xx = -0.2 18 | yy = -0.2 19 | zz = -0.2 20 | 21 | v000 = (xx, yy, zz) # 0 22 | v001 = (xx, yy, zz + 0.4) # 1 23 | v010 = (xx, yy + 0.4, zz) # 2 24 | v011 = (xx, yy + 0.4, zz + 0.4) # 3 25 | v100 = (xx + 0.4, yy, zz) # 4 26 | v101 = (xx + 0.4, yy, zz + 0.4) # 5 27 | v110 = (xx + 0.4, yy + 0.4, zz) # 6 28 | v111 = (xx + 0.4, yy + 0.4, zz + 0.4) # 7 29 | 30 | f1 = [0, 2, 4] 31 | f2 = [4, 2, 6] 32 | f3 = [1, 3, 5] 33 | f4 = [5, 3, 7] 34 | f5 = [0, 1, 2] 35 | f6 = [1, 3, 2] 36 | f7 = [4, 5, 7] 37 | f8 = [4, 7, 6] 38 | f9 = [4, 0, 1] 39 | f10 = [4, 5, 1] 40 | f11 = [2, 3, 6] 41 | f12 = [3, 7, 6] 42 | 43 | vertices = [] 44 | vertices.append(v000) 45 | vertices.append(v001) 46 | vertices.append(v010) 47 | vertices.append(v011) 48 | vertices.append(v100) 49 | vertices.append(v101) 50 | vertices.append(v110) 51 | vertices.append(v111) 52 | 53 | faces = [] 54 | faces.append(f1) 55 | faces.append(f2) 56 | faces.append(f3) 57 | faces.append(f4) 58 | faces.append(f5) 59 | faces.append(f6) 60 | faces.append(f7) 61 | faces.append(f8) 62 | faces.append(f9) 63 | faces.append(f10) 64 | faces.append(f11) 65 | faces.append(f12) 66 | 67 | return vertices, faces 68 | 69 | def render(vertices, faces): 70 | 71 | x = 0 72 | y = math.pi/4 73 | z = 0 74 | R_x = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]]) 75 | R_y = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]]) 76 | R_z = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]]) 77 | R = R_z.dot(R_y.dot(R_x)) 78 | 79 | np_vertices = np.array(vertices).astype(np.float64) 80 | np_vertices = R.dot(np_vertices.T).T 81 | np_vertices[:, 2] += 1.5 82 | np_faces = np.array(faces).astype(np.float64) 83 | np_faces += 1 84 | 85 | depthmap, mask, img = pyrender.render(np_vertices.T.copy(), np_faces.T.copy(), np.array([fx, fy, cx, cy]), np.array([1., 2.]), np.array([img_h, img_w], dtype=np.int32)) 86 | pyplot.imshow(depthmap) 87 | pyplot.show() 88 | pyplot.imshow(img) 89 | pyplot.show() 90 | 91 | if __name__ == '__main__': 92 | vertices, faces = model() 93 | render(vertices, faces) 94 | -------------------------------------------------------------------------------- /external/mesh-fusion/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/external/mesh-fusion/screenshot.jpg -------------------------------------------------------------------------------- /external/mesh-fusion/simplification.mlx: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /im2mesh/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/im2mesh/__init__.py -------------------------------------------------------------------------------- /im2mesh/checkpoints.py: -------------------------------------------------------------------------------- 1 | import os 2 | import urllib 3 | import torch 4 | from torch.utils import model_zoo 5 | 6 | 7 | class CheckpointIO(object): 8 | ''' CheckpointIO class. 9 | 10 | It handles saving and loading checkpoints. 
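    Modules passed as keyword arguments (or registered later via
    register_modules) are serialized with their state_dict; any extra keyword
    arguments given to save() are stored alongside them and are returned as a
    dictionary of scalars by load(). A minimal usage sketch (all names are
    illustrative):

        ckpt_io = CheckpointIO('out/model', model=model, optimizer=optimizer)
        ckpt_io.save('model.pt', epoch_it=epoch_it, it=it)
        scalars = ckpt_io.load('model.pt')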
11 | 12 | Args: 13 | checkpoint_dir (str): path where checkpoints are saved 14 | ''' 15 | def __init__(self, checkpoint_dir='./chkpts', **kwargs): 16 | self.module_dict = kwargs 17 | self.checkpoint_dir = checkpoint_dir 18 | if not os.path.exists(checkpoint_dir): 19 | os.makedirs(checkpoint_dir) 20 | 21 | def register_modules(self, **kwargs): 22 | ''' Registers modules in current module dictionary. 23 | ''' 24 | self.module_dict.update(kwargs) 25 | 26 | def save(self, filename, **kwargs): 27 | ''' Saves the current module dictionary. 28 | 29 | Args: 30 | filename (str): name of output file 31 | ''' 32 | if not os.path.isabs(filename): 33 | filename = os.path.join(self.checkpoint_dir, filename) 34 | 35 | outdict = kwargs 36 | for k, v in self.module_dict.items(): 37 | outdict[k] = v.state_dict() 38 | torch.save(outdict, filename) 39 | 40 | def load(self, filename): 41 | '''Loads a module dictionary from local file or url. 42 | 43 | Args: 44 | filename (str): name of saved module dictionary 45 | ''' 46 | if is_url(filename): 47 | return self.load_url(filename) 48 | else: 49 | return self.load_file(filename) 50 | 51 | def load_file(self, filename): 52 | '''Loads a module dictionary from file. 53 | 54 | Args: 55 | filename (str): name of saved module dictionary 56 | ''' 57 | 58 | if not os.path.isabs(filename): 59 | filename = os.path.join(self.checkpoint_dir, filename) 60 | 61 | if os.path.exists(filename): 62 | print(filename) 63 | print('=> Loading checkpoint from local file...') 64 | state_dict = torch.load(filename) 65 | scalars = self.parse_state_dict(state_dict) 66 | return scalars 67 | else: 68 | raise FileExistsError 69 | 70 | def load_url(self, url): 71 | '''Load a module dictionary from url. 72 | 73 | Args: 74 | url (str): url to saved model 75 | ''' 76 | print(url) 77 | print('=> Loading checkpoint from url...') 78 | state_dict = model_zoo.load_url(url, progress=True) 79 | scalars = self.parse_state_dict(state_dict) 80 | return scalars 81 | 82 | def parse_state_dict(self, state_dict): 83 | '''Parse state_dict of model and return scalars. 84 | 85 | Args: 86 | state_dict (dict): State dict of model 87 | ''' 88 | 89 | for k, v in self.module_dict.items(): 90 | if k in state_dict: 91 | v.load_state_dict(state_dict[k]) 92 | else: 93 | print('Warning: Could not find %s in checkpoint!' 
% k) 94 | scalars = {k: v for k, v in state_dict.items() 95 | if k not in self.module_dict} 96 | return scalars 97 | 98 | def is_url(url): 99 | scheme = urllib.parse.urlparse(url).scheme 100 | return scheme in ('http', 'https') -------------------------------------------------------------------------------- /im2mesh/data/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from im2mesh.data.core import ( 3 | Shapes3dDataset, collate_remove_none, worker_init_fn 4 | ) 5 | from im2mesh.data.fields import ( 6 | IndexField, CategoryField, ImagesField, PointsField, 7 | VoxelsField, PointCloudField, MeshField, 8 | ) 9 | from im2mesh.data.transforms import ( 10 | PointcloudNoise, SubsamplePointcloud, 11 | SubsamplePoints 12 | ) 13 | from im2mesh.data.real import ( 14 | KittiDataset, OnlineProductDataset, 15 | ImageDataset, 16 | ) 17 | 18 | 19 | __all__ = [ 20 | # Core 21 | Shapes3dDataset, 22 | collate_remove_none, 23 | worker_init_fn, 24 | # Fields 25 | IndexField, 26 | CategoryField, 27 | ImagesField, 28 | PointsField, 29 | VoxelsField, 30 | PointCloudField, 31 | MeshField, 32 | # Transforms 33 | PointcloudNoise, 34 | SubsamplePointcloud, 35 | SubsamplePoints, 36 | # Real Data 37 | KittiDataset, 38 | OnlineProductDataset, 39 | ImageDataset, 40 | ] 41 | -------------------------------------------------------------------------------- /im2mesh/data/transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | # Transforms 5 | class PointcloudNoise(object): 6 | ''' Point cloud noise transformation class. 7 | 8 | It adds noise to point cloud data. 9 | 10 | Args: 11 | stddev (int): standard deviation 12 | ''' 13 | 14 | def __init__(self, stddev): 15 | self.stddev = stddev 16 | 17 | def __call__(self, data): 18 | ''' Calls the transformation. 19 | 20 | Args: 21 | data (dictionary): data dictionary 22 | ''' 23 | data_out = data.copy() 24 | points = data[None] 25 | noise = self.stddev * np.random.randn(*points.shape) 26 | noise = noise.astype(np.float32) 27 | data_out[None] = points + noise 28 | return data_out 29 | 30 | 31 | class SubsamplePointcloud(object): 32 | ''' Point cloud subsampling transformation class. 33 | 34 | It subsamples the point cloud data. 35 | 36 | Args: 37 | N (int): number of points to be subsampled 38 | ''' 39 | def __init__(self, N): 40 | self.N = N 41 | 42 | def __call__(self, data): 43 | ''' Calls the transformation. 44 | 45 | Args: 46 | data (dict): data dictionary 47 | ''' 48 | data_out = data.copy() 49 | points = data[None] 50 | normals = data['normals'] 51 | 52 | indices = np.random.randint(points.shape[0], size=self.N) 53 | data_out[None] = points[indices, :] 54 | data_out['normals'] = normals[indices, :] 55 | 56 | return data_out 57 | 58 | 59 | class SubsamplePoints(object): 60 | ''' Points subsampling transformation class. 61 | 62 | It subsamples the points data. 63 | 64 | Args: 65 | N (int): number of points to be subsampled 66 | ''' 67 | def __init__(self, N): 68 | self.N = N 69 | 70 | def __call__(self, data): 71 | ''' Calls the transformation. 
72 | 73 | Args: 74 | data (dictionary): data dictionary 75 | ''' 76 | points = data[None] 77 | occ = data['occ'] 78 | 79 | data_out = data.copy() 80 | if isinstance(self.N, int): 81 | idx = np.random.randint(points.shape[0], size=self.N) 82 | data_out.update({ 83 | None: points[idx, :], 84 | 'occ': occ[idx], 85 | }) 86 | else: 87 | Nt_out, Nt_in = self.N 88 | occ_binary = (occ >= 0.5) 89 | points0 = points[~occ_binary] 90 | points1 = points[occ_binary] 91 | 92 | idx0 = np.random.randint(points0.shape[0], size=Nt_out) 93 | idx1 = np.random.randint(points1.shape[0], size=Nt_in) 94 | 95 | points0 = points0[idx0, :] 96 | points1 = points1[idx1, :] 97 | points = np.concatenate([points0, points1], axis=0) 98 | 99 | occ0 = np.zeros(Nt_out, dtype=np.float32) 100 | occ1 = np.ones(Nt_in, dtype=np.float32) 101 | occ = np.concatenate([occ0, occ1], axis=0) 102 | 103 | volume = occ_binary.sum() / len(occ_binary) 104 | volume = volume.astype(np.float32) 105 | 106 | data_out.update({ 107 | None: points, 108 | 'occ': occ, 109 | 'volume': volume, 110 | }) 111 | return data_out 112 | -------------------------------------------------------------------------------- /im2mesh/dmc/__init__.py: -------------------------------------------------------------------------------- 1 | from im2mesh.dmc import ( 2 | config, generation, training, models 3 | ) 4 | 5 | __all__ = [ 6 | config, generation, training, models 7 | ] 8 | -------------------------------------------------------------------------------- /im2mesh/dmc/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from im2mesh.dmc import models, training, generation 3 | from im2mesh import data 4 | 5 | 6 | def get_model(cfg, device=None, **kwargs): 7 | encoder = cfg['model']['encoder'] 8 | decoder = cfg['model']['decoder'] 9 | c_dim = cfg['model']['c_dim'] 10 | encoder_kwargs = cfg['model']['encoder_kwargs'] 11 | decoder_kwargs = cfg['model']['decoder_kwargs'] 12 | 13 | encoder = models.encoder_dict[encoder]( 14 | **encoder_kwargs 15 | ) 16 | 17 | decoder = models.decoder_dict[decoder]( 18 | **decoder_kwargs 19 | ) 20 | 21 | model = models.DMC(decoder, encoder) 22 | model = model.to(device) 23 | return model 24 | 25 | 26 | def get_trainer(model, optimizer, cfg, device, **kwargs): 27 | input_type = cfg['data']['input_type'] 28 | out_dir = cfg['training']['out_dir'] 29 | vis_dir = os.path.join(out_dir, 'vis') 30 | num_voxels = cfg['model']['num_voxels'] 31 | weight_prior = cfg['model']['dmc_weight_prior'] 32 | 33 | trainer = training.Trainer( 34 | model, optimizer, device=device, input_type=input_type, 35 | vis_dir=vis_dir, num_voxels=num_voxels, 36 | weight_prior=weight_prior, 37 | ) 38 | return trainer 39 | 40 | 41 | def get_generator(model, cfg, device, **kwargs): 42 | num_voxels = cfg['model']['num_voxels'] 43 | 44 | generator = generation.Generator3D( 45 | model, device=device, num_voxels=num_voxels 46 | ) 47 | return generator 48 | 49 | 50 | def get_data_fields(split, cfg, **kwargs): 51 | with_transforms = cfg['data']['with_transforms'] 52 | # TODO: put this into config 53 | pointcloud_n = 3000 54 | pointcloud_transform = data.SubsamplePointcloud(pointcloud_n) 55 | 56 | fields = {} 57 | fields['pointcloud'] = data.PointCloudField( 58 | cfg['data']['pointcloud_file'], pointcloud_transform, 59 | with_transforms=with_transforms 60 | ) 61 | 62 | return fields 63 | -------------------------------------------------------------------------------- /im2mesh/dmc/generation.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import trimesh 4 | from im2mesh.dmc.utils.pred2mesh import pred_to_mesh_max 5 | from im2mesh.dmc.ops.occupancy_to_topology import OccupancyToTopology 6 | from im2mesh.dmc.ops.table import get_accept_topology 7 | 8 | 9 | class Generator3D(object): 10 | def __init__(self, model, device=None, num_voxels=32): 11 | self.model = model.to(device) 12 | self.device = device 13 | self.num_voxels = num_voxels 14 | self.vis_topology = torch.LongTensor(get_accept_topology(4)) 15 | 16 | def generate_mesh(self, data): 17 | self.model.eval() 18 | device = self.device 19 | 20 | inputs = data.get('inputs', torch.empty(1, 0)).to(device) 21 | 22 | inputs = self.num_voxels * (inputs / 1.2 + 0.5) 23 | 24 | with torch.no_grad(): 25 | offset, topology, occupancy = self.model(inputs) 26 | 27 | offset = offset.squeeze() 28 | topology = topology.squeeze() 29 | topology = topology[:, self.vis_topology] 30 | 31 | vertices, faces = pred_to_mesh_max(offset, topology) 32 | faces = faces.astype(np.int64) 33 | 34 | vertices = 1.2 * (vertices / self.num_voxels - 0.5) 35 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False) 36 | return mesh 37 | 38 | 39 | -------------------------------------------------------------------------------- /im2mesh/dmc/models/__init__.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from im2mesh.dmc.models import encoder, decoder 3 | 4 | 5 | decoder_dict = { 6 | 'unet': decoder.UNetDecoder 7 | } 8 | 9 | encoder_dict = { 10 | 'pointnet_local': encoder.PointNetLocal, 11 | } 12 | 13 | class DMC(nn.Module): 14 | def __init__(self, decoder, encoder): 15 | super().__init__() 16 | self.decoder = decoder 17 | self.encoder = encoder 18 | 19 | def forward(self, x): 20 | c = self.encoder(x) 21 | offset, topology, occupancy = self.decoder(c) 22 | 23 | return offset, topology, occupancy 24 | -------------------------------------------------------------------------------- /im2mesh/dmc/models/encoder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | 4 | from im2mesh.dmc.ops.grid_pooling import GridPooling 5 | 6 | 7 | class PointNetLocal(nn.Module): 8 | ''' Point Net Local Conditional Network from the Deep Marching Cubes paper. 
9 | 10 | It applies two fully connected layers to the input points (dim 3) in a 11 | 1D Convolutional Layer fashion to avoid to specify the number of 12 | incoming points 13 | ''' 14 | def __init__(self, c_dim=256, out_dim=16, cell_W=16, cell_H=16, cell_D=16): 15 | super().__init__() 16 | self.cell_W = cell_W 17 | self.cell_H = cell_H 18 | self.cell_D = cell_D 19 | 20 | # TODO change gridpooling input to be compatible to single values of W H D 21 | self.gridshape = torch.cuda.LongTensor([cell_W, cell_H, cell_D]) 22 | actvn = nn.ReLU() 23 | self.grid_pool = GridPooling(self.gridshape) 24 | self.conv1 = nn.Sequential( 25 | nn.Conv1d(3, c_dim, 1), actvn 26 | ) 27 | #self.conv2 = nn.Sequential( 28 | # nn.Conv1d(c_dim, out_dim, 1), actvn 29 | #) 30 | self.conv2 = nn.Conv1d(c_dim, out_dim, 1) 31 | 32 | def forward(self, x): 33 | pts = x 34 | feats = x.transpose(1, 2) # b_size x 3 x num_points 35 | feats = self.conv1(feats) # b_size x c_dim x num_points 36 | feats = self.conv2(feats) # b_size x out_dim x num_points 37 | feats = feats.transpose(1, 2) # b_size x num_points x out_dim 38 | 39 | out = self.point_to_cell(pts, feats, self.cell_W, self.cell_H, self.cell_D) 40 | return out 41 | 42 | def point_to_cell(self, pts, feat, W, H, D, expand=1): 43 | """ perform maxpool on points in every cell set zero vector if cell is 44 | empty if expand=1 then return (N+1)x(N+1)x(N+1), for dmc xpand=0 then 45 | return NxNxN, for occupancy/sdf baselines 46 | """ 47 | batchsize = feat.size()[0] 48 | C = feat.size()[2] 49 | 50 | feat_cell = [] 51 | # grid_shape = torch.LongTensor([W, H, D]) 52 | for k in range(batchsize): 53 | feat_cell.append(self.grid_pool(feat[k, :, :], pts[k, :, :])) 54 | 55 | feat_cell = torch.stack(feat_cell, dim=0) 56 | 57 | # TODO check if this view is compatible to output of grid pool 58 | feat_cell = torch.transpose(feat_cell, 1, 2).contiguous().view( 59 | -1, C, W, H, D) 60 | if expand == 0: 61 | return feat_cell 62 | 63 | # expand to (W+1)x(H+1) 64 | curr_size = feat_cell.size() 65 | feat_cell_exp = torch.zeros( 66 | curr_size[0], curr_size[1], curr_size[2]+1, curr_size[3]+1, 67 | curr_size[4]+1).to(pts.device) 68 | feat_cell_exp[:, :, :-1, :-1, :-1] = feat_cell 69 | return feat_cell_exp 70 | -------------------------------------------------------------------------------- /im2mesh/dmc/ops/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autonomousvision/occupancy_networks/406f79468fb8b57b3e76816aaa73b1915c53ad22/im2mesh/dmc/ops/__init__.py -------------------------------------------------------------------------------- /im2mesh/dmc/ops/cpp_modules/old/commons.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "commons.h" 3 | 4 | /** 5 | * convert vertex displacement field to vertices locations 6 | * params: 7 | * offset vertex displacement field, 3xWxHxD 8 | * x indice of a cell in the full grid 9 | * y indice of a cell in the full grid 10 | * z indice of a cell in the full grid 11 | * return: 12 | * vertices the location of 12 vertices for the specific cell, 3x12 13 | * 14 | */ 15 | THFloatTensor* offset_to_vertices(THFloatTensor *offset, int x, int y, int z){ 16 | THFloatTensor *vertices = THFloatTensor_newWithSize2d(3, 12); 17 | 18 | // #0 19 | THFloatTensor_set2d(vertices, 0, 0, 0.5-THFloatTensor_get4d(offset, 0, x+1, y+1, z )); 20 | THFloatTensor_set2d(vertices, 1, 0, 1.0); 21 | THFloatTensor_set2d(vertices, 2, 0, 0.0); 22 | // #1 23 | 
THFloatTensor_set2d(vertices, 0, 1, 1.0); 24 | THFloatTensor_set2d(vertices, 1, 1, 0.5-THFloatTensor_get4d(offset, 1, x+1, y+1, z )); 25 | THFloatTensor_set2d(vertices, 2, 1, 0.0); 26 | // #2 27 | THFloatTensor_set2d(vertices, 0, 2, 0.5-THFloatTensor_get4d(offset, 0, x+1, y , z )); 28 | THFloatTensor_set2d(vertices, 1, 2, 0.0); 29 | THFloatTensor_set2d(vertices, 2, 2, 0.0); 30 | // #3 31 | THFloatTensor_set2d(vertices, 0, 3, 0.0); 32 | THFloatTensor_set2d(vertices, 1, 3, 0.5-THFloatTensor_get4d(offset, 1, x , y+1, z )); 33 | THFloatTensor_set2d(vertices, 2, 3, 0.0); 34 | 35 | // #4 36 | THFloatTensor_set2d(vertices, 0, 4, 0.5-THFloatTensor_get4d(offset, 0, x+1, y+1, z+1)); 37 | THFloatTensor_set2d(vertices, 1, 4, 1.0); 38 | THFloatTensor_set2d(vertices, 2, 4, 1.0); 39 | // #5 40 | THFloatTensor_set2d(vertices, 0, 5, 1.0); 41 | THFloatTensor_set2d(vertices, 1, 5, 0.5-THFloatTensor_get4d(offset, 1, x+1, y+1, z+1)); 42 | THFloatTensor_set2d(vertices, 2, 5, 1.0); 43 | // #6 44 | THFloatTensor_set2d(vertices, 0, 6, 0.5-THFloatTensor_get4d(offset, 0, x+1, y , z+1)); 45 | THFloatTensor_set2d(vertices, 1, 6, 0.0); 46 | THFloatTensor_set2d(vertices, 2, 6, 1.0); 47 | // #7 48 | THFloatTensor_set2d(vertices, 0, 7, 0.0); 49 | THFloatTensor_set2d(vertices, 1, 7, 0.5-THFloatTensor_get4d(offset, 1, x , y+1, z+1)); 50 | THFloatTensor_set2d(vertices, 2, 7, 1.0); 51 | 52 | // #8 53 | THFloatTensor_set2d(vertices, 0, 8, 0.0); 54 | THFloatTensor_set2d(vertices, 1, 8, 1.0); 55 | THFloatTensor_set2d(vertices, 2, 8, 0.5-THFloatTensor_get4d(offset, 2, x , y+1, z+1)); 56 | // #9 57 | THFloatTensor_set2d(vertices, 0, 9, 1.0); 58 | THFloatTensor_set2d(vertices, 1, 9, 1.0); 59 | THFloatTensor_set2d(vertices, 2, 9, 0.5-THFloatTensor_get4d(offset, 2, x+1, y+1, z+1)); 60 | // #10 61 | THFloatTensor_set2d(vertices, 0, 10, 1.0); 62 | THFloatTensor_set2d(vertices, 1, 10, 0.0); 63 | THFloatTensor_set2d(vertices, 2, 10, 0.5-THFloatTensor_get4d(offset, 2, x+1, y , z+1)); 64 | // #11 65 | THFloatTensor_set2d(vertices, 0, 11, 0.0); 66 | THFloatTensor_set2d(vertices, 1, 11, 0.0); 67 | THFloatTensor_set2d(vertices, 2, 11, 0.5-THFloatTensor_get4d(offset, 2, x , y , z+1)); 68 | return vertices; 69 | } 70 | 71 | /** 72 | * get points in a specific cell 73 | * params: 74 | * points all points in the grid, Nx3 75 | * i the offset of the specific cell 76 | * j the offset of the specific cell 77 | * k the offset of the specific cell 78 | * return: 79 | * indices a binary 1D tensor indicating if a point is in a specific cell or not, N 80 | * 81 | */ 82 | THLongTensor* points_in_grid(THFloatTensor *points, float i, float j, float k){ 83 | int N=THFloatTensor_size(points, 0); 84 | THLongTensor *indices = THLongTensor_new(); 85 | 86 | THByteTensor *mask = THByteTensor_newWithSize1d(N); 87 | THByteTensor_zero(mask); 88 | for (int p=0; p= i && THFloatTensor_get2d(points, p, 0) < i+1 && 90 | THFloatTensor_get2d(points, p, 1) >= j && THFloatTensor_get2d(points, p, 1) < j+1 && 91 | THFloatTensor_get2d(points, p, 2) >= k && THFloatTensor_get2d(points, p, 2) < k+1) 92 | THByteTensor_set1d(mask, p, 1); 93 | } 94 | 95 | THByteTensor_nonzero(indices, mask); 96 | 97 | THLongTensor_squeeze(indices, indices); 98 | THByteTensor_free(mask); 99 | return indices; 100 | } 101 | -------------------------------------------------------------------------------- /im2mesh/dmc/ops/cpp_modules/old/pred_to_mesh.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "commons.h" 5 | 6 | 
7 | // considered all topologies with 4 triangles during visualization 8 | static int visTopology[2][140]={{0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 19, 23, 25, 27, 29, 31, 32, 34, 35, 38, 39, 43, 46, 47, 48, 49, 50, 51, 54, 55, 57, 59, 63, 64, 68, 70, 71, 76, 77, 78, 79, 95, 96, 98, 99, 100, 102, 103, 108, 110, 111, 112, 113, 114, 115, 116, 118, 119, 123, 126, 127, 128, 136, 137, 139, 140, 141, 142, 143, 144, 145, 147, 152, 153, 155, 156, 157, 159, 175, 176, 177, 178, 179, 183, 184, 185, 187, 189, 191, 192, 196, 198, 200, 201, 204, 205, 206, 207, 208, 209, 212, 216, 217, 219, 220, 221, 222, 223, 224, 226, 228, 230, 231, 232, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}, 9 | {0, 1, 1, 2, 1, 2, 3, 1, 2, 3, 2, 3, 3, 2, 1, 2, 3, 4, 3, 4, 4, 3, 1, 2, 3, 3, 4, 4, 4, 3, 2, 3, 3, 2, 4, 3, 4, 3, 2, 1, 2, 3, 4, 3, 4, 4, 3, 4, 2, 3, 4, 3, 2, 3, 4, 3, 2, 3, 4, 4, 3, 4, 3, 2, 4, 4, 1, 1, 2, 3, 4, 3, 4, 4, 3, 2, 3, 4, 3, 2, 3, 4, 3, 2, 4, 3, 4, 4, 3, 4, 4, 3, 2, 4, 1, 2, 3, 4, 3, 4, 2, 3, 3, 2, 3, 4, 4, 4, 3, 4, 3, 2, 4, 1, 3, 4, 4, 3, 4, 4, 3, 4, 2, 1, 2, 3, 3, 2, 3, 4, 2, 1, 3, 2, 4, 1, 2, 1, 1, 0}}; 10 | 11 | /** 12 | * convert the topology probability and vertex displacement field to a mesh by 13 | * selecting the topology with maximum probability in every cell 14 | * params: 15 | * offset vertex displacement field 16 | * topology topology probabilities 17 | * vertices_all vertices locations for all triangles in topologies with maximum probabilities 18 | * note there might be duplications and the unique vertices will be extracted afterwards 19 | * faces_all faces represented by the indices in vertices_all 20 | * vertice_number record the number of vertices as we initialzed the vertices_all with a fixed length 21 | * face_number record the number of faces as we initialized the faces_all with a fixed length 22 | */ 23 | int pred_to_mesh(THFloatTensor offset, THLongTensor *topology, THFloatTensor *vertices_all, THFloatTensor *faces_all, THLongTensor *vertice_number, THLongTensor *face_number){ 24 | // data format check 25 | if (THFloatTensor_nDimension(offset)!=4 || THLongTensor_nDimension(topology)!=3 ){ 26 | printf("Invalid nDimension!\n"); 27 | printf("Expected 4, 3, received %d, %d \n", THFloatTensor_nDimension(offset), THLongTensor_nDimension(topology)); 28 | return 0; 29 | } 30 | int W,H,D; 31 | W = THFloatTensor_size(offset,1)-1; 32 | H = THFloatTensor_size(offset,2)-1; 33 | D = THFloatTensor_size(offset,3)-1; 34 | 35 | int vertice_cnt=0; 36 | int face_cnt=0; 37 | 38 | for (int i=0; i 5 | #include 6 | 7 | 8 | // Curvature constraint 9 | void curvature_constraint_kernel_forward( 10 | at::Tensor offset, 11 | at::Tensor topology, 12 | at::Tensor xTable, 13 | at::Tensor yTable, 14 | at::Tensor zTable, 15 | at::Tensor innerTable, 16 | at::Tensor loss_x, 17 | at::Tensor loss_y, 18 | at::Tensor loss_z, 19 | at::Tensor loss_inner); 20 | 21 | 22 | void curvature_constraint_kernel_backward( 23 | at::Tensor grad_output, 24 | at::Tensor offset, 25 | at::Tensor topology, 26 | at::Tensor xTable, 27 | at::Tensor yTable, 28 | at::Tensor zTable, 29 | at::Tensor innerTable, 30 | at::Tensor grad_offset); 31 | 32 | // Grid pooling 33 | void grid_pooling_kernel_forward( 34 | at::Tensor point, 35 | at::Tensor feat_points, 36 | at::Tensor shape, 37 | at::Tensor feat_cell, 38 | at::Tensor indices); 39 | 40 | void grid_pooling_kernel_backward( 41 | at::Tensor grad_output, 42 | at::Tensor shape, 43 | at::Tensor indices, 44 | at::Tensor 
grad_feat_points); 45 | 46 | // Occ2Topo 47 | void occupancy_to_topology_kernel_forward( 48 | at::Tensor occupancy, 49 | at::Tensor topology ); 50 | 51 | void occupancy_to_topology_kernel_backward( 52 | at::Tensor grad_output, 53 | at::Tensor occupancy, 54 | at::Tensor topology, 55 | at::Tensor grad_occupancy); 56 | 57 | // OccConstraint 58 | void occupancy_connectivity_kernel_forward( 59 | at::Tensor occupancy, 60 | at::Tensor loss); 61 | 62 | void occupancy_connectivity_kernel_backward( 63 | at::Tensor grad_output, 64 | at::Tensor occupancy, 65 | at::Tensor grad_occupancy); 66 | 67 | // Points Triangle distance 68 | void point_topology_distance_kernel_forward( 69 | at::Tensor offset, 70 | at::Tensor points, 71 | at::Tensor distances, 72 | at::Tensor indices_all); 73 | 74 | void point_topology_distance_kernel_backward( 75 | at::Tensor grad_output, 76 | at::Tensor offset, 77 | at::Tensor points, 78 | at::Tensor indices_all, 79 | at::Tensor grad_offset); 80 | 81 | #endif -------------------------------------------------------------------------------- /im2mesh/dmc/ops/tests/test_curvature.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | import sys 5 | 6 | sys.path.append('../../../..') 7 | from im2mesh.dmc.ops.tests.loss_autograd import LossAutoGrad 8 | from im2mesh.dmc.ops.curvature_constraint import CurvatureConstraint 9 | import torch.nn.functional as F 10 | import numpy as np 11 | import time 12 | 13 | # check the cuda extension or c extension 14 | 15 | print ("Testing CUDA extension...") 16 | dtype = torch.cuda.FloatTensor 17 | 18 | 19 | # autograd loss 20 | num_cells = 4 21 | len_cell = 1.0 22 | W = H = D = num_cells 23 | 24 | loss_autograd = LossAutoGrad(num_cells, len_cell) 25 | 26 | 27 | # cffi loss 28 | class SmoothLoss(nn.Module): 29 | def __init__(self): 30 | super(SmoothLoss, self).__init__() 31 | self.smoothLoss = CurvatureConstraint() 32 | 33 | def forward(self, offset, topology): 34 | return self.smoothLoss(offset, topology) 35 | 36 | 37 | if __name__ == '__main__': 38 | 39 | # generate offset and topology with relatively low-dimension 40 | print ("=========== Input =============") 41 | T = 96 42 | W = num_cells 43 | H = num_cells 44 | D = num_cells 45 | offset = Variable((torch.rand(3, W+1, H+1, D+1)).type(dtype) * 0.1, requires_grad=True) 46 | topology = Variable(torch.rand(W*H*D, T).type(dtype), requires_grad=True) 47 | #print (offset) 48 | #print (topology) 49 | 50 | loss_cffi = SmoothLoss() 51 | l = loss_cffi(offset, F.softmax(topology, dim=1)) 52 | l.backward() 53 | offset.grad.data.zero_() 54 | 55 | # evaluating the running time of the cffi extension 56 | print ("============= cffi ============") 57 | tf_c = time.time() 58 | l = loss_cffi(offset, F.softmax(topology, dim=1)) 59 | print ("cffi loss:") 60 | print (l) 61 | tf_c = time.time()-tf_c 62 | 63 | 64 | tb_c = time.time() 65 | l.backward() 66 | print ("cffi gradient:") 67 | print( offset.grad) 68 | tb_c = time.time()-tb_c 69 | grad_np = np.copy(offset.grad.data.cpu().numpy()) 70 | 71 | 72 | # evaluating the running time of the autograd version 73 | print ("============= auto ============") 74 | tf_py = time.time() 75 | l_auto = loss_autograd.loss_on_curvature_autograd(offset, topology) 76 | print ("auto loss:") 77 | print (l_auto) 78 | tf_py = time.time()-tf_py 79 | 80 | offset.grad.data.zero_() 81 | tb_py = time.time() 82 | l_auto.backward() 83 | print ("auto grad:") 84 | print (offset.grad) 
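    # note: the two prints above sit inside the timed region, so the reported
    # autograd backward time also includes console output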
85 | tb_py = time.time()-tb_py 86 | grad_auto_np = np.copy(offset.grad.data.cpu().numpy()) 87 | assert np.sum(np.abs(grad_auto_np)) and np.sum(np.abs(grad_np)) != 0.0 88 | # print the loss and grad difference and the time comparison 89 | print ("========== summary ===========") 90 | print ("Forward difference between cffi and auto: ", (l-l_auto).data.cpu().numpy()) 91 | print ("Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np))) 92 | print ("Backward difference between cffi and auto: ", np.mean(np.abs(grad_np-grad_auto_np))) 93 | 94 | print ("cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c)) 95 | print ("auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py)) 96 | print ("ratio: ", (tf_py+tb_py)/(tf_c + tb_c)) -------------------------------------------------------------------------------- /im2mesh/dmc/ops/tests/test_distance.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.append('../../../..') 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.autograd import Variable 8 | import time 9 | import numpy as np 10 | import resource 11 | 12 | from im2mesh.dmc.ops.tests.loss_autograd import LossAutoGrad 13 | from im2mesh.dmc.ops.point_triangle_distance import PointTriangleDistance 14 | 15 | 16 | print("Testing CUDA extension...") 17 | dtype = torch.cuda.FloatTensor 18 | dtype_long = torch.cuda.LongTensor 19 | num_cells = 4 20 | # autograd loss 21 | loss_autograd = LossAutoGrad(num_cells, 1.0) 22 | 23 | multiGrids = PointTriangleDistance() 24 | 25 | 26 | if __name__ == '__main__': 27 | 28 | print("=========== Input =============") 29 | point = Variable(torch.rand(10, 3).view(-1,3).type(dtype) * 0.9) * num_cells 30 | offset = Variable(torch.zeros(3, num_cells+1, num_cells+1, num_cells+1).type(dtype)*0.5, requires_grad=True) 31 | print(point.shape) 32 | print(offset.shape) 33 | 34 | print("============= cuda extension ============") 35 | # forward 36 | tf_c = time.time() 37 | distance = multiGrids.forward(offset, point) 38 | tf_c = time.time() - tf_c 39 | distance_np = distance.data.cpu().numpy() 40 | print("cffi distance:") 41 | print(distance_np.shape) 42 | 43 | weight_rnd = Variable(torch.rand(distance.size()).type(dtype), requires_grad=False) 44 | distance_sum = torch.sum(torch.mul(distance, weight_rnd)) 45 | 46 | # backward 47 | tb_c = time.time() 48 | grad = distance_sum.backward() 49 | tb_c = time.time() - tb_c 50 | offset_np = np.copy(offset.grad.data.cpu().numpy()) 51 | 52 | print("cffi grad:") 53 | print(offset_np.shape) 54 | 55 | print("============= auto ============") 56 | # forward 57 | tf_py = time.time() 58 | distance_auto = loss_autograd.loss_point_to_mesh_distance_autograd(offset, point) 59 | tf_py = time.time()-tf_py 60 | distance_auto_np = distance_auto.data.cpu().numpy() 61 | print("auto distance:") 62 | print(distance_auto_np.shape) 63 | weight_rnd = Variable(weight_rnd.data) 64 | distance_sum_auto = torch.sum(torch.mul(distance_auto, weight_rnd)) 65 | 66 | # backward 67 | offset.grad.data.zero_() 68 | 69 | tb_py = time.time() 70 | distance_sum_auto.backward() 71 | tb_py = time.time() - tb_py 72 | print("auto grad: ") 73 | offset_auto_np = np.copy(offset.grad.data.cpu().numpy()) 74 | print(offset_auto_np.shape) 75 | 76 | print("========== summary ===========") 77 | print("Forward difference between cffi and auto: "+str(np.sum(np.abs(distance_np[:,:-1]-distance_auto_np[:,:-1])))) 78 | print("Backward 
difference between cffi and auto: "+str(np.sum(np.abs(offset_np-offset_auto_np)))) -------------------------------------------------------------------------------- /im2mesh/dmc/ops/tests/test_occupancy_connectivity.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | sys.path.append('../../../..') 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.autograd import Variable 8 | 9 | import time 10 | import numpy as np 11 | from im2mesh.dmc.ops.occupancy_connectivity import OccupancyConnectivity 12 | #from loss import Loss 13 | #from loss_autograd import LossAutoGrad 14 | #from parse_args import parse_args 15 | 16 | # check the cuda extension or c extension 17 | 18 | def loss_on_smoothness_autograd( occupancy): 19 | """ Compute the smoothness loss using pytorch, 20 | implemented for gradient check of the c/c++ extensions 21 | """ 22 | 23 | Wo=occupancy.size()[0] 24 | Ho=occupancy.size()[1] 25 | Do=occupancy.size()[2] 26 | 27 | loss = 0 28 | for x_ in range(Wo): 29 | for y_ in range(Ho): 30 | for z_ in range(Do): 31 | # horizontal direction 32 | if x_