├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── VERSION ├── csrc ├── forest │ ├── forest.cpp │ ├── forest.h │ └── forest_cpp_api.h ├── lotd │ ├── include │ │ └── lotd │ │ │ ├── if_constexpr.hpp │ │ │ ├── linear_interpolate.cuh │ │ │ ├── lotd_cuda.h │ │ │ ├── lotd_encoding.h │ │ │ ├── lotd_forest.h │ │ │ ├── lotd_hash_only.h │ │ │ ├── lotd_torch_api.h │ │ │ └── lotd_types.h │ └── src │ │ ├── compile_split_1.cu │ │ ├── compile_split_2.cu │ │ ├── compile_split_3.cu │ │ ├── lotd.cpp │ │ └── lotd_torch_api.cu ├── occ_grid │ ├── include │ │ └── occ_grid │ │ │ ├── cpp_api.h │ │ │ ├── helpers_contraction.h │ │ │ ├── helpers_cuda.h │ │ │ ├── helpers_march.h │ │ │ └── helpers_math.h │ └── src │ │ ├── batched_marching.cu │ │ ├── forest_marching.cu │ │ ├── occ_grid.cpp │ │ └── ray_marching.cu ├── pack_ops │ ├── pack_ops.cpp │ ├── pack_ops.h │ └── pack_ops_cuda.cu ├── permuto │ ├── include │ │ └── permuto │ │ │ ├── permuto.h │ │ │ └── permuto_cuda.h │ └── src │ │ ├── compile_split_1.cu │ │ ├── compile_split_2.cu │ │ ├── compile_split_3.cu │ │ ├── compile_split_4.cu │ │ ├── compile_split_5.cu │ │ ├── compile_split_6.cu │ │ ├── permuto.cpp │ │ ├── permuto_cuda.cu │ │ └── unit_test.cpp └── sphere_trace │ ├── compile_flags.txt │ ├── include │ └── sphere_trace │ │ ├── common.cuh │ │ ├── dense_grid.cuh │ │ ├── ray_march.cuh │ │ └── sphere_tracer.cuh │ └── src │ ├── entry.cu │ ├── ray_march.cu │ └── sphere_tracer.cu ├── docs ├── camera_paths.md ├── config.md ├── pack_ops.md └── tutorials │ └── raymarching.md ├── externals ├── freqencoder │ ├── bindings.cpp │ ├── freqencoder.cu │ └── freqencoder.h ├── kaolin_spc_raytrace_fixed │ ├── README.md │ ├── include │ │ └── kaolin_spc_raytrace_fixed │ │ │ ├── check.h │ │ │ ├── raytrace.h │ │ │ ├── spc_math.h │ │ │ ├── spc_render_utils.cuh │ │ │ ├── spc_utils.cuh │ │ │ └── utils.h │ └── src │ │ ├── raytrace.cpp │ │ └── raytrace_cuda.cu ├── pytorch3d_knn │ ├── cutils.h │ ├── dispatch.cuh │ ├── ext.cpp │ ├── index_utils.cuh │ ├── knn.cu │ ├── knn.h │ ├── knn_cpu.cpp │ └── mink.cuh ├── r3dg_rasterization │ ├── .gitignore │ ├── CMakeLists.txt │ ├── README.md │ ├── cuda_rasterizer │ │ ├── auxiliary.h │ │ ├── backward.cu │ │ ├── backward.h │ │ ├── config.h │ │ ├── forward.cu │ │ ├── forward.h │ │ ├── rasterizer.h │ │ ├── rasterizer_impl.cu │ │ └── rasterizer_impl.h │ ├── ext.cpp │ ├── r3dg_rasterization │ │ └── __init__.py │ ├── rasterize_points.cu │ ├── rasterize_points.h │ ├── render_equation.cu │ ├── render_equation.h │ └── setup.py ├── shencoder │ ├── bindings.cpp │ ├── shencoder.cu │ └── shencoder.h ├── simple_knn │ ├── ext.cpp │ ├── setup.py │ ├── simple_knn.cu │ ├── simple_knn.h │ ├── simple_knn │ │ └── .gitkeep │ ├── spatial.cu │ └── spatial.h └── stb │ └── stbi_image_write.h ├── media ├── attr_camera.png ├── attr_transform.png ├── convert_png.bat ├── data_batched2.png ├── data_packed2.png ├── errormap@0.5x.jpg ├── multi_stage_upsample_occ.png ├── nr3d_raw.png ├── pack_ops │ ├── convert_png.bat │ ├── interleave_arange.png │ ├── interleave_arange_simple.png │ ├── interleave_linstep.png │ ├── merge_two_packs_sorted.png │ ├── merge_two_packs_sorted_a_includes_b.png │ ├── merge_two_packs_sorted_aligned.png │ ├── packed_add.png │ ├── packed_backward_diff_prepends.png │ ├── packed_cumprod.png │ ├── packed_cumsum.png │ ├── packed_diff.png │ ├── packed_diff_appends.png │ ├── packed_invert_cdf.png │ ├── packed_mean.png │ ├── packed_searchsorted.png │ ├── packed_searchsorted_packed.png │ ├── packed_sort.png │ └── packed_sum.png └── pack_ops_overview.png ├── nr3d_lib 
├── __init__.py ├── checkpoint.py ├── config.py ├── coordinates │ ├── README.md │ ├── __init__.py │ ├── conversion.py │ └── shapenet_to_srn.py ├── distributed.py ├── fmt.py ├── graphics │ ├── __init__.py │ ├── cameras │ │ ├── __init__.py │ │ ├── camera_paths.py │ │ ├── common.py │ │ ├── fisheye.py │ │ ├── normalize_views.py │ │ ├── opencv.py │ │ └── pinhole.py │ ├── nerf │ │ ├── __init__.py │ │ ├── nerf_ray_query.py │ │ └── nerf_utils.py │ ├── neus.py │ ├── neus │ │ ├── __init__.py │ │ ├── neus_ray_query.py │ │ └── neus_utils.py │ ├── pack_ops │ │ ├── __init__.py │ │ ├── pack_ops.py │ │ └── unit_test.py │ ├── pointcloud.py │ ├── raymarch │ │ ├── __init__.py │ │ ├── occgrid_raymarch.py │ │ └── octree_raymarch.py │ ├── raysample.py │ ├── raytest.py │ ├── sphere_trace.py │ ├── tetmesh.py │ ├── trianglemesh.py │ └── utils.py ├── gui │ ├── __init__.py │ ├── datalayers │ │ ├── __init__.py │ │ ├── forest_datalayers.py │ │ ├── occgrid_datalayers.py │ │ └── octree_datalayers.py │ ├── kaolin_wisp_modified │ │ ├── __init__.py │ │ ├── camera_control.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ └── primitives.py │ │ ├── cuda_guard.py │ │ ├── gizmos │ │ │ ├── __init__.py │ │ │ ├── gizmo.py │ │ │ └── ogl │ │ │ │ ├── __init__.py │ │ │ │ ├── axis_painter.py │ │ │ │ ├── primitives_painter.py │ │ │ │ └── world_grid.py │ │ ├── render_core.py │ │ └── wisp_app.py │ └── neural_renderer.py ├── logger.py ├── maths │ ├── __init__.py │ ├── chamfer_distance.py │ ├── common.py │ ├── depth_completion_np.py │ ├── depth_completion_pytorch.py │ ├── pytorch3d_knn.py │ ├── slerp.py │ ├── spherical_harmonics.py │ └── transforms.py ├── models │ ├── __init__.py │ ├── accelerations │ │ ├── __init__.py │ │ ├── occgrid │ │ │ ├── __init__.py │ │ │ ├── ema_batched.py │ │ │ ├── ema_single.py │ │ │ ├── getter.py │ │ │ ├── unit_test.py │ │ │ └── utils.py │ │ ├── occgrid_accel │ │ │ ├── __init__.py │ │ │ ├── batched.py │ │ │ ├── batched_dynamic.py │ │ │ ├── dynamic.py │ │ │ ├── forest.py │ │ │ └── single.py │ │ └── utils.py │ ├── annealers.py │ ├── attributes │ │ ├── __init__.py │ │ ├── attr.py │ │ ├── camera_param.py │ │ ├── segment.py │ │ ├── transform.py │ │ └── unit_test.py │ ├── autodecoder.py │ ├── blocks │ │ ├── __init__.py │ │ └── mlp.py │ ├── embedders │ │ ├── __init__.py │ │ ├── sequential.py │ │ ├── sinusoidal_cuda │ │ │ ├── __init__.py │ │ │ └── freq.py │ │ ├── sinusoidal_pytorch.py │ │ └── spherical_harmonics │ │ │ ├── __init__.py │ │ │ └── sphere_harmonics.py │ ├── embeddings │ │ ├── __init__.py │ │ ├── embedding.py │ │ └── sequential.py │ ├── fields │ │ ├── __init__.py │ │ ├── nerf │ │ │ ├── __init__.py │ │ │ ├── lotd_nerf.py │ │ │ ├── mlp_nerf.py │ │ │ ├── permuto_nerf.py │ │ │ ├── renderer_mixin.py │ │ │ ├── tcnn_nerf.py │ │ │ └── utils.py │ │ ├── neus │ │ │ ├── __init__.py │ │ │ ├── lotd_neus.py │ │ │ ├── mlp_neus.py │ │ │ ├── permuto_neus.py │ │ │ ├── renderer_mixin.py │ │ │ └── variance.py │ │ └── sdf │ │ │ ├── __init__.py │ │ │ ├── lotd_sdf.py │ │ │ ├── mlp_sdf.py │ │ │ ├── permuto_sdf.py │ │ │ ├── renderer_mixin.py │ │ │ └── utils.py │ ├── fields_conditional │ │ ├── __init__.py │ │ ├── nerf │ │ │ ├── __init__.py │ │ │ └── style_lotd_nerf.py │ │ ├── neus │ │ │ ├── __init__.py │ │ │ ├── generative_permuto_neus.py │ │ │ ├── renderer_mixin.py │ │ │ └── style_lotd_neus.py │ │ └── sdf │ │ │ ├── __init__.py │ │ │ ├── dit_sdf.py │ │ │ ├── generative_permuto_sdf.py │ │ │ ├── style_lotd_sdf.py │ │ │ └── utils.py │ ├── fields_conditional_dynamic │ │ ├── __init__.py │ │ └── neus │ │ │ ├── __init__.py │ │ │ ├── 
dynamic_generative_permuto_neus.py │ │ │ └── renderer_mixin.py │ ├── fields_directvox │ │ ├── __init__.py │ │ ├── nerf │ │ ├── neus │ │ ├── sdf │ │ └── utils.py │ ├── fields_distant │ │ ├── __init__.py │ │ └── nerf │ │ │ ├── __init__.py │ │ │ ├── lotd_nerf.py │ │ │ ├── nerf.py │ │ │ ├── permuto_nerf.py │ │ │ └── renderer_mixin.py │ ├── fields_dynamic │ │ ├── __init__.py │ │ ├── common │ │ ├── nerf │ │ │ ├── __init__.py │ │ │ ├── emernerf.py │ │ │ └── renderer_mixin.py │ │ ├── neus │ │ │ ├── __init__.py │ │ │ ├── dynamic_permuto_neus.py │ │ │ ├── emernerf_neus.py │ │ │ └── renderer_mixin.py │ │ └── sdf │ │ │ ├── __init__.py │ │ │ └── dynamic_permuto_sdf.py │ ├── fields_forest │ │ ├── __init__.py │ │ ├── nerf │ │ │ ├── __init__.py │ │ │ ├── block_nerf.py │ │ │ ├── lotd_forest_nerf.py │ │ │ └── renderer_mixin.py │ │ ├── neus │ │ │ ├── __init__.py │ │ │ ├── lotd_forest_neus.py │ │ │ └── renderer_mixin.py │ │ ├── sdf │ │ │ ├── __init__.py │ │ │ ├── lotd_forest_sdf.py │ │ │ ├── renderer_mixin.py │ │ │ └── utils.py │ │ └── utils.py │ ├── grid_encodings │ │ ├── __init__.py │ │ ├── lotd │ │ │ ├── __init__.py │ │ │ ├── lotd.py │ │ │ ├── lotd_batched.py │ │ │ ├── lotd_batched_growers.py │ │ │ ├── lotd_cfg.py │ │ │ ├── lotd_encoding.py │ │ │ ├── lotd_forest.py │ │ │ ├── lotd_helpers.py │ │ │ └── tests │ │ │ │ ├── math_test.py │ │ │ │ ├── math_test_forest.py │ │ │ │ ├── test_extra_pos_nablas.py │ │ │ │ ├── test_extra_pos_nablas_refine.py │ │ │ │ ├── unit_test.py │ │ │ │ ├── unit_test_forest.py │ │ │ │ └── unit_test_grid_inds.py │ │ ├── multires_annealer.py │ │ ├── multires_decoder.py │ │ ├── permuto │ │ │ ├── __init__.py │ │ │ ├── generative_permuto_concat.py │ │ │ ├── mll.py │ │ │ ├── permuto.py │ │ │ ├── permuto_encoding.py │ │ │ └── tests │ │ │ │ ├── benchmark_save_intermediate.py │ │ │ │ ├── compare_save_intermediate.py │ │ │ │ ├── match_with_original.py │ │ │ │ ├── match_with_original_save_intermediate.py │ │ │ │ ├── math_test.py │ │ │ │ ├── unit_test.py │ │ │ │ └── unit_test_intermediate.py │ │ └── utils.py │ ├── importance.py │ ├── layers.py │ ├── loss │ │ ├── GEM.py │ │ ├── __init__.py │ │ ├── clip_loss.py │ │ ├── lpipsPyTorch │ │ │ ├── __init__.py │ │ │ └── modules │ │ │ │ ├── lpips.py │ │ │ │ ├── networks.py │ │ │ │ └── utils.py │ │ ├── recon.py │ │ ├── safe.py │ │ ├── ssim.py │ │ └── utils.py │ ├── model_base.py │ ├── modulations │ │ ├── __init__.py │ │ ├── filmsiren.py │ │ └── modulations.py │ ├── spatial │ │ ├── __init__.py │ │ ├── aabb.py │ │ ├── aabb_dynamic.py │ │ ├── batched.py │ │ ├── batched_dynamic.py │ │ ├── forest.py │ │ └── utils.py │ ├── tcnn_adapter.py │ ├── temporal │ │ └── flow │ ├── tetrahedral │ │ ├── __init__.py │ │ ├── dmtet.py │ │ ├── splatet.py │ │ └── unit_test.py │ └── utils.py ├── plot │ ├── __init__.py │ ├── plot_2d.py │ ├── plot_3d.py │ ├── plot_basic.py │ └── plot_dynamic.py ├── profile.py ├── tests │ ├── dbg_forest.pt │ └── test_kaolin.py └── utils.py ├── set_env.sh └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | **/__pycache__ 2 | *.pyc 3 | .spyproject/ 4 | .vscode/ 5 | .idea 6 | *.bk 7 | build/ 8 | dist/ 9 | *.egg-info/ 10 | *.egg 11 | backups/ 12 | 13 | nr3d_lib/bindings/ 14 | **/internal/ 15 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "externals/cub"] 2 | path = externals/cub 3 | url = git@github.com:NVIDIA/cub.git 4 | [submodule "externals/glm"] 5 | 
path = externals/glm 6 | url = git@github.com:g-truc/glm.git 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 ADG@PJLab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.6.0 -------------------------------------------------------------------------------- /csrc/forest/forest.cpp: -------------------------------------------------------------------------------- 1 | /** @file forest.cpp 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief Forest of blocks API bindings. 
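 * Exposes the ForestMeta fields and kaolin's fixed SPC raytracer (raytrace_cuda_fixed) to Python, as bound below.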
4 | */ 5 | 6 | #ifdef _MSC_VER 7 | #pragma warning(push, 0) 8 | #include 9 | #pragma warning(pop) 10 | #else 11 | #include 12 | #endif 13 | 14 | #ifdef snprintf 15 | #undef snprintf 16 | #endif 17 | 18 | #include "forest_cpp_api.h" 19 | #include "kaolin_spc_raytrace_fixed/raytrace.h" 20 | 21 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 22 | py::class_(m, "ForestMeta") 23 | .def(py::init<>()) 24 | .def_readwrite("octree", &ForestMeta::octree) 25 | .def_readwrite("exsum", &ForestMeta::exsum) 26 | .def_readwrite("block_ks", &ForestMeta::block_ks) 27 | .def_readwrite("world_block_size", &ForestMeta::world_block_size) 28 | .def_readwrite("world_origin", &ForestMeta::world_origin) 29 | .def_readwrite("resolution", &ForestMeta::resolution) 30 | .def_readwrite("n_trees", &ForestMeta::n_trees) 31 | .def_readwrite("level", &ForestMeta::level) 32 | .def_readwrite("level_poffset", &ForestMeta::level_poffset) 33 | .def_readwrite("continuity_enabled", &ForestMeta::continuity_enabled) 34 | ; 35 | m.def("raytrace_cuda_fixed", &kaolin::raytrace_cuda_fixed); 36 | } -------------------------------------------------------------------------------- /csrc/forest/forest.h: -------------------------------------------------------------------------------- 1 | /** @file forest.h 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief 4 | */ 5 | 6 | #pragma once 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #include 15 | #include 16 | #include 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #include "forest_cpp_api.h" 23 | 24 | // #include "kaolin_spc_raytrace_fixed/spc_utils.cuh" 25 | static __device__ __forceinline__ int32_t identify( 26 | const short3 k, 27 | const uint32_t level, 28 | const int32_t* prefix_sum, 29 | const uint8_t* octree) 30 | { 31 | // Modified from kaolin 32 | int maxval = (0x1 << level) - 1; // seems you could do this better using Morton codes 33 | // Check if in bounds 34 | if (k.x < 0 || k.y < 0 || k.z < 0 || k.x > maxval || k.y > maxval || k.z > maxval) { 35 | return -1; 36 | } 37 | int ord = 0; 38 | for (uint l = 0; l < level; l++) 39 | { 40 | uint depth = level - l - 1; 41 | uint mask = (0x1 << depth); 42 | uint child_idx = ((mask & k.x) << 2 | (mask & k.y) << 1 | (mask & k.z)) >> depth; 43 | uint8_t bits = octree[ord]; 44 | // if bit set, keep going 45 | if (bits & (0x1 << child_idx)) 46 | { 47 | // count set bits up to child - inclusive sum 48 | uint cnt = __popc(bits & ((0x2 << child_idx) - 1)); 49 | ord = prefix_sum[ord] + cnt; 50 | if (depth == 0) { 51 | return ord; 52 | } 53 | } 54 | else { 55 | return -1; 56 | } 57 | } 58 | return ord; // only if called with Level=0 59 | } 60 | 61 | struct ForestMetaRef { 62 | uint8_t* octree; 63 | int32_t* exsum; 64 | // int16_t* block_ks; 65 | short3* block_ks; 66 | 67 | float3 world_block_size; 68 | float3 world_origin; 69 | int3 resolution; 70 | uint32_t n_trees; 71 | uint32_t level; 72 | uint32_t level_poffset=0; 73 | 74 | bool continuity_enabled=true; 75 | 76 | ForestMetaRef(ForestMeta meta): 77 | level{meta.level}, level_poffset{meta.level_poffset}, n_trees{meta.n_trees}, continuity_enabled{meta.continuity_enabled} { 78 | world_block_size = make_float3((float)meta.world_block_size[0], (float)meta.world_block_size[1], (float)meta.world_block_size[2]); 79 | world_origin = make_float3((float)meta.world_origin[0], (float)meta.world_origin[1], (float)meta.world_origin[2]); 80 | resolution = make_int3(meta.resolution[0], meta.resolution[1], meta.resolution[2]); 81 | octree = meta.octree.data_ptr(); 82 | 
exsum = meta.exsum.data_ptr<int32_t>();
        block_ks = reinterpret_cast<short3*>(meta.block_ks.data_ptr<int16_t>());
    }

    __device__ int32_t map_block_ind(const int16_t idx[3]) const {
        short3 k = make_short3(idx[0], idx[1], idx[2]);
        return map_block_ind(k);
    }

    __device__ int32_t map_block_ind(const short3 k) const {
        int32_t pidx = identify(k, level, exsum, octree);
        // int32_t pidx = kaolin::identify(k, level, exsum, octree);
        return (pidx == -1) ? -1 : (pidx - (int32_t)level_poffset);
    }

};

-------------------------------------------------------------------------------- /csrc/forest/forest_cpp_api.h: --------------------------------------------------------------------------------

/** @file forest_cpp_api.h
 * @author Jianfei Guo, Shanghai AI Lab
 * @brief
 */

#pragma once

#include
#include
#include
#include
#include

#include

struct ForestMeta {
    // TODO: Remember to avoid duplication as much as possible

    // Needed for query state using short3 index
    at::Tensor octree;
    at::Tensor exsum;

    // Needed for conversions from blidx to block integer coords
    at::Tensor block_ks;

    std::vector world_block_size;
    std::vector world_origin;
    std::vector resolution;

    uint32_t n_trees=0;
    uint32_t level=0;
    uint32_t level_poffset=0;

    bool continuity_enabled=true;
};

-------------------------------------------------------------------------------- /csrc/lotd/include/lotd/lotd_types.h: --------------------------------------------------------------------------------

/** @file lotd_types.h
 * @author Jianfei Guo, Shanghai AI Lab
 * @brief LoTD basic common declarations.
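 * Declares the LoDType enum, case-insensitive string conversion helpers, and InterpolationType.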
 */

#pragma once

#include
#include
#include
#include
#include

namespace lotd {

enum class LoDType {
    // type          N-linear   abstract impl NOTE
    Dense,           // yes     Dense LoD, no param reuse
    VectorMatrix,    // yes     as in tensoRF; use the outer product of N x (N-2)-linears and N x (N-1)-linears
    VecZMatXoY,      // yes     modified VM from tensoRF; use the outer product of XoY x Z
    CP,              // yes     as in tensoRF; use the product of N linears
    CPfast,          // no      as in tensoRF; use the product of N linears
    NPlaneMul,       // yes     another type of CP decomposition; use the product of N x (N-1)-linears
    NPlaneSum,       // no      as in EG3D; use the sum of N x (N-1)-linears
    Hash             // yes     as in ngp
};


inline std::string to_lower(std::string str) {
    std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return (char)std::tolower(c); });
    return str;
}

inline std::string to_upper(std::string str) {
    std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return (char)std::toupper(c); });
    return str;
}
inline bool equals_case_insensitive(const std::string& str1, const std::string& str2) {
    return to_lower(str1) == to_lower(str2);
}

inline LoDType string_to_lod_type(const std::string& lod_type) {
    if (equals_case_insensitive(lod_type, "Dense")) {
        return LoDType::Dense;
    } else if ( equals_case_insensitive(lod_type, "Hash") ) {
        return LoDType::Hash;
    } else if (equals_case_insensitive(lod_type, "NPlane") || equals_case_insensitive(lod_type, "NPlaneSum")) {
        return LoDType::NPlaneSum;
    } else if (equals_case_insensitive(lod_type, "NPlaneMul")) {
        return LoDType::NPlaneMul;
    } else if (equals_case_insensitive(lod_type, "VectorMatrix") || equals_case_insensitive(lod_type, "VM")) {
        return LoDType::VectorMatrix;
    } else if (equals_case_insensitive(lod_type, "VecZMatXoY")) {
        return LoDType::VecZMatXoY;
    } else if (equals_case_insensitive(lod_type, "CPfast")) {
        return LoDType::CPfast;
    } else if (equals_case_insensitive(lod_type, "CP")) {
        return LoDType::CP;
    }

    throw std::runtime_error{std::string{"LoTDEncoding: Invalid lod type: "} + lod_type};
}

inline std::string to_string(LoDType lod_type) {
    switch (lod_type) {
        case LoDType::Dense: return "Dense";
        case LoDType::Hash: return "Hash";
        case LoDType::NPlaneSum: return "NPlaneSum";
        case LoDType::NPlaneMul: return "NPlaneMul";
        case LoDType::VectorMatrix: return "VectorMatrix";
        case LoDType::VecZMatXoY: return "VecZMatXoY";
        case LoDType::CPfast: return "CPfast";
        case LoDType::CP: return "CP";
        default: throw std::runtime_error{"LoTDEncoding: Invalid lod type"};
    }
}

enum class InterpolationType {
    Linear,
    // LinearAlignCorners,
    Smoothstep,
};

}

-------------------------------------------------------------------------------- /csrc/lotd/src/compile_split_1.cu: --------------------------------------------------------------------------------

/** @file compile_split_1.cu
 * @author Jianfei Guo, Shanghai AI Lab
 * @brief This file is for parallel compilation acceleration only.
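 * This split instantiates the N_POS_DIMS = 2 kernels; compile_split_2 and
 * compile_split_3 cover N_POS_DIMS = 3 and 4 respectively.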
4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | namespace lotd { 26 | namespace torch { 27 | 28 | template void lod_fwd_impl<2>(LoDMeta&,at::Tensor,at::Tensor,at::optional,at::optional,uint32_t, int32_t, bool, at::Tensor, at::Tensor); 29 | template void lod_bwd_impl<2>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t, bool,bool, at::Tensor,at::Tensor); 30 | template void lod_bwd_bwd_input_impl<2>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,bool,at::Tensor,at::Tensor,at::Tensor); 31 | 32 | } // namespace lotd::torch 33 | 34 | } // namespace lotd -------------------------------------------------------------------------------- /csrc/lotd/src/compile_split_2.cu: -------------------------------------------------------------------------------- 1 | /** @file compile_split_2.cu 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief This file is for parallel compilation acceleration only. 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | namespace lotd { 26 | namespace torch { 27 | 28 | template void lod_fwd_impl<3>(LoDMeta&,at::Tensor,at::Tensor,at::optional,at::optional,uint32_t, int32_t, bool, at::Tensor, at::Tensor); 29 | template void lod_bwd_impl<3>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t, bool,bool, at::Tensor,at::Tensor); 30 | template void lod_bwd_bwd_input_impl<3>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,bool,at::Tensor,at::Tensor,at::Tensor); 31 | 32 | } // namespace lotd::torch 33 | 34 | } // namespace lotd -------------------------------------------------------------------------------- /csrc/lotd/src/compile_split_3.cu: -------------------------------------------------------------------------------- 1 | /** @file compile_split_3.cu 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief This file is for parallel compilation acceleration only. 
4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | namespace lotd { 26 | namespace torch { 27 | 28 | template void lod_fwd_impl<4>(LoDMeta&,at::Tensor,at::Tensor,at::optional,at::optional,uint32_t, int32_t, bool, at::Tensor, at::Tensor); 29 | template void lod_bwd_impl<4>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t, bool,bool, at::Tensor,at::Tensor); 30 | template void lod_bwd_bwd_input_impl<4>(LoDMeta&,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,bool,at::Tensor,at::Tensor,at::Tensor); 31 | 32 | } // namespace lotd::torch 33 | 34 | } // namespace lotd -------------------------------------------------------------------------------- /csrc/occ_grid/include/occ_grid/cpp_api.h: -------------------------------------------------------------------------------- 1 | /** @file occ_grid.h 2 | * @brief 3 | * Modified from https://github.com/KAIR-BAIR/nerfacc 4 | * Copyright (c) 2022 Ruilong Li, UC Berkeley. 5 | */ 6 | 7 | #pragma once 8 | 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | enum ContractionType 15 | { 16 | AABB = 0, 17 | UN_BOUNDED_TANH = 1, 18 | UN_BOUNDED_SPHERE = 2, 19 | }; 20 | 21 | std::vector ray_marching( 22 | const at::Tensor rays_o, 23 | const at::Tensor rays_d, 24 | const at::Tensor t_min, 25 | const at::Tensor t_max, 26 | const at::Tensor roi, 27 | const at::Tensor grid_binary, 28 | const ContractionType contraction_type, 29 | const float step_size, 30 | const float max_step_size, 31 | const float dt_gamma, 32 | const uint32_t max_steps, 33 | const bool return_gidx); 34 | 35 | std::vector batched_ray_marching( 36 | const at::Tensor rays_o, 37 | const at::Tensor rays_d, 38 | const at::Tensor t_min, 39 | const at::Tensor t_max, 40 | const at::optional batch_inds_, 41 | const at::optional batch_data_size_, 42 | const at::Tensor roi, 43 | const at::Tensor grid_binary, 44 | const ContractionType type, 45 | const float step_size, 46 | const float max_step_size, 47 | const float dt_gamma, 48 | const uint32_t max_steps, 49 | const bool return_gidx); 50 | 51 | std::vector forest_ray_marching( 52 | const ForestMeta& forest, 53 | const at::Tensor rays_o, 54 | const at::Tensor rays_d, 55 | const at::Tensor t_min, 56 | const at::Tensor t_max, 57 | const at::Tensor seg_block_inds, 58 | const at::Tensor seg_entries, 59 | const at::Tensor seg_exits, 60 | const at::Tensor seg_pack_infos, 61 | const at::Tensor grid_binary, 62 | const float step_size, 63 | const float max_step_size, 64 | const float dt_gamma, 65 | const uint32_t max_steps, 66 | const bool return_gidx); -------------------------------------------------------------------------------- /csrc/occ_grid/include/occ_grid/helpers_contraction.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 Ruilong Li, UC Berkeley. 
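 * Coordinate contraction helpers: ROI <-> unit-cube mappings, plus the tanh and
 * MipNeRF360-style sphere contractions for unbounded scenes (see the functions below).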
3 | */ 4 | 5 | #pragma once 6 | 7 | #include "helpers_math.h" 8 | #include "cpp_api.h" 9 | 10 | inline __device__ __host__ float3 roi_to_unit( 11 | const float3 xyz, const float3 roi_min, const float3 roi_max) 12 | { 13 | // roi -> [0, 1]^3 14 | return (xyz - roi_min) / (roi_max - roi_min); 15 | } 16 | 17 | inline __device__ __host__ float3 unit_to_roi( 18 | const float3 xyz, const float3 roi_min, const float3 roi_max) 19 | { 20 | // [0, 1]^3 -> roi 21 | return xyz * (roi_max - roi_min) + roi_min; 22 | } 23 | 24 | inline __device__ __host__ float3 inf_to_unit_tanh( 25 | const float3 xyz, float3 roi_min, const float3 roi_max) 26 | { 27 | /** 28 | [-inf, inf]^3 -> [0, 1]^3 29 | roi -> cube of [0.25, 0.75]^3 30 | **/ 31 | float3 xyz_unit = roi_to_unit(xyz, roi_min, roi_max); // roi -> [0, 1]^3 32 | xyz_unit = xyz_unit - 0.5f; // roi -> [-0.5, 0.5]^3 33 | return make_float3(tanhf(xyz_unit.x), tanhf(xyz_unit.y), tanhf(xyz_unit.z)) * 0.5f + 0.5f; 34 | } 35 | 36 | inline __device__ __host__ float3 unit_to_inf_tanh( 37 | const float3 xyz, float3 roi_min, const float3 roi_max) 38 | { 39 | /** 40 | [0, 1]^3 -> [-inf, inf]^3 41 | cube of [0.25, 0.75]^3 -> roi 42 | **/ 43 | float3 xyz_unit = clamp( 44 | make_float3( 45 | atanhf(xyz.x * 2.0f - 1.0f), 46 | atanhf(xyz.y * 2.0f - 1.0f), 47 | atanhf(xyz.z * 2.0f - 1.0f)), 48 | -1e10f, 49 | 1e10f); 50 | xyz_unit = xyz_unit + 0.5f; 51 | xyz_unit = unit_to_roi(xyz_unit, roi_min, roi_max); 52 | return xyz_unit; 53 | } 54 | 55 | inline __device__ __host__ float3 inf_to_unit_sphere( 56 | const float3 xyz, const float3 roi_min, const float3 roi_max) 57 | { 58 | /** From MipNeRF360 59 | [-inf, inf]^3 -> sphere of [0, 1]^3; 60 | roi -> sphere of [0.25, 0.75]^3 61 | **/ 62 | float3 xyz_unit = roi_to_unit(xyz, roi_min, roi_max); // roi -> [0, 1]^3 63 | xyz_unit = xyz_unit * 2.0f - 1.0f; // roi -> [-1, 1]^3 64 | 65 | float norm_sq = dot(xyz_unit, xyz_unit); 66 | float norm = sqrt(norm_sq); 67 | if (norm > 1.0f) 68 | { 69 | xyz_unit = (2.0f - 1.0f / norm) * (xyz_unit / norm); 70 | } 71 | xyz_unit = xyz_unit * 0.25f + 0.5f; // [-1, 1]^3 -> [0.25, 0.75]^3 72 | return xyz_unit; 73 | } 74 | 75 | inline __device__ __host__ float3 unit_sphere_to_inf( 76 | const float3 xyz, const float3 roi_min, const float3 roi_max) 77 | { 78 | /** From MipNeRF360 79 | sphere of [0, 1]^3 -> [-inf, inf]^3; 80 | sphere of [0.25, 0.75]^3 -> roi 81 | **/ 82 | float3 xyz_unit = (xyz - 0.5f) * 4.0f; // [0.25, 0.75]^3 -> [-1, 1]^3 83 | 84 | float norm_sq = dot(xyz_unit, xyz_unit); 85 | float norm = sqrt(norm_sq); 86 | if (norm > 1.0f) 87 | { 88 | xyz_unit = xyz_unit / fmaxf((2.0f * norm - 1.0f * norm_sq), 1e-10f); 89 | } 90 | xyz_unit = xyz_unit * 0.5f + 0.5f; // [-1, 1]^3 -> [0, 1]^3 91 | xyz_unit = unit_to_roi(xyz_unit, roi_min, roi_max); // [0, 1]^3 -> roi 92 | return xyz_unit; 93 | } 94 | 95 | inline __device__ __host__ float3 apply_contraction( 96 | const float3 xyz, const float3 roi_min, const float3 roi_max, 97 | const ContractionType type) 98 | { 99 | switch (type) 100 | { 101 | default: 102 | case AABB: 103 | return roi_to_unit(xyz, roi_min, roi_max); 104 | case UN_BOUNDED_TANH: 105 | return inf_to_unit_tanh(xyz, roi_min, roi_max); 106 | case UN_BOUNDED_SPHERE: 107 | return inf_to_unit_sphere(xyz, roi_min, roi_max); 108 | } 109 | } 110 | 111 | inline __device__ __host__ float3 apply_contraction_inv( 112 | const float3 xyz, const float3 roi_min, const float3 roi_max, 113 | const ContractionType type) 114 | { 115 | switch (type) 116 | { 117 | default: 118 | case AABB: 119 | return 
unit_to_roi(xyz, roi_min, roi_max); 120 | case UN_BOUNDED_TANH: 121 | return unit_to_inf_tanh(xyz, roi_min, roi_max); 122 | case UN_BOUNDED_SPHERE: 123 | return unit_sphere_to_inf(xyz, roi_min, roi_max); 124 | } 125 | } -------------------------------------------------------------------------------- /csrc/occ_grid/include/occ_grid/helpers_cuda.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2022 Ruilong Li, UC Berkeley. 3 | */ 4 | 5 | #pragma once 6 | 7 | #include 8 | #include 9 | 10 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor") 11 | #define CHECK_CONTIGUOUS(x) \ 12 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 13 | #define CHECK_INPUT(x) \ 14 | CHECK_CUDA(x); \ 15 | CHECK_CONTIGUOUS(x) 16 | #define CUDA_GET_THREAD_ID(tid, Q) \ 17 | const int tid = blockIdx.x * blockDim.x + threadIdx.x; \ 18 | if (tid >= Q) \ 19 | return 20 | #define CUDA_N_BLOCKS_NEEDED(Q, CUDA_N_THREADS) ((Q - 1) / CUDA_N_THREADS + 1) 21 | #define DEVICE_GUARD(_ten) \ 22 | const at::cuda::OptionalCUDAGuard device_guard(device_of(_ten)); 23 | -------------------------------------------------------------------------------- /csrc/occ_grid/include/occ_grid/helpers_march.h: -------------------------------------------------------------------------------- 1 | /** @file helpers_march.h 2 | * @brief Ray marching of occupancy grid 3 | * Modified from https://github.com/KAIR-BAIR/nerfacc 4 | * Copyright (c) 2022 Ruilong Li, UC Berkeley. 5 | */ 6 | 7 | #include "helpers_cuda.h" 8 | #include "helpers_math.h" 9 | #include "helpers_contraction.h" 10 | 11 | inline __device__ __host__ float calc_dt(const float t, const float dt_gamma, const float dt_min, const float dt_max) 12 | { 13 | return clamp(t * dt_gamma, dt_min, dt_max); 14 | } 15 | 16 | inline __device__ __host__ int grid_idx_at(const float3 xyz_unit, const int3 grid_res) 17 | { 18 | // xyz should be always in [0, 1]^3. 
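    // Worked example: with grid_res = (2, 4, 8) and xyz_unit = (0.5, 0.5, 0.5),
    // ixyz = (1, 2, 4), grid_offset = (32, 8, 1), so idx = 1*32 + 2*8 + 4 = 52.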
19 | int3 ixyz = make_int3(xyz_unit * make_float3(grid_res)); 20 | ixyz = clamp(ixyz, make_int3(0, 0, 0), grid_res - 1); 21 | int3 grid_offset = make_int3(grid_res.y * grid_res.z, grid_res.z, 1); // Contiguous memory on z-dim 22 | int idx = dot(ixyz, grid_offset); 23 | return idx; 24 | } 25 | 26 | template 27 | inline __device__ __host__ scalar_t grid_occupied_at( 28 | const float3 xyz, 29 | const float3 roi_min, const float3 roi_max, 30 | ContractionType type, 31 | const int3 grid_res, const scalar_t *grid_value, int* grid_idx_out) 32 | { 33 | if (type == ContractionType::AABB && 34 | (xyz.x < roi_min.x || xyz.x > roi_max.x || 35 | xyz.y < roi_min.y || xyz.y > roi_max.y || 36 | xyz.z < roi_min.z || xyz.z > roi_max.z)) 37 | { 38 | return false; 39 | } 40 | float3 xyz_unit = apply_contraction(xyz, roi_min, roi_max, type); 41 | int idx = grid_idx_at(xyz_unit, grid_res); 42 | *grid_idx_out = idx; 43 | return grid_value[idx]; 44 | } 45 | 46 | // dda like step 47 | inline __device__ __host__ float distance_to_next_voxel( 48 | const float3 xyz, const float3 dir, const float3 inv_dir, 49 | const float3 roi_min, const float3 roi_max, const int3 grid_res) 50 | { 51 | float3 _occ_res = make_float3(grid_res); 52 | float3 _xyz = roi_to_unit(xyz, roi_min, roi_max) * _occ_res; 53 | float3 txyz = ((floorf(_xyz + 0.5f + 0.5f * sign(dir)) - _xyz) * inv_dir) / _occ_res * (roi_max - roi_min); 54 | float t = min(min(txyz.x, txyz.y), txyz.z); 55 | return fmaxf(t, 0.0f); 56 | } 57 | 58 | inline __device__ __host__ float advance_to_next_voxel( 59 | const float t, const float dt_min, 60 | const float3 xyz, const float3 dir, const float3 inv_dir, 61 | const float3 roi_min, const float3 roi_max, const int3 grid_res) 62 | { 63 | // Regular stepping (may be slower but matches non-empty space) 64 | float t_target = t + distance_to_next_voxel(xyz, dir, inv_dir, roi_min, roi_max, grid_res); 65 | float _t = t; 66 | do 67 | { 68 | _t += dt_min; 69 | } while (_t < t_target); 70 | 71 | // float _t = t; 72 | // if (_t < t_target) { 73 | // _t += dt_min * ( (int)( (t_target-t) / dt_min ) + 1 ); 74 | // } 75 | 76 | return _t; 77 | } -------------------------------------------------------------------------------- /csrc/occ_grid/src/occ_grid.cpp: -------------------------------------------------------------------------------- 1 | /** @file occ_grid.cpp 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief Occupancy grid marching operations. 4 | * Modified from https://github.com/KAIR-BAIR/nerfacc 5 | * Copyright (c) 2022 Ruilong Li, UC Berkeley. 
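 * Binds ContractionType and the single-block, batched, and forest ray-marching entry points.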
6 | */ 7 | 8 | #ifdef _MSC_VER 9 | #pragma warning(push, 0) 10 | #include 11 | #pragma warning(pop) 12 | #else 13 | #include 14 | #endif 15 | 16 | #ifdef snprintf 17 | #undef snprintf 18 | #endif 19 | 20 | #include 21 | 22 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 23 | py::enum_(m, "ContractionType", py::module_local(true)) 24 | .value("AABB", ContractionType::AABB) 25 | .value("UN_BOUNDED_TANH", ContractionType::UN_BOUNDED_TANH) 26 | .value("UN_BOUNDED_SPHERE", ContractionType::UN_BOUNDED_SPHERE) 27 | .export_values() 28 | ; 29 | 30 | m.def("ray_marching", &ray_marching, "ray_marching on a single block"); 31 | m.def("batched_ray_marching", &batched_ray_marching, "ray_marching on batched blocks"); 32 | m.def("forest_ray_marching", &forest_ray_marching, "ray_marching on a forest of blocks"); 33 | } 34 | -------------------------------------------------------------------------------- /csrc/pack_ops/pack_ops.cpp: -------------------------------------------------------------------------------- 1 | /** @file pack_ops.cpp 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief Pack ops Pytorch bindings. 4 | */ 5 | 6 | #ifdef _MSC_VER 7 | #pragma warning(push, 0) 8 | #include 9 | #pragma warning(pop) 10 | #else 11 | #include 12 | #endif 13 | 14 | #ifdef snprintf 15 | #undef snprintf 16 | #endif 17 | 18 | #include "pack_ops.h" 19 | 20 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 21 | m.def("interleave_arange", py::overload_cast(&interleave_arange)); 22 | m.def("interleave_linstep", py::overload_cast(&interleave_linstep)); 23 | m.def("interleave_linstep", py::overload_cast(&interleave_linstep)); 24 | m.def("interleave_linstep", py::overload_cast(&interleave_linstep)); 25 | m.def("interleave_sample_step_wrt_depth_clamp_deprecated", &interleave_sample_step_wrt_depth_clamp_deprecated); 26 | m.def("interleave_sample_step_wrt_depth_clamped", &interleave_sample_step_wrt_depth_clamped); 27 | m.def("interleave_sample_step_wrt_depth_in_packed_segments", &interleave_sample_step_wrt_depth_in_packed_segments); 28 | 29 | m.def("packed_add", &packed_add); 30 | m.def("packed_sub", &packed_sub); 31 | m.def("packed_mul", &packed_mul); 32 | m.def("packed_div", &packed_div); 33 | m.def("packed_matmul", &packed_matmul); 34 | 35 | m.def("packed_gt", &packed_gt); 36 | m.def("packed_geq", &packed_geq); 37 | m.def("packed_lt", &packed_lt); 38 | m.def("packed_leq", &packed_leq); 39 | m.def("packed_eq", &packed_eq); 40 | m.def("packed_neq", &packed_neq); 41 | 42 | m.def("packed_sum", &packed_sum); 43 | m.def("packed_diff", &packed_diff); 44 | m.def("packed_backward_diff", &packed_backward_diff); 45 | m.def("packed_cumsum", &packed_cumsum); 46 | m.def("packed_cumprod", &packed_cumprod); 47 | 48 | m.def("packed_sort_qsort", &packed_sort_qsort); 49 | m.def("packed_sort_thrust", &packed_sort_thrust); 50 | m.def("packed_searchsorted", &packed_searchsorted); 51 | m.def("packed_searchsorted_packed_vals", &packed_searchsorted_packed_vals); 52 | m.def("try_merge_two_packs_sorted_aligned", &try_merge_two_packs_sorted_aligned); 53 | m.def("packed_invert_cdf", &packed_invert_cdf); 54 | m.def("packed_alpha_to_vw_forward", &packed_alpha_to_vw_forward); 55 | m.def("packed_alpha_to_vw_backward", &packed_alpha_to_vw_backward); 56 | 57 | m.def("mark_pack_boundaries_cuda", &mark_pack_boundaries_cuda); 58 | m.def("octree_mark_consecutive_segments", &octree_mark_consecutive_segments); 59 | } -------------------------------------------------------------------------------- /csrc/permuto/src/compile_split_1.cu: 
-------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | 19 | namespace permuto { 20 | 21 | // Explicit Template Instantiation 22 | template void permuto_enc_fwd_impl< 2>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 23 | template void permuto_enc_fwd_impl< 3>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 24 | template void permuto_enc_fwd_impl< 4>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 25 | template void permuto_enc_fwd_impl< 5>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 26 | template void permuto_enc_fwd_impl< 6>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 27 | template void permuto_enc_fwd_impl< 7>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 28 | template void permuto_enc_fwd_impl< 8>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 29 | template void permuto_enc_fwd_impl< 9>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 30 | template void permuto_enc_fwd_impl<10>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 31 | template void permuto_enc_fwd_impl<11>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 32 | template void permuto_enc_fwd_impl<12>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 33 | template void permuto_enc_fwd_impl<13>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 34 | template void permuto_enc_fwd_impl<14>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 35 | template void permuto_enc_fwd_impl<15>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 36 | template void permuto_enc_fwd_impl<16>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 37 | template void permuto_enc_fwd_impl<17>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 38 | template void permuto_enc_fwd_impl<18>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 39 | template void permuto_enc_fwd_impl<19>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 40 | template void permuto_enc_fwd_impl<20>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 41 | 42 | } -------------------------------------------------------------------------------- /csrc/permuto/src/compile_split_2.cu: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | 
#include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | 19 | namespace permuto { 20 | 21 | // Explicit Template Instantiation 22 | template void permuto_enc_fwd_impl<24>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 23 | template void permuto_enc_fwd_impl<28>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 24 | template void permuto_enc_fwd_impl<32>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 25 | template void permuto_enc_fwd_impl<40>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 26 | template void permuto_enc_fwd_impl<48>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 27 | template void permuto_enc_fwd_impl<56>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 28 | template void permuto_enc_fwd_impl<64>(PermutoEncMeta&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,at::Tensor&); 29 | 30 | } -------------------------------------------------------------------------------- /csrc/permuto/src/compile_split_4.cu: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | 19 | namespace permuto { 20 | 21 | // Explicit Template Instantiation 22 | template void permuto_enc_bwd_impl<24>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 23 | template void permuto_enc_bwd_impl<28>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 24 | template void permuto_enc_bwd_impl<32>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 25 | template void permuto_enc_bwd_impl<36>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 26 | template void permuto_enc_bwd_impl<40>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 27 | template void permuto_enc_bwd_impl<48>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 28 | template void permuto_enc_bwd_impl<56>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 29 | template void permuto_enc_bwd_impl<64>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,uint32_t,bool,bool,at::Tensor&,at::Tensor&); 30 | 31 | } -------------------------------------------------------------------------------- /csrc/permuto/src/compile_split_6.cu: -------------------------------------------------------------------------------- 1 | 2 | 
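// Explicit instantiations of the double-backward (bwd_bwd_input) kernels for feature widths 24 to 64.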
#include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | #include 18 | 19 | namespace permuto { 20 | 21 | // Explicit Template Instantiation 22 | template void permuto_enc_bwd_bwd_input_impl<24>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 23 | template void permuto_enc_bwd_bwd_input_impl<28>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 24 | template void permuto_enc_bwd_bwd_input_impl<32>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 25 | template void permuto_enc_bwd_bwd_input_impl<36>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 26 | template void permuto_enc_bwd_bwd_input_impl<40>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 27 | template void permuto_enc_bwd_bwd_input_impl<48>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 28 | template void permuto_enc_bwd_bwd_input_impl<56>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 29 | template void permuto_enc_bwd_bwd_input_impl<64>(PermutoEncMeta,at::Tensor&,at::Tensor&,at::Tensor&,at::Tensor&,at::optional,at::optional,at::optional,uint32_t,int32_t,bool,bool,at::Tensor&,at::Tensor&); 30 | 31 | } -------------------------------------------------------------------------------- /csrc/permuto/src/permuto.cpp: -------------------------------------------------------------------------------- 1 | /** @file permuto.cpp 2 | * @author Jianfei Guo, Shanghai AI Lab 3 | * @brief A re-implementation of the permutohedral encoding. 
4 | 5 | New features: 6 | - Support half(float16) param dtype 7 | - Support 2 <= n_levels <= 20 8 | - Support n_feats >= 2 9 | - Support different layers using different widths (n_feats) 10 | - Support batched inference with batch inds or batched input 11 | 12 | Original: https://github.com/RaduAlexandru/permutohedral_encoding 13 | 14 | Citation: 15 | @inproceedings{rosu2023permutosdf, 16 | title={PermutoSDF: Fast Multi-View Reconstruction with 17 | Implicit Surfaces using Permutohedral Lattices }, 18 | author={Radu Alexandru Rosu and Sven Behnke}, 19 | booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, 20 | year={2023} 21 | } 22 | */ 23 | 24 | #ifdef _MSC_VER 25 | #pragma warning(push, 0) 26 | #include 27 | #pragma warning(pop) 28 | #else 29 | #include 30 | #endif 31 | 32 | #ifdef snprintf 33 | #undef snprintf 34 | #endif 35 | 36 | #include 37 | 38 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 39 | #define OPTIONAL_ARGS \ 40 | py::arg("level_random_shifts")=nullptr, \ 41 | py::arg("batch_inds")=nullptr, \ 42 | py::arg("batch_offsets")=nullptr, \ 43 | py::arg("batch_data_size")=nullptr, \ 44 | py::arg("max_level")=nullptr 45 | 46 | m.def("permuto_enc_fwd", &permuto::permuto_enc_fwd, py::arg("meta"), py::arg("positions"), py::arg("lattice_values"), OPTIONAL_ARGS); 47 | m.def("permuto_enc_bwd", &permuto::permuto_enc_bwd, py::arg("meta"), py::arg("dL_dy"), py::arg("positions"), py::arg("lattice_values"), OPTIONAL_ARGS, py::arg("max_pos_dims"), py::arg("need_input_grad")=nullptr, py::arg("need_param_grad")=nullptr); 48 | m.def("permuto_enc_bwd_bwd_input", &permuto::permuto_enc_bwd_bwd_input, py::arg("meta"), py::arg("dL_ddLdx"), py::arg("dL_dy"), py::arg("positions"), py::arg("lattice_values"), OPTIONAL_ARGS, py::arg("need_dL_ddLdy")=nullptr, py::arg("need_dL_dparams")=nullptr); 49 | m.attr("supported_n_input_dims") = py::cast(permuto::supported_n_input_dims); 50 | 51 | #undef OPTIONAL_ARGS 52 | 53 | py::class_(m, "PermutoEncMeta") 54 | .def( 55 | py::init&, const std::vector& >(), 56 | "Create permutohedra encoding meta", 57 | py::arg("n_input_dim"), py::arg("hashmap_size"), py::arg("res_list"), py::arg("n_feats_list") 58 | ) 59 | .def_readonly("level_scales_multidim", &permuto::PermutoEncMeta::level_scales_multidim) 60 | .def_readonly("level_scales0", &permuto::PermutoEncMeta::level_scales0) 61 | .def_readonly("level_n_feats", &permuto::PermutoEncMeta::level_n_feats) 62 | .def_readonly("level_n_params", &permuto::PermutoEncMeta::level_n_params) 63 | .def_readonly("level_offsets", &permuto::PermutoEncMeta::level_offsets) 64 | .def_readonly("level_sizes", &permuto::PermutoEncMeta::level_sizes) 65 | .def_readonly("map_levels", &permuto::PermutoEncMeta::map_levels) 66 | .def_readonly("map_cnt", &permuto::PermutoEncMeta::map_cnt) 67 | 68 | .def_readonly("n_levels", &permuto::PermutoEncMeta::n_levels) 69 | .def_readonly("n_pseudo_levels", &permuto::PermutoEncMeta::n_pseudo_levels) 70 | .def_readonly("n_feat_per_pseudo_lvl", &permuto::PermutoEncMeta::n_feat_per_pseudo_lvl) 71 | .def_readonly("n_dims_to_encode", &permuto::PermutoEncMeta::n_dims_to_encode) 72 | .def_readonly("n_encoded_dims", &permuto::PermutoEncMeta::n_encoded_dims) 73 | .def_readonly("n_params", &permuto::PermutoEncMeta::n_params) 74 | 75 | ; 76 | 77 | } -------------------------------------------------------------------------------- /csrc/sphere_trace/compile_flags.txt: -------------------------------------------------------------------------------- 1 | -xc++ 2 | -xcuda 3 | -DNGP_OPTIX 4 | 
-D__NVCC__
-D__CUDACC_VER_MAJOR__=10
-D__CUDACC_VER_MINOR__=3
-DTCNN_MIN_GPU_ARCH=86
--cuda-gpu-arch=sm_86
--cuda-path=/usr/local/cuda/
-I
/usr/include/c++/9/
-I
/usr/include/x86_64-linux-gnu/c++/9/
-I
/usr/local/cuda/include/
-I
/usr/include/python3.8/
-I
/home/dengnianchen/miniconda3/envs/3dnr/lib/python3.8/site-packages/torch/include/
-I
/home/dengnianchen/miniconda3/envs/3dnr/lib/python3.8/site-packages/torch/include/torch/csrc/api/include/
-I
/home/dengnianchen/miniconda3/envs/3dnr/lib/python3.8/site-packages/torch/include/TH/
-I
/home/dengnianchen/miniconda3/envs/3dnr/lib/python3.8/site-packages/torch/include/THC/
-I
$HOME/Libs/Optix-7.5.0/include
-I
include/

-------------------------------------------------------------------------------- /csrc/sphere_trace/include/sphere_trace/common.cuh: --------------------------------------------------------------------------------

#pragma once
#include
#include

#ifdef __NVCC__
#define HOST_DEVICE __host__ __device__
#else
#define HOST_DEVICE
#endif

constexpr uint32_t n_threads_linear = 128;

template <typename T>
HOST_DEVICE T div_round_up(T val, T divisor) {
    return (val + divisor - 1) / divisor;
}

template <typename T>
constexpr uint32_t n_blocks_linear(T n_elements) {
    return (uint32_t)div_round_up(n_elements, (T)n_threads_linear);
}

#ifdef __NVCC__
template <typename K, typename T, typename ... Types>
inline void linear_kernel(K kernel, uint32_t shmem_size, cudaStream_t stream, T n_elements, Types ... args) {
    if (n_elements <= 0) {
        return;
    }
    kernel<<<n_blocks_linear(n_elements), n_threads_linear, shmem_size, stream>>>(n_elements, args...);
}
#endif

-------------------------------------------------------------------------------- /csrc/sphere_trace/include/sphere_trace/ray_march.cuh: --------------------------------------------------------------------------------

/** @file ray_march.cuh
 * @author Nianchen Deng, Shanghai AI Lab
 * @brief Ray marching for dense occupancy grid.
 */
#pragma once
#include "common.cuh"
#include "dense_grid.cuh"

std::tuple>
ray_march(const DenseGrid &grid, at::Tensor rays_o, at::Tensor rays_d, at::Tensor rays_near,
          at::Tensor rays_far, bool return_pts = false, bool enable_debug = false);

-------------------------------------------------------------------------------- /csrc/sphere_trace/include/sphere_trace/sphere_tracer.cuh: --------------------------------------------------------------------------------

/** @file sphere_tracer.cuh
 * @author Nianchen Deng, Shanghai AI Lab
 * @brief Sphere trace class.
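 * Typical driver loop (a sketch inferred from the interface below): init_rays(...), then
 * repeatedly evaluate distances, advance_rays(distances), and compact_rays() until no rays
 * remain ALIVE; the trace(...) method wraps this loop around a distance_fun_t.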
4 | */ 5 | #pragma once 6 | #include "common.cuh" 7 | 8 | enum RayStatus : uint8_t { ALIVE, HIT, OUT }; 9 | 10 | struct TracePayload { 11 | int32_t idx; 12 | int32_t seg_idx; 13 | int32_t seg_end_idx; 14 | uint16_t n_steps; 15 | RayStatus status; 16 | int8_t debug_flag; 17 | }; 18 | 19 | struct HitPayload { 20 | int32_t idx; 21 | float t; 22 | int32_t n_steps; 23 | }; 24 | 25 | class TraceBuffer { 26 | public: 27 | float *trace_depths; 28 | glm::vec4 *trace_hit_regions; // t0, t1, d0, d1 29 | glm::ivec2 *trace_hit_seg_regions; 30 | TracePayload *trace_payloads; 31 | 32 | TraceBuffer(uint32_t n_rays) { 33 | cudaMalloc(&trace_depths, n_rays * sizeof(float)); 34 | cudaMalloc(&trace_hit_regions, n_rays * sizeof(glm::vec4)); 35 | cudaMalloc(&trace_hit_seg_regions, n_rays * sizeof(glm::ivec2)); 36 | cudaMalloc(&trace_payloads, n_rays * sizeof(TracePayload)); 37 | } 38 | ~TraceBuffer() { 39 | cudaFree(trace_depths); 40 | cudaFree(trace_hit_regions); 41 | cudaFree(trace_hit_seg_regions); 42 | cudaFree(trace_payloads); 43 | } 44 | }; 45 | 46 | using distance_fun_t = std::function; 47 | 48 | class SphereTracer { 49 | public: 50 | SphereTracer(float min_step, float distance_scale, float zero_offset = 0.0f, 51 | float hit_threshold = 0.001f); 52 | ~SphereTracer(); 53 | 54 | void init_rays(at::Tensor rays_o, at::Tensor rays_d, at::Tensor valid_rays_idx, 55 | at::Tensor segs_pack_info, at::Tensor segs, 56 | at::optional segs_endpoint_distances = at::nullopt); 57 | 58 | uint32_t compact_rays(); 59 | 60 | void advance_rays(at::Tensor distances); 61 | 62 | std::map get_rays(RayStatus status) const; 63 | 64 | at::Tensor get_trace_positions() const; 65 | 66 | std::tuple sample_on_segments(float step_size); 67 | 68 | void trace_on_samples(at::Tensor rays_samples_offset, at::Tensor rays_n_samples, 69 | at::Tensor rays_sample_depths, at::Tensor rays_sample_distances); 70 | 71 | void trace(at::Tensor rays_o, at::Tensor rays_d, const distance_fun_t &distance_function, 72 | uint32_t max_steps_between_compact, uint32_t max_march_iters, 73 | at::Tensor valid_rays_idx, at::Tensor segs_pack_info, at::Tensor segs, 74 | at::optional segs_endpoint_distances = at::nullopt); 75 | 76 | uint32_t n_rays(RayStatus status) const { 77 | return status == OUT ? _n_total_rays - n_rays(ALIVE) - n_rays(HIT) 78 | : status == ALIVE ? _n_rays_alive 79 | : _get_count(status); 80 | } 81 | 82 | private: 83 | float _min_step; 84 | float _distance_scale; 85 | float _zero_offset; 86 | float _hit_threshold; 87 | TraceBuffer *_rays_payload[2]; 88 | HitPayload *_rays_payload_hit; 89 | uint32_t *_counters; 90 | uint32_t _n_total_rays; 91 | uint32_t _n_rays_alive; 92 | uint32_t _buffer_index; 93 | at::Tensor _rays_o; 94 | at::Tensor _rays_d; 95 | at::Tensor _segs; 96 | at::optional _segs_endpoint_distances; 97 | 98 | uint32_t _get_count(RayStatus status) const; 99 | void _malloc_payload_buffers(uint32_t n_rays); 100 | void _free_payload_buffers(); 101 | }; -------------------------------------------------------------------------------- /csrc/sphere_trace/src/entry.cu: -------------------------------------------------------------------------------- 1 | /** @file entry.cu 2 | * @author Nianchen Deng, Shanghai AI Lab 3 | * @brief The PyTorch entry of sphere trace module. 
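 * Binds RayStatus, DenseGrid, SphereTracer, and the ray_march function to Python.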
4 | */ 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | using namespace pybind11::literals; 13 | 14 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 15 | py::enum_<RayStatus>(m, "RayStatus") 16 | .value("ALIVE", RayStatus::ALIVE) 17 | .value("HIT", RayStatus::HIT) 18 | .value("OUT", RayStatus::OUT) 19 | .export_values(); 20 | 21 | py::class_<DenseGrid>(m, "DenseGrid") 22 | .def(py::init([](int x, int y, int z, at::Tensor grid_occ) { 23 | return new DenseGrid({x, y, z}, grid_occ.data_ptr()); 24 | })) 25 | .def_property_readonly("res", [](DenseGrid *self) { 26 | return std::make_tuple(self->res().x, self->res().y, self->res().z); 27 | }); 28 | 29 | py::class_<SphereTracer>(m, "SphereTracer") 30 | .def(py::init<float, float, float, float>(), "min_step"_a, "distance_scale"_a, 31 | "zero_offset"_a = 0.0f, "hit_threshold"_a = 0.001f) 32 | .def("init_rays", &SphereTracer::init_rays, "rays_o"_a, "rays_d"_a, "valid_rays_idx"_a, 33 | "segs_pack_info"_a, "segs"_a, "segs_endpoint_distances"_a = nullptr) 34 | .def("compact_rays", &SphereTracer::compact_rays) 35 | .def("advance_rays", &SphereTracer::advance_rays, "distances"_a) 36 | .def("get_rays", &SphereTracer::get_rays, "status"_a) 37 | .def("get_trace_positions", &SphereTracer::get_trace_positions) 38 | .def("sample_on_segments", &SphereTracer::sample_on_segments, "step_size"_a) 39 | .def("trace_on_samples", &SphereTracer::trace_on_samples, "rays_samples_offset"_a, 40 | "rays_n_samples"_a, "rays_sample_depths"_a, "rays_sample_distances"_a) 41 | .def("trace", &SphereTracer::trace, "rays_o"_a, "rays_d"_a, "distance_function"_a, 42 | "max_steps_between_compact"_a, "max_march_iters"_a, "valid_rays_idx"_a, 43 | "segs_pack_info"_a, "segs"_a, "segs_endpoint_distances"_a = nullptr); 44 | 45 | m.def("ray_march", &ray_march, "grid"_a, "rays_o"_a, "rays_d"_a, "rays_near"_a, "rays_far"_a, 46 | "return_pts"_a = false, "enable_debug"_a = false); 47 | } 48 | -------------------------------------------------------------------------------- /docs/camera_paths.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/docs/camera_paths.md -------------------------------------------------------------------------------- /docs/config.md: -------------------------------------------------------------------------------- 1 | We have developed a configuration tool based on [OmegaConf](https://omegaconf.readthedocs.io/), primarily because of its convenient [interpolation string](https://omegaconf.readthedocs.io/en/2.3_branch/grammar.html#interpolation-strings) mechanism, support for [custom resolvers](https://omegaconf.readthedocs.io/en/2.3_branch/custom_resolvers.html), config merging tools, [dot-list parsing](https://omegaconf.readthedocs.io/en/2.3_branch/usage.html#from-a-dot-list), etc. 2 | 3 | Below are some examples of how you can make config files and how you can dynamically change configs while running.
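Since the two example sections below are currently empty stubs, here is a minimal, hedged sketch of both workflows. Everything in it (keys, values, and model names) is illustrative and is not taken from this repository's actual configs:

```python
# Hypothetical sketch -- keys and values are illustrative, not from nr3d_lib's configs.
from omegaconf import OmegaConf

base = OmegaConf.create("""
model:
  name: lotd_neus
  n_levels: 16
training:
  lr: 0.01
exp_dir: logs/${model.name}   # interpolation string, resolved lazily on access
""")
assert base.exp_dir == "logs/lotd_neus"

# Command-line style dynamic configs: merge dot-list overrides onto the base config.
overrides = OmegaConf.from_dotlist(["training.lr=0.005", "model.name=permuto_neus"])
cfg = OmegaConf.merge(base, overrides)

assert cfg.training.lr == 0.005
assert cfg.exp_dir == "logs/permuto_neus"  # the interpolation re-resolves after the merge
```

Because interpolations are resolved lazily, a single command-line override of `model.name` propagates to every value that references it, which is exactly what makes this mechanism convenient for experiment management.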
4 | 5 | ### Making a config file with interpolation strings 6 | 7 | 8 | 9 | ### Command-line dynamic configs 10 | 11 | -------------------------------------------------------------------------------- /docs/tutorials/raymarching.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/docs/tutorials/raymarching.md -------------------------------------------------------------------------------- /externals/freqencoder/bindings.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Borrowed from https://github.com/ashawkey/torch-ngp 3 | */ 4 | 5 | #include 6 | 7 | #include "freqencoder.h" 8 | 9 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 10 | m.def("freq_encode_forward", &freq_encode_forward, "freq encode forward (CUDA)"); 11 | m.def("freq_encode_backward", &freq_encode_backward, "freq encode backward (CUDA)"); 12 | } -------------------------------------------------------------------------------- /externals/freqencoder/freqencoder.h: -------------------------------------------------------------------------------- 1 | /* 2 | Borrowed from https://github.com/ashawkey/torch-ngp 3 | */ 4 | 5 | # pragma once 6 | 7 | #include 8 | #include 9 | 10 | // _backend.freq_encode_forward(inputs, B, input_dim, degree, output_dim, outputs) 11 | void freq_encode_forward(at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor outputs); 12 | 13 | // _backend.freq_encode_backward(grad, outputs, B, input_dim, degree, output_dim, grad_inputs) 14 | void freq_encode_backward(at::Tensor grad, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t deg, const uint32_t C, at::Tensor grad_inputs); -------------------------------------------------------------------------------- /externals/kaolin_spc_raytrace_fixed/README.md: -------------------------------------------------------------------------------- 1 | Borrowed from kaolin (https://github.com/NVIDIAGameWorks/kaolin), and modified by Nianchen Deng (dengnianchen@pjlab.org.cn): 2 | 3 | Fix the ray tracing behavior for spc. -------------------------------------------------------------------------------- /externals/kaolin_spc_raytrace_fixed/include/kaolin_spc_raytrace_fixed/check.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019,20-21 NVIDIA CORPORATION & AFFILIATES. 2 | // All rights reserved. 3 | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | 16 | #ifndef KAOLIN_CHECK_H_ 17 | #define KAOLIN_CHECK_H_ 18 | 19 | #include 20 | #include 21 | 22 | #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") 23 | #define CHECK_CPU(x) TORCH_CHECK(x.device().is_cpu(), #x " must be a cpu tensor") 24 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 25 | 26 | #define CHECK_HALF(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Half, #x " is not half") 27 | #define CHECK_FLOAT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float, #x " must be float") 28 | #define CHECK_DOUBLE(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Double, #x " must be double") 29 | #define CHECK_BOOL(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Bool, #x " must be bool") 30 | #define CHECK_BYTE(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Byte, #x " must be byte") 31 | #define CHECK_SHORT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Short, #x " must be short") 32 | #define CHECK_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be int") 33 | #define CHECK_LONG(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Long, #x " must be long") 34 | 35 | #define CHECK_DIMS(x, d) TORCH_CHECK(x.dim() == d, #x " must have " #d " dims") 36 | #define CHECK_SIZE(x, d, s) \ 37 | TORCH_CHECK(x.size(d) == s, #x " must have dim " #d " of size " #s) 38 | #define CHECK_SIZES(x, ...) \ 39 | TORCH_CHECK(x.sizes() == std::vector<int64_t>({__VA_ARGS__}), \ 40 | #x " must be of size {" #__VA_ARGS__ "}") 41 | 42 | #define KAOLIN_NO_CUDA_ERROR(func_name) \ 43 | AT_ERROR("In ", func_name, ": Kaolin built without CUDA, " \ 44 | "cannot run with GPU tensors") 45 | 46 | #endif // KAOLIN_CHECK_H_ 47 | -------------------------------------------------------------------------------- /externals/kaolin_spc_raytrace_fixed/include/kaolin_spc_raytrace_fixed/raytrace.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. 2 | // All rights reserved. 3 | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #ifndef KAOLIN_OPS_RENDER_SPC_RAYTRACE_H_ 17 | #define KAOLIN_OPS_RENDER_SPC_RAYTRACE_H_ 18 | 19 | #ifdef WITH_CUDA 20 | #include "spc_math.h" 21 | #endif 22 | 23 | #include 24 | 25 | namespace kaolin { 26 | 27 | std::vector<at::Tensor> raytrace_cuda_fixed( 28 | at::Tensor octree, 29 | at::Tensor points, 30 | at::Tensor pyramid, 31 | at::Tensor exclusive_sum, 32 | at::Tensor ray_o, 33 | at::Tensor ray_d, 34 | uint32_t target_level, 35 | bool return_depth, 36 | bool with_exit, 37 | bool include_head); 38 | 39 | } // namespace kaolin 40 | 41 | #endif // KAOLIN_OPS_RENDER_SPC_RAYTRACE_H_ 42 | -------------------------------------------------------------------------------- /externals/kaolin_spc_raytrace_fixed/include/kaolin_spc_raytrace_fixed/utils.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | #ifndef KAOLIN_UTILS_H_ 16 | #define KAOLIN_UTILS_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | 22 | #define PRIVATE_CASE_TYPE(ENUM_TYPE, TYPE, TYPE_NAME, ...) \ 23 | case ENUM_TYPE: { \ 24 | using TYPE_NAME = TYPE; \ 25 | return __VA_ARGS__(); \ 26 | } 27 | 28 | #define PRIVATE_CASE_INOUT_TYPES(CONST_IN_TYPE, CONST_OUT_TYPE, ENUM_IN_TYPE, ENUM_OUT_TYPE, \ 29 | IN_TYPE, OUT_TYPE, IN_TYPE_NAME, OUT_TYPE_NAME, ...) \ 30 | if (CONST_IN_TYPE == ENUM_IN_TYPE && CONST_OUT_TYPE == ENUM_OUT_TYPE) { \ 31 | using IN_TYPE_NAME = IN_TYPE; \ 32 | using OUT_TYPE_NAME = OUT_TYPE; \ 33 | return __VA_ARGS__(); \ 34 | } else \ 35 | 36 | #define PRIVATE_CASE_INOUT_DEDUCED_TYPES(ENUM_TYPE, IN_TYPE, OUT_TYPE, \ 37 | IN_TYPE_NAME, OUT_TYPE_NAME, ...) \ 38 | case ENUM_TYPE: { \ 39 | using IN_TYPE_NAME = IN_TYPE; \ 40 | using OUT_TYPE_NAME = OUT_TYPE; \ 41 | return __VA_ARGS__(); \ 42 | } 43 | 44 | #define PRIVATE_CASE_INT(CONST_INT, VAR_NAME, ...) \ 45 | case CONST_INT: { \ 46 | const int VAR_NAME = CONST_INT; \ 47 | return __VA_ARGS__(); \ 48 | } 49 | 50 | #define DISPATCH_NUM_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) \ 51 | [&] { \ 52 | switch(TYPE) \ 53 | { \ 54 | PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, TYPE_NAME, __VA_ARGS__) \ 55 | PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, TYPE_NAME, __VA_ARGS__) \ 56 | PRIVATE_CASE_TYPE(at::ScalarType::Int, int, TYPE_NAME, __VA_ARGS__) \ 57 | PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, TYPE_NAME, __VA_ARGS__) \ 58 | PRIVATE_CASE_TYPE(at::ScalarType::Half, at::Half, TYPE_NAME, __VA_ARGS__) \ 59 | PRIVATE_CASE_TYPE(at::ScalarType::Float, float, TYPE_NAME, __VA_ARGS__) \ 60 | PRIVATE_CASE_TYPE(at::ScalarType::Double, double, TYPE_NAME, __VA_ARGS__) \ 61 | default: \ 62 | AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \ 63 | } \ 64 | }() 65 | 66 | 67 | #define DISPATCH_INTEGER_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) \ 68 | [&] { \ 69 | switch(TYPE) \ 70 | { \ 71 | PRIVATE_CASE_TYPE(at::ScalarType::Byte, uint8_t, TYPE_NAME, __VA_ARGS__) \ 72 | PRIVATE_CASE_TYPE(at::ScalarType::Short, int16_t, TYPE_NAME, __VA_ARGS__) \ 73 | PRIVATE_CASE_TYPE(at::ScalarType::Int, int, TYPE_NAME, __VA_ARGS__) \ 74 | PRIVATE_CASE_TYPE(at::ScalarType::Long, int64_t, TYPE_NAME, __VA_ARGS__) \ 75 | default: \ 76 | AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \ 77 | } \ 78 | }() 79 | 80 | #define DISPATCH_FLOAT_TYPES(TYPE, TYPE_NAME, SCOPE_NAME, ...) 
\ 81 | [&] { \ 82 | switch(TYPE) \ 83 | { \ 84 | PRIVATE_CASE_TYPE(at::ScalarType::Half, at::Half, TYPE_NAME, __VA_ARGS__) \ 85 | PRIVATE_CASE_TYPE(at::ScalarType::Float, float, TYPE_NAME, __VA_ARGS__) \ 86 | PRIVATE_CASE_TYPE(at::ScalarType::Double, double, TYPE_NAME, __VA_ARGS__) \ 87 | default: \ 88 | AT_ERROR(#SCOPE_NAME, " not implemented for '", toString(TYPE), "'"); \ 89 | } \ 90 | }() 91 | 92 | #endif // KAOLIN_UTILS_H_ 93 | -------------------------------------------------------------------------------- /externals/kaolin_spc_raytrace_fixed/src/raytrace.cpp: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. 2 | // All rights reserved. 3 | 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | #include 17 | 18 | #include 19 | 20 | #include "kaolin_spc_raytrace_fixed/check.h" 21 | #ifdef WITH_CUDA 22 | #include "kaolin_spc_raytrace_fixed/utils.h" 23 | #include "kaolin_spc_raytrace_fixed/spc_math.h" 24 | #endif 25 | 26 | namespace kaolin { 27 | 28 | #define CHECK_TRIPLE(x) TORCH_CHECK(x.dim() == 1 && x.size(0) == 3, #x " must be a triplet") 29 | #define CHECK_CPU_COORDS(x) CHECK_CONTIGUOUS(x); CHECK_CPU(x); CHECK_FLOAT(x); CHECK_TRIPLE(x) 30 | 31 | using namespace std; 32 | using namespace at::indexing; 33 | 34 | #ifdef WITH_CUDA 35 | 36 | std::vector raytrace_cuda_impl( 37 | at::Tensor octree, 38 | at::Tensor points, 39 | at::Tensor pyramid, 40 | at::Tensor exclusive_sum, 41 | at::Tensor ray_o, 42 | at::Tensor ray_d, 43 | uint32_t max_level, 44 | uint32_t target_level, 45 | bool return_depth, 46 | bool with_exit, 47 | bool include_head); 48 | 49 | #endif 50 | 51 | std::vector raytrace_cuda_fixed( 52 | at::Tensor octree, 53 | at::Tensor points, 54 | at::Tensor pyramid, 55 | at::Tensor exclusive_sum, 56 | at::Tensor ray_o, 57 | at::Tensor ray_d, 58 | uint32_t target_level, 59 | bool return_depth, 60 | bool with_exit, 61 | bool include_head) { 62 | #ifdef WITH_CUDA 63 | at::TensorArg octree_arg{octree, "octree", 1}; 64 | at::TensorArg points_arg{points, "points", 2}; 65 | at::TensorArg pyramid_arg{pyramid, "pyramid", 3}; 66 | at::TensorArg exclusive_sum_arg{exclusive_sum, "exclusive_sum", 4}; 67 | at::TensorArg ray_o_arg{ray_o, "ray_o", 5}; 68 | at::TensorArg ray_d_arg{ray_d, "ray_d", 6}; 69 | at::checkAllSameGPU(__func__, {octree_arg, points_arg, exclusive_sum_arg, ray_o_arg, ray_d_arg}); 70 | at::checkAllContiguous(__func__, {octree_arg, points_arg, exclusive_sum_arg, ray_o_arg, ray_d_arg}); 71 | at::checkDeviceType(__func__, {pyramid}, at::DeviceType::CPU); 72 | 73 | CHECK_SHORT(points); 74 | at::checkDim(__func__, points_arg, 2); 75 | at::checkSize(__func__, points_arg, 1, 3); 76 | at::checkDim(__func__, pyramid_arg, 2); 77 | at::checkSize(__func__, pyramid_arg, 0, 2); 78 | uint32_t max_level = pyramid.size(1)-2; 79 | TORCH_CHECK(max_level < KAOLIN_SPC_MAX_LEVELS, "SPC pyramid too big"); 80 | 81 | uint32_t* pyramid_ptr = (uint32_t*)pyramid.data_ptr(); 82 | 
uint32_t osize = pyramid_ptr[2*max_level+2]; 83 | uint32_t psize = pyramid_ptr[2*max_level+3]; 84 | at::checkSize(__func__, octree_arg, 0, osize); 85 | at::checkSize(__func__, points_arg, 0, psize); 86 | TORCH_CHECK(pyramid_ptr[max_level+1] == 0 && pyramid_ptr[max_level+2] == 0, 87 | "SPC pyramid corrupt, check if the SPC pyramid has been sliced"); 88 | 89 | // do cuda 90 | return raytrace_cuda_impl(octree, points, pyramid, exclusive_sum, ray_o, ray_d, 91 | max_level, target_level, return_depth, with_exit, include_head); 92 | 93 | #else 94 | KAOLIN_NO_CUDA_ERROR(__func__); 95 | #endif // WITH_CUDA 96 | } 97 | 98 | } // namespace kaolin 99 | -------------------------------------------------------------------------------- /externals/pytorch3d_knn/cutils.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Meta Platforms, Inc. and affiliates. 3 | * All rights reserved. 4 | * 5 | * This source code is licensed under the BSD-style license found in the 6 | * LICENSE file in the root directory of this source tree. 7 | */ 8 | 9 | #pragma once 10 | #include 11 | 12 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor.") 13 | #define CHECK_CONTIGUOUS(x) \ 14 | TORCH_CHECK(x.is_contiguous(), #x " must be contiguous.") 15 | #define CHECK_CONTIGUOUS_CUDA(x) \ 16 | CHECK_CUDA(x); \ 17 | CHECK_CONTIGUOUS(x) 18 | -------------------------------------------------------------------------------- /externals/pytorch3d_knn/ext.cpp: -------------------------------------------------------------------------------- 1 | // Borrowed from pytorch3d! 2 | 3 | #include 4 | 5 | #include "knn.h" 6 | 7 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 8 | #ifdef WITH_CUDA 9 | m.def("knn_check_version", &KnnCheckVersion); 10 | #endif 11 | m.def("knn_points_idx", &KNearestNeighborIdx); 12 | m.def("knn_points_backward", &KNearestNeighborBackward); 13 | } 14 | -------------------------------------------------------------------------------- /externals/r3dg_rasterization/.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | r3dg_rasterization.egg-info/ 3 | dist/ 4 | -------------------------------------------------------------------------------- /externals/r3dg_rasterization/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file.
8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | cmake_minimum_required(VERSION 3.20) 13 | 14 | project(DiffRast LANGUAGES CUDA CXX) 15 | 16 | set(CMAKE_CXX_STANDARD 17) 17 | set(CMAKE_CXX_EXTENSIONS OFF) 18 | set(CMAKE_CUDA_STANDARD 17) 19 | 20 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") 21 | 22 | add_library(CudaRasterizer 23 | cuda_rasterizer/backward.h 24 | cuda_rasterizer/backward.cu 25 | cuda_rasterizer/forward.h 26 | cuda_rasterizer/forward.cu 27 | cuda_rasterizer/auxiliary.h 28 | cuda_rasterizer/rasterizer_impl.cu 29 | cuda_rasterizer/rasterizer_impl.h 30 | cuda_rasterizer/rasterizer.h 31 | ) 32 | 33 | set_target_properties(CudaRasterizer PROPERTIES CUDA_ARCHITECTURES "75;86") 34 | 35 | target_include_directories(CudaRasterizer PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cuda_rasterizer) 36 | target_include_directories(CudaRasterizer PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../glm ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}) 37 | -------------------------------------------------------------------------------- /externals/r3dg_rasterization/README.md: -------------------------------------------------------------------------------- 1 | # Differential Gaussian Rasterization 2 | 3 | Used as the rasterization engine for the paper "3D Gaussian Splatting for Real-Time Rendering of Radiance Fields". If you can make use of it in your own research, please be so kind to cite us. 4 | 5 |
6 |
7 | ## BibTeX
8 | @Article{kerbl3Dgaussians,
9 |       author       = {Kerbl, Bernhard and Kopanas, Georgios and Leimk{\"u}hler, Thomas and Drettakis, George},
10 |       title        = {3D Gaussian Splatting for Real-Time Radiance Field Rendering},
11 |       journal      = {ACM Transactions on Graphics},
12 |       number       = {4},
13 |       volume       = {42},
14 |       month        = {July},
15 |       year         = {2023},
16 |       url          = {https://repo-sam.inria.fr/fungraph/3d-gaussian-splatting/}
17 | }
18 |
19 |
-------------------------------------------------------------------------------- /externals/r3dg_rasterization/cuda_rasterizer/backward.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #ifndef CUDA_RASTERIZER_BACKWARD_H_INCLUDED 13 | #define CUDA_RASTERIZER_BACKWARD_H_INCLUDED 14 | 15 | #include 16 | #include "cuda_runtime.h" 17 | #include "device_launch_parameters.h" 18 | #define GLM_FORCE_CUDA 19 | #include 20 | 21 | namespace BACKWARD 22 | { 23 | void render( 24 | const dim3 grid, dim3 block, 25 | const uint2* ranges, 26 | const uint32_t* point_list, 27 | int S, int W, int H, 28 | const float* bg_color, 29 | const float2* means2D, 30 | const float* depths, 31 | const float4* conic_opacity, 32 | const float* colors, 33 | const float* features, 34 | const float* final_Ts, 35 | const uint32_t* n_contrib, 36 | const float* dL_dpixels, 37 | const float* dL_dpixels_o, 38 | const float* dL_dpixels_d, 39 | const float* dL_dpixels_f, 40 | float3* dL_dmean2D, 41 | float4* dL_dconic2D, 42 | float* dL_dopacity, 43 | float* dL_dcolors, 44 | float* dL_dfeature, 45 | bool backward_geometry 46 | ); 47 | 48 | void preprocess( 49 | int P, int D, int M, 50 | const float3* means, 51 | const int* radii, 52 | const float* shs, 53 | const bool* clamped, 54 | const glm::vec3* scales, 55 | const glm::vec4* rotations, 56 | const float scale_modifier, 57 | const float* cov3Ds, 58 | const float* view, 59 | const float* proj, 60 | const float focal_x, float focal_y, 61 | const float tan_fovx, float tan_fovy, 62 | const glm::vec3* campos, 63 | const float3* dL_dmean2D, 64 | const float* dL_dconics, 65 | glm::vec3* dL_dmeans, 66 | float* dL_dcolor, 67 | float* dL_dcov3D, 68 | float* dL_dsh, 69 | glm::vec3* dL_dscale, 70 | glm::vec4* dL_drot); 71 | } 72 | 73 | #endif -------------------------------------------------------------------------------- /externals/r3dg_rasterization/cuda_rasterizer/config.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #ifndef CUDA_RASTERIZER_CONFIG_H_INCLUDED 13 | #define CUDA_RASTERIZER_CONFIG_H_INCLUDED 14 | 15 | #define NUM_CHANNELS 3 // Default 3, RGB 16 | #define BLOCK_X 16 17 | #define BLOCK_Y 16 18 | 19 | #endif -------------------------------------------------------------------------------- /externals/r3dg_rasterization/cuda_rasterizer/forward.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #ifndef CUDA_RASTERIZER_FORWARD_H_INCLUDED 13 | #define CUDA_RASTERIZER_FORWARD_H_INCLUDED 14 | 15 | #include 16 | #include "cuda_runtime.h" 17 | #include "device_launch_parameters.h" 18 | #define GLM_FORCE_CUDA 19 | #include 20 | 21 | namespace FORWARD 22 | { 23 | // Perform initial steps for each Gaussian prior to rasterization. 24 | void preprocess(int P, int D, int M, 25 | const float* orig_points, 26 | const glm::vec3* scales, 27 | const float scale_modifier, 28 | const glm::vec4* rotations, 29 | const float* opacities, 30 | const float* shs, 31 | bool* clamped, 32 | const float* cov3D_precomp, 33 | const float* colors_precomp, 34 | const float* viewmatrix, 35 | const float* projmatrix, 36 | const glm::vec3* cam_pos, 37 | const int W, int H, 38 | const float focal_x, float focal_y, 39 | const float tan_fovx, float tan_fovy, 40 | int* radii, 41 | float2* points_xy_image, 42 | float* depths, 43 | float* cov3Ds, 44 | float* colors, 45 | float4* conic_opacity, 46 | const dim3 grid, 47 | uint32_t* tiles_touched, 48 | bool prefiltered); 49 | 50 | // Main rasterization method. 51 | void render( 52 | const dim3 grid, dim3 block, 53 | const uint2* ranges, 54 | const uint32_t* point_list, 55 | int S, int W, int H, 56 | const float2* points_xy_image, 57 | const float* depths, 58 | const float* features, 59 | const float* colors, 60 | const float4* conic_opacity, 61 | float* final_T, 62 | uint32_t* n_contrib, 63 | const float* bg_color, 64 | float* out_color, 65 | float* out_opacity, 66 | float* out_depth, 67 | float* out_feature); 68 | 69 | void render_xyz( 70 | const dim3 grid, dim3 block, 71 | const int W, const int H, 72 | const float* viewmatrix, 73 | const float focal_x, const float focal_y, 74 | const float cx, const float cy, 75 | const float tan_fovx, const float tan_fovy, 76 | const float* opacities, 77 | const float* depths, 78 | float* normals, 79 | float* surface_xyz); 80 | 81 | void render_pseudo_normal( 82 | const dim3 grid, dim3 block, 83 | const int W, const int H, 84 | const float* viewmatrix, 85 | const float focal_x, const float focal_y, 86 | const float cx, const float cy, 87 | const float tan_fovx, const float tan_fovy, 88 | const float* opacities, 89 | const float* depths, 90 | float* normals, 91 | float* surface_xyz); 92 | } 93 | 94 | 95 | #endif -------------------------------------------------------------------------------- /externals/r3dg_rasterization/cuda_rasterizer/rasterizer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #ifndef CUDA_RASTERIZER_H_INCLUDED 13 | #define CUDA_RASTERIZER_H_INCLUDED 14 | 15 | #include 16 | #include 17 | 18 | namespace CudaRasterizer 19 | { 20 | class Rasterizer 21 | { 22 | public: 23 | 24 | static void markVisible( 25 | int P, 26 | float* means3D, 27 | float* viewmatrix, 28 | float* projmatrix, 29 | bool* present); 30 | 31 | static int forward( 32 | std::function geometryBuffer, 33 | std::function binningBuffer, 34 | std::function imageBuffer, 35 | const int P, const int S, int D, int M, 36 | const float* background, 37 | const int width, int height, 38 | const float* means3D, 39 | const float* shs, 40 | const float* colors_precomp, 41 | const float* features, 42 | const float* opacities, 43 | const float* scales, 44 | const float scale_modifier, 45 | const float* rotations, 46 | const float* cov3D_precomp, 47 | const float* viewmatrix, 48 | const float* projmatrix, 49 | const float* cam_pos, 50 | const float tan_fovx, float tan_fovy, 51 | const float cx, const float cy, 52 | const bool prefiltered, 53 | const bool computer_pseudo_normal, 54 | float* out_color, 55 | float* out_opacity, 56 | float* out_depth, 57 | float* out_feature, 58 | float* out_normal, 59 | float* out_surface_xyz, 60 | int* radii = nullptr, 61 | bool debug = false); 62 | 63 | static void backward( 64 | const int P, int S, int D, int M, int R, 65 | const float* background, 66 | const int width, int height, 67 | const float* means3D, 68 | const float* shs, 69 | const float* features, 70 | const float* colors_precomp, 71 | const float* scales, 72 | const float scale_modifier, 73 | const float* rotations, 74 | const float* cov3D_precomp, 75 | const float* viewmatrix, 76 | const float* projmatrix, 77 | const float* campos, 78 | const float tan_fovx, float tan_fovy, 79 | const int* radii, 80 | char* geom_buffer, 81 | char* binning_buffer, 82 | char* image_buffer, 83 | const float* dL_dpix, 84 | const float* dL_dpix_o, 85 | const float* dL_dpix_d, 86 | const float* dL_dpix_f, 87 | float* dL_dmean2D, 88 | float* dL_dconic, 89 | float* dL_dopacity, 90 | float* dL_dcolor, 91 | float* dL_dfeature, 92 | float* dL_dmean3D, 93 | float* dL_dcov3D, 94 | float* dL_dsh, 95 | float* dL_dscale, 96 | float* dL_drot, 97 | bool backward_geometry, 98 | bool debug); 99 | }; 100 | }; 101 | 102 | #endif -------------------------------------------------------------------------------- /externals/r3dg_rasterization/cuda_rasterizer/rasterizer_impl.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #pragma once 13 | 14 | #include 15 | #include 16 | #include "rasterizer.h" 17 | #include 18 | 19 | namespace CudaRasterizer 20 | { 21 | template 22 | static void obtain(char*& chunk, T*& ptr, std::size_t count, std::size_t alignment) 23 | { 24 | std::size_t offset = (reinterpret_cast(chunk) + alignment - 1) & ~(alignment - 1); 25 | ptr = reinterpret_cast(offset); 26 | chunk = reinterpret_cast(ptr + count); 27 | } 28 | 29 | struct GeometryState 30 | { 31 | size_t scan_size; 32 | float* depths; 33 | char* scanning_space; 34 | bool* clamped; 35 | int* internal_radii; 36 | float2* means2D; 37 | float* cov3D; 38 | float* cov3DInverse; 39 | float4* conic_opacity; 40 | float* rgb; 41 | uint32_t* point_offsets; 42 | uint32_t* tiles_touched; 43 | 44 | static GeometryState fromChunk(char*& chunk, size_t P); 45 | }; 46 | 47 | struct ImageState 48 | { 49 | uint2* ranges; 50 | uint32_t* n_contrib; 51 | float* accum_alpha; 52 | static ImageState fromChunk(char*& chunk, size_t N); 53 | }; 54 | 55 | struct BinningState 56 | { 57 | size_t sorting_size; 58 | uint64_t* point_list_keys_unsorted; 59 | uint64_t* point_list_keys; 60 | uint32_t* point_list_unsorted; 61 | uint32_t* point_list; 62 | char* list_sorting_space; 63 | 64 | static BinningState fromChunk(char*& chunk, size_t P); 65 | }; 66 | 67 | template 68 | size_t required(size_t P) 69 | { 70 | char* size = nullptr; 71 | T::fromChunk(size, P); 72 | return ((size_t)size) + 128; 73 | } 74 | }; -------------------------------------------------------------------------------- /externals/r3dg_rasterization/ext.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #include 13 | #include "rasterize_points.h" 14 | #include "render_equation.h" 15 | 16 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 17 | m.def("rasterize_gaussians", &RasterizeGaussiansCUDA); 18 | m.def("rasterize_gaussians_backward", &RasterizeGaussiansBackwardCUDA); 19 | m.def("render_equation_forward", &RenderEquationForwardCUDA); 20 | m.def("render_equation_forward_complex", &RenderEquationForwardCUDA_complex); 21 | m.def("render_equation_backward", &RenderEquationBackwardCUDA); 22 | m.def("mark_visible", &markVisible); 23 | } -------------------------------------------------------------------------------- /externals/r3dg_rasterization/rasterize_points.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #pragma once 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | std::tuple 19 | RasterizeGaussiansCUDA( 20 | const torch::Tensor& background, 21 | const torch::Tensor& means3D, 22 | const torch::Tensor& features, 23 | const torch::Tensor& colors, 24 | const torch::Tensor& opacity, 25 | const torch::Tensor& scales, 26 | const torch::Tensor& rotations, 27 | const float scale_modifier, 28 | const torch::Tensor& cov3D_precomp, 29 | const torch::Tensor& viewmatrix, 30 | const torch::Tensor& projmatrix, 31 | const float tan_fovx, 32 | const float tan_fovy, 33 | const float cx, 34 | const float cy, 35 | const int image_height, 36 | const int image_width, 37 | const torch::Tensor& sh, 38 | const int degree, 39 | const torch::Tensor& campos, 40 | const bool prefiltered, 41 | const bool computer_pseudo_normal, 42 | const bool debug); 43 | 44 | std::tuple 45 | RasterizeGaussiansBackwardCUDA( 46 | const torch::Tensor& background, 47 | const torch::Tensor& means3D, 48 | const torch::Tensor& features, 49 | const torch::Tensor& radii, 50 | const torch::Tensor& colors, 51 | const torch::Tensor& scales, 52 | const torch::Tensor& rotations, 53 | const float scale_modifier, 54 | const torch::Tensor& cov3D_precomp, 55 | const torch::Tensor& viewmatrix, 56 | const torch::Tensor& projmatrix, 57 | const float tan_fovx, 58 | const float tan_fovy, 59 | const torch::Tensor& dL_dout_color, 60 | const torch::Tensor& dL_dout_opacity, 61 | const torch::Tensor& dL_dout_depth, 62 | const torch::Tensor& dL_dout_feature, 63 | const torch::Tensor& sh, 64 | const int degree, 65 | const torch::Tensor& campos, 66 | const torch::Tensor& geomBuffer, 67 | const int R, 68 | const torch::Tensor& binningBuffer, 69 | const torch::Tensor& imageBuffer, 70 | const bool backward_geometry, 71 | const bool debug); 72 | 73 | torch::Tensor markVisible( 74 | torch::Tensor& means3D, 75 | torch::Tensor& viewmatrix, 76 | torch::Tensor& projmatrix); -------------------------------------------------------------------------------- /externals/r3dg_rasterization/render_equation.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | std::tuple 8 | RenderEquationForwardCUDA_complex( 9 | const torch::Tensor& base_color, 10 | const torch::Tensor& roughness, 11 | const torch::Tensor& metallic, 12 | const torch::Tensor& normals, 13 | const torch::Tensor& viewdirs, 14 | const torch::Tensor& incidents_shs, 15 | const torch::Tensor& direct_shs, 16 | const torch::Tensor& visibility_shs, 17 | const int sample_num); 18 | 19 | std::tuple 20 | RenderEquationForwardCUDA( 21 | const torch::Tensor& base_color, 22 | const torch::Tensor& roughness, 23 | const torch::Tensor& metallic, 24 | const torch::Tensor& normals, 25 | const torch::Tensor& viewdirs, 26 | const torch::Tensor& incidents_shs, 27 | const torch::Tensor& direct_shs, 28 | const torch::Tensor& visibility_shs, 29 | const int sample_num, 30 | const bool is_training, 31 | const bool debug); 32 | 33 | std::tuple 34 | RenderEquationBackwardCUDA( 35 | const torch::Tensor& base_color, 36 | const torch::Tensor& roughness, 37 | const torch::Tensor& metallic, 38 | const torch::Tensor& normals, 39 | const torch::Tensor& viewdirs, 40 | const torch::Tensor& incidents, 41 | const torch::Tensor& direct_shs, 42 | const torch::Tensor& visibility_shs, 43 | const int sample_num, 44 | const torch::Tensor& incident_dirs, 45 | const 
torch::Tensor& dL_drgb, 46 | const torch::Tensor& dL_ddiffuse_light, 47 | const bool debug); -------------------------------------------------------------------------------- /externals/r3dg_rasterization/setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from setuptools import setup 13 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 14 | import os 15 | 16 | os.path.dirname(os.path.abspath(__file__)) 17 | os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 18 | 19 | setup( 20 | name="r3dg_rasterization", 21 | packages=['r3dg_rasterization'], 22 | ext_modules=[ 23 | CUDAExtension( 24 | name="r3dg_rasterization._C", 25 | sources=[ 26 | "cuda_rasterizer/rasterizer_impl.cu", 27 | "cuda_rasterizer/forward.cu", 28 | "cuda_rasterizer/backward.cu", 29 | "rasterize_points.cu", 30 | "render_equation.cu", 31 | "ext.cpp"], 32 | extra_compile_args={ 33 | "nvcc": ["-I" + os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "glm/"), 34 | "-O3"], 35 | "cxx": ["-O3"]}) 36 | ], 37 | cmdclass={ 38 | 'build_ext': BuildExtension 39 | } 40 | ) 41 | -------------------------------------------------------------------------------- /externals/shencoder/bindings.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | Borrowed from https://github.com/ashawkey/torch-ngp 3 | */ 4 | 5 | #include 6 | 7 | #include "shencoder.h" 8 | 9 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 10 | m.def("sh_encode_forward", &sh_encode_forward, "SH encode forward (CUDA)"); 11 | m.def("sh_encode_backward", &sh_encode_backward, "SH encode backward (CUDA)"); 12 | } -------------------------------------------------------------------------------- /externals/shencoder/shencoder.h: -------------------------------------------------------------------------------- 1 | /* 2 | Borrowed from https://github.com/ashawkey/torch-ngp 3 | */ 4 | 5 | # pragma once 6 | 7 | #include 8 | #include 9 | 10 | // inputs: [B, D], float, in [-1, 1] 11 | // outputs: [B, F], float 12 | 13 | // encode_forward(inputs, outputs, B, input_dim, degree, calc_grad_inputs, dy_dx) 14 | void sh_encode_forward(at::Tensor inputs, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const bool calc_grad_inputs, at::Tensor dy_dx); 15 | 16 | // sh_encode_backward(grad, inputs, B, input_dim, degree, ctx.calc_grad_inputs, dy_dx, grad_inputs) 17 | void sh_encode_backward(at::Tensor grad, at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t C, at::Tensor dy_dx, at::Tensor grad_inputs); -------------------------------------------------------------------------------- /externals/simple_knn/ext.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #include 13 | #include "spatial.h" 14 | 15 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 16 | m.def("distCUDA2", &distCUDA2); 17 | } 18 | -------------------------------------------------------------------------------- /externals/simple_knn/setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (C) 2023, Inria 3 | # GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | # All rights reserved. 5 | # 6 | # This software is free for non-commercial, research and evaluation use 7 | # under the terms of the LICENSE.md file. 8 | # 9 | # For inquiries contact george.drettakis@inria.fr 10 | # 11 | 12 | from setuptools import setup 13 | from torch.utils.cpp_extension import CUDAExtension, BuildExtension 14 | import os 15 | 16 | cxx_compiler_flags = [] 17 | 18 | if os.name == 'nt': 19 | cxx_compiler_flags.append("/wd4624") 20 | 21 | setup( 22 | name="simple_knn", 23 | ext_modules=[ 24 | CUDAExtension( 25 | name="simple_knn._C", 26 | sources=[ 27 | "spatial.cu", 28 | "simple_knn.cu", 29 | "ext.cpp"], 30 | extra_compile_args={"nvcc": [], "cxx": cxx_compiler_flags}) 31 | ], 32 | cmdclass={ 33 | 'build_ext': BuildExtension 34 | } 35 | ) 36 | -------------------------------------------------------------------------------- /externals/simple_knn/simple_knn.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #ifndef SIMPLEKNN_H_INCLUDED 13 | #define SIMPLEKNN_H_INCLUDED 14 | 15 | class SimpleKNN 16 | { 17 | public: 18 | static void knn(int P, float3* points, float* meanDists); 19 | }; 20 | 21 | #endif -------------------------------------------------------------------------------- /externals/simple_knn/simple_knn/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/externals/simple_knn/simple_knn/.gitkeep -------------------------------------------------------------------------------- /externals/simple_knn/spatial.cu: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 
8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #include "spatial.h" 13 | #include "simple_knn.h" 14 | 15 | torch::Tensor 16 | distCUDA2(const torch::Tensor& points) 17 | { 18 | const int P = points.size(0); 19 | 20 | auto float_opts = points.options().dtype(torch::kFloat32); 21 | torch::Tensor means = torch::full({P}, 0.0, float_opts); 22 | 23 | SimpleKNN::knn(P, (float3*)points.contiguous().data<float>(), means.contiguous().data<float>()); 24 | 25 | return means; 26 | } -------------------------------------------------------------------------------- /externals/simple_knn/spatial.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, Inria 3 | * GRAPHDECO research group, https://team.inria.fr/graphdeco 4 | * All rights reserved. 5 | * 6 | * This software is free for non-commercial, research and evaluation use 7 | * under the terms of the LICENSE.md file. 8 | * 9 | * For inquiries contact george.drettakis@inria.fr 10 | */ 11 | 12 | #include <torch/extension.h> 13 | 14 | torch::Tensor distCUDA2(const torch::Tensor& points); -------------------------------------------------------------------------------- /media/attr_camera.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/attr_camera.png -------------------------------------------------------------------------------- /media/attr_transform.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/attr_transform.png -------------------------------------------------------------------------------- /media/convert_png.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal EnableDelayedExpansion 3 | 4 | for %%f in (*.svg) do ( 5 | echo "Converting %%~nf.png" 6 | for /f "tokens=1,2 delims=, " %%a in ('inkscape "%%f" --query-width --query-height') do ( 7 | set WIDTH=%%a 8 | set HEIGHT=%%b 9 | ) 10 | @REM set /a NEW_WIDTH=WIDTH*3/4 11 | @REM set /a NEW_HEIGHT=HEIGHT*3/4 12 | set /a NEW_WIDTH=WIDTH 13 | set /a NEW_HEIGHT=HEIGHT 14 | inkscape "%%f" --export-type="png" --export-filename="%%~nf.png" -w !NEW_WIDTH! -h !NEW_HEIGHT! 15 | ) 16 | 17 | echo "Done!"
18 | pause 19 | 20 | 21 | @REM !\[(.*)\]\(\.\./media/pack_ops/(.*)\.png\) 22 | @REM $1 -------------------------------------------------------------------------------- /media/data_batched2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/data_batched2.png -------------------------------------------------------------------------------- /media/data_packed2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/data_packed2.png -------------------------------------------------------------------------------- /media/errormap@0.5x.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/errormap@0.5x.jpg -------------------------------------------------------------------------------- /media/multi_stage_upsample_occ.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/multi_stage_upsample_occ.png -------------------------------------------------------------------------------- /media/nr3d_raw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/nr3d_raw.png -------------------------------------------------------------------------------- /media/pack_ops/convert_png.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal EnableDelayedExpansion 3 | 4 | for %%f in (*.svg) do ( 5 | echo "Converting %%~nf.png" 6 | @REM inkscape "%%f" --export-type="png" --export-filename="%%~nf.png" -w 1200 7 | inkscape "%%f" --export-type="png" --export-filename="%%~nf.png" 8 | ) 9 | 10 | echo "Done!" 
11 | pause 12 | -------------------------------------------------------------------------------- /media/pack_ops/interleave_arange.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/interleave_arange.png -------------------------------------------------------------------------------- /media/pack_ops/interleave_arange_simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/interleave_arange_simple.png -------------------------------------------------------------------------------- /media/pack_ops/interleave_linstep.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/interleave_linstep.png -------------------------------------------------------------------------------- /media/pack_ops/merge_two_packs_sorted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/merge_two_packs_sorted.png -------------------------------------------------------------------------------- /media/pack_ops/merge_two_packs_sorted_a_includes_b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/merge_two_packs_sorted_a_includes_b.png -------------------------------------------------------------------------------- /media/pack_ops/merge_two_packs_sorted_aligned.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/merge_two_packs_sorted_aligned.png -------------------------------------------------------------------------------- /media/pack_ops/packed_add.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_add.png -------------------------------------------------------------------------------- /media/pack_ops/packed_backward_diff_prepends.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_backward_diff_prepends.png -------------------------------------------------------------------------------- /media/pack_ops/packed_cumprod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_cumprod.png -------------------------------------------------------------------------------- /media/pack_ops/packed_cumsum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_cumsum.png -------------------------------------------------------------------------------- /media/pack_ops/packed_diff.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_diff.png -------------------------------------------------------------------------------- /media/pack_ops/packed_diff_appends.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_diff_appends.png -------------------------------------------------------------------------------- /media/pack_ops/packed_invert_cdf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_invert_cdf.png -------------------------------------------------------------------------------- /media/pack_ops/packed_mean.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_mean.png -------------------------------------------------------------------------------- /media/pack_ops/packed_searchsorted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_searchsorted.png -------------------------------------------------------------------------------- /media/pack_ops/packed_searchsorted_packed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_searchsorted_packed.png -------------------------------------------------------------------------------- /media/pack_ops/packed_sort.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_sort.png -------------------------------------------------------------------------------- /media/pack_ops/packed_sum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops/packed_sum.png -------------------------------------------------------------------------------- /media/pack_ops_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/media/pack_ops_overview.png -------------------------------------------------------------------------------- /nr3d_lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/coordinates/README.md: -------------------------------------------------------------------------------- 1 | # Coordinates 2 | An ongoing list of various 3D coordinate systems. 
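As a quick, hedged illustration of why these conventions matter in practice (the helper below is a minimal sketch and is not part of nr3d_lib): converting a camera-to-world pose between the OpenCV and OpenGL conventions listed below amounts to flipping the camera's local y and z axes.

```python
import numpy as np

# Flip the camera's local y (down -> up) and z (forward -> backward) axes.
CV_TO_GL = np.diag([1.0, -1.0, -1.0, 1.0])

def opencv_c2w_to_opengl(c2w_cv: np.ndarray) -> np.ndarray:
    """Convert a 4x4 camera-to-world pose from the OpenCV to the OpenGL convention.

    The physical camera is unchanged; only the axis convention of its local frame differs.
    """
    return c2w_cv @ CV_TO_GL
```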
3 | 4 | ## OpenCV / COLMAP / Open3D / standard pinhole camera in textbook 5 | - right-handed 6 | - [open3d](https://github.com/isl-org/Open3D/issues/1347#issuecomment-558205561) 7 | ```python 8 | """ 9 | < opencv / colmap convention > 10 | Facing [+z] direction, y downwards, x right, 11 | z 12 | ↗ 13 | / 14 | / 15 | o------> x 16 | | 17 | | 18 | | 19 | ↓ 20 | y 21 | """ 22 | ``` 23 | 24 | ## OpenGL / Blender 25 | 26 | ```python 27 | """ 28 | < openGL / blender convention > 29 | Facing [-z] direction, y upwards, x right 30 | y 31 | ↑ 32 | | 33 | | 34 | | 35 | o-------> x 36 | / 37 | / 38 | ↙ 39 | z 40 | """ 41 | ``` 42 | 43 | ## Unreal Engine / CARLA 44 | - :warning: left-handed :warning: 45 | - [source](https://carla.readthedocs.io/en/latest/python_api/#carlarotation) 46 | - > CARLA uses the Unreal Engine coordinates system. This is a Z-up left-handed system. 47 | 48 | ```python 49 | """ 50 | < carla / UE convention > 51 | Facing [+x] direction, z upwards, y right 52 | z ↑ 53 | | ↗ x 54 | | / 55 | |/ 56 | o---------> 57 | y 58 | 59 | 60 | 61 | 62 | """ 63 | ``` 64 | 65 | ## Unity 66 | - :warning: left-handed :warning: 67 | - 68 | 69 | ```python 70 | """ 71 | < Unity convention > 72 | Facing [+z] direction, y upwards, x right 73 | y ↑ 74 | | ↗ z 75 | | / 76 | |/ 77 | o---------> 78 | x 79 | 80 | 81 | 82 | 83 | """ 84 | ``` 85 | 86 | ## VTK / mayavi 87 | 88 | - [source](https://kitware.github.io/vtk-examples/site/VTKBook/08Chapter8/#81-coordinate-systems) 89 | ```python 90 | """ 91 | < VTK convention > 92 | Facing [+y] direction, z upwards, x right 93 | z ↑ 94 | | ↗ y 95 | | / 96 | |/ 97 | o---------> 98 | x 99 | 100 | 101 | 102 | 103 | """ 104 | ``` 105 | 106 | ## ROS / waymo 107 | - right-handed 108 | - [source](https://waymo.com/open/data/perception/), in #Coordinate Systems 109 | - > The x-axis points down the lens barrel out of the lens. The z-axis points up. The y/z plane is parallel to the camera plane. The coordinate system is right handed. 
110 | ```python 111 | """ 112 | < ROS / waymo convention > 113 | Facing [+x] direction, z upwards, y left 114 | z ↑ 115 | | ↗ x 116 | | / 117 | |/ 118 | <--------o 119 | y 120 | 121 | 122 | 123 | 124 | """ 125 | ``` 126 | 127 | 128 | ## habitat_sim 129 | 130 | - [ ] check habitat_sim 131 | 132 | ```python 133 | """ 134 | < habitat_sim convention > 135 | Facing [+x] direction, y upwards, z right 136 | y ↑ 137 | | ↗ x 138 | | / 139 | |/ 140 | o---------> 141 | z 142 | """ 143 | ``` 144 | -------------------------------------------------------------------------------- /nr3d_lib/coordinates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/coordinates/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/coordinates/shapenet_to_srn.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file shapenet_to_srn.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Helper functions for coordinate conversion among different versions of Shapenet dataset 5 | and SRN (Scene representation network, V.Sitzmann et al) dataset 6 | """ 7 | 8 | import numpy as np 9 | from copy import deepcopy 10 | 11 | def from_shapenet_to_srn(points, normalize_dict: dict, conversion='v2_to_srn'): 12 | """ Align points from shapenet models to SRN renderings 13 | Args: 14 | points: [N, 3] 15 | normalize_dict: loaded dict of the model_normalized.json file 16 | conversion: choose among: 17 | v2_to_srn: From shapenet v2 to SRN 18 | v1_to_srn: From shapenet v1 to SRN 19 | v2_to_sitzmann_rendering: From shapenet v2 to https://github.com/vsitzmann/shapenet_renderer 20 | v1_to_sitzmann_rendering: From shapenet v1 to https://github.com/vsitzmann/shapenet_renderer 21 | Return: [N,3] 22 | """ 23 | 24 | bmax = np.array(normalize_dict['max']) 25 | bmin = np.array(normalize_dict['min']) 26 | centroid = np.array(normalize_dict['centroid']) 27 | norm = np.linalg.norm(bmax-bmin) 28 | center = (bmax + bmin) / 2. 29 | 30 | points = deepcopy(points) 31 | #--------------- 32 | # offset the model, so that the center of the bounding box is at the origin. 33 | # for shapenet v1, it's already satisfied. 34 | #--------------- 35 | if conversion == 'v2_to_srn': 36 | points[:, :3] = (points[:, :3] + (centroid - center) / norm) 37 | 38 | if conversion == 'v1_to_srn' or conversion == 'v2_to_srn': 39 | #--------------- 40 | # rescale the model, so that the max value of the bounding box size equals 1.
41 |         #---------------
42 |         points[:, :3] = points[:, :3] / ((bmax-bmin).max()/norm)
43 | 
44 |     #---------------
45 |     # different choices of rotations
46 |     #----------------
47 |     if conversion == 'v2_to_srn':
48 |         #----------------
49 |         # From shapenet v2 to SRN
50 |         R2srn = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])
51 | 
52 |     elif conversion == 'v1_to_srn':
53 |         #----------------
54 |         # From shapenet v1 to SRN
55 |         R2srn = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
56 | 
57 |     elif conversion == 'v1_to_sitzmann_rendering':
58 |         #-----------------
59 |         # From shapenet v1 to sitzmann's rendering scripts
60 |         # R2srn = np.array([[0, 0, -1], [0, 1, 0], [-1, 0, 0]])
61 |         # R2srn = np.array([[1, 0, 0], [0, 1, 0], [0, 0, -1]])
62 |         R2srn = np.eye(3)
63 |     elif conversion == 'v2_to_sitzmann_rendering':
64 |         #------------------
65 |         # From shapenet v2 to sitzmann's rendering scripts
66 |         # # no rotation is needed
67 |         # R2srn = np.eye(3)
68 |         R2srn = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
69 |     else:
70 |         raise RuntimeError("please specify conversion type")
71 | 
72 |     points[:, :3] = np.matmul(R2srn[None, ...], points[:, :3, None])[...,0]
73 | 
74 |     return points
75 | 
--------------------------------------------------------------------------------
/nr3d_lib/fmt.py:
--------------------------------------------------------------------------------
1 | """
2 | @file fmt.py
3 | @author Jianfei Guo, Shanghai AI Lab
4 | @brief Terminal/console logging formatter.
5 | """
6 | 
7 | import sys
8 | import logging
9 | #---------------------------------------------------------------------------
10 | #----------------------- logging instead of printing -----------------------
11 | #---------------------------------------------------------------------------
12 | 
13 | logs = set()
14 | # LOGGER
15 | BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
16 | RESET_SEQ = "\033[0m"
17 | COLOR_SEQ = "\033[1;%dm"
18 | 
19 | COLORS = {
20 |     'WARNING': YELLOW,
21 |     'INFO': WHITE,
22 |     'DEBUG': BLUE,
23 |     'CRITICAL': YELLOW,
24 |     'ERROR': RED
25 | }
26 | 
27 | def colored_str(s: str, color: int = BLACK):
28 |     assert 0 <= color < 8
29 |     return COLOR_SEQ % (30 + color) + s + RESET_SEQ
30 | 
31 | class ColoredFormatter(logging.Formatter):
32 |     def __init__(self, msg, use_color=True):
33 |         logging.Formatter.__init__(self, msg)
34 |         self.use_color = use_color
35 | 
36 |     def format(self, record):
37 |         msg = record.msg
38 |         levelname = record.levelname
39 |         if self.use_color and levelname in COLORS and COLORS[levelname] != WHITE:
40 |             if isinstance(msg, str):
41 |                 msg_color = COLOR_SEQ % (30 + COLORS[levelname]) + msg + RESET_SEQ
42 |                 record.msg = msg_color
43 |             levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
44 |             record.levelname = levelname_color
45 |         return logging.Formatter.format(self, record)
46 | 
47 | def init_log(name, level=logging.INFO):
48 |     if (name, level) in logs:
49 |         return logging.getLogger(name)
50 | 
51 |     # Break circular dependency
52 |     from .distributed import is_master, get_rank
53 | 
54 |     logs.add((name, level))
55 |     logger = logging.getLogger(name)
56 |     logger.setLevel(level)
57 |     ch = logging.StreamHandler(stream=sys.stdout)
58 |     ch.setLevel(level)
59 | 
60 |     logger.addFilter(lambda record: is_master())
61 | 
62 |     format_str = f'%(asctime)s-rk{get_rank()}-%(filename)s#%(lineno)d:%(message)s'
63 |     formatter = ColoredFormatter(format_str)
64 |     ch.setFormatter(formatter)
65 |     logger.addHandler(ch)
66 | 
67 |     logger.propagate = False
68 | 
69 |     return logger
70 | 
71 | 
72 | log = init_log('global',
logging.INFO)
--------------------------------------------------------------------------------
/nr3d_lib/graphics/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/graphics/__init__.py
--------------------------------------------------------------------------------
/nr3d_lib/graphics/cameras/__init__.py:
--------------------------------------------------------------------------------
1 | from .pinhole import *
2 | from .opencv import *
3 | from .fisheye import *
4 | from .common import *
5 | from .normalize_views import *
6 | from .camera_paths import *
--------------------------------------------------------------------------------
/nr3d_lib/graphics/cameras/fisheye.py:
--------------------------------------------------------------------------------
1 | """
2 | @file fisheye.py
3 | @author Jianfei Guo, Shanghai AI Lab
4 | @brief Fisheye camera math ops.
5 |     Refer to: https://docs.opencv.org/4.7.0/db/d58/group__calib3d__fisheye.html
6 | """
7 | 
8 | __all__ = [
9 |     'fisheye_distort_points_cpu',
10 |     'fisheye_undistort_points_cpu'
11 | ]
12 | 
13 | import cv2
14 | import numpy as np
15 | from typing import Optional
16 | 
17 | import torch
18 | 
19 | from nr3d_lib.utils import check_to_torch
20 | 
21 | def fisheye_distort_points_cpu(
22 |     points: torch.Tensor, # Pixel coords of input undistorted image
23 |     K: torch.Tensor, # Camera matrix for output distorted points
24 |     dist: torch.Tensor,
25 |     new_K: Optional[torch.Tensor] = None, # additional K for input undistorted points (to normalize input points)
26 |     ) -> torch.Tensor:
27 |     if new_K is None:
28 |         new_K = K
29 | 
30 |     # Convert 2D points from pixels to normalized camera coordinates
31 |     new_cx: torch.Tensor = new_K[..., 0:1, 2] # principal point in x (Bx1)
32 |     new_cy: torch.Tensor = new_K[..., 1:2, 2] # principal point in y (Bx1)
33 |     new_fx: torch.Tensor = new_K[..., 0:1, 0] # focal in x (Bx1)
34 |     new_fy: torch.Tensor = new_K[..., 1:2, 1] # focal in y (Bx1)
35 | 
36 |     # This is equivalent to K^-1 [u,v,1]^T
37 |     x: torch.Tensor = (points[..., 0] - new_cx) / new_fx
38 |     y: torch.Tensor = (points[..., 1] - new_cy) / new_fy
39 | 
40 |     points = torch.stack([x,y], dim=-1)
41 | 
42 |     distorted = cv2.fisheye.distortPoints(
43 |         points.data.cpu().numpy(), # Normalized pixel coords of input undistorted image
44 |         K.data.cpu().numpy(), # Camera matrix for output distorted points
45 |         dist.data.cpu().numpy()
46 |     )
47 | 
48 |     return check_to_torch(distorted, ref=points)
49 | 
50 | def fisheye_distort_points(
51 |     points: torch.Tensor, # Pixel coords of input undistorted image
52 |     K: torch.Tensor, # Camera matrix for output distorted points
53 |     dist: torch.Tensor,
54 |     new_K: Optional[torch.Tensor] = None, # additional K for input undistorted points (to normalize input points)
55 |     ) -> torch.Tensor:
56 |     if new_K is None:
57 |         new_K = K
58 |     raise NotImplementedError
59 | 
60 | def fisheye_undistort_points_cpu(
61 |     points: torch.Tensor, # Pixel coords of input distorted image
62 |     K: torch.Tensor, # Camera matrix for input distorted points
63 |     dist: torch.Tensor,
64 |     new_K: Optional[torch.Tensor] = None, # additional K for output undistorted points
65 |     ) -> torch.Tensor:
66 |     prefix = points.shape[0:-2]
67 |     # NOTE: K and dist should be equal for all
68 |     # K = K.expand([*prefix,3,3])
69 |     # dist = dist.expand([*prefix,dist.shape[-1]])
70 |     if new_K is None:
71 |         new_K = K
72 |     undistorted = 
cv2.fisheye.undistortPoints( 73 | points.flatten(0,-3).data.cpu().numpy(), # Normalized pixel coords of input distorted image 74 | K.data.cpu().numpy(), # Camera matrix for input distorted points 75 | dist.data.cpu().numpy(), 76 | None, 77 | new_K.data.cpu().numpy() 78 | ) 79 | undistorted = check_to_torch(undistorted, ref=points) 80 | undistorted = undistorted.unflatten(0, prefix) 81 | return undistorted 82 | -------------------------------------------------------------------------------- /nr3d_lib/graphics/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | from .nerf_utils import * 2 | from .nerf_ray_query import * -------------------------------------------------------------------------------- /nr3d_lib/graphics/neus.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/graphics/neus.py -------------------------------------------------------------------------------- /nr3d_lib/graphics/neus/__init__.py: -------------------------------------------------------------------------------- 1 | from .neus_utils import * 2 | from .neus_ray_query import * 3 | -------------------------------------------------------------------------------- /nr3d_lib/graphics/pack_ops/__init__.py: -------------------------------------------------------------------------------- 1 | from .pack_ops import * -------------------------------------------------------------------------------- /nr3d_lib/graphics/pointcloud.py: -------------------------------------------------------------------------------- 1 | 2 | import plyfile # pip install plyfile 3 | import numpy as np 4 | from typing import NamedTuple 5 | 6 | from nr3d_lib.fmt import log 7 | 8 | def export_pcl_ply(pcl: np.ndarray, pcl_color: np.ndarray = None, filepath: str = ...): 9 | """ 10 | pcl_color: if provided, should be uint8_t 11 | """ 12 | num_pts = pcl.shape[0] 13 | if pcl_color is not None: 14 | verts_tuple = np.zeros((num_pts,), dtype=[( 15 | "x", "f4"), ("y", "f4"), ("z", "f4"), ("red", "u1"), ("green", "u1"), ("blue", "u1")]) 16 | data = [tuple(p1.tolist() + p2.tolist()) for p1, p2 in zip(pcl, pcl_color)] 17 | verts_tuple[:] = data[:] 18 | else: 19 | verts_tuple = np.zeros((num_pts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")]) 20 | data = [tuple(p.tolist()) for p in pcl] 21 | verts_tuple[:] = data[:] 22 | 23 | el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex") 24 | ply_data = plyfile.PlyData([el_verts]) 25 | log.info(f"=> Saving pointclouds to {str(filepath)}") 26 | ply_data.write(filepath) -------------------------------------------------------------------------------- /nr3d_lib/graphics/tetmesh.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/graphics/tetmesh.py -------------------------------------------------------------------------------- /nr3d_lib/gui/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/gui/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/gui/datalayers/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/gui/datalayers/__init__.py
--------------------------------------------------------------------------------
/nr3d_lib/gui/datalayers/forest_datalayers.py:
--------------------------------------------------------------------------------
1 | """
2 | Modified from https://github.com/NVIDIAGameWorks/kaolin-wisp
3 | """
4 | 
5 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 | #
7 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
8 | # and proprietary rights in and to this software, related documentation
9 | # and any modifications thereto. Any use, reproduction, disclosure or
10 | # distribution of this software and related documentation without an express
11 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
12 | 
13 | from typing import Dict
14 | from abc import ABC, abstractmethod
15 | 
16 | import torch
17 | 
18 | from nr3d_lib.models.spatial import ForestBlockSpace
19 | from nr3d_lib.models.grid_encodings.utils import points_to_corners
20 | from nr3d_lib.gui.kaolin_wisp_modified.core.primitives import PrimitivesPack
21 | from nr3d_lib.plot import soft_blue, soft_red, lime_green, purple, gold, light_pink
22 | 
23 | class Datalayers(ABC):
24 | 
25 |     @abstractmethod
26 |     def needs_redraw(self, *args, **kwargs) -> bool:
27 |         raise NotImplementedError
28 | 
29 |     @abstractmethod
30 |     def regenerate_data_layers(self, *args, **kwargs) -> Dict[str, PrimitivesPack]:
31 |         raise NotImplementedError
32 | 
33 | 
34 | class ForestDatalayers(Datalayers):
35 | 
36 |     def __init__(self):
37 |         self._last_state = dict()
38 | 
39 |     def needs_redraw(self, grid: ForestBlockSpace) -> bool:
40 |         # The set of allocated blocks (block_ks) determines what is drawn,
41 |         # so comparing it is a plausible heuristic for whether the frame should be redrawn
42 |         return not ('block_ks' in self._last_state and torch.equal(self._last_state['block_ks'], grid.block_ks))
43 | 
44 |     def regenerate_data_layers(self, grid: ForestBlockSpace, alpha=1.0) -> Dict[str, PrimitivesPack]:
45 |         data_layers = dict()
46 |         # lod_colors = [
47 |         #     torch.tensor((*soft_blue, alpha)),
48 |         #     torch.tensor((*soft_red, alpha)),
49 |         #     torch.tensor((*lime_green, alpha)),
50 |         #     torch.tensor((*purple, alpha)),
51 |         #     torch.tensor((*gold, alpha)),
52 |         # ]
53 | 
54 |         colors = torch.tensor((*light_pink, alpha))
55 | 
56 |         cells = PrimitivesPack()
57 | 
58 |         corners_int = points_to_corners(grid.block_ks)
59 |         corners = grid.world_origin + corners_int * grid.world_block_size
60 | 
61 |         grid_lines = corners[:, [(0, 1), (1, 3), (3, 2), (2, 0),
62 |                                  (4, 5), (5, 7), (7, 6), (6, 4),
63 |                                  (0, 4), (1, 5), (2, 6), (3, 7)]]
64 | 
65 |         grid_lines_start = grid_lines[:, :, 0].reshape(-1, 3)
66 |         grid_lines_end = grid_lines[:, :, 1].reshape(-1, 3)
67 |         color_tensor = colors
68 |         grid_lines_color = color_tensor.repeat(grid_lines_start.shape[0], 1)
69 |         cells.add_lines(grid_lines_start, grid_lines_end, grid_lines_color)
70 | 
71 |         data_layers[f'Occupancy Grid'] = cells
72 | 
73 |         self._last_state['block_ks'] = grid.block_ks
74 |         return data_layers
75 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/datalayers/occgrid_datalayers.py:
--------------------------------------------------------------------------------
1 | """
2 | Modified from https://github.com/NVIDIAGameWorks/kaolin-wisp
3 | """
4 | 
5 | # Copyright (c) 2022, NVIDIA CORPORATION &
AFFILIATES. All rights reserved.
6 | #
7 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
8 | # and proprietary rights in and to this software, related documentation
9 | # and any modifications thereto. Any use, reproduction, disclosure or
10 | # distribution of this software and related documentation without an express
11 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
12 | 
13 | from typing import Dict
14 | from abc import ABC, abstractmethod
15 | 
16 | import torch
17 | 
18 | import kaolin.ops.spc as spc_ops
19 | from nr3d_lib.models.accelerations.occgrid_accel import OccGridAccel
20 | from nr3d_lib.gui.kaolin_wisp_modified.core.primitives import PrimitivesPack
21 | from nr3d_lib.plot import soft_blue, soft_red, lime_green, purple, gold, light_pink
22 | 
23 | class Datalayers(ABC):
24 | 
25 |     @abstractmethod
26 |     def needs_redraw(self, *args, **kwargs) -> bool:
27 |         raise NotImplementedError
28 | 
29 |     @abstractmethod
30 |     def regenerate_data_layers(self, *args, **kwargs) -> Dict[str, PrimitivesPack]:
31 |         raise NotImplementedError
32 | 
33 | 
34 | class OccGridDatalayers(Datalayers):
35 | 
36 |     def __init__(self):
37 |         self._last_state = dict()
38 | 
39 |     def needs_redraw(self, grid: OccGridAccel) -> bool:
40 |         # The occupancy grid data determines what is drawn,
41 |         # so comparing it is a plausible heuristic for whether the frame should be redrawn
42 |         return not ('occ_grid' in self._last_state and torch.equal(self._last_state['occ_grid'], grid.occ.occ_grid.data))
43 | 
44 |     def regenerate_data_layers(self, grid: OccGridAccel, alpha=1.0) -> Dict[str, PrimitivesPack]:
45 |         data_layers = dict()
46 |         # lod_colors = [
47 |         #     torch.tensor((*soft_blue, alpha)),
48 |         #     torch.tensor((*soft_red, alpha)),
49 |         #     torch.tensor((*lime_green, alpha)),
50 |         #     torch.tensor((*purple, alpha)),
51 |         #     torch.tensor((*gold, alpha)),
52 |         # ]
53 | 
54 |         colors = torch.tensor((*light_pink, alpha))
55 | 
56 |         cells = PrimitivesPack()
57 | 
58 |         points = grid.occ.occ_grid.nonzero().short()
59 |         corners = spc_ops.points_to_corners(points) / grid.occ.resolution
60 | 
61 |         corners = corners * 2.0 - 1.0
62 |         corners = grid.space.unnormalize_coords(corners)
63 | 
64 |         grid_lines = corners[:, [(0, 1), (1, 3), (3, 2), (2, 0),
65 |                                  (4, 5), (5, 7), (7, 6), (6, 4),
66 |                                  (0, 4), (1, 5), (2, 6), (3, 7)]]
67 | 
68 |         grid_lines_start = grid_lines[:, :, 0].reshape(-1, 3)
69 |         grid_lines_end = grid_lines[:, :, 1].reshape(-1, 3)
70 |         color_tensor = colors
71 |         grid_lines_color = color_tensor.repeat(grid_lines_start.shape[0], 1)
72 |         cells.add_lines(grid_lines_start, grid_lines_end, grid_lines_color)
73 | 
74 |         data_layers[f'Occupancy Grid'] = cells
75 | 
76 |         self._last_state['occ_grid'] = grid.occ.occ_grid.data
77 |         return data_layers
78 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/datalayers/octree_datalayers.py:
--------------------------------------------------------------------------------
1 | """
2 | Modified from https://github.com/NVIDIAGameWorks/kaolin-wisp
3 | """
4 | 
5 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 | #
7 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
8 | # and proprietary rights in and to this software, related documentation
9 | # and any modifications thereto.
Any use, reproduction, disclosure or
10 | # distribution of this software and related documentation without an express
11 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
12 | 
13 | from typing import Dict
14 | from abc import ABC, abstractmethod
15 | 
16 | import torch
17 | 
18 | import kaolin.ops.spc as spc_ops
19 | from nr3d_lib.models.accelerations import OctreeAS
20 | from nr3d_lib.gui.kaolin_wisp_modified.core.primitives import PrimitivesPack
21 | from nr3d_lib.plot import soft_blue, soft_red, lime_green, purple, gold, light_pink
22 | 
23 | class Datalayers(ABC):
24 | 
25 |     @abstractmethod
26 |     def needs_redraw(self, *args, **kwargs) -> bool:
27 |         raise NotImplementedError
28 | 
29 |     @abstractmethod
30 |     def regenerate_data_layers(self, *args, **kwargs) -> Dict[str, PrimitivesPack]:
31 |         raise NotImplementedError
32 | 
33 | 
34 | class OctreeDatalayers(Datalayers):
35 | 
36 |     def __init__(self):
37 |         self._last_state = dict()
38 | 
39 |     def needs_redraw(self, grid: OctreeAS) -> bool:
40 |         # Pyramids contain information about the number of cells per level,
41 |         # it's a plausible heuristic to determine whether the frame should be redrawn
42 |         return not ('pyramids' in self._last_state and torch.equal(self._last_state['pyramids'], grid.spc.pyramids[0]))
43 | 
44 |     def regenerate_data_layers(self, grid: OctreeAS, render_level=None, max_only=False, alpha=1.0) -> Dict[str, PrimitivesPack]:
45 |         data_layers = dict()
46 |         # lod_colors = [
47 |         #     torch.tensor((*soft_blue, alpha)),
48 |         #     torch.tensor((*soft_red, alpha)),
49 |         #     torch.tensor((*lime_green, alpha)),
50 |         #     torch.tensor((*purple, alpha)),
51 |         #     torch.tensor((*gold, alpha)),
52 |         # ]
53 | 
54 |         lod_colors = [
55 |             torch.tensor((*light_pink, alpha)),
56 |         ]
57 | 
58 |         max_level = grid.max_level if render_level is None else render_level
59 |         if max_only:
60 |             levels = [max_level-1]
61 |         else:
62 |             levels = list(range(max_level))
63 | 
64 |         for lod in levels:
65 |             cells = PrimitivesPack()
66 | 
67 |             level_points = spc_ops.unbatched_get_level_points(grid.spc.point_hierarchies, grid.spc.pyramids[0], lod)
68 | 
69 |             corners = spc_ops.points_to_corners(level_points) / (2 ** lod)
70 |             corners = corners * 2.0 - 1.0
71 |             corners = grid.space.unnormalize_coords(corners)
72 | 
73 |             grid_lines = corners[:, [(0, 1), (1, 3), (3, 2), (2, 0),
74 |                                      (4, 5), (5, 7), (7, 6), (6, 4),
75 |                                      (0, 4), (1, 5), (2, 6), (3, 7)]]
76 | 
77 |             grid_lines_start = grid_lines[:, :, 0].reshape(-1, 3)
78 |             grid_lines_end = grid_lines[:, :, 1].reshape(-1, 3)
79 |             color_tensor = lod_colors[lod % len(lod_colors)]
80 |             grid_lines_color = color_tensor.repeat(grid_lines_start.shape[0], 1)
81 |             cells.add_lines(grid_lines_start, grid_lines_end, grid_lines_color)
82 | 
83 |             data_layers[f'Octree LOD{lod}'] = cells
84 | 
85 |         self._last_state['pyramids'] = grid.spc.pyramids[0]
86 |         return data_layers
87 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/gui/kaolin_wisp_modified/__init__.py
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/gui/kaolin_wisp_modified/core/__init__.py
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/cuda_guard.py:
--------------------------------------------------------------------------------
1 | """
2 | Borrowed from https://github.com/NVIDIAGameWorks/kaolin-wisp
3 | """
4 | 
5 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 | #
7 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
8 | # and proprietary rights in and to this software, related documentation
9 | # and any modifications thereto. Any use, reproduction, disclosure or
10 | # distribution of this software and related documentation without an express
11 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
12 | 
13 | def setup_cuda_context():
14 |     """ Carefully load CUDA based frameworks to avoid interference.
15 |         Interactive apps should invoke this function as early as possible.
16 |     """
17 |     import os
18 |     if not os.environ.get('NR3D_HEADLESS') == '1':
19 |         window = None
20 |         try:
21 |             # !!! Should be called when interactive wisp loads, before any torch ops take place !!!
22 |             # The following is a hacky workaround due to a cublas error on interfering streams:
23 |             # pycuda.gl fails to initialize after torch performs batched matrix multiplication
24 |             # (the bug causes any following torch.dot invocations to fail).
25 |             # The solution is to initialize pycuda.gl early when wisp loads.
26 |             # To load pycuda.gl, we require a GL context of some window,
27 |             # so here we force glumpy-glfw to create an invisible window, which generates an opengl context.
28 |             # Then immediately import pycuda.gl.autoinit to let it initialize properly
29 |             from glumpy import app
30 |             # Tell glumpy to use glfw backend
31 |             app.use("glfw_imgui")
32 |             # Let glumpy use glfw to create an invisible window
33 |             window = app.Window(width=10, height=10, title='dummy', visible=False)
34 | 
35 |             # pycuda initializes the default context with "cuGLCtxCreate", but this call will fail if a GL context
36 |             # is not currently set. Therefore import is invoked only after glfw obtains a GL context.
37 |             # See: https://documen.tician.de/pycuda/gl.html#module-pycuda.gl.autoinit
38 |             import pycuda.gl.autoinit
39 | 
40 |             # Next tell torch to initialize the primary cuda context
41 |             import torch
42 |             torch.cuda.init()
43 | 
44 |             # pycuda should not create a new context, but retain the torch one
45 |             import pycuda.driver as cuda
46 |             pycuda_context = cuda.Device(0).retain_primary_context()
47 | 
48 |         except (ModuleNotFoundError, ImportError):
49 |             pass # Don't fail if interactive mode is disabled (e.g: glumpy or pycuda are unavailable)
50 |         finally:
51 |             if window is not None:
52 |                 window.close()
53 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/gizmos/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto.
Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
8 | 
9 | 
10 | from .gizmo import *
11 | from .ogl.world_grid import *
12 | from .ogl.axis_painter import *
13 | from .ogl.primitives_painter import *
14 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/gizmos/gizmo.py:
--------------------------------------------------------------------------------
1 | """
2 | Borrowed from https://github.com/NVIDIAGameWorks/kaolin-wisp
3 | """
4 | 
5 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6 | #
7 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
8 | # and proprietary rights in and to this software, related documentation
9 | # and any modifications thereto. Any use, reproduction, disclosure or
10 | # distribution of this software and related documentation without an express
11 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
12 | 
13 | 
14 | from abc import ABC, abstractmethod
15 | from kaolin.render.camera import Camera
16 | 
17 | 
18 | class Gizmo(ABC):
19 |     """
20 |     A template for gizmos that the interactive renderer is able to render over the canvas.
21 |     Gizmos are entities rendered by the graphics api (i.e: OpenGL).
22 |     Normally they're used to draw transient markups or tools over the canvas (such as a world grid or axes).
23 |     """
24 | 
25 |     @abstractmethod
26 |     def render(self, camera: Camera):
27 |         """ Renders the gizmo using the graphics api. """
28 |         raise NotImplementedError("Gizmos must implement the render function")
29 | 
30 |     @abstractmethod
31 |     def destroy(self):
32 |         """ Release GL resources, must be called from the rendering thread which owns the GL context """
33 |         raise NotImplementedError("Gizmos must implement the destroy function")
34 | 
--------------------------------------------------------------------------------
/nr3d_lib/gui/kaolin_wisp_modified/gizmos/ogl/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | #
3 | # NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property
4 | # and proprietary rights in and to this software, related documentation
5 | # and any modifications thereto. Any use, reproduction, disclosure or
6 | # distribution of this software and related documentation without an express
7 | # license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited.
8 | -------------------------------------------------------------------------------- /nr3d_lib/gui/neural_renderer.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file neural_renderer.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief A kaolin-wisp interface for nr3d_lib-defined renderers 5 | """ 6 | 7 | import torch 8 | import torch.nn as nn 9 | 10 | from nr3d_lib.config import ConfigDict 11 | from nr3d_lib.gui.datalayers.forest_datalayers import ForestDatalayers 12 | from nr3d_lib.gui.datalayers.occgrid_datalayers import OccGridDatalayers 13 | 14 | from nr3d_lib.models.spatial import ForestBlockSpace 15 | from nr3d_lib.models.accelerations.occgrid_accel import OccGridAccel 16 | 17 | 18 | class NR3DKaolinWispRenderer(object): 19 | def __init__( 20 | self, model: nn.Module, config: dict, image_embed_code: torch.Tensor = None) -> None: 21 | self.model = model 22 | self.config = config 23 | self.image_embed_code = image_embed_code 24 | 25 | if isinstance(self.model.space, ForestBlockSpace): 26 | self.layers_painter = ForestDatalayers() 27 | self._data_layers = self.layers_painter.regenerate_data_layers( 28 | self.model.space, alpha=0.2) 29 | elif isinstance(self.model.accel, OccGridAccel): 30 | self.layers_painter = OccGridDatalayers() 31 | self._data_layers = self.layers_painter.regenerate_data_layers(self.model.accel) 32 | else: 33 | self._data_layers = {} 34 | 35 | def data_layers(self): 36 | return {} #self._data_layers 37 | 38 | def render(self, rays_o: torch.Tensor, rays_d: torch.Tensor, near, far, res_x, res_y): 39 | ray_input = dict(rays_o=rays_o, rays_d=rays_d, near=near, far=far, 40 | rays_h_appear=None if self.image_embed_code is None 41 | else self.image_embed_code.expand(rays_o.shape[0], -1)) 42 | ray_tested = self.model.space.ray_test(**ray_input) 43 | ret = self.model.ray_query(ray_input=ray_input, ray_tested=ray_tested, config=self.config, 44 | render_per_obj_individual=True) 45 | 46 | # To render buffer 47 | rendered = ret['rendered'] 48 | rgb = rendered['rgb_volume'].view([res_y, res_x, 3]) * 255. 49 | alpha = rendered['mask_volume'].view([res_y, res_x, 1]) * 255. 50 | rgba = torch.cat([rgb, alpha], dim=-1) 51 | # depth = (rendered['depth_volume'] / far).view([res_y, res_x, 1]) 52 | # depth = (rendered['depth_volume'] * dir_scale).view([res_y, res_x, 1]) 53 | depth = rendered['depth_volume'].view([res_y, res_x, 1]) 54 | return rgba, depth 55 | -------------------------------------------------------------------------------- /nr3d_lib/maths/__init__.py: -------------------------------------------------------------------------------- 1 | from .common import * 2 | from .slerp import * 3 | from .transforms import * 4 | from .chamfer_distance import * 5 | from .spherical_harmonics import * 6 | from .depth_completion_np import * 7 | from .depth_completion_pytorch import * -------------------------------------------------------------------------------- /nr3d_lib/maths/chamfer_distance.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from typing import Literal, Tuple, Union 4 | 5 | import torch 6 | import torch.nn.functional as F 7 | 8 | def chamfer_distance( 9 | x: torch.Tensor, y: torch.Tensor, norm: int = 2, 10 | backend: Literal['pytorch', 'pt3d_import', 'pt3d'] = 'pt3d') -> Tuple[torch.Tensor, torch.Tensor]: 11 | """ Chamfer distance between two pointclouds x and y. 
12 | 
13 |     Args:
14 |         x (torch.Tensor): [(B, )N1, 3], point clouds or a batch of point clouds.
15 |         y (torch.Tensor): [(B, )N2, 3], point clouds or a batch of point clouds.
16 |         norm (int, optional): the norm used for the distance. Supports 1 for L1 and 2 for L2. Defaults to 2.
17 | 
18 |     Returns:
19 |         Tuple[torch.Tensor, torch.Tensor]: [(B, )N1], [(B, )N2], the (squared, when norm=2) distance from each point to its nearest neighbor in the other point cloud
20 |     """
21 |     if backend == 'pytorch':
22 |         assert norm == 2, f"backend={backend} only supports norm=2"
23 |         return chamfer_distance_pytorch(x,y)
24 |     elif backend == 'pt3d_import':
25 |         return chamfer_distance_pt3d_import(x, y, norm=norm)
26 |     elif backend == 'pt3d':
27 |         return chamfer_distance_pt3d_borrowed(x, y, norm=norm)
28 |     else:
29 |         raise RuntimeError(f"Invalid backend={backend}")
30 | 
31 | def chamfer_distance_pytorch(x: torch.Tensor, y: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
32 |     """
33 |     NOTE: Not memory efficient, OOM when x,y are large.
34 |     """
35 |     assert (x.dim() == y.dim()) and (x.dim() >= 2) and x.shape[-1] == y.shape[-1]
36 |     x_i = x.unsqueeze(-2) # [..., N1, 1, D]
37 |     y_j = y.unsqueeze(-3) # [..., 1, N2, D]
38 |     D_ij = ((x_i - y_j)**2).sum(dim=-1) # [..., N1, N2]
39 |     cham_x = D_ij.min(dim=-1).values
40 |     cham_y = D_ij.min(dim=-2).values
41 |     return cham_x, cham_y
42 | 
43 | def chamfer_distance_pt3d_import(x: torch.Tensor, y: torch.Tensor, norm: int = 2) -> Tuple[torch.Tensor, torch.Tensor]:
44 |     from pytorch3d.ops.knn import knn_points
45 |     if not ((norm == 1) or (norm == 2)):
46 |         raise ValueError("Support for 1 or 2 norm.")
47 | 
48 |     _x = x[None] if len(x.shape) == 2 else x
49 |     _y = y[None] if len(y.shape) == 2 else y
50 | 
51 |     if _y.shape[0] != _x.shape[0] or _y.shape[2] != _x.shape[2]:
52 |         raise ValueError("y does not have the correct shape.")
53 | 
54 |     x_nn = knn_points(_x, _y, norm=norm, K=1)
55 |     y_nn = knn_points(_y, _x, norm=norm, K=1)
56 | 
57 |     cham_x = x_nn.dists[..., 0] # (N, P1)
58 |     cham_y = y_nn.dists[..., 0] # (N, P2)
59 |     cham_x = cham_x[0] if len(x.shape) == 2 else cham_x
60 |     cham_y = cham_y[0] if len(y.shape) == 2 else cham_y
61 |     return cham_x, cham_y
62 | 
63 | def chamfer_distance_pt3d_borrowed(x: torch.Tensor, y: torch.Tensor, norm: int = 2) -> Tuple[torch.Tensor, torch.Tensor]:
64 |     from nr3d_lib.maths.pytorch3d_knn import knn_points
65 |     if not ((norm == 1) or (norm == 2)):
66 |         raise ValueError("Support for 1 or 2 norm.")
67 | 
68 |     _x = x[None] if len(x.shape) == 2 else x
69 |     _y = y[None] if len(y.shape) == 2 else y
70 | 
71 |     if _y.shape[0] != _x.shape[0] or _y.shape[2] != _x.shape[2]:
72 |         raise ValueError("y does not have the correct shape.")
73 | 
74 |     x_nn = knn_points(_x, _y, norm=norm, K=1)
75 |     y_nn = knn_points(_y, _x, norm=norm, K=1)
76 | 
77 |     cham_x = x_nn.dists[..., 0] # (N, P1)
78 |     cham_y = y_nn.dists[..., 0] # (N, P2)
79 |     cham_x = cham_x[0] if len(x.shape) == 2 else cham_x
80 |     cham_y = cham_y[0] if len(y.shape) == 2 else cham_y
81 |     return cham_x, cham_y
--------------------------------------------------------------------------------
/nr3d_lib/maths/spherical_harmonics.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 |     'eval_sh',
3 |     'RGB2SH',
4 |     'SH2RGB'
5 | ]
6 | 
7 | import torch
8 | 
9 | C0 = 0.28209479177387814
10 | C1 = 0.4886025119029199
11 | C2 = [
12 |     1.0925484305920792,
13 |     -1.0925484305920792,
14 |     0.31539156525252005,
15 |     -1.0925484305920792,
16 |     0.5462742152960396
17 | ]
18 | C3 = [
19 |     -0.5900435899266435,
20 |     2.890611442640554,
21 | 
-0.4570457994644658, 22 | 0.3731763325901154, 23 | -0.4570457994644658, 24 | 1.445305721320277, 25 | -0.5900435899266435 26 | ] 27 | C4 = [ 28 | 2.5033429417967046, 29 | -1.7701307697799304, 30 | 0.9461746957575601, 31 | -0.6690465435572892, 32 | 0.10578554691520431, 33 | -0.6690465435572892, 34 | 0.47308734787878004, 35 | -1.7701307697799304, 36 | 0.6258357354491761, 37 | ] 38 | 39 | def eval_sh(deg: int, sh: torch.Tensor, dirs: torch.Tensor) -> torch.Tensor: 40 | """ 41 | Evaluate spherical harmonics at view directions using hardcoded SH polynomials. 42 | 43 | Args: 44 | deg (int): SH deg. Currently, 0-4 supported 45 | sh (torch.Tensor): [N..., C, (deg + 1)^2] SH coeffs 46 | dirs (torch.Tensor): [N..., 3] view directions 47 | 48 | Returns: 49 | torch.Tensor: [N..., C] evaluated colors 50 | """ 51 | assert deg <= 4 and deg >= 0 52 | 53 | result = C0 * sh[..., 0] 54 | if deg > 0: 55 | x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3] 56 | result = (result - 57 | C1 * y * sh[..., 1] + 58 | C1 * z * sh[..., 2] - 59 | C1 * x * sh[..., 3]) 60 | if deg > 1: 61 | xx, yy, zz = x * x, y * y, z * z 62 | xy, yz, xz = x * y, y * z, x * z 63 | result = (result + 64 | C2[0] * xy * sh[..., 4] + 65 | C2[1] * yz * sh[..., 5] + 66 | C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] + 67 | C2[3] * xz * sh[..., 7] + 68 | C2[4] * (xx - yy) * sh[..., 8]) 69 | 70 | if deg > 2: 71 | result = (result + 72 | C3[0] * y * (3 * xx - yy) * sh[..., 9] + 73 | C3[1] * xy * z * sh[..., 10] + 74 | C3[2] * y * (4 * zz - xx - yy) * sh[..., 11] + 75 | C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] + 76 | C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] + 77 | C3[5] * z * (xx - yy) * sh[..., 14] + 78 | C3[6] * x * (xx - 3 * yy) * sh[..., 15]) 79 | if deg > 3: 80 | result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] + 81 | C4[1] * yz * (3 * xx - yy) * sh[..., 17] + 82 | C4[2] * xy * (7 * zz - 1) * sh[..., 18] + 83 | C4[3] * yz * (7 * zz - 3) * sh[..., 19] + 84 | C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] + 85 | C4[5] * xz * (7 * zz - 3) * sh[..., 21] + 86 | C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] + 87 | C4[7] * xz * (xx - 3 * yy) * sh[..., 23] + 88 | C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24]) 89 | return result 90 | 91 | def RGB2SH(rgb): 92 | return (rgb - 0.5) / C0 93 | 94 | def SH2RGB(sh): 95 | return sh * C0 + 0.5 -------------------------------------------------------------------------------- /nr3d_lib/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/accelerations/occgrid/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .utils import sample_pts_in_voxels, err_msg_empty_occ 3 | from .ema_single import * 4 | from .ema_batched import * 5 | from .getter import * -------------------------------------------------------------------------------- /nr3d_lib/models/accelerations/occgrid_accel/__init__.py: -------------------------------------------------------------------------------- 1 | from .single import * 2 | from .batched import * 3 | from .forest import * 4 | from .dynamic import * 5 | from .batched_dynamic import * 6 | 7 | # def get_occ_grid_accel(type: str = None, **kwargs): 8 | # type = type.lower() 9 | # if type is None or type == 'none': 10 | # return None 11 | 
# elif type in ['occ_grid', 'occgrid']: 12 | # return OccGridAccel(**kwargs) 13 | # elif type in ['occ_grid_batched_ema', 'occgrid_batched_ema']: 14 | # return OccGridAccelBatched_Ema(**kwargs) 15 | # elif type in ['occ_grid_batched_getter', 'occgrid_batched_getter']: 16 | # return OccGridAccelBatched_Getter(**kwargs) 17 | # elif type in ['occ_grid_dynamic', 'occgrid_dynamic']: 18 | # return OccGridAccelDynamic(**kwargs) 19 | # elif type in ['occ_grid_static_and_dynamic', 'occgrid_static_and_dynamic']: 20 | # return OccGridAccelStaticAndDynamic(**kwargs) 21 | # elif type in ['occ_grid_batched_dynamic_ema', 'occgrid_batched_dynamic_ema']: 22 | # return OccGridAccelBatchedDynamic_Ema(**kwargs) 23 | # elif type in ['occ_grid_batched_dynamic_getter', 'occgrid_batched_dynamic_getter']: 24 | # return OccGridAccelBatchedDynamic_Getter(**kwargs) 25 | # elif type in ['occ_grid_forest', 'occgrid_forest']: 26 | # return OccGridAccelForest(**kwargs) 27 | # else: 28 | # raise RuntimeError(f"Invalid type={type}") 29 | -------------------------------------------------------------------------------- /nr3d_lib/models/accelerations/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file utils.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Utility functions for acceleration structs. 5 | """ 6 | 7 | __all__ = [ 8 | 'expand_idx', 9 | 'expand_points' 10 | ] 11 | 12 | from itertools import product 13 | 14 | import torch 15 | 16 | from nr3d_lib.utils import check_to_torch 17 | 18 | def expand_idx(idx: torch.LongTensor, dilation: int=1): 19 | cube_3x3x3 = list(product(*zip([-1, -1, -1], [0, 0, 0], [1, 1, 1]))) # 1 us 20 | cube_3x3x3 = check_to_torch(cube_3x3x3, ref=idx) 21 | idx = idx.unsqueeze(-2) + cube_3x3x3 * dilation 22 | return idx 23 | 24 | def expand_points(points: torch.Tensor, dilation: float): 25 | """ 26 | Modified from neucon-w 27 | A naive version of the sparse dilation. 28 | 29 | Args: 30 | points: [..., 3] 31 | 32 | Returns: 33 | [..., 27, 3] 34 | """ 35 | # [27, 3] A cube with size=3 and step=1. 
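    # (product(*zip([-1, -1, -1], [0, 0, 0], [1, 1, 1])) below enumerates all 27
    # integer offsets in {-1, 0, 1}^3, i.e. the 3x3x3 neighborhood of each point.)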
36 |     cube_3x3x3 = list(product(*zip([-1, -1, -1], [0, 0, 0], [1, 1, 1]))) # 1 us
37 |     cube_3x3x3 = check_to_torch(cube_3x3x3, ref=points)
38 |     points = points.unsqueeze(-2) + cube_3x3x3 * dilation
39 |     return points
40 | 
41 | if __name__ == "__main__":
42 |     def unit_test(device=torch.device('cuda')):
43 |         pts = torch.randn([4096, 3], device=device, dtype=torch.float)
44 |         expand_points(pts, 0.25)
45 |     unit_test()
--------------------------------------------------------------------------------
/nr3d_lib/models/attributes/__init__.py:
--------------------------------------------------------------------------------
1 | from .attr import *
2 | from .transform import *
3 | from .camera_param import *
4 | from .segment import *
--------------------------------------------------------------------------------
/nr3d_lib/models/attributes/segment.py:
--------------------------------------------------------------------------------
1 | """
2 | @file segment.py
3 | @author Jianfei Guo, Shanghai AI Lab
4 | @brief Attr log sequences related definitions
5 | """
6 | 
7 | __all__ = [
8 |     "Valid",
9 |     # "AttrSegment"
10 | ]
11 | 
12 | import functools
13 | import numpy as np
14 | from typing import Union
15 | 
16 | import torch
17 | import torch.nn as nn
18 | 
19 | from nr3d_lib.utils import is_scalar, check_to_torch
20 | 
21 | from .attr import *
22 | 
23 | @AttrBase
24 | class Valid(Attr):
25 |     """
26 |     Mark validness of each frame of scene members
27 |     """
28 |     default = torch.ones([], dtype=torch.bool)
29 |     def any(self) -> bool:
30 |         return self.tensor.any()
31 |     def value(self) -> torch.Tensor:
32 |         return self.tensor[:]
33 |     def interp1d(self, ts_keyframes: torch.Tensor, ts: torch.Tensor):
34 |         """
35 |         NOTE: Boolean valid flags should not be interpolated continuously, hence we use nearest neighbor instead.
36 |         """
37 |         assert ts_keyframes.dim() == 1, "`ts_keyframes` should be 1D keyframe timestamps tensor"
38 |         assert [*self.prefix] == [ts_keyframes.size(0)], \
39 |             f"To interpolate Attr of {self.__class__.__name__}, its prefix should be the same as `ts_keyframes`={ts_keyframes.size(0)}"
40 |         ts = check_to_torch(ts, device=self.device)
41 | 
42 |         length = ts_keyframes.size(0)
43 |         inds0 = torch.searchsorted(ts_keyframes, ts) # in range [0, len]
44 |         flags = self.tensor[inds0.clamp(1, length-1)]
45 | 
46 |         # Should always be False on those out of bounds timestamps
47 |         flags[inds0==length] = False # `ts` after the last keyframe
48 |         flags[inds0==0] = False # `ts` before the 0-th keyframe
49 |         return type(self)(flags)
50 | 
51 | # class AttrSegment(object):
52 | #     """
53 | #     Using `start_frame`, `stop_frame` to mark segments
54 | #     """
55 | #     def __init__(self, **kwargs):
56 | #         self.subattr = kwargs
57 | #         self.n_frames = 0
58 | #         self.start_frame = None
59 | #         self.stop_frame = None
60 | #     def is_valid(self, i: Union[slice, int, torch.Tensor, np.ndarray]):
61 | #         if isinstance(i, slice):
62 | #             if i.start is None:
63 | #                 i.start = 0
64 | #             if i.stop is None:
65 | #                 raise "Can not decide validness if given stop_frame is None."
66 | # if (i.stop <= self.start_frame) or (i.start >= self.stop_frame): 67 | # return False 68 | # else: 69 | # return True 70 | # elif is_scalar(i): 71 | # return i >= self.start_frame and i < self.stop_frame 72 | # elif isinstance(i, (torch.Tensor, np.ndarray)): 73 | # return ((i >= self.start_frame) & (i < self.stop_frame)).all() 74 | # else: 75 | # raise ValueError(f"Invalid input type(i)={type(i)}") 76 | # def __len__(self): 77 | # return self.n_frames 78 | # def __getitem__(self, index): 79 | # # TODO: Use float timestamp to do interpolation or nearest neighbor search 80 | # return {k: v[index] for k,v in self.subattr.items()} 81 | # def __repr__(self) -> str: 82 | # return f"{type(self).__name__}(" +\ 83 | # ",\n".join( 84 | # [f"start_frame={self.start_frame}", f"n_frames={self.n_frames}"] +\ 85 | # [f"{k}={repr(v)}" for k, v in self.subattr.items()] 86 | # ) + "\n)" 87 | # @functools.wraps(nn.Module.to) 88 | # def to(self, *args, **kwargs): 89 | # self.subattr = {k:v.to(*args, **kwargs) for k,v in self.subattr.items()} 90 | # @functools.wraps(nn.Module._apply) 91 | # def _apply(self, fn): 92 | # self.subattr = {k:v._apply(fn) for k,v in self.subattr.items()} 93 | # return self -------------------------------------------------------------------------------- /nr3d_lib/models/autodecoder.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file autodecoder.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Generic Auto-Decoder Mixin modules. 5 | """ 6 | 7 | from typing import Dict 8 | from copy import deepcopy 9 | 10 | import torch 11 | import torch.nn as nn 12 | from nr3d_lib.models.embeddings import Embedding 13 | 14 | from nr3d_lib.utils import check_to_torch 15 | from nr3d_lib.models.model_base import ModelMixin 16 | 17 | class AutoDecoderMixin(ModelMixin): 18 | """ 19 | The resulting MRO: 20 | AD_xxxModel -> AutoDecoderMixin -> xxxModel -> xxxRendererMixin -> xxxNet -> ModelMixin -> nn.Module 21 | """ 22 | def __init__(self, latents_cfg: dict = None, **model_params): 23 | mro = type(self).mro() 24 | super_class = mro[mro.index(AutoDecoderMixin)+1] 25 | assert super_class is not ModelMixin, "Incorrect class inheritance. Three possible misuse scenarios:\n"\ 26 | "Case 1: The Net class for mixin should also inherit from `ModelMixin`.\n"\ 27 | "Case 2: AutoDecoderMixin should come before the Net class when inheriting.\n"\ 28 | "Case 3: You should not directly instantiate this mixin class." 29 | super().__init__(**model_params) 30 | # assert latents_cfg is not None, f"`latents_cfg` is required for {self.__class__.__name__}" 31 | self.latents_cfg = deepcopy(latents_cfg) if latents_cfg is not None else None 32 | 33 | def autodecoder_populate(self, key_maps: Dict[str, list], latent_maps: Dict[str, Embedding]): 34 | self._keys = key_maps 35 | self._index_maps = {kk: {v:i for i,v in enumerate(vv)} for kk, vv in self._keys.items()} 36 | # self._latents = nn.ParameterDict(latent_maps) 37 | self._latents = nn.ModuleDict(latent_maps) 38 | 39 | # override 40 | def state_dict(self, destination=None, prefix: str='', keep_vars=False): 41 | # Re-organize state_dict with _latent and _models 42 | if destination is None: 43 | destination = dict() 44 | model_dict = super().state_dict(destination=None, prefix='', keep_vars=keep_vars) 45 | destination[prefix + '_latents'] = dict() 46 | for k, _ in self._latents.named_parameters(): 47 | destination[prefix + '_latents'][k] = model_dict.pop('_latents.' 
+ k)
48 |         destination[prefix + '_models'] = model_dict
49 | 
50 |         # Other stuff
51 |         destination[prefix + '_keys'] = self._keys
52 |         return destination
53 | 
54 |     # override
55 |     def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, *args, **kwargs):
56 |         # Re-organize state_dict in pytorch's favor
57 |         if prefix + '_latents' in state_dict:
58 |             latent_pnames = [k for k, _ in self._latents.named_parameters()]
59 |             latent_dict = state_dict.pop(prefix + '_latents')
60 |             for k in latent_pnames:
61 |                 if k in latent_dict:
62 |                     state_dict[prefix + '_latents' + '.' + k] = latent_dict[k]
63 |         if prefix + '_models' in state_dict:
64 |             model_dict = state_dict.pop(prefix + '_models')
65 |             for k in model_dict:
66 |                 state_dict[prefix + k] = model_dict[k]
67 | 
68 |         # Other stuff. TODO: make below more automatic
69 |         if prefix + '_keys' in state_dict:
70 |             self._keys = state_dict.pop(prefix + '_keys')
71 |             self._index_maps = {kk: {v:i for i,v in enumerate(vv)} for kk, vv in self._keys.items()}
72 |         elif strict:
73 |             missing_keys.append(prefix + '_keys')
74 | 
75 |         # Call original pytorch's load_state_dict
76 |         super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, *args, **kwargs)
77 | 
78 | if __name__ == "__main__":
79 |     def unit_test():
80 |         pass
--------------------------------------------------------------------------------
/nr3d_lib/models/blocks/__init__.py:
--------------------------------------------------------------------------------
1 | from .mlp import *
2 | 
3 | def get_blocks(in_features: int, out_features: int, use_tcnn_backend=None, use_lipshitz=False, **params):
4 |     use_tcnn_backend = use_tcnn_backend or False
5 |     if use_lipshitz:
6 |         assert not use_tcnn_backend, "LipshitzMLP does not support tcnn backend"
7 |         return LipshitzMLP(in_features, out_features, **params)
8 | 
9 |     if use_tcnn_backend:
10 |         params.pop('weight_norm', None)
11 |         from nr3d_lib.models.tcnn_adapter import get_tcnn_blocks
12 |         return get_tcnn_blocks(in_features, out_features, **params)
13 |     else:
14 |         return MLP(in_features, out_features, **params)
15 | 
16 | get_mlp = get_blocks
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/__init__.py:
--------------------------------------------------------------------------------
1 | from .sinusoidal_pytorch import *
2 | from .sinusoidal_cuda import *
3 | from .spherical_harmonics import *
4 | 
5 | import torch.nn as nn
6 | from typing import Tuple
7 | 
8 | def get_embedder(embed_cfg:dict, input_dim=3, use_tcnn_backend=None) -> Tuple[nn.Module, int]:
9 |     embed_cfg = embed_cfg.copy()
10 |     use_tcnn_backend = embed_cfg.pop('use_tcnn_backend', False if use_tcnn_backend is None else use_tcnn_backend)
11 |     if (tp:=embed_cfg['type']) == 'none' or tp == 'identity':
12 |         enc, n_encoded_dims = nn.Identity(), input_dim
13 |     else:
14 |         if use_tcnn_backend:
15 |             from nr3d_lib.models.tcnn_adapter import TcnnEncoding, encoding_map
16 |             """
17 |             supported types: [sinusoidal, spherical, triangle_wave, oneblob]
18 |             """
19 |             assert tp in encoding_map.keys(), f"[tcnn backend] Unsupported embedder type={tp}"
20 |             enc = TcnnEncoding(input_dim, embed_cfg)
21 |             n_encoded_dims = enc.out_features
22 |         else:
23 |             """
24 |             supported types: [sinusoidal, spherical]
25 |             """
26 |             tp = embed_cfg.pop('type')
27 |             if tp == 'spherical':
28 |                 enc = SHEncoder(input_dim=input_dim, **embed_cfg)
29 |                 n_encoded_dims = enc.out_features
30 |             elif tp == 'sinusoidal':
31 |                 # Sinusoidal CUDA
32 |                 enc = 
FreqEncoder(input_dim=input_dim, **embed_cfg)
33 |                 n_encoded_dims = enc.out_features
34 |             elif tp == 'sinusoidal_legacy':
35 |                 # Sinusoidal pytorch
36 |                 enc, n_encoded_dims = get_sinusoidal_embedder(input_dim=input_dim, **embed_cfg)
37 |             else:
38 |                 raise RuntimeError(f"[pytorch backend] Unsupported embedder type={tp}")
39 |         enc._embedder_type = tp
40 |     return enc, n_encoded_dims
41 | 
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/sequential.py:
--------------------------------------------------------------------------------
1 | 
2 | import math
3 | import numpy as np
4 | from numbers import Number
5 | from typing import List, Literal, Union
6 | 
7 | import torch
8 | import torch.nn as nn
9 | 
10 | from nr3d_lib.utils import check_to_torch
11 | 
12 | class SeqEmbedder(nn.Module):
13 |     def __init__(self) -> None:
14 |         super().__init__()
15 | 
16 |     def forward(self, ts: torch.Tensor):
17 |         pass
18 | 
19 | class UniformSeqEmbedder(SeqEmbedder):
20 |     def __init__(self, start: Number, stop: Number) -> None:
21 |         super().__init__()
22 |         self.start = start
23 |         self.stop = stop
24 | 
25 | class SinusoidalSeqEmbedder(SeqEmbedder):
26 |     def __init__(
27 |         self,
28 |         sin_or_cos: Literal['sin', 'cos', 'sincos'] = 'cos',
29 |         amp: float = 1.0, offset: float = 0.0,
30 |         periods: Union[Number, List, np.ndarray] = 1,
31 |         shifts: Union[Number, List, np.ndarray] = 0,
32 |         learnable=False, dtype=torch.float, device=None
33 |         ) -> None:
34 |         super().__init__()
35 | 
36 |         bands = 1 if isinstance(periods, Number) else len(periods)
37 |         periods = check_to_torch(periods, dtype=dtype, device=device).view([bands])
38 |         shifts = check_to_torch(shifts, dtype=dtype, device=device).view([bands])
39 | 
40 |         dim_per_band = 2 if sin_or_cos == 'sincos' else 1
41 | 
42 |         self.embedding_dim = dim_per_band * bands
43 |         self.sin_or_cos = sin_or_cos
44 | 
45 |         freqs = (math.pi * 2.) / periods
46 | 
47 |         self.amp = amp
48 |         self.offset = offset
49 |         if learnable:
50 |             self.register_parameter('freqs', nn.Parameter(freqs, requires_grad=True))
51 |             self.register_parameter('shifts', nn.Parameter(shifts, requires_grad=True))
52 |         else:
53 |             self.register_buffer('freqs', freqs, persistent=True)
54 |             self.register_buffer('shifts', shifts, persistent=True)
55 | 
56 |     @property
57 |     def device(self) -> torch.device:
58 |         return self.freqs.device
59 | 
60 |     @property
61 |     def dtype(self):
62 |         return self.freqs.dtype
63 | 
64 |     def forward(self, ts: torch.Tensor):
65 |         angles = (ts.unsqueeze(-1) - self.shifts) * self.freqs
66 |         if self.sin_or_cos == 'sincos':
67 |             angles = torch.stack([angles, angles + math.pi/2.], dim=-1).flatten(-2, -1)
68 |         elif self.sin_or_cos == 'cos':
69 |             angles = angles + math.pi/2.
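            # cos(x) = sin(x + pi/2): shifting the phase here lets the single
            # torch.sin call at the end of this function produce cosine features.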
70 |         elif self.sin_or_cos == 'sin':
71 |             pass
72 |         else:
73 |             raise RuntimeError(f"Invalid self.sin_or_cos={self.sin_or_cos}")
74 |         out = self.amp * torch.sin(angles) + self.offset
75 |         return out
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/sinusoidal_cuda/__init__.py:
--------------------------------------------------------------------------------
1 | from .freq import FreqEncoder, freq_encode
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/sinusoidal_cuda/freq.py:
--------------------------------------------------------------------------------
1 | """
2 | Borrowed from https://github.com/ashawkey/torch-ngp
3 | """
4 | 
5 | import numpy as np
6 | 
7 | import torch
8 | import torch.nn as nn
9 | from torch.autograd import Function
10 | from torch.autograd.function import once_differentiable
11 | from torch.cuda.amp import custom_bwd, custom_fwd
12 | 
13 | import nr3d_lib.bindings._freqencoder as _backend
14 | from nr3d_lib.profile import profile
15 | 
16 | class _freq_encoder(Function):
17 |     @staticmethod
18 |     @custom_fwd(cast_inputs=torch.float32) # force float32 for better precision
19 |     def forward(ctx, inputs, n_frequencies, output_dim):
20 |         # inputs: [B, input_dim], float
21 |         # RETURN: [B, F], float
22 | 
23 |         if not inputs.is_cuda: inputs = inputs.cuda()
24 |         inputs = inputs.contiguous()
25 | 
26 |         B, input_dim = inputs.shape # batch size, coord dim
27 | 
28 |         outputs = torch.empty(B, output_dim, dtype=inputs.dtype, device=inputs.device)
29 | 
30 |         _backend.freq_encode_forward(inputs, B, input_dim, n_frequencies, output_dim, outputs)
31 | 
32 |         if ctx.needs_input_grad[0]: # inputs
33 |             ctx.save_for_backward(inputs, outputs)
34 |             ctx.dims = [B, input_dim, n_frequencies, output_dim]
35 | 
36 |         return outputs
37 | 
38 |     @staticmethod
39 |     @once_differentiable # NOTE: Important !!! Not 2nd-backwardable !!!
40 |     @custom_bwd
41 |     def backward(ctx, grad):
42 |         # grad: [B, output_dim]
43 | 
44 |         grad = grad.contiguous()
45 |         inputs, outputs = ctx.saved_tensors
46 |         B, input_dim, n_frequencies, output_dim = ctx.dims
47 | 
48 |         grad_inputs = torch.zeros_like(inputs)
49 |         _backend.freq_encode_backward(grad, outputs, B, input_dim, n_frequencies, output_dim, grad_inputs)
50 | 
51 |         return grad_inputs, None, None
52 | 
53 | def freq_encode(input: torch.Tensor, n_frequencies: int, output_dim: int=None) -> torch.Tensor:
54 |     input_dim = input.shape[-1]
55 |     if output_dim is None: output_dim = input_dim + input_dim * 2 * n_frequencies
56 |     return _freq_encoder.apply(input, n_frequencies, output_dim)
57 | 
58 | class FreqEncoder(nn.Module):
59 |     def __init__(self, input_dim=3, n_frequencies=4, include_input=True):
60 |         super().__init__()
61 | 
62 |         assert include_input, "Currently sinusoidal embedder only supports `include_input`==True."
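        # The encoding keeps the raw input and adds one sin and one cos feature per
        # frequency band, hence out_features = input_dim * (1 + 2 * n_frequencies) below.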
63 | 
64 |         self.in_features = input_dim
65 |         self.n_frequencies = n_frequencies
66 |         self.out_features = input_dim + input_dim * 2 * n_frequencies
67 | 
68 |     def __repr__(self):
69 |         return f"FreqEncoder: input_dim={self.in_features}, output_dim={self.out_features}, n_frequencies={self.n_frequencies} "
70 | 
71 |     @profile
72 |     def forward(self, inputs, **kwargs) -> torch.Tensor:
73 |         # inputs: [..., input_dim]
74 |         # return: [..., out_features]
75 |         prefix = inputs.shape[:-1]
76 |         inputs = inputs.flatten(0,-2)
77 | 
78 |         outputs = freq_encode(inputs, self.n_frequencies, self.out_features).unflatten(0, prefix)
79 | 
80 |         return outputs
81 | 
82 | if __name__ == "__main__":
83 |     def test():
84 |         from icecream import ic
85 |         m = FreqEncoder(3, 10)
86 |         ic(m)
87 |         x = torch.randn([7,3]).cuda().requires_grad_(True)
88 |         y = m(x)
89 |         ic(y)
90 |         y.mean().backward()
91 |         ic(x.grad)
92 |     test()
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/spherical_harmonics/__init__.py:
--------------------------------------------------------------------------------
1 | from .sphere_harmonics import SHEncoder, sh_encode
--------------------------------------------------------------------------------
/nr3d_lib/models/embedders/spherical_harmonics/sphere_harmonics.py:
--------------------------------------------------------------------------------
1 | """
2 | Borrowed from https://github.com/ashawkey/torch-ngp
3 | """
4 | 
5 | import torch
6 | import torch.nn as nn
7 | from torch.autograd import Function
8 | from torch.autograd.function import once_differentiable
9 | from torch.cuda.amp import custom_bwd, custom_fwd
10 | 
11 | import nr3d_lib.bindings._shencoder as _backend
12 | from nr3d_lib.profile import profile
13 | 
14 | class _sh_encoder(Function):
15 |     @staticmethod
16 |     @custom_fwd(cast_inputs=torch.float32) # force float32 for better precision
17 |     def forward(ctx, inputs, degree, calc_grad_inputs=False):
18 |         # inputs: [B, input_dim], float in [-1, 1]
19 |         # RETURN: [B, F], float
20 | 
21 |         inputs = inputs.contiguous()
22 |         B, input_dim = inputs.shape # batch size, coord dim
23 |         out_features = degree ** 2
24 | 
25 |         outputs = torch.empty(B, out_features, dtype=inputs.dtype, device=inputs.device)
26 | 
27 |         if calc_grad_inputs:
28 |             dy_dx = torch.empty(B, input_dim * out_features, dtype=inputs.dtype, device=inputs.device)
29 |         else:
30 |             dy_dx = torch.empty(1, dtype=inputs.dtype, device=inputs.device)
31 | 
32 |         _backend.sh_encode_forward(inputs, outputs, B, input_dim, degree, calc_grad_inputs, dy_dx)
33 | 
34 |         if calc_grad_inputs: # inputs
35 |             ctx.save_for_backward(inputs, dy_dx)
36 |             ctx.dims = [B, input_dim, degree]
37 |         ctx.calc_grad_inputs = calc_grad_inputs
38 | 
39 |         return outputs
40 | 
41 |     @staticmethod
42 |     #@once_differentiable
43 |     @custom_bwd
44 |     def backward(ctx, grad):
45 |         # grad: [B, C * C]
46 | 
47 |         if ctx.calc_grad_inputs:
48 |             grad = grad.contiguous()
49 |             inputs, dy_dx = ctx.saved_tensors
50 |             B, input_dim, degree = ctx.dims
51 |             grad_inputs = torch.zeros_like(inputs)
52 |             _backend.sh_encode_backward(grad, inputs, B, input_dim, degree, dy_dx, grad_inputs)
53 |             return grad_inputs, None, None
54 |         else:
55 |             return None, None, None
56 | 
57 | def sh_encode(input: torch.Tensor, degree: int, calc_grad_inputs=False) -> torch.Tensor:
58 |     return _sh_encoder.apply(input, degree, calc_grad_inputs)
59 | 
60 | class SHEncoder(nn.Module):
61 |     def __init__(self, input_dim=3, degree=4):
62 |         super().__init__()
63 |         self.degree = degree # 1 ~ 8
64 | 
self.in_features = input_dim # coord dims, must be 3 65 | self.out_features = degree ** 2 66 | assert self.in_features == 3, "SH encoder only supports input dim == 3" 67 | assert self.degree > 0 and self.degree <= 8, "SH encoder only supports degree in [1, 8]" 68 | 69 | def __repr__(self): 70 | return f"SHEncoder: input_dim={self.in_features}, output_dim={self.out_features}, degree={self.degree}" 71 | 72 | @profile 73 | def forward(self, inputs: torch.Tensor, size=1) -> torch.Tensor: 74 | # inputs: [..., input_dim], normalized real-world positions in [-size, size] 75 | # return: [..., degree^2] 76 | 77 | prefix = inputs.shape[:-1] 78 | inputs = (inputs / size).flatten(0, -2) # [-1, 1] 79 | 80 | outputs = sh_encode(inputs, self.degree, inputs.requires_grad).unflatten(0, prefix) 81 | return outputs 82 | 83 | if __name__ == "__main__": 84 | def test(): 85 | from icecream import ic 86 | m = SHEncoder() 87 | ic(m) 88 | x = torch.randn([7,3]).cuda().requires_grad_(True) 89 | y = m(x) 90 | ic(y) 91 | y.mean().backward() 92 | ic(x.grad) 93 | test() -------------------------------------------------------------------------------- /nr3d_lib/models/embeddings/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .embedding import * 3 | from .sequential import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_nerf import * 3 | from .permuto_nerf import * 4 | from .mlp_nerf import * 5 | from .tcnn_nerf import * 6 | from .renderer_mixin import * 7 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields/nerf/utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields/nerf/utils.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields/neus/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_neus import * 3 | from .permuto_neus import * 4 | from .mlp_neus import * 5 | from .variance import * 6 | from .renderer_mixin import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields/sdf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_sdf import * 3 | from .permuto_sdf import * 4 | from .mlp_sdf import * 5 | from .utils import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields/sdf/renderer_mixin.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file renderer_mixin.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief SDF renderer mixin based on AABB space & OctreeAS 5 | """ 6 | 7 | from nr3d_lib.config import ConfigDict 8 | from nr3d_lib.models.model_base import ModelMixin 9 | 10 | class SDFRendererMixin(ModelMixin): 11 | """ 12 | SDF Renderer
Mixin class 13 | 14 | NOTE: This is a mixin class! 15 | Refer: https://stackoverflow.com/questions/533631/what-is-a-mixin-and-why-are-they-useful 16 | !!!!: The target class for this mixin should also inherit from `ModelMixin`. 17 | """ 18 | def __init__( 19 | self, 20 | # Renderer mixin kwargs 21 | ray_query_cfg: dict = dict(), 22 | # Network kwargs 23 | **net_kwargs) -> None: 24 | 25 | mro = type(self).mro() 26 | super_class = mro[mro.index(SDFRendererMixin)+1] 27 | assert super_class is not ModelMixin, "Incorrect class inheritance. Three possible misuse scenarios:\n"\ 28 | "Case 1: The Net class for mixin should also inherit from `ModelMixin`.\n"\ 29 | "Case 2: RendererMixin should come before the Net class when inheriting.\n"\ 30 | "Case 3: You should not directly instantiate this mixin class." 31 | 32 | raise NotImplementedError 33 | 34 | super().__init__(**net_kwargs) 35 | 36 | self.ray_query_cfg = ray_query_cfg 37 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_conditional/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .style_lotd_nerf import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/nerf/style_lotd_nerf.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file style_lotd_nerf.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Style-LoTD Encoding + sigma decoder + radiance decoder 5 | """ -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/neus/__init__.py: -------------------------------------------------------------------------------- 1 | from .renderer_mixin import * 2 | from .style_lotd_neus import * 3 | from .generative_permuto_neus import * 4 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/sdf/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .style_lotd_sdf import * 3 | from .generative_permuto_sdf import * 4 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional/sdf/dit_sdf.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file dit_sdf.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Conditional SDF using deep implicit templates (DIT) and deformation modules. 
5 | """ 6 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional_dynamic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_conditional_dynamic/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional_dynamic/neus/__init__.py: -------------------------------------------------------------------------------- 1 | from .renderer_mixin import * 2 | from .dynamic_generative_permuto_neus import * 3 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_conditional_dynamic/neus/dynamic_generative_permuto_neus.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file dynamic_generative_permuto_neus.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief _description_ 5 | """ 6 | 7 | __all__ = [ 8 | 'DynamicGenerativePermutoConcatNeuS', 9 | 'DynamicGenerativePermutoConcatNeuSModel', 10 | ] 11 | 12 | from nr3d_lib.models.fields_conditional.neus import GenerativePermutoConcatNeuS 13 | from nr3d_lib.models.fields_conditional_dynamic.neus.renderer_mixin import NeusRendererMixinBatchedDynamic 14 | 15 | class DynamicGenerativePermutoConcatNeuS(GenerativePermutoConcatNeuS): 16 | # def __init__(self, *args, **kwargs) -> None: 17 | # super().__init__(*args, **kwargs) 18 | def populate(self, *args, **kwargs): 19 | self.surface_cfg['encoding_cfg'].setdefault( 20 | 'space_cfg', {'type': 'batched_dynamic'}) 21 | return super().populate(*args, **kwargs) 22 | def query_sdf(self, *args, **kwargs): 23 | kwargs.pop('ts', None) # `ts` is already taken care of when computing `z` 24 | return super().query_sdf(*args, **kwargs) 25 | def forward_sdf(self, *args, **kwargs): 26 | kwargs.pop('ts', None) # `ts` is already taken care of when computing `z` 27 | return super().forward_sdf(*args, **kwargs) 28 | def forward_sdf_nablas(self, *args, **kwargs): 29 | kwargs.pop('ts', None) # `ts` is already taken care of when computing `z` 30 | return super().forward_sdf_nablas(*args, **kwargs) 31 | def forward(self, *args, **kwargs): 32 | kwargs.pop('ts', None) # `ts` is already taken care of when computing `z` 33 | return super().forward(*args, **kwargs) 34 | 35 | class DynamicGenerativePermutoConcatNeuSModel(NeusRendererMixinBatchedDynamic, DynamicGenerativePermutoConcatNeuS): 36 | """ 37 | MRO: 38 | -> NeusRendererMixinBatchedDynamic 39 | -> DynamicGenerativePermutoConcatNeuS 40 | -> GenerativePermutoConcatNeuS 41 | -> ModelMixin 42 | -> nn.Module 43 | """ 44 | pass 45 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_directvox/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_directvox/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_directvox/nerf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_directvox/nerf -------------------------------------------------------------------------------- 
/nr3d_lib/models/fields_directvox/neus: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_directvox/neus -------------------------------------------------------------------------------- /nr3d_lib/models/fields_directvox/sdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_directvox/sdf -------------------------------------------------------------------------------- /nr3d_lib/models/fields_directvox/utils.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_directvox/utils.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_distant/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_distant/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_distant/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_nerf import * 3 | from .permuto_nerf import * 4 | from .nerf import * 5 | from .renderer_mixin import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_distant/nerf/lotd_nerf.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file lotd_nerf.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief LoTD-encoding-based NeRF++ 5 | """ 6 | 7 | __all__ = [ 8 | 'LoTDNeRFDistantModel' 9 | ] 10 | 11 | from nr3d_lib.config import ConfigDict 12 | from nr3d_lib.models.fields.nerf import LoTDNeRF 13 | from nr3d_lib.models.fields_distant.nerf.renderer_mixin import NeRFRendererMixinDistant 14 | 15 | class LoTDNeRFDistantModel(NeRFRendererMixinDistant, LoTDNeRF): 16 | """ 17 | MRO: 18 | -> NeRFRendererMixinDistant 19 | -> LoTDNeRF 20 | -> ModelMixin 21 | -> nn.Module 22 | """ 23 | pass 24 | # def ray_test(self, *args, **kwargs): 25 | # if self.cr_obj is not None: 26 | # # NOTE: nerf++ background should always directly use foreground's ray_test results. 27 | # return self.cr_obj.model.ray_test(*args, **kwargs) -------------------------------------------------------------------------------- /nr3d_lib/models/fields_distant/nerf/nerf.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file nerf.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief MLP-based NeRF++ 5 | """ 6 | 7 | __all__ = [ 8 | 'NeRFDistantModel' 9 | ] 10 | 11 | from nr3d_lib.config import ConfigDict 12 | from nr3d_lib.models.fields.nerf import EmbededNeRF 13 | from nr3d_lib.models.fields_distant.nerf.renderer_mixin import NeRFRendererMixinDistant 14 | 15 | class NeRFDistantModel(NeRFRendererMixinDistant, EmbededNeRF): 16 | """ 17 | MRO: 18 | -> NeRFRendererMixinDistant 19 | -> EmbededNeRF 20 | -> ModelMixin 21 | -> nn.Module 22 | """ 23 | pass 24 | 25 | # def ray_test(self, *args, **kwargs): 26 | # if self.cr_obj is not None: 27 | # # NOTE: nerf++ background should always directly use foreground's ray_test results. 
28 | # return self.cr_obj.model.ray_test(*args, **kwargs) -------------------------------------------------------------------------------- /nr3d_lib/models/fields_distant/nerf/permuto_nerf.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file permuto_nerf.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief NeRF++ network characterized using the Permutohedral-encoding model. 5 | """ 6 | 7 | __all__ = [ 8 | 'PermutoNeRFDistantModel' 9 | ] 10 | 11 | from nr3d_lib.config import ConfigDict 12 | from nr3d_lib.models.fields.nerf import PermutoNeRF 13 | from nr3d_lib.models.fields_distant.nerf.renderer_mixin import NeRFRendererMixinDistant 14 | 15 | class PermutoNeRFDistantModel(NeRFRendererMixinDistant, PermutoNeRF): 16 | """ 17 | MRO: 18 | -> NeRFRendererMixinDistant 19 | -> PermutoNeRF 20 | -> ModelMixin 21 | -> nn.Module 22 | """ 23 | pass -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_dynamic/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/common: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_dynamic/common -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | from .emernerf import * 2 | from .renderer_mixin import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/neus/__init__.py: -------------------------------------------------------------------------------- 1 | from .dynamic_permuto_neus import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/neus/emernerf_neus.py: -------------------------------------------------------------------------------- 1 | """ 2 | SDF-version EmerNeRF 3 | """ -------------------------------------------------------------------------------- /nr3d_lib/models/fields_dynamic/sdf/__init__.py: -------------------------------------------------------------------------------- 1 | from .dynamic_permuto_sdf import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_forest/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/nerf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_forest_nerf import * 3 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/nerf/block_nerf.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/fields_forest/nerf/block_nerf.py -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/nerf/renderer_mixin.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file renderer_mixin.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Renderer mixin for forest NeRF with acceleration. 5 | """ 6 | -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/neus/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_forest_neus import * 3 | from .renderer_mixin import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/sdf/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd_forest_sdf import * 3 | from .utils import * -------------------------------------------------------------------------------- /nr3d_lib/models/fields_forest/sdf/renderer_mixin.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file renderer_mixin.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief SDF renderer mixin based on Forest space & OctforestAS 5 | """ 6 | 7 | from typing import Any 8 | from nr3d_lib.models.model_base import ModelMixin 9 | 10 | 11 | class SDFRendererMixinForest(ModelMixin): 12 | def __init__( 13 | self, 14 | # Renderer mixin kwargs 15 | ray_query_cfg: dict = dict(), 16 | # Network kwargs 17 | **net_kwargs) -> None: 18 | 19 | mro = type(self).mro() 20 | super_class = mro[mro.index(SDFRendererMixinForest)+1] 21 | assert super_class is not ModelMixin, "Incorrect class inheritance. Three possible misuse scenarios:\n"\ 22 | "Case 1: The Net class for mixin should also inherit from `ModelMixin`.\n"\ 23 | "Case 2: RendererMixin should come before the Net class when inheriting.\n"\ 24 | "Case 3: You should not directly instantiate this mixin class." 
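# A correct-usage sketch (hypothetical class names, for illustration): the mixin must precede a
# `ModelMixin`-derived Net class among the bases, e.g.
#   class ForestSDFModel(SDFRendererMixinForest, MyForestSDFNet): ...
# so that in mro(), the class right after SDFRendererMixinForest is the Net class itself rather
# than `ModelMixin` -- which is exactly what the assert above verifies.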
25 | 26 | raise NotImplementedError 27 | 28 | super().__init__(**net_kwargs) 29 | 30 | self.ray_query_cfg = ray_query_cfg 31 | -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/grid_encodings/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/lotd/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .lotd import * 3 | from .lotd_cfg import * 4 | from .lotd_helpers import * 5 | 6 | from .lotd_encoding import * 7 | from .lotd_forest import * 8 | 9 | from .lotd_batched import * 10 | from .lotd_batched_growers import * -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/lotd/tests/test_extra_pos_nablas.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | 4 | from nr3d_lib.config import ConfigDict 5 | from nr3d_lib.logger import Logger 6 | from nr3d_lib.models.fields.sdf import LoTDSDF, pretrain_sdf_sphere 7 | 8 | device = torch.device('cuda') 9 | 10 | m_noextra = LoTDSDF( 11 | dtype='half', device=device, 12 | encoding_cfg=ConfigDict( 13 | lotd_cfg=ConfigDict( 14 | lod_res=[8, 13, 21, 34, 55, 89], 15 | lod_n_feats=[2, 2, 2, 2, 2, 2], 16 | lod_types=['Dense', 'Dense', 'Dense', 'Dense', 'Hash', 'Hash'], 17 | hashmap_size=32768 18 | ), 19 | bounding_size=2.0, 20 | param_init_cfg=ConfigDict( 21 | method='uniform_to_type', 22 | bound=1.0e-4 23 | ) 24 | ), 25 | decoder_cfg=ConfigDict( 26 | D=1, W=64, activation='relu' 27 | ), 28 | geo_init_method='pretrain' 29 | ) 30 | 31 | m_geo = LoTDSDF( 32 | dtype='half', device=device, 33 | encoding_cfg=ConfigDict( 34 | lotd_cfg=ConfigDict( 35 | lod_res=[8, 13, 21, 34, 55, 89], 36 | lod_n_feats=[2, 2, 2, 2, 2, 2], 37 | lod_types=['Dense', 'Dense', 'Dense', 'Dense', 'Hash', 'Hash'], 38 | hashmap_size=32768 39 | ), 40 | bounding_size=2.0, 41 | param_init_cfg=ConfigDict( 42 | method='uniform_to_type', 43 | bound=1.0e-4 44 | ) 45 | ), 46 | decoder_cfg=ConfigDict( 47 | D=1, W=64, activation=ConfigDict(type='softplus', beta=100.0) 48 | ), 49 | extra_pos_embed_cfg=ConfigDict(type='identity'), 50 | geo_init_method='pretrain_after_geometric' 51 | ) 52 | 53 | 54 | print(m_noextra) 55 | print(m_geo) 56 | 57 | logger = Logger('./dev_test/test_extra_nablas', monitoring='tensorboard', save_imgs=False) 58 | pretrain_sdf_sphere(m_noextra, lr=1.0e-3, num_iters=1000, w_eikonal=0, logger=logger, log_prefix='noextra.') 59 | pretrain_sdf_sphere(m_geo, lr=1.0e-3, num_iters=1000, w_eikonal=0, logger=logger, log_prefix='geo.') 60 | 61 | 62 | -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/lotd/tests/unit_test_grid_inds.py: -------------------------------------------------------------------------------- 1 | import faulthandler; faulthandler.enable() 2 | 3 | import math 4 | import torch 5 | import numpy as np 6 | 7 | import nr3d_lib.bindings._lotd as _backend 8 | 9 | torch.cuda.manual_seed_all(42) 10 | 11 | device = torch.device('cuda') 12 | input_dtype = torch.float 13 | param_dtype = torch.float16 14 | 15 | max_level = None 16 | lod_meta = _backend.LoDMeta( 17 | 3, 18 | [4, 8], 19 | [4, 4], 20 | ['Dense','Dense'], 21 
| None, 22 | False # use_smooth_step 23 | ) 24 | 25 | params = torch.randn([lod_meta.n_params], device=device, dtype=param_dtype) / 1.0e+2 26 | 27 | x = torch.tensor([[0.7, 0.5, 0.2], [-0.9, 0, 0.3]], device=device, dtype=input_dtype) 28 | # x = torch.rand([365365, 3], device=device, dtype=input_dtype) 29 | # x = torch.tensor(np.load('./dev_test/test_lotd/input.npz')['x'], device=device, dtype=input_dtype)/2+0.5 # [3.6M] 30 | 31 | grid_inds = _backend.lod_get_grid_index(lod_meta, x, None, None, None, max_level) 32 | y, dydx = _backend.lod_fwd(lod_meta, x, params, None, None, None, max_level, True) 33 | print(y) 34 | 35 | params[grid_inds] = 0 36 | y, dydx = _backend.lod_fwd(lod_meta, x, params, None, None, None, max_level, True) 37 | print(y) 38 | 39 | y, dydx = _backend.lod_fwd(lod_meta, x+0.2, params, None, None, None, max_level, True) 40 | print(y) 41 | -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/permuto/__init__.py: -------------------------------------------------------------------------------- 1 | from .permuto import * 2 | from .generative_permuto_concat import * 3 | from .permuto_encoding import * 4 | from .mll import * -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/permuto/tests/benchmark_save_intermediate.py: -------------------------------------------------------------------------------- 1 | import faulthandler; faulthandler.enable() 2 | 3 | import time 4 | import numpy as np 5 | from icecream import ic 6 | 7 | import torch 8 | from torch import autograd 9 | from torch.utils.benchmark import Timer 10 | 11 | import nr3d_lib.bindings._permuto_intermediate as _backend 12 | 13 | import permutohedral_encoding as permuto_enc # Original built 14 | 15 | input_dtype = torch.float32 16 | param_dtype = torch.float32 17 | device = torch.device('cuda') 18 | 19 | pos_dim=7 20 | capacity=2**18 21 | nr_levels = 24 22 | nr_feat_per_level = 2 23 | coarsest_scale = 1.0 24 | finest_scale = 0.0001 25 | scale_list = np.geomspace(coarsest_scale, finest_scale, num=nr_levels) 26 | res_list = 1./ scale_list 27 | 28 | lattice_values0 = torch.randn([nr_levels, capacity, nr_feat_per_level], dtype=param_dtype, device=device) 29 | lattice_values = lattice_values0.flatten() 30 | 31 | #---- Ours 32 | meta = _backend.PermutoEncMeta(pos_dim, capacity, res_list.tolist(), [nr_feat_per_level] * len(res_list)) 33 | 34 | # batch_size = 1 35 | # batch_size = 4 36 | # batch_size = 1024 37 | batch_size = 3653653 38 | positions = torch.rand([batch_size, meta.n_dims_to_encode], dtype=input_dtype, device=device) 39 | 40 | #---- Ours Forward 41 | # encoded, rank, rem0 = _backend.permuto_enc_fwd(meta, positions, lattice_values, None, None, None, None, True, True) 42 | @torch.no_grad() 43 | def fn_ours_fwd(need_intermediate=False): 44 | # need_intermediate significantly affects the runtime: a forward pass that takes 17.59 ms without it can take 123.38 ms with it.
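# (Presumably this overhead is dominated by allocating and writing the per-point intermediates --
# e.g. the rank/rem0 tensors of shape [batch_size, nr_levels, pos_dim+1] -- rather than by the
# encoding itself; the allocation-only Timer at the end of match_with_original_save_intermediate.py
# probes exactly this cost.)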
45 | _backend.permuto_enc_fwd(meta, positions, lattice_values, None, None, None, None, True, need_intermediate) 46 | 47 | time.sleep(3) 48 | 49 | print(Timer( 50 | stmt="fn_ours_fwd(need_intermediate=False)", 51 | globals={'fn_ours_fwd': fn_ours_fwd} 52 | ).blocked_autorange()) 53 | 54 | print(Timer( 55 | stmt="fn_ours_fwd(need_intermediate=True)", 56 | globals={'fn_ours_fwd': fn_ours_fwd} 57 | ).blocked_autorange()) -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/permuto/tests/match_with_original_save_intermediate.py: -------------------------------------------------------------------------------- 1 | import faulthandler; faulthandler.enable() 2 | 3 | import numpy as np 4 | from icecream import ic 5 | 6 | import torch 7 | from torch import autograd 8 | from torch.utils.benchmark import Timer 9 | 10 | import nr3d_lib.bindings._permuto_intermediate as _backend 11 | 12 | import permutohedral_encoding as permuto_enc # Original built 13 | 14 | input_dtype = torch.float32 15 | param_dtype = torch.float32 16 | device = torch.device('cuda') 17 | 18 | pos_dim=7 19 | capacity=2**18 20 | nr_levels = 24 21 | nr_feat_per_level = 2 22 | coarsest_scale = 1.0 23 | finest_scale = 0.0001 24 | scale_list = np.geomspace(coarsest_scale, finest_scale, num=nr_levels) 25 | res_list = 1./ scale_list 26 | 27 | lattice_values0 = torch.randn([nr_levels, capacity, nr_feat_per_level], dtype=param_dtype, device=device) 28 | lattice_values = lattice_values0.flatten() 29 | 30 | #---- Original 31 | enc0 = permuto_enc.PermutoEncoding(pos_dim, capacity, nr_levels, nr_feat_per_level, scale_list) 32 | enc0.lattice_values.data = lattice_values0 33 | 34 | #---- Ours 35 | meta = _backend.PermutoEncMeta(pos_dim, capacity, res_list.tolist(), [nr_feat_per_level] * len(res_list), enc0.random_shift_per_level.tolist()) 36 | 37 | # batch_size = 1 38 | # batch_size = 4 39 | # batch_size = 1024 40 | batch_size = 3653653 41 | positions = torch.rand([batch_size, meta.n_dims_to_encode], dtype=input_dtype, device=device) 42 | 43 | #---- Original Forward 44 | encoded0 = enc0(positions) 45 | @torch.no_grad() 46 | def fn_original_fwd(): 47 | enc0(positions) 48 | 49 | #---- Ours Forward 50 | encoded, rank, rem0 = _backend.permuto_enc_fwd(meta, positions, lattice_values, None, None, None, None, True, True) 51 | @torch.no_grad() 52 | def fn_ours_fwd(need_intermediate=False): 53 | # need_intermediate can significantly impact the execution time, potentially turning a 17.59 millisecond process into a 123.38 millisecond one. 54 | _backend.permuto_enc_fwd(meta, positions, lattice_values, None, None, None, None, True, need_intermediate) 55 | 56 | # The error is slightly larger for higher resolutions. 
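# (Likely because finer levels scale positions by a larger resolution before snapping them onto the
# lattice, which amplifies small floating-point differences between the two implementations; hence
# the loose atol/rtol used below.)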
57 | print(torch.allclose(encoded0.data, encoded.data, atol=3.0e-2, rtol=3.0e-2)) 58 | 59 | # print(Timer( 60 | # stmt="fn_original_fwd()", 61 | # globals={'fn_original_fwd': fn_original_fwd} 62 | # ).blocked_autorange()) 63 | 64 | print(Timer( 65 | stmt="fn_ours_fwd()", 66 | globals={'fn_ours_fwd': fn_ours_fwd} 67 | ).blocked_autorange()) 68 | 69 | print(Timer( 70 | stmt="fn_ours_fwd(need_intermediate=True)", 71 | globals={'fn_ours_fwd': fn_ours_fwd} 72 | ).blocked_autorange()) 73 | 74 | print(Timer( 75 | stmt=f"a=torch.zeros([batch_size, nr_levels, pos_dim+1], dtype=torch.int32, device=device)", 76 | globals={'device': device, 'batch_size': batch_size, 'nr_levels': nr_levels, 'pos_dim': pos_dim} 77 | ).blocked_autorange()) -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/permuto/tests/math_test.py: -------------------------------------------------------------------------------- 1 | 2 | import faulthandler; faulthandler.enable() 3 | 4 | import numpy as np 5 | from icecream import ic 6 | 7 | import torch 8 | import torch.nn as nn 9 | from torch import autograd 10 | from torch.utils.benchmark import Timer 11 | 12 | import nr3d_lib.bindings._permuto as _backend 13 | 14 | from nr3d_lib.models.grid_encodings.permuto import PermutoEncImpl 15 | 16 | device = torch.device('cuda') 17 | input_dtype = torch.float32 18 | param_dtype = torch.float32 19 | 20 | pos_scale = 10.0 21 | 22 | meta = PermutoEncImpl( 23 | 4, 24 | [8.0, 16.0], 25 | [2, 2], 26 | log2_hashmap_size=4, 27 | apply_random_shifts_per_level=True, 28 | pos_scale=pos_scale, 29 | dtype=param_dtype, device=device 30 | ) 31 | 32 | lattice_values = torch.randn(meta.n_params, dtype=torch.float, device=device, requires_grad=True) 33 | x_eg = torch.randn([7, 4], dtype=input_dtype, device=device) 34 | y_eg = meta.forward(x_eg, lattice_values) 35 | dLdy_eg = torch.randn_like(y_eg) 36 | 37 | 38 | def apply_on_x(x): 39 | return meta.forward(x, lattice_values, need_dL_dinput=True) 40 | 41 | # NOTE: 42 | # y w.r.t. x i.e. dy_dx 43 | autograd.gradcheck( 44 | apply_on_x, 45 | x_eg.data.clone().requires_grad_(True), 46 | eps=1.0e-5, rtol=0.2, atol=0.5 * pos_scale 47 | ) 48 | 49 | 50 | 51 | 52 | 53 | 54 | # NOTE: 55 | # y w.r.t. param i.e. dy_dparam (passed) 56 | 57 | def apply_on_param(param): 58 | return meta.forward(x_eg, param, need_dL_dinput=False) 59 | 60 | autograd.gradcheck( 61 | apply_on_param, 62 | lattice_values, 63 | eps=1.0e-5, rtol=0.2, atol=0.05 * pos_scale 64 | ) 65 | 66 | 67 | 68 | 69 | 70 | # NOTE: 71 | # dLdx w.r.t. dLdy i.e. d(dLdx)_d(dLdy) (passed) 72 | 73 | def bwdinput_apply_on_dldy(dldy): 74 | return meta.backward_dydx(dldy, x_eg, lattice_values) 75 | 76 | autograd.gradcheck( 77 | bwdinput_apply_on_dldy, 78 | dLdy_eg.data.clone().requires_grad_(True), 79 | eps=1.0e-5, rtol=0.2, atol=0.1 * pos_scale 80 | ) 81 | 82 | 83 | 84 | 85 | # NOTE: Most entries are correct; a very few have larger errors due to misalignment. 86 | # dLdx w.r.t. param i.e.
d(dLdx)_d(param) 87 | 88 | def bwdinput_apply_on_param(param): 89 | return meta.backward_dydx(dLdy_eg, x_eg, param) 90 | 91 | autograd.gradcheck( 92 | bwdinput_apply_on_param, 93 | lattice_values, 94 | eps=1.0e-5, rtol=0.2, atol=0.1 * pos_scale 95 | ) -------------------------------------------------------------------------------- /nr3d_lib/models/grid_encodings/permuto/tests/unit_test_intermediate.py: -------------------------------------------------------------------------------- 1 | import faulthandler; faulthandler.enable() 2 | from icecream import ic 3 | import numpy as np 4 | 5 | import torch 6 | from torch.utils.benchmark import Timer 7 | 8 | import nr3d_lib.bindings._permuto_intermediate as _backend 9 | 10 | input_dtype = torch.float32 11 | param_dtype = torch.float16 12 | device = torch.device('cuda') 13 | 14 | meta = _backend.PermutoEncMeta( 15 | 7, # n_input_dim 16 | 2**16, # hashmap_size 17 | [32.0], # res_list 18 | [4], # n_feats_list 19 | ) 20 | 21 | batch_size = 1 22 | # batch_size = 4 23 | # batch_size = 1024 24 | # batch_size = 3653653 25 | positions = torch.rand([batch_size, meta.n_dims_to_encode], dtype=input_dtype, device=device) 26 | lattice_values = torch.randn([meta.n_params], dtype=param_dtype, device=device) 27 | level_random_shifts = 10.0 * torch.randn([meta.n_levels, meta.n_dims_to_encode], dtype=input_dtype, device=device) 28 | 29 | _, rank, rank2, rank3, rem0, elevated = _backend.permuto_enc_fwd(meta, positions, lattice_values, None, None, None, None, False, True) 30 | delta = elevated - rem0 31 | _rank = torch.argsort(torch.argsort(delta, descending=True)) 32 | 33 | print(torch.equal(rank, _rank)) 34 | print(torch.equal(rank, rank2)) 35 | print(torch.equal(rank, rank3)) 36 | 37 | delta_sorted = delta.sort(descending=True, dim=-1).values 38 | _delta = torch.gather(delta_sorted, -1, rank.long()) 39 | print(torch.equal(delta, _delta)) 40 | 41 | _ = 1 -------------------------------------------------------------------------------- /nr3d_lib/models/loss/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # from .clip_loss import CLIPLoss 3 | # from .GEM import Llm_process, Iso_loss 4 | # from .recon import * 5 | # from .safe import * 6 | # from .utils import reduce 7 | -------------------------------------------------------------------------------- /nr3d_lib/models/loss/clip_loss.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file clip_loss.py 3 | @author Qiusheng Huang, Shanghai AI Lab 4 | @brief CLIP guidance loss 5 | """ 6 | 7 | import torch 8 | import torch.nn as nn 9 | 10 | from nr3d_lib.fmt import log 11 | 12 | try: 13 | import clip 14 | except ImportError: 15 | log.info("clip is not installed. 
CLIP related model & losses are disabled.") 16 | 17 | class CLIPLoss(nn.Module): 18 | 19 | def __init__(self, img_len, text, w, path="/mnt/petrelfs/huangqiusheng/pretrained_models/ViT-B-32.pt"): 20 | super(CLIPLoss, self).__init__() 21 | self.model, self.preprocess = clip.load(path, device="cuda") 22 | self.upsample = nn.Upsample(scale_factor=7) 23 | self.avg_pool = nn.AvgPool2d(kernel_size=128 // (32//(img_len//128))) # resize img to 224 24 | self.text = clip.tokenize(text).cuda() 25 | self.img_len = img_len 26 | self.w = w 27 | self.direction_mean = None # running accumulator of target-image features, used by dis_imgs() 28 | def forward(self, image, text=None): 29 | if len(image.shape)==3: 30 | image = torch.unsqueeze(image, 0) 31 | image = torch.reshape(image, [1,3,self.img_len,self.img_len]) 32 | if text is None: 33 | text = self.text 34 | 35 | image = self.avg_pool(self.upsample(image)) 36 | similarity = 1 - self.model(image, text)[0] / 100 37 | return similarity*self.w 38 | 39 | def dis_imgs(self, image_s, image_t): 40 | # used for semantic-information guidance on novel views 41 | 42 | image_s = self.avg_pool(self.upsample(image_s)) 43 | image_t = self.avg_pool(self.upsample(image_t)) 44 | 45 | img_s_features = self.img_enc(image_s) 46 | img_t_features = self.img_enc(image_t) 47 | 48 | # normalized features 49 | img_s_features = img_s_features / img_s_features.norm(dim=1, keepdim=True) 50 | img_t_features = img_t_features / img_t_features.norm(dim=1, keepdim=True) 51 | 52 | # accumulate the features 53 | if self.direction_mean is None: 54 | self.direction_mean = img_t_features 55 | else: 56 | self.direction_mean += img_t_features 57 | 58 | # cosine similarity as logits 59 | logit_scale = self.model.logit_scale.exp() 60 | logits_per_image = logit_scale * img_s_features @ self.direction_mean.t() 61 | 62 | similarity = 1.0 - logits_per_image / 100.0 63 | 64 | return similarity 65 | 66 | def img_enc(self, image): 67 | return self.model.encode_image(image) 68 | 69 | def text_enc(self, text): 70 | return self.model.encode_text(text) -------------------------------------------------------------------------------- /nr3d_lib/models/loss/lpipsPyTorch/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | # import lpips 4 | from .modules.lpips import LPIPS 5 | 6 | # lpips_vgg = lpips.LPIPS(net="vgg").cuda() -------------------------------------------------------------------------------- /nr3d_lib/models/loss/lpipsPyTorch/modules/lpips.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from .networks import get_network, LinLayers 5 | from .utils import get_state_dict 6 | 7 | 8 | class LPIPS(nn.Module): 9 | r"""Creates a criterion that measures 10 | Learned Perceptual Image Patch Similarity (LPIPS). 11 | 12 | Arguments: 13 | net_type (str): the network type to compare the features: 14 | 'alex' | 'squeeze' | 'vgg'. Default: 'alex'. 15 | version (str): the version of LPIPS. Default: 0.1.
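Example (assumed usage; inputs are image batches [B, 3, H, W] scaled to [-1, 1]):
    criterion = LPIPS(net_type='vgg').to('cuda')
    loss = criterion(pred, target)  # -> [B, 1, 1, 1]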
16 | """ 17 | def __init__(self, net_type: str = 'alex', version: str = '0.1', device=None): 18 | 19 | assert version in ['0.1'], 'v0.1 is only supported now' 20 | 21 | super(LPIPS, self).__init__() 22 | 23 | # pretrained network 24 | self.net = get_network(net_type) 25 | 26 | # linear layers 27 | self.lin = LinLayers(self.net.n_channels_list) 28 | self.lin.load_state_dict(get_state_dict(net_type, version, device=device)) 29 | 30 | def forward(self, x: torch.Tensor, y: torch.Tensor): 31 | feat_x, feat_y = self.net(x), self.net(y) 32 | 33 | diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)] 34 | res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)] # [B,1,1,1] 35 | 36 | # return torch.sum(torch.cat(res, 0), 0, True) # [5xB,1,1,1] --sum--> [1,1,1,1] 37 | return torch.sum(torch.stack(res, 0), 0, False) # [5,B,1,1,1] --sum--> [B,1,1,1] 38 | -------------------------------------------------------------------------------- /nr3d_lib/models/loss/lpipsPyTorch/modules/networks.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence 2 | 3 | from itertools import chain 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torchvision import models 8 | 9 | from .utils import normalize_activation 10 | 11 | 12 | def get_network(net_type: str): 13 | if net_type == 'alex': 14 | return AlexNet() 15 | elif net_type == 'squeeze': 16 | return SqueezeNet() 17 | elif net_type == 'vgg': 18 | return VGG16() 19 | else: 20 | raise NotImplementedError('choose net_type from [alex, squeeze, vgg].') 21 | 22 | 23 | class LinLayers(nn.ModuleList): 24 | def __init__(self, n_channels_list: Sequence[int]): 25 | super(LinLayers, self).__init__([ 26 | nn.Sequential( 27 | nn.Identity(), 28 | nn.Conv2d(nc, 1, 1, 1, 0, bias=False) 29 | ) for nc in n_channels_list 30 | ]) 31 | 32 | for param in self.parameters(): 33 | param.requires_grad = False 34 | 35 | 36 | class BaseNet(nn.Module): 37 | def __init__(self): 38 | super(BaseNet, self).__init__() 39 | 40 | # register buffer 41 | self.register_buffer( 42 | 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) 43 | self.register_buffer( 44 | 'std', torch.Tensor([.458, .448, .450])[None, :, None, None]) 45 | 46 | def set_requires_grad(self, state: bool): 47 | for param in chain(self.parameters(), self.buffers()): 48 | param.requires_grad = state 49 | 50 | def z_score(self, x: torch.Tensor): 51 | return (x - self.mean) / self.std 52 | 53 | def forward(self, x: torch.Tensor): 54 | x = self.z_score(x) 55 | 56 | output = [] 57 | for i, (_, layer) in enumerate(self.layers._modules.items(), 1): 58 | x = layer(x) 59 | if i in self.target_layers: 60 | output.append(normalize_activation(x)) 61 | if len(output) == len(self.target_layers): 62 | break 63 | return output 64 | 65 | 66 | class SqueezeNet(BaseNet): 67 | def __init__(self): 68 | super(SqueezeNet, self).__init__() 69 | 70 | self.layers = models.squeezenet1_1(True).features 71 | self.target_layers = [2, 5, 8, 10, 11, 12, 13] 72 | self.n_channels_list = [64, 128, 256, 384, 384, 512, 512] 73 | 74 | self.set_requires_grad(False) 75 | 76 | 77 | class AlexNet(BaseNet): 78 | def __init__(self): 79 | super(AlexNet, self).__init__() 80 | 81 | self.layers = models.alexnet(True).features 82 | self.target_layers = [2, 5, 8, 10, 12] 83 | self.n_channels_list = [64, 192, 384, 256, 256] 84 | 85 | self.set_requires_grad(False) 86 | 87 | 88 | class VGG16(BaseNet): 89 | def __init__(self): 90 | super(VGG16, self).__init__() 91 | 92 | # self.layers = 
models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).features 93 | self.layers = models.vgg16(pretrained=True).features 94 | self.target_layers = [4, 9, 16, 23, 30] 95 | self.n_channels_list = [64, 128, 256, 512, 512] 96 | 97 | self.set_requires_grad(False) 98 | -------------------------------------------------------------------------------- /nr3d_lib/models/loss/lpipsPyTorch/modules/utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | 5 | 6 | def normalize_activation(x, eps=1e-10): 7 | norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) 8 | return x / (norm_factor + eps) 9 | 10 | 11 | def get_state_dict(net_type: str = 'alex', version: str = '0.1', device=None): 12 | # build url 13 | url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \ 14 | + f'master/lpips/weights/v{version}/{net_type}.pth' 15 | 16 | # download 17 | old_state_dict = torch.hub.load_state_dict_from_url( 18 | url, progress=True, 19 | map_location=device if torch.cuda.is_available() else torch.device('cpu') 20 | ) 21 | 22 | # rename keys 23 | new_state_dict = OrderedDict() 24 | for key, val in old_state_dict.items(): 25 | new_key = key 26 | new_key = new_key.replace('lin', '') 27 | new_key = new_key.replace('model.', '') 28 | new_state_dict[new_key] = val 29 | 30 | return new_state_dict 31 | -------------------------------------------------------------------------------- /nr3d_lib/models/loss/ssim.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file ssim.py 3 | @author Qiusheng Huang, Shanghai AI Lab 4 | @brief ssim loss 5 | 6 | The source code is adapted from: 7 | https://github.com/Po-Hsun-Su/pytorch-ssim 8 | Reference: 9 | [1] Wang Z, Bovik A C, Sheikh H R, et al. 10 | Image quality assessment: from error visibility to structural similarity. 
IEEE transactions on image processing 11 | """ 12 | 13 | __all__ = [ 14 | 'ssim', 15 | 'ssim_module' 16 | ] 17 | 18 | import numpy as np 19 | from math import exp 20 | from numbers import Number 21 | 22 | import torch 23 | import torch.nn as nn 24 | import torch.nn.functional as F 25 | 26 | def create_gaussian(window_size: int, sigma: Number, dtype=torch.float) -> torch.Tensor: 27 | gauss = torch.tensor([exp(-(ws - window_size//2)**2 / float(2*sigma**2)) for ws in range(window_size)], dtype=dtype) 28 | return gauss / gauss.sum() 29 | 30 | def create_window(window_size: int, channel: int, dtype=torch.float) -> torch.Tensor: 31 | window_1d = create_gaussian(window_size, 1.5, dtype=dtype).unsqueeze(1) 32 | window_2d = window_1d.mm(window_1d.t())[None, None, ...].expand(channel, 1, window_size, window_size).contiguous() 33 | return window_2d 34 | 35 | def compute_ssim( 36 | img1: torch.Tensor, img2: torch.Tensor, window: torch.Tensor, 37 | window_size: int, channel: int, size_average = True, stride: int=None, 38 | C1: float = 0.01**2, C2: float = 0.03**2) -> torch.Tensor: 39 | 40 | mu1 = F.conv2d(img1, window, padding=(window_size-1)//2, groups=channel, stride=stride) 41 | mu2 = F.conv2d(img2, window, padding=(window_size-1)//2, groups=channel, stride=stride) 42 | 43 | mu1_sq = mu1.pow(2) 44 | mu2_sq = mu2.pow(2) 45 | mu1_mu2 = mu1*mu2 46 | 47 | sigma1_sq = F.conv2d(img1*img1, window, padding=(window_size-1)//2, groups=channel, stride=stride) - mu1_sq 48 | sigma2_sq = F.conv2d(img2*img2, window, padding=(window_size-1)//2, groups=channel, stride=stride) - mu2_sq 49 | sigma12 = F.conv2d(img1*img2, window, padding=(window_size-1)//2, groups=channel, stride=stride) - mu1_mu2 50 | 51 | ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2)) 52 | 53 | if size_average: 54 | return ssim_map.mean() 55 | else: 56 | return ssim_map.mean(1).mean(1).mean(1) 57 | 58 | #---- Function to directly compute SSIM 59 | def ssim(img1: torch.Tensor, img2: torch.Tensor, window_size: int = 11, size_average = True): 60 | (_, channel, _, _) = img1.size() 61 | window = create_window(window_size, channel).to(img1) 62 | return compute_ssim(img1, img2, window, window_size, channel, size_average) 63 | 64 | #---- Module storage for SSIM 65 | class ssim_module(nn.Module): 66 | def __init__( 67 | self, 68 | window_size: int = 3, 69 | size_average = True, 70 | stride: int = 3, 71 | channel: int = 3, 72 | dtype=torch.float, device=None): 73 | super().__init__() 74 | self.window_size = window_size 75 | self.size_average = size_average 76 | self.channel = channel 77 | self.stride = stride 78 | window = create_window(window_size, self.channel, dtype=dtype).to(device=device) 79 | self.register_buffer('window', window, persistent=False) 80 | 81 | def forward(self, img1: torch.Tensor, img2: torch.Tensor): 82 | """ 83 | img1, img2: torch.Tensor([b,c,h,w]) 84 | """ 85 | (_, channel, _, _) = img1.size() 86 | assert self.channel == channel, f"Input channel does not match (should be [B, {self.channel}, H, W], but got {list(img1.shape)})" 87 | return compute_ssim(img1, img2, self.window.to(img1), self.window_size, channel, self.size_average, stride=self.stride) 88 | -------------------------------------------------------------------------------- /nr3d_lib/models/loss/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file utils.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Utilities for loss calculations 5 | """ 6 | 7 | import torch 8 
| import numpy as np 9 | from typing import Literal, Union 10 | 11 | def reduce( 12 | loss: Union[torch.Tensor, np.ndarray], 13 | mask: Union[torch.Tensor, np.ndarray] = None, 14 | reduction: Literal['mean', 'mean_in_mask', 'sum', 'max', 'min', 'none']='mean'): 15 | 16 | if mask is not None: 17 | if mask.dim() == loss.dim() - 1: 18 | mask = mask.view(*loss.shape[:-1], 1).expand_as(loss) 19 | assert loss.dim() == mask.dim(), f"Expects loss.dim()={loss.dim()} to be equal to mask.dim()={mask.dim()}" 20 | 21 | if reduction == 'mean': 22 | return loss.mean() if mask is None else (loss * mask).mean() 23 | elif reduction == 'mean_in_mask': 24 | return loss.mean() if mask is None else (loss * mask).sum() / mask.sum().clip(1e-5) 25 | elif reduction == 'sum': 26 | return loss.sum() if mask is None else (loss * mask).sum() 27 | elif reduction == 'max': 28 | return loss.max() if mask is None else loss[mask].max() 29 | elif reduction == 'min': 30 | return loss.min() if mask is None else loss[mask].min() 31 | elif reduction == 'none': 32 | return loss if mask is None else loss * mask 33 | else: 34 | raise RuntimeError(f"Invalid reduction={reduction}") -------------------------------------------------------------------------------- /nr3d_lib/models/modulations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/modulations/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/spatial/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from .aabb import * 4 | from .aabb_dynamic import * 5 | from .batched import * 6 | from .batched_dynamic import * 7 | from .forest import * 8 | from .utils import * 9 | 10 | def get_space(space_cfg: Union[str, dict] = None): 11 | if space_cfg is None: 12 | return None 13 | 14 | if isinstance(space_cfg, str): 15 | space_cfg = {'type': space_cfg} 16 | else: 17 | space_cfg = space_cfg.copy() 18 | space_type = space_cfg.pop('type').lower() 19 | if space_type == 'aabb': 20 | space = AABBSpace(**space_cfg) 21 | elif space_type == 'aabb_dynamic': 22 | space = AABBDynamicSpace(**space_cfg) 23 | elif space_type == 'batched': 24 | space = BatchedBlockSpace(**space_cfg) 25 | elif space_type == 'batched_dynamic': 26 | space = BatchedDynamicSpace(**space_cfg) 27 | elif space_type == 'forest': 28 | space = ForestBlockSpace(**space_cfg) 29 | elif space_type == 'unbounded' or space_type == 'none': 30 | space = None 31 | else: 32 | raise RuntimeError(f"Invalid space_type={space_type}") 33 | return space -------------------------------------------------------------------------------- /nr3d_lib/models/spatial/aabb_dynamic.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file aabb_dynamic.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Handles a single-block AABB (Axis-Aligned Bounding Box) space with an additional time dimension 5 | """ 6 | 7 | __all__ = [ 8 | 'AABBDynamicSpace', 9 | ] 10 | 11 | import numpy as np 12 | from typing import Literal, Tuple, Union 13 | 14 | import torch 15 | import torch.nn as nn 16 | 17 | from nr3d_lib.utils import check_to_torch 18 | 19 | from .aabb import AABBSpace 20 | 21 | class AABBDynamicSpace(AABBSpace): 22 | # def __init__( 23 | # self, *args, 24 | # ts_keyframes: Union[np.ndarray, torch.Tensor] = ..., 25 | # **kwargs) -> None: 26 | # super().__init__(*args,
**kwargs) 27 | # ts_keyframes = check_to_torch(ts_keyframes, dtype=self.dtype, device=self.device) 28 | # self.register_buffer('ts_keyframes', ts_keyframes, persistent=True) 29 | 30 | def normalize_ts(self, ts: torch.Tensor): 31 | return ts 32 | 33 | def unnormalize_ts(self, ts: torch.Tensor): 34 | return ts 35 | 36 | def sample_pts_uniform(self, num_pts: int) -> Tuple[torch.Tensor, torch.Tensor]: 37 | x = super().sample_pts_uniform(num_pts) 38 | ts = torch.empty([num_pts,], dtype=torch.float, device=self.device).uniform_(-1,1) 39 | return x, ts 40 | 41 | # def sample_pts_uniform(self, num_pts: int) -> Tuple[torch.Tensor, torch.Tensor]: 42 | # x = super().sample_pts_uniform(num_pts) 43 | # ts_w = torch.rand([num_pts], dtype=torch.float, device=self.device) 44 | # ts_i = torch.randint(len(self.ts_keyframes)-1, [num_pts], dtype=torch.long, device=self.device) 45 | # ts = torch.lerp(self.ts_keyframes[ts_i], self.ts_keyframes[ts_i+1], ts_w) 46 | # return x, ts 47 | 48 | # def extra_repr(self) -> str: 49 | # extra_repr = super().extra_repr() 50 | # extra_repr += \ 51 | # f", num_frames={len(self.ts_keyframes)}"\ 52 | # f", ts_from={self.ts_keyframes[0].item():.3f}"\ 53 | # f", ts_to={self.ts_keyframes[-1].item():.3f}" 54 | # return extra_repr 55 | -------------------------------------------------------------------------------- /nr3d_lib/models/spatial/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | @file utils.py 3 | @author Jianfei Guo, Shanghai AI Lab 4 | @brief Utility functions for acceleration structs. 5 | """ 6 | 7 | __all__ = [ 8 | 'create_dense_grid', 9 | 'create_octree_dense', 10 | 'create_octree_root_only', 11 | 'octree_to_spc_ins' 12 | ] 13 | import torch 14 | 15 | try: 16 | from kaolin.rep.spc import Spc 17 | from kaolin.ops.spc import unbatched_points_to_octree 18 | except ImportError: 19 | from nr3d_lib.fmt import log 20 | log.warning("kaolin is not installed. OctreeAS / ForestAS disabled.") 21 | 22 | def create_dense_grid(level: int, device=torch.device('cuda')): 23 | coords = torch.stack( 24 | torch.meshgrid( 25 | [torch.arange(2**level, device=device, dtype=torch.short) for _ in range(3)], 26 | indexing='ij'), 27 | dim=-1).reshape(-1, 3) 28 | return coords 29 | 30 | def create_octree_dense(level: int, device=torch.device('cuda')): 31 | assert level > 0, "level must be > 0 during creation of octree."
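# NOTE: a dense grid at `level` enumerates 2**level cells per axis, i.e. 8**level voxels in total
# (level 8 -> ~16.8M points), so building a fully dense octree is only practical for small levels.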
32 | coords = create_dense_grid(level, device=device) 33 | octree = unbatched_points_to_octree(coords, level) 34 | return octree 35 | 36 | def create_octree_root_only(device=torch.device('cuda')): 37 | return torch.tensor([255], device=device, dtype=torch.uint8) 38 | 39 | def octree_to_spc_ins(octree): 40 | lengths = torch.tensor([len(octree)], dtype=torch.int32) 41 | spc = Spc(octree, lengths) 42 | spc._apply_scan_octrees() 43 | spc._apply_generate_points() 44 | return spc -------------------------------------------------------------------------------- /nr3d_lib/models/temporal/flow: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/temporal/flow -------------------------------------------------------------------------------- /nr3d_lib/models/tetrahedral/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/models/tetrahedral/__init__.py -------------------------------------------------------------------------------- /nr3d_lib/models/tetrahedral/splatet.py: -------------------------------------------------------------------------------- 1 | 2 | import torch 3 | import torch.nn as nn -------------------------------------------------------------------------------- /nr3d_lib/models/tetrahedral/unit_test.py: -------------------------------------------------------------------------------- 1 | 2 | from vedo import * 3 | 4 | import torch 5 | from nr3d_lib.models.explicits.tetrahedral.dmtet import DMTet, auto_face_normals 6 | 7 | if __name__ == "__main__": 8 | def test_isosurface_1(): 9 | device = torch.device('cuda') 10 | 11 | dmtet = DMTet() 12 | 13 | pos_nx3 = torch.tensor([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.]], device=device) 14 | tet_fx4 = torch.tensor([[0, 1, 2, 3]], device=device) 15 | sdf_n = torch.tensor([-1., 1., 1., 1.], device=device) 16 | verts, faces, uvs, uv_idx = dmtet(pos_nx3, sdf_n, tet_fx4) 17 | 18 | pos_nx3 = torch.tensor([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.], [-1., 0., 0.]], device=device) 19 | tet_fx4 = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 4]], device=device) 20 | sdf_n = torch.tensor([-1., 1., 1., 1., 1.], device=device) 21 | verts, faces, uvs, uv_idx = dmtet(pos_nx3, sdf_n, tet_fx4) 22 | m = Mesh([verts.cpu().numpy(), faces.cpu().numpy()]) 23 | show(m) 24 | 25 | def test_isosurface_2(): 26 | device = torch.device('cuda') 27 | dmtet = DMTet() 28 | """ 29 | Test isosurfaces at multiple levels 30 | """ 31 | pos_nx3 = torch.tensor([[0., 0., 0.], [0., 0., 1.], [0., 1., 0.], [1., 0., 0.]], device=device) 32 | tet_fx4 = torch.tensor([[0, 1, 2, 3]], device=device) 33 | sdf_n = torch.tensor([-1., 1., 2., 3.], device=device) 34 | 35 | mesh_actor_list = [] 36 | normals_list = [] 37 | levels = np.linspace(-1, 3, 8) 38 | 39 | for level in levels: 40 | c = color_map(level, 'RdBu', vmin=levels[0], vmax=levels[-1]) 41 | verts, faces, *_ = dmtet(pos_nx3, sdf_n, tet_fx4, level) 42 | face_normals = auto_face_normals(verts, faces) 43 | m = Mesh([verts.cpu().numpy(), faces.cpu().numpy()]) 44 | m.lighting('ambient').color(c) 45 | mesh_actor_list.append(m) 46 | # Even if the isosurface passes through different edges, there are still only minor floating-point differences between these normals, indicating that it must be a quantity independent of the value of the isosurface. 
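# (This is expected: within a single tetrahedron the SDF is interpolated linearly, so all of its
# level sets are parallel planes whose shared normal is the gradient direction of that linear field.)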
47 | normals_list.append(face_normals.tolist()) 48 | print(np.array(normals_list)) 49 | 50 | # Actor for tetrahedra 51 | tet_edges_v_idx = tet_fx4[:, dmtet.base_tet_edges] 52 | tet_edges_v_flat = pos_nx3[tet_edges_v_idx] 53 | tet_edges_np = tet_edges_v_flat.view(-1, 6, 2, 3).view(-1, 2, 3).data.cpu().numpy() 54 | tet_actor = Lines(tet_edges_np[:, 0], tet_edges_np[:, 1]) 55 | 56 | show(*mesh_actor_list, tet_actor) 57 | 58 | def test_isosurface_3(): 59 | pass 60 | 61 | # test_isosurface_1() 62 | test_isosurface_2() 63 | test_isosurface_3() -------------------------------------------------------------------------------- /nr3d_lib/plot/__init__.py: -------------------------------------------------------------------------------- 1 | from .plot_basic import * 2 | from .plot_2d import * 3 | from .plot_3d import * 4 | from .plot_dynamic import * 5 | -------------------------------------------------------------------------------- /nr3d_lib/tests/dbg_forest.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PJLab-ADG/nr3d_lib/1ba93ac63b00810ea4f713280aeb2baf21cbfb4c/nr3d_lib/tests/dbg_forest.pt -------------------------------------------------------------------------------- /set_env.sh: -------------------------------------------------------------------------------- 1 | # NOTE: Insert parent directory into PYTHONPATH 2 | # Usage: source set_env.sh 3 | 4 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 5 | script_dir=$(realpath $script_dir) 6 | parent_dir=$(dirname $script_dir) 7 | export PYTHONPATH="${parent_dir}":$PYTHONPATH 8 | echo "Added $parent_dir to PYTHONPATH" --------------------------------------------------------------------------------