├── .gitattributes ├── .github └── workflows │ └── rust.yml ├── .gitignore ├── .vscode └── settings.json ├── CONTRIBUTING.md ├── Cargo.toml ├── DESIGN.md ├── LICENSE ├── README.md ├── crates ├── building_blocks_core │ ├── Cargo.toml │ └── src │ │ ├── axis.rs │ │ ├── crate_doc.md │ │ ├── extent.rs │ │ ├── lib.rs │ │ ├── morton.rs │ │ ├── orthant.rs │ │ ├── point.rs │ │ ├── point │ │ ├── cgmath_conversions.rs │ │ ├── glam_conversions.rs │ │ ├── mint_conversions.rs │ │ ├── nalgebra_conversions.rs │ │ ├── point2.rs │ │ ├── point3.rs │ │ ├── point_traits.rs │ │ ├── sdfu_integration.rs │ │ └── vox_format_conversions.rs │ │ └── sphere.rs ├── building_blocks_mesh │ ├── Cargo.toml │ ├── benches │ │ ├── greedy_quads.rs │ │ ├── height_map.rs │ │ └── surface_nets.rs │ └── src │ │ ├── crate_doc.md │ │ ├── greedy_quads.rs │ │ ├── height_map.rs │ │ ├── lib.rs │ │ ├── quad.rs │ │ └── surface_nets.rs ├── building_blocks_search │ ├── Cargo.toml │ ├── benches │ │ ├── find_surface.rs │ │ └── flood_fill.rs │ └── src │ │ ├── collision.rs │ │ ├── collision │ │ ├── ball.rs │ │ └── ray.rs │ │ ├── find_surface.rs │ │ ├── flood_fill.rs │ │ ├── grid_ray_traversal.rs │ │ ├── lib.rs │ │ ├── octree_dbvt.rs │ │ └── pathfinding.rs ├── building_blocks_storage │ ├── Cargo.toml │ ├── benches │ │ ├── accessors.rs │ │ ├── compression.rs │ │ ├── database.rs │ │ ├── octree_set.rs │ │ └── sampling.rs │ └── src │ │ ├── access_traits.rs │ │ ├── array.rs │ │ ├── array │ │ ├── channels.rs │ │ ├── channels │ │ │ ├── channel.rs │ │ │ ├── compression.rs │ │ │ └── multichannel.rs │ │ ├── compression.rs │ │ ├── coords.rs │ │ ├── dot_vox_conversions.rs │ │ ├── for_each.rs │ │ ├── for_each │ │ │ ├── for_each2.rs │ │ │ ├── for_each3.rs │ │ │ ├── lock_step.rs │ │ │ ├── single_array.rs │ │ │ └── stride_iter.rs │ │ ├── image_conversions.rs │ │ └── indexer.rs │ │ ├── bitset.rs │ │ ├── caching.rs │ │ ├── caching │ │ ├── local_cache.rs │ │ └── lru_cache.rs │ │ ├── chunk_tree.rs │ │ ├── chunk_tree │ │ ├── builder.rs │ │ ├── clipmap.rs │ │ ├── indexer.rs │ │ ├── lod_view.rs │ │ ├── sampling.rs │ │ ├── sampling │ │ │ ├── point.rs │ │ │ └── sdf_mean.rs │ │ ├── storage.rs │ │ └── storage │ │ │ ├── compressible.rs │ │ │ └── hash_map.rs │ │ ├── compression.rs │ │ ├── compression │ │ ├── compressed_bincode.rs │ │ ├── lz4_compression.rs │ │ └── snappy_compression.rs │ │ ├── crate_doc.md │ │ ├── database.rs │ │ ├── database │ │ ├── chunk_db.rs │ │ ├── delta_batch.rs │ │ ├── key.rs │ │ ├── read_result.rs │ │ ├── readable.rs │ │ └── versioned_chunk_db.rs │ │ ├── dot_vox_conversions.rs │ │ ├── func.rs │ │ ├── image_conversions.rs │ │ ├── lib.rs │ │ ├── multi_ptr.rs │ │ ├── octree_set.rs │ │ ├── signed_distance.rs │ │ ├── test_utilities.rs │ │ ├── transform_map.rs │ │ └── vox_format.rs └── utilities │ ├── Cargo.toml │ └── src │ ├── data_sets.rs │ ├── lib.rs │ ├── noise.rs │ └── test.rs ├── examples ├── Cargo.toml ├── README.md ├── array_texture_materials │ ├── array_texture_materials.rs │ └── camera_rotation.rs ├── assets │ ├── materials.png │ ├── test_single_model_default_palette.vox │ └── uv_checker.png ├── bevy_utilities │ ├── Cargo.toml │ └── src │ │ ├── camera.rs │ │ ├── lib.rs │ │ ├── mesh.rs │ │ ├── noise.rs │ │ └── thread_local_resource.rs ├── lod_terrain │ ├── chunk_compressor.rs │ ├── chunk_generator.rs │ ├── clip_spheres.rs │ ├── frame_budget.rs │ ├── lod_terrain.rs │ ├── map.ron │ ├── mesh_generator.rs │ ├── new_slot_detector.rs │ ├── sync_batch.rs │ ├── voxel_map.rs │ └── voxel_mesh.rs ├── mesh_showcase │ ├── camera_rotation.rs │ ├── 
mesh_generator.rs │ └── mesh_showcase.rs ├── quad_mesh_uvs │ ├── camera_rotation.rs │ └── quad_mesh_uvs.rs ├── screenshots │ ├── array_texture_materials.png │ ├── lod_terrain.png │ ├── mesh_showcase.gif │ ├── quad_mesh_uvs.png │ ├── sdf_mesh.png │ └── wireframe.png └── sdf_mesh │ └── sdf_mesh.rs └── src └── lib.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | *.png filter=lfs diff=lfs merge=lfs -text 2 | assets/ filter=lfs diff=lfs merge=lfs -text 3 | examples/screenshots/ filter=lfs diff=lfs merge=lfs -text 4 | *.gif filter=lfs diff=lfs merge=lfs -text 5 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | paths: 7 | - '**/*.rs' 8 | - '.github/workflows/*' 9 | pull_request: 10 | branches: [ main ] 11 | paths: 12 | - '**/*.rs' 13 | - '.github/workflows/*' 14 | 15 | env: 16 | CARGO_TERM_COLOR: always 17 | 18 | jobs: 19 | build-and-test: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - name: Cancel Workflow Action 25 | uses: styfle/cancel-workflow-action@0.6.0 26 | with: 27 | access_token: ${{ github.token }} 28 | - uses: actions/checkout@v2 29 | - name: Get Latest Stable Rust 30 | uses: actions-rs/toolchain@v1 31 | with: 32 | toolchain: stable 33 | default: true 34 | components: clippy 35 | - name: Install Dependencies 36 | run: sudo apt install libasound2-dev libudev-dev 37 | - name: Lint 38 | run: cargo clippy --all --all-features 39 | - name: Build 40 | run: cargo build --verbose --release --all --all-features && cargo bench --all --no-run 41 | - name: Run tests 42 | run: cargo test --verbose --release --all --all-features 43 | - name: Build Examples 44 | run: cd examples && cargo build --examples --verbose --release 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.cargo.features": [ 3 | "dot_vox", 4 | "glam", 5 | "image", 6 | "lz4", 7 | "mesh", 8 | "mint", 9 | "nalgebra", 10 | "ncollide", 11 | "sdfu" 12 | ], 13 | "files.eol": "\n" 14 | } -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | We strongly encourage users to contribute improvements to Building Blocks when 4 | they identify gaps in performance or functionality. Please also be cognizant of 5 | the fact that we desire an elegant and coherent design for our feature set. If 6 | you have an idea for how to improve Building Blocks, please consult the roadmap, 7 | issue tracker, and project board to see if your idea is already tracking. 8 | Otherwise, please discuss your plan via either our Discord server or by opening 9 | an issue. We generally prefer for large new features to first take the form of a 10 | working prototype in a separate repository to demonstrate the added value, then 11 | we can begin discussing how to integrate those ideas into the library. 
12 | 13 | Pull requests are expected to add unit tests for complex logic and benchmarks 14 | where offering good performance is expected to be challenging. Please ask if you 15 | have questions about how to test something. 16 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "building-blocks" 3 | version = "0.7.0" 4 | edition = "2018" 5 | authors = ["Duncan "] 6 | description = "Data types, collections, and algorithms for working with maps on 2D and 3D integer lattices. Commonly known as voxel data." 7 | license = "MIT" 8 | repository = "https://github.com/bonsairobo/building-blocks" 9 | keywords = ["voxel"] 10 | 11 | [package.metadata.docs.rs] 12 | all-features = true 13 | 14 | [profile] 15 | dev = { opt-level = 2 } 16 | release = { lto = "thin" } 17 | bench = { lto = "thin" } 18 | 19 | [features] 20 | default = ["lz4", "mesh", "sdfu", "search", "sled"] 21 | 22 | # Optional crates. 23 | mesh = ["building_blocks_mesh"] 24 | search = ["building_blocks_search"] 25 | 26 | # Integrations. 27 | dot_vox = ["building_blocks_storage/dot_vox"] 28 | vox-format = ["building_blocks_storage/vox-format-1", "building_blocks_core/vox-format"] 29 | image = ["building_blocks_storage/image"] 30 | sdfu = ["building_blocks_core/sdfu"] 31 | sled = ["building_blocks_storage/sled"] 32 | sled-snapshots = ["building_blocks_storage/sled-snapshots"] 33 | trace = ["building_blocks_storage/tracing"] 34 | 35 | # Math type conversions. 36 | glam = ["building_blocks_core/glam"] 37 | mint = ["building_blocks_core/mint"] 38 | nalgebra = ["building_blocks_core/nalgebra"] 39 | cgmath = ["building_blocks_core/cgmath"] 40 | 41 | # Compression backends. 42 | lz4 = ["building_blocks_storage/lz4"] 43 | snappy = ["building_blocks_storage/snap"] 44 | 45 | # Collisions with `OctreeSet` and `OctreeDBVT`. 
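# (This forwards to building_blocks_search's "ncollide" feature, which in turn pulls in nalgebra and ncollide3d.)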
46 | ncollide = ["building_blocks_search/ncollide"] 47 | 48 | [workspace] 49 | members = ["crates/*"] 50 | exclude = ["benches", "examples"] 51 | 52 | # TODO: use RFC #2906 to deduplicate dependency specs once it is merged 53 | 54 | [dependencies] 55 | building_blocks_core = { path = "crates/building_blocks_core", version = "0.7.0", default-features = false } 56 | building_blocks_storage = { path = "crates/building_blocks_storage", version = "0.7.0", default-features = false } 57 | 58 | # Optional, feature-gated 59 | building_blocks_mesh = { path = "crates/building_blocks_mesh", version = "0.7.0", default-features = false, optional = true } 60 | building_blocks_search = { path = "crates/building_blocks_search", version = "0.7.0", default-features = false, optional = true } 61 | 62 | [dev-dependencies] 63 | utilities = { path = "crates/utilities" } 64 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 bonsairobo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /crates/building_blocks_core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "building_blocks_core" 3 | version = "0.7.0" 4 | edition = "2018" 5 | authors = ["Duncan "] 6 | license = "MIT" 7 | repository = "https://github.com/bonsairobo/building-blocks" 8 | keywords = ["voxel"] 9 | 10 | description = "The core data types for defining 2D and 3D integer lattices." 11 | 12 | [package.metadata.docs.rs] 13 | all-features = true 14 | 15 | [features] 16 | default = [] 17 | 18 | [dependencies] 19 | bytemuck = "1.7" 20 | itertools = "0.10" 21 | morton-encoding = "2.0" 22 | num = "0.4" 23 | 24 | # Optional, feature-gated. 
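# These enable the serde derives and the per-crate conversion modules under src/point/ (gated by cfg flags in point.rs).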
25 | serde = { version = "1.0", features = ["derive"], optional = true } 26 | glam = { version = "0.13.0", optional = true } 27 | mint = { version = "0.5.0", optional = true } 28 | nalgebra = { version = "0.28", optional = true } 29 | sdfu = { version = "0.3", optional = true } 30 | vox-format = { version = "0.1", optional = true } 31 | cgmath = { version = "0.18", optional = true } 32 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/axis.rs: -------------------------------------------------------------------------------- 1 | use crate::{Point2i, Point3i, PointN}; 2 | 3 | /// Either the X or Y axis. 4 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 5 | pub enum Axis2 { 6 | X = 0, 7 | Y = 1, 8 | } 9 | 10 | impl Axis2 { 11 | /// The index for a point's component on this axis. 12 | #[inline] 13 | pub fn index(&self) -> usize { 14 | *self as usize 15 | } 16 | 17 | #[inline] 18 | pub fn get_unit_vector(&self) -> Point2i { 19 | match self { 20 | Axis2::X => PointN([1, 0]), 21 | Axis2::Y => PointN([0, 1]), 22 | } 23 | } 24 | } 25 | 26 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 27 | pub struct SignedAxis2 { 28 | pub sign: i32, 29 | pub axis: Axis2, 30 | } 31 | 32 | impl SignedAxis2 { 33 | #[inline] 34 | pub fn new(sign: i32, axis: Axis2) -> Self { 35 | Self { sign, axis } 36 | } 37 | 38 | #[inline] 39 | pub fn get_vector(&self) -> Point2i { 40 | self.axis.get_unit_vector() * self.sign 41 | } 42 | 43 | #[inline] 44 | pub fn from_vector(v: Point2i) -> Option { 45 | match v { 46 | PointN([x, 0]) => Some(SignedAxis2::new(x, Axis2::X)), 47 | PointN([0, y]) => Some(SignedAxis2::new(y, Axis2::Y)), 48 | _ => None, 49 | } 50 | } 51 | } 52 | 53 | /// Either the X, Y, or Z axis. 54 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 55 | pub enum Axis3 { 56 | X = 0, 57 | Y = 1, 58 | Z = 2, 59 | } 60 | 61 | impl Axis3 { 62 | /// The index for a point's component on this axis. 
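    /// For example, `Axis3::Y.index()` is `1`, matching the component order of `PointN([x, y, z])`.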
63 | #[inline] 64 | pub fn index(&self) -> usize { 65 | *self as usize 66 | } 67 | 68 | #[inline] 69 | pub const fn get_unit_vector(&self) -> Point3i { 70 | match self { 71 | Axis3::X => PointN([1, 0, 0]), 72 | Axis3::Y => PointN([0, 1, 0]), 73 | Axis3::Z => PointN([0, 0, 1]), 74 | } 75 | } 76 | } 77 | 78 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 79 | pub enum Axis3Permutation { 80 | // Even permutations 81 | Xyz, 82 | Zxy, 83 | Yzx, 84 | // Odd permutations 85 | Zyx, 86 | Xzy, 87 | Yxz, 88 | } 89 | 90 | impl Axis3Permutation { 91 | #[inline] 92 | pub const fn even_with_normal_axis(axis: Axis3) -> Self { 93 | match axis { 94 | Axis3::X => Axis3Permutation::Xyz, 95 | Axis3::Y => Axis3Permutation::Yzx, 96 | Axis3::Z => Axis3Permutation::Zxy, 97 | } 98 | } 99 | 100 | #[inline] 101 | pub const fn odd_with_normal_axis(axis: Axis3) -> Self { 102 | match axis { 103 | Axis3::X => Axis3Permutation::Xzy, 104 | Axis3::Y => Axis3Permutation::Yxz, 105 | Axis3::Z => Axis3Permutation::Zyx, 106 | } 107 | } 108 | 109 | #[inline] 110 | pub const fn sign(&self) -> i32 { 111 | match self { 112 | Axis3Permutation::Xyz => 1, 113 | Axis3Permutation::Zxy => 1, 114 | Axis3Permutation::Yzx => 1, 115 | Axis3Permutation::Zyx => -1, 116 | Axis3Permutation::Xzy => -1, 117 | Axis3Permutation::Yxz => -1, 118 | } 119 | } 120 | 121 | #[inline] 122 | pub const fn axes(&self) -> [Axis3; 3] { 123 | match self { 124 | Axis3Permutation::Xyz => [Axis3::X, Axis3::Y, Axis3::Z], 125 | Axis3Permutation::Zxy => [Axis3::Z, Axis3::X, Axis3::Y], 126 | Axis3Permutation::Yzx => [Axis3::Y, Axis3::Z, Axis3::X], 127 | Axis3Permutation::Zyx => [Axis3::Z, Axis3::Y, Axis3::X], 128 | Axis3Permutation::Xzy => [Axis3::X, Axis3::Z, Axis3::Y], 129 | Axis3Permutation::Yxz => [Axis3::Y, Axis3::X, Axis3::Z], 130 | } 131 | } 132 | } 133 | 134 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 135 | pub struct SignedAxis3 { 136 | pub sign: i32, 137 | pub axis: Axis3, 138 | } 139 | 140 | impl SignedAxis3 { 141 | #[inline] 142 | pub const fn new(sign: i32, axis: Axis3) -> Self { 143 | Self { sign, axis } 144 | } 145 | 146 | #[inline] 147 | pub fn get_vector(&self) -> Point3i { 148 | self.axis.get_unit_vector() * self.sign 149 | } 150 | 151 | #[inline] 152 | pub const fn from_vector(v: Point3i) -> Option { 153 | match v { 154 | PointN([x, 0, 0]) => Some(SignedAxis3::new(x, Axis3::X)), 155 | PointN([0, y, 0]) => Some(SignedAxis3::new(y, Axis3::Y)), 156 | PointN([0, 0, z]) => Some(SignedAxis3::new(z, Axis3::Z)), 157 | _ => None, 158 | } 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/crate_doc.md: -------------------------------------------------------------------------------- 1 | The core data types for defining 2D and 3D integer lattices. 
2 | 3 | - [`PointN`]: an N-dimensional point, most importantly [`Point2i`] and [`Point3i`] 4 | - [`ExtentN`]: an N-dimensional extent, most importantly [`Extent2i`] and [`Extent3i`] 5 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(clippy::missing_inline_in_public_items)] 2 | #![deny( 3 | rust_2018_compatibility, 4 | rust_2018_idioms, 5 | nonstandard_style, 6 | unused, 7 | future_incompatible 8 | )] 9 | #![warn(clippy::doc_markdown)] 10 | #![doc = include_str!("crate_doc.md")] 11 | 12 | mod axis; 13 | mod extent; 14 | mod morton; 15 | mod orthant; 16 | mod point; 17 | mod sphere; 18 | 19 | pub use axis::*; 20 | pub use extent::*; 21 | pub use morton::*; 22 | pub use orthant::*; 23 | pub use point::*; 24 | pub use sphere::*; 25 | 26 | pub use bytemuck; 27 | pub use num; 28 | pub use itertools; 29 | 30 | #[doc(hidden)] 31 | pub mod prelude { 32 | pub use super::{ 33 | point::point_traits::*, Axis2, Axis3, Extent2, Extent2f, Extent2i, Extent3, Extent3f, 34 | Extent3i, ExtentN, Morton2, Morton3, Octant, Orthant, Point2, Point2f, Point2i, Point3, 35 | Point3f, Point3i, PointN, Quadrant, Sphere2, Sphere3, 36 | }; 37 | } 38 | 39 | #[cfg(feature = "glam")] 40 | pub use glam; 41 | 42 | #[cfg(feature = "mint")] 43 | pub use mint; 44 | 45 | #[cfg(feature = "nalgebra")] 46 | pub use nalgebra as na; 47 | 48 | #[cfg(feature = "sdfu")] 49 | pub use sdfu; 50 | 51 | /// Given an array of 4 corners of a rectangle, this contains pairs of indices that make up the edges. 52 | pub const EDGES_2: [[usize; 2]; 4] = [[0b00, 0b01], [0b00, 0b10], [0b10, 0b11], [0b01, 0b11]]; 53 | 54 | /// Given an array of 8 corners of a rectangular prism, this contains pairs of indices that make up the edges. 55 | pub const EDGES_3: [[usize; 2]; 12] = [ 56 | [0b000, 0b001], 57 | [0b000, 0b010], 58 | [0b000, 0b100], 59 | [0b001, 0b011], 60 | [0b001, 0b101], 61 | [0b010, 0b011], 62 | [0b010, 0b110], 63 | [0b011, 0b111], 64 | [0b100, 0b101], 65 | [0b100, 0b110], 66 | [0b101, 0b111], 67 | [0b110, 0b111], 68 | ]; 69 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/morton.rs: -------------------------------------------------------------------------------- 1 | use crate::{Point2i, Point3i}; 2 | 3 | use morton_encoding::{morton_decode, morton_encode}; 4 | use std::fmt; 5 | 6 | // ██████╗ ██████╗ 7 | // ╚════██╗██╔══██╗ 8 | // █████╔╝██║ ██║ 9 | // ██╔═══╝ ██║ ██║ 10 | // ███████╗██████╔╝ 11 | // ╚══════╝╚═════╝ 12 | 13 | /// A Morton-encoded `Point2i`. Uses a `u64` to support the full set of `Point2i`s. 
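/// The code interleaves the bits of the order-preserving translated x and y coordinates, so nearby points tend to map to nearby codes.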
14 | /// 15 | /// 16 | #[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] 17 | pub struct Morton2(pub u64); 18 | 19 | impl fmt::Debug for Morton2 { 20 | #[inline] 21 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 22 | write!(f, "{} = {:b}", self.0, self.0) 23 | } 24 | } 25 | 26 | impl From for Morton2 { 27 | #[inline] 28 | fn from(p: Point2i) -> Self { 29 | Self(morton_encode([ 30 | translate(p.y()) as u32, 31 | translate(p.x()) as u32, 32 | ])) 33 | } 34 | } 35 | 36 | impl From for Point2i { 37 | #[inline] 38 | fn from(m: Morton2) -> Self { 39 | let yx: [u32; 2] = morton_decode(m.0); 40 | Self([untranslate(yx[1] as i32), untranslate(yx[0] as i32)]) 41 | } 42 | } 43 | 44 | // ██████╗ ██████╗ 45 | // ╚════██╗██╔══██╗ 46 | // █████╔╝██║ ██║ 47 | // ╚═══██╗██║ ██║ 48 | // ██████╔╝██████╔╝ 49 | // ╚═════╝ ╚═════╝ 50 | 51 | /// A Morton-encoded `Point3i`. Uses a `u128` to support the full set of `Point3i`s. 52 | /// 53 | /// 54 | #[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)] 55 | pub struct Morton3(pub u128); 56 | 57 | impl fmt::Debug for Morton3 { 58 | #[inline] 59 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 60 | write!(f, "{} = {:b}", self.0, self.0) 61 | } 62 | } 63 | 64 | impl From for Morton3 { 65 | #[inline] 66 | fn from(p: Point3i) -> Self { 67 | Self(morton_encode([ 68 | translate(p.z()) as u32, 69 | translate(p.y()) as u32, 70 | translate(p.x()) as u32, 71 | ])) 72 | } 73 | } 74 | 75 | impl From for Point3i { 76 | #[inline] 77 | fn from(m: Morton3) -> Self { 78 | let zyx: [u32; 3] = morton_decode(m.0); 79 | Self([ 80 | untranslate(zyx[2] as i32), 81 | untranslate(zyx[1] as i32), 82 | untranslate(zyx[0] as i32), 83 | ]) 84 | } 85 | } 86 | 87 | /// Send the supported range of i32 into the lower 32 bits of a u64 while preserving the total order. 88 | #[inline] 89 | fn translate(x: i32) -> i32 { 90 | x.wrapping_sub(i32::MIN) 91 | } 92 | 93 | /// The inverse of `translate`. 94 | #[inline] 95 | fn untranslate(x: i32) -> i32 { 96 | x.wrapping_add(i32::MIN) 97 | } 98 | 99 | // ████████╗███████╗███████╗████████╗ 100 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 101 | // ██║ █████╗ ███████╗ ██║ 102 | // ██║ ██╔══╝ ╚════██║ ██║ 103 | // ██║ ███████╗███████║ ██║ 104 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 105 | 106 | #[cfg(test)] 107 | mod test { 108 | use super::*; 109 | use crate::PointN; 110 | 111 | #[test] 112 | fn limits_of_i32() { 113 | let min = PointN([i32::MIN; 3]); 114 | let max = PointN([i32::MAX; 3]); 115 | 116 | assert_eq!(Morton3::from(min), Morton3(0)); 117 | assert_eq!(Morton3::from(max), Morton3((1 << 96) - 1)); 118 | 119 | assert_eq!(min, Point3i::from(Morton3::from(min))); 120 | assert_eq!(max, Point3i::from(Morton3::from(max))); 121 | } 122 | 123 | #[test] 124 | fn octants_are_contiguous_in_morton_space() { 125 | let octant_mins = [ 126 | [-2, -2, -2], 127 | [0, -2, -2], 128 | [-2, 0, -2], 129 | [0, 0, -2], 130 | [-2, -2, 0], 131 | [0, -2, 0], 132 | [-2, 0, 0], 133 | [0, 0, 0], 134 | ]; 135 | 136 | for &octant_min in octant_mins.iter() { 137 | let octant_points: Vec<_> = Z_OFFSETS 138 | .iter() 139 | .cloned() 140 | .map(|offset| PointN(octant_min) + PointN(offset)) 141 | .collect(); 142 | 143 | // Decode is inverse of encode. 
144 | for &p in octant_points.iter() { 145 | assert_eq!(p, Point3i::from(Morton3::from(p))); 146 | } 147 | 148 | let octant_mortons: Vec<_> = octant_points 149 | .into_iter() 150 | .map(|p| Morton3::from(p)) 151 | .collect(); 152 | 153 | assert!(mortons_are_contiguous(&octant_mortons)); 154 | } 155 | } 156 | 157 | fn mortons_are_contiguous(mortons: &[Morton3]) -> bool { 158 | let min = mortons[0].0; 159 | let upper_bound = min + mortons.len() as u128; 160 | for (i, expected) in (min..upper_bound).enumerate() { 161 | if mortons[i] != Morton3(expected) { 162 | return false; 163 | } 164 | } 165 | 166 | true 167 | } 168 | 169 | const Z_OFFSETS: [[i32; 3]; 8] = [ 170 | [0, 0, 0], 171 | [1, 0, 0], 172 | [0, 1, 0], 173 | [1, 1, 0], 174 | [0, 0, 1], 175 | [1, 0, 1], 176 | [0, 1, 1], 177 | [1, 1, 1], 178 | ]; 179 | } 180 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/point.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod point_traits; 3 | 4 | #[cfg(feature = "cgmath")] 5 | mod cgmath_conversions; 6 | #[cfg(feature = "glam")] 7 | mod glam_conversions; 8 | #[cfg(feature = "mint")] 9 | mod mint_conversions; 10 | #[cfg(feature = "nalgebra")] 11 | mod nalgebra_conversions; 12 | #[cfg(feature = "sdfu")] 13 | mod sdfu_integration; 14 | #[cfg(feature = "vox-format")] 15 | mod vox_format_conversions; 16 | 17 | mod point2; 18 | mod point3; 19 | 20 | pub use point2::*; 21 | pub use point3::*; 22 | 23 | use point_traits::*; 24 | 25 | use bytemuck::{Pod, Zeroable}; 26 | use core::ops::{Add, AddAssign, Neg, Sub, SubAssign}; 27 | use num::{Signed, Zero}; 28 | 29 | #[cfg(feature = "serde")] 30 | use serde::{Deserialize, Serialize}; 31 | 32 | /// An N-dimensional point (where N=2 or N=3), which is usually just a primitive array like 33 | /// `[i32; 2]` or `[i32; 3]`. It is most convenient to construct points of any dimension as: 34 | /// 35 | /// ``` 36 | /// use building_blocks_core::PointN; 37 | /// 38 | /// let p2 = PointN([1, 2]); // 2D 39 | /// let p3 = PointN([1, 2, 3]); // 3D 40 | /// ``` 41 | /// 42 | /// Points support basic linear algebraic operations such as addition, subtraction, scalar 43 | /// multiplication, and scalar division. 44 | /// 45 | /// ``` 46 | /// # use building_blocks_core::prelude::*; 47 | /// # 48 | /// let p1 = PointN([1, 2]); 49 | /// let p2 = PointN([3, 4]); 50 | /// 51 | /// assert_eq!(p1 + p2, PointN([4, 6])); 52 | /// assert_eq!(p1 - p2, PointN([-2, -2])); 53 | /// 54 | /// assert_eq!(p1 * 2, PointN([2, 4])); 55 | /// assert_eq!(p1 / 2, PointN([0, 1])); 56 | /// 57 | /// // Also some component-wise operations. 58 | /// assert_eq!(p1 * p2, PointN([3, 8])); 59 | /// assert_eq!(p1 / p2, PointN([0, 0])); 60 | /// assert_eq!(p2 / p1, PointN([3, 2])); 61 | /// ``` 62 | /// 63 | /// There is also a partial order defined on points which says that a point A is greater than a 64 | /// point B if and only if all of the components of point A are greater than point B. 
This is useful 65 | /// for easily checking is a point is inside of the extent between two other points: 66 | /// 67 | /// ``` 68 | /// # use building_blocks_core::PointN; 69 | /// # 70 | /// let min = PointN([0, 0, 0]); 71 | /// let least_upper_bound = PointN([3, 3, 3]); 72 | /// 73 | /// let p = PointN([0, 1, 2]); 74 | /// assert!(min <= p && p < least_upper_bound); 75 | /// ``` 76 | #[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq)] 77 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 78 | pub struct PointN(pub N); 79 | 80 | unsafe impl Zeroable for PointN where N: Zeroable {} 81 | unsafe impl Pod for PointN where N: Pod {} 82 | 83 | impl PointN 84 | where 85 | Self: MapComponents, 86 | { 87 | #[inline] 88 | pub fn signum(self) -> Self 89 | where 90 | ::Scalar: Signed, 91 | { 92 | self.map_components_unary(|c| c.signum()) 93 | } 94 | } 95 | 96 | impl Abs for PointN 97 | where 98 | Self: MapComponents, 99 | ::Scalar: Signed, 100 | { 101 | #[inline] 102 | fn abs(self) -> Self { 103 | self.map_components_unary(|c| c.abs()) 104 | } 105 | } 106 | 107 | impl Neg for PointN 108 | where 109 | Self: Copy + Sub + Zero, 110 | { 111 | type Output = Self; 112 | 113 | #[inline] 114 | fn neg(self) -> Self::Output { 115 | Self::zero() - self 116 | } 117 | } 118 | 119 | impl Add for PointN 120 | where 121 | Self: MapComponents, 122 | T: Add, 123 | { 124 | type Output = Self; 125 | 126 | #[inline] 127 | fn add(self, rhs: Self) -> Self::Output { 128 | self.map_components_binary(rhs, |c1, c2| c1 + c2) 129 | } 130 | } 131 | 132 | impl Sub for PointN 133 | where 134 | Self: MapComponents, 135 | T: Sub, 136 | { 137 | type Output = Self; 138 | 139 | #[inline] 140 | fn sub(self, rhs: Self) -> Self::Output { 141 | self.map_components_binary(rhs, |c1, c2| c1 - c2) 142 | } 143 | } 144 | 145 | impl AddAssign for PointN 146 | where 147 | Self: Copy + Add, 148 | { 149 | #[inline] 150 | fn add_assign(&mut self, rhs: Self) { 151 | *self = *self + rhs; 152 | } 153 | } 154 | 155 | impl SubAssign for PointN 156 | where 157 | Self: Copy + Sub, 158 | { 159 | #[inline] 160 | fn sub_assign(&mut self, rhs: Self) { 161 | *self = *self - rhs; 162 | } 163 | } 164 | 165 | impl Zero for PointN 166 | where 167 | Self: Point + ConstZero, 168 | { 169 | #[inline] 170 | fn zero() -> Self { 171 | Self::ZERO 172 | } 173 | 174 | #[inline] 175 | fn is_zero(&self) -> bool { 176 | *self == Self::zero() 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/point/glam_conversions.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use glam as gl; 4 | 5 | impl From for Point2f { 6 | #[inline] 7 | fn from(p: gl::Vec2) -> Self { 8 | PointN([p.x, p.y]) 9 | } 10 | } 11 | 12 | impl From for gl::Vec2 { 13 | #[inline] 14 | fn from(p: Point2f) -> Self { 15 | gl::Vec2::new(p.x(), p.y()) 16 | } 17 | } 18 | 19 | impl From for Point3f { 20 | #[inline] 21 | fn from(p: gl::Vec3) -> Self { 22 | PointN([p.x, p.y, p.z]) 23 | } 24 | } 25 | 26 | impl From for gl::Vec3 { 27 | #[inline] 28 | fn from(p: Point3f) -> Self { 29 | gl::Vec3::new(p.x(), p.y(), p.z()) 30 | } 31 | } 32 | 33 | impl From for Point3f { 34 | #[inline] 35 | fn from(p: gl::Vec3A) -> Self { 36 | PointN([p.x, p.y, p.z]) 37 | } 38 | } 39 | 40 | impl From for gl::Vec3A { 41 | #[inline] 42 | fn from(p: Point3f) -> Self { 43 | gl::Vec3A::new(p.x(), p.y(), p.z()) 44 | } 45 | } 46 | 
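// A minimal usage sketch: the `From` impls above convert in both directions, e.g.
//
//     let p: Point3f = PointN([1.0, 2.0, 3.0]);
//     let v: gl::Vec3 = p.into();
//     assert_eq!(Point3f::from(v), p);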
-------------------------------------------------------------------------------- /crates/building_blocks_core/src/point/mint_conversions.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | impl From> for Point2 { 4 | #[inline] 5 | fn from(p: mint::Point2) -> Self { 6 | PointN([p.x, p.y]) 7 | } 8 | } 9 | 10 | impl From> for mint::Point2 11 | where 12 | T: Clone, 13 | { 14 | #[inline] 15 | fn from(p: Point2) -> Self { 16 | mint::Point2::from_slice(&p.0) 17 | } 18 | } 19 | 20 | impl From> for Point2 { 21 | #[inline] 22 | fn from(p: mint::Vector2) -> Self { 23 | PointN([p.x, p.y]) 24 | } 25 | } 26 | 27 | impl From> for mint::Vector2 28 | where 29 | T: Clone, 30 | { 31 | #[inline] 32 | fn from(p: Point2) -> Self { 33 | mint::Vector2::from_slice(&p.0) 34 | } 35 | } 36 | 37 | impl From> for Point3 { 38 | #[inline] 39 | fn from(p: mint::Point3) -> Self { 40 | PointN([p.x, p.y, p.z]) 41 | } 42 | } 43 | 44 | impl From> for mint::Point3 45 | where 46 | T: Clone, 47 | { 48 | #[inline] 49 | fn from(p: Point3) -> Self { 50 | mint::Point3::from_slice(&p.0) 51 | } 52 | } 53 | 54 | impl From> for Point3 { 55 | #[inline] 56 | fn from(p: mint::Vector3) -> Self { 57 | PointN([p.x, p.y, p.z]) 58 | } 59 | } 60 | 61 | impl From> for mint::Vector3 62 | where 63 | T: Clone, 64 | { 65 | #[inline] 66 | fn from(p: Point3) -> Self { 67 | mint::Vector3::from_slice(&p.0) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/point/sdfu_integration.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use mt::MaxMin; 4 | use sdfu::mathtypes as mt; 5 | 6 | impl mt::Zero for Point2f { 7 | #[inline] 8 | fn zero() -> Self { 9 | Self::ZERO 10 | } 11 | } 12 | 13 | impl mt::One for Point2f { 14 | #[inline] 15 | fn one() -> Self { 16 | Self::ONES 17 | } 18 | } 19 | 20 | impl mt::Clamp for Point2f { 21 | #[inline] 22 | fn clamp(&self, low: Self, high: Self) -> Self { 23 | self.min(high).max(low) 24 | } 25 | } 26 | 27 | impl mt::MaxMin for Point2f { 28 | #[inline] 29 | fn max(&self, other: Self) -> Self { 30 | self.join(other) 31 | } 32 | #[inline] 33 | fn min(&self, other: Self) -> Self { 34 | self.meet(other) 35 | } 36 | } 37 | 38 | impl mt::Zero for Point3f { 39 | #[inline] 40 | fn zero() -> Self { 41 | Point3f::ZERO 42 | } 43 | } 44 | 45 | impl mt::One for Point3f { 46 | #[inline] 47 | fn one() -> Self { 48 | Point3f::ONES 49 | } 50 | } 51 | 52 | impl mt::Clamp for Point3f { 53 | #[inline] 54 | fn clamp(&self, low: Self, high: Self) -> Self { 55 | self.min(high).max(low) 56 | } 57 | } 58 | 59 | impl mt::MaxMin for Point3f { 60 | #[inline] 61 | fn max(&self, other: Self) -> Self { 62 | self.join(other) 63 | } 64 | #[inline] 65 | fn min(&self, other: Self) -> Self { 66 | self.meet(other) 67 | } 68 | } 69 | 70 | impl mt::Vec2 for Point2f { 71 | #[inline] 72 | fn new(x: f32, y: f32) -> Self { 73 | Self([x, y]) 74 | } 75 | #[inline] 76 | fn x(&self) -> f32 { 77 | self.0[0] 78 | } 79 | #[inline] 80 | fn y(&self) -> f32 { 81 | self.0[1] 82 | } 83 | } 84 | 85 | impl mt::Vec3 for Point3f { 86 | #[inline] 87 | fn new(x: f32, y: f32, z: f32) -> Self { 88 | Self([x, y, z]) 89 | } 90 | #[inline] 91 | fn x(&self) -> f32 { 92 | self.0[0] 93 | } 94 | #[inline] 95 | fn y(&self) -> f32 { 96 | self.0[1] 97 | } 98 | #[inline] 99 | fn z(&self) -> f32 { 100 | self.0[2] 101 | } 102 | } 103 | 104 | impl mt::Vec for Point2f { 105 | type Dimension = mt::Dim3D; 
106 | type Vec2 = Point2f; 107 | type Vec3 = Point3f; 108 | 109 | #[inline] 110 | fn dot(&self, other: Self) -> f32 { 111 | DotProduct::dot(*self, other) 112 | } 113 | 114 | #[inline] 115 | fn abs(&self) -> Self { 116 | Abs::abs(*self) 117 | } 118 | 119 | #[inline] 120 | fn normalized(&self) -> Self { 121 | *self / self.norm() 122 | } 123 | 124 | #[inline] 125 | fn magnitude(&self) -> f32 { 126 | self.norm() 127 | } 128 | } 129 | 130 | impl mt::Vec for Point3f { 131 | type Dimension = mt::Dim3D; 132 | type Vec2 = Point2f; 133 | type Vec3 = Point3f; 134 | 135 | #[inline] 136 | fn dot(&self, other: Self) -> f32 { 137 | DotProduct::dot(*self, other) 138 | } 139 | 140 | #[inline] 141 | fn abs(&self) -> Self { 142 | Abs::abs(*self) 143 | } 144 | 145 | #[inline] 146 | fn normalized(&self) -> Self { 147 | *self / self.norm() 148 | } 149 | 150 | #[inline] 151 | fn magnitude(&self) -> f32 { 152 | self.norm() 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/point/vox_format_conversions.rs: -------------------------------------------------------------------------------- 1 | //! Provides conversions for point types from the [`vox-format`] crate. 2 | //! 3 | //! [`vox-format`]: https://docs.rs/vox-format 4 | 5 | use vox_format::types::{Point, Size, Vector}; 6 | 7 | use crate::{Extent3i, Point3i, PointN}; 8 | 9 | impl From> for PointN<[T; 3]> { 10 | #[inline] 11 | fn from(v: Vector) -> Self { 12 | PointN(v.into()) 13 | } 14 | } 15 | 16 | impl From for Extent3i { 17 | #[inline] 18 | fn from(size: Size) -> Self { 19 | // Note: This can fail, if the component is greater than `i32::MAX` 20 | Extent3i::from_min_and_shape( 21 | Default::default(), 22 | PointN([size.x as i32, size.y as i32, size.z as i32]), 23 | ) 24 | } 25 | } 26 | 27 | impl From for Point3i { 28 | #[inline] 29 | fn from(point: Point) -> Self { 30 | PointN([point.x as i32, point.y as i32, point.z as i32]) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /crates/building_blocks_core/src/sphere.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::{Distance, ExtentN, FloatPoint, Point, PointN}; 2 | 3 | #[derive(Debug)] 4 | pub struct Sphere { 5 | pub center: PointN, 6 | pub radius: f32, 7 | } 8 | 9 | pub type Sphere2 = Sphere<[f32; 2]>; 10 | pub type Sphere3 = Sphere<[f32; 3]>; 11 | 12 | impl Clone for Sphere 13 | where 14 | PointN: Clone, 15 | { 16 | #[inline] 17 | fn clone(&self) -> Self { 18 | Self { 19 | center: self.center.clone(), 20 | radius: self.radius, 21 | } 22 | } 23 | } 24 | impl Copy for Sphere where PointN: Copy {} 25 | 26 | impl Sphere 27 | where 28 | PointN: FloatPoint, 29 | { 30 | #[inline] 31 | pub fn contains(&self, other: &Self) -> bool { 32 | let dist = self.center.l2_distance_squared(other.center).sqrt(); 33 | dist + other.radius < self.radius 34 | } 35 | 36 | #[inline] 37 | pub fn intersects(&self, other: &Self) -> bool { 38 | let dist = self.center.l2_distance_squared(other.center).sqrt(); 39 | dist - other.radius < self.radius 40 | } 41 | 42 | #[inline] 43 | pub fn aabb(&self) -> ExtentN { 44 | ExtentN::from_min_and_shape(PointN::fill(-self.radius), PointN::fill(2.0 * self.radius)) 45 | + self.center 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/Cargo.toml: -------------------------------------------------------------------------------- 1 | 
[package] 2 | name = "building_blocks_mesh" 3 | version = "0.7.0" 4 | edition = "2018" 5 | authors = ["Duncan "] 6 | license = "MIT" 7 | repository = "https://github.com/bonsairobo/building-blocks" 8 | keywords = ["voxel"] 9 | 10 | description = "Fast meshing algorithms for voxel data structures." 11 | 12 | [dependencies] 13 | building_blocks_core = { path = "../building_blocks_core", version = "0.7.0", default-features = false } 14 | building_blocks_storage = { path = "../building_blocks_storage", version = "0.7.0", default-features = false } 15 | 16 | [dev-dependencies] 17 | criterion = "0.3" 18 | 19 | [[bench]] 20 | name = "surface_nets" 21 | harness = false 22 | 23 | [[bench]] 24 | name = "height_map" 25 | harness = false 26 | 27 | [[bench]] 28 | name = "greedy_quads" 29 | harness = false 30 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/benches/greedy_quads.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_mesh::*; 3 | use building_blocks_storage::{prelude::*, IsEmpty}; 4 | 5 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 6 | 7 | fn greedy_quads_terrace(c: &mut Criterion) { 8 | let mut group = c.benchmark_group("greedy_quads_terrace"); 9 | for size in [8, 16, 32, 64].iter() { 10 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 11 | b.iter_with_setup( 12 | || { 13 | let extent = 14 | Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(size)).padded(1); 15 | let mut voxels = Array3x1::fill(extent, CubeVoxel(false)); 16 | for i in 0..size { 17 | let level = Extent3i::from_min_and_shape( 18 | Point3i::fill(i), 19 | PointN([size - i, 1, size - i]), 20 | ); 21 | voxels.fill_extent(&level, CubeVoxel(true)); 22 | } 23 | 24 | // Do a single run first to allocate the buffer to the right size. 
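// The timed closure below then reuses this pre-sized buffer, so the benchmark measures meshing work rather than allocation growth.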
25 | let mut buffer = GreedyQuadsBuffer::new( 26 | *voxels.extent(), 27 | RIGHT_HANDED_Y_UP_CONFIG.quad_groups(), 28 | ); 29 | greedy_quads(&voxels, voxels.extent(), &mut buffer); 30 | 31 | (voxels, buffer) 32 | }, 33 | |(voxels, mut buffer)| greedy_quads(&voxels, voxels.extent(), &mut buffer), 34 | ); 35 | }); 36 | } 37 | group.finish(); 38 | } 39 | 40 | criterion_group!(benches, greedy_quads_terrace); 41 | criterion_main!(benches); 42 | 43 | #[derive(Clone)] 44 | struct CubeVoxel(bool); 45 | 46 | #[derive(Eq, PartialEq)] 47 | struct TrivialMergeValue; 48 | 49 | impl MergeVoxel for CubeVoxel { 50 | type VoxelValue = TrivialMergeValue; 51 | 52 | fn voxel_merge_value(&self) -> Self::VoxelValue { 53 | TrivialMergeValue 54 | } 55 | } 56 | 57 | impl IsEmpty for CubeVoxel { 58 | fn is_empty(&self) -> bool { 59 | !self.0 60 | } 61 | } 62 | 63 | impl IsOpaque for CubeVoxel { 64 | fn is_opaque(&self) -> bool { 65 | true 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/benches/height_map.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_mesh::*; 3 | use building_blocks_storage::prelude::*; 4 | 5 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 6 | 7 | fn height_map_plane(c: &mut Criterion) { 8 | let mut group = c.benchmark_group("height_map_plane"); 9 | for size in [8, 16, 32, 64].iter() { 10 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 11 | b.iter_with_setup( 12 | || { 13 | let sample_extent = 14 | Extent2i::from_min_and_max(PointN([0; 2]), PointN([size; 2])); 15 | let mut samples = Array2x1::fill(sample_extent, Pixel(0.0)); 16 | copy_extent(&sample_extent, &Func(plane), &mut samples); 17 | 18 | // Do a single run first to allocate the buffer to the right size. 19 | let mut buffer = HeightMapMeshBuffer::default(); 20 | triangulate_height_map(&samples, samples.extent(), &mut buffer); 21 | 22 | (samples, buffer) 23 | }, 24 | |(samples, mut buffer)| { 25 | triangulate_height_map(&samples, samples.extent(), &mut buffer) 26 | }, 27 | ); 28 | }); 29 | } 30 | group.finish(); 31 | } 32 | 33 | criterion_group!(benches, height_map_plane); 34 | criterion_main!(benches); 35 | 36 | fn plane(p: Point2i) -> Pixel { 37 | Pixel(p.x() as f32 + p.y() as f32) 38 | } 39 | 40 | #[derive(Clone)] 41 | struct Pixel(f32); 42 | 43 | impl Height for Pixel { 44 | fn height(&self) -> f32 { 45 | self.0 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/benches/surface_nets.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_mesh::*; 3 | use building_blocks_storage::prelude::*; 4 | 5 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 6 | 7 | fn surface_nets_sine_sdf(c: &mut Criterion) { 8 | let mut group = c.benchmark_group("surface_nets_sine_sdf"); 9 | for diameter in [8, 16, 32, 64].iter() { 10 | let radius = diameter >> 1; 11 | let sample_extent = 12 | Extent3i::from_min_and_max(Point3i::fill(-radius), Point3i::fill(radius)); 13 | let mut samples = Array3x1::fill(sample_extent, Sd8(0)); 14 | copy_extent(&sample_extent, &Func(sine_sdf), &mut samples); 15 | 16 | // Do a single run first to allocate the buffer to the right size. 
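// Unlike the greedy_quads and height_map benches, this one builds the samples and buffer once per size and reuses them across every `b.iter` run instead of rebuilding them with `iter_with_setup`.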
17 | let mut buffer = SurfaceNetsBuffer::default(); 18 | surface_nets(&samples, samples.extent(), 1.0, true, &mut buffer); 19 | 20 | group.bench_with_input(BenchmarkId::from_parameter(diameter), &(), |b, _| { 21 | b.iter(|| surface_nets(&samples, samples.extent(), 1.0, true, &mut buffer)); 22 | }); 23 | } 24 | group.finish(); 25 | } 26 | 27 | criterion_group!(benches, surface_nets_sine_sdf); 28 | criterion_main!(benches); 29 | 30 | // About the largest radius that can be meshed in a single frame, single-threaded (16.6 ms) 31 | const EXTENT_RADIUS: i32 = 30; 32 | 33 | // The higher the frequency (n) the more surface area to mesh. 34 | fn sine_sdf(p: Point3i) -> Sd8 { 35 | let n = 10.0; 36 | let val = ((p.x() as f32 / EXTENT_RADIUS as f32) * n * std::f32::consts::PI / 2.0).sin() 37 | + ((p.y() as f32 / EXTENT_RADIUS as f32) * n * std::f32::consts::PI / 2.0).sin() 38 | + ((p.z() as f32 / EXTENT_RADIUS as f32) * n * std::f32::consts::PI / 2.0).sin(); 39 | 40 | Sd8::from(val) 41 | } 42 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/src/crate_doc.md: -------------------------------------------------------------------------------- 1 | Algorithms for generating triangle meshes. 2 | 3 | - height maps 4 | - signed distance fields 5 | - voxel occupancy grids 6 | 7 | All of the algorithms are designed to be used with a `ChunkTree`, such that each chunk will have its own mesh. In order to 8 | update the mesh for a chunk, you must copy not only the chunk, but also some adjacent points, into an array before running 9 | the meshing algorithm. 10 | 11 | An example of updating chunk meshes for a height map is shown below. The same general pattern applies to all meshing 12 | algorithms, where you: 13 | 14 | 1. get the desired chunk extent 15 | 1. pad the extent for a particular meshing algorithm 16 | 1. copy that extent into an array 17 | 1. mesh that array 18 | 19 | ```rust 20 | use building_blocks_core::prelude::*; 21 | use building_blocks_storage::prelude::*; 22 | use building_blocks_mesh::*; 23 | 24 | use std::collections::HashSet; 25 | 26 | let chunk_shape = PointN([16; 2]); 27 | let builder = ChunkTreeBuilder2x1::new(ChunkTreeConfig { chunk_shape, ambient_value: 0.0, root_lod: 0 }); 28 | let mut map = builder.build_with_hash_map_storage(); 29 | 30 | // ...mutate one or more of the chunks... 31 | 32 | let mutated_chunk_keys = [PointN([0; 2]), PointN([16; 2])]; 33 | 34 | // For each mutated chunk, and any adjacent chunk, the mesh will need to be updated. 35 | let mut chunk_keys_to_update: HashSet = HashSet::new(); 36 | let offsets = Point2i::moore_offsets(); 37 | for chunk_key in mutated_chunk_keys.into_iter() { 38 | chunk_keys_to_update.insert(*chunk_key); 39 | for offset in offsets.iter() { 40 | chunk_keys_to_update.insert(*chunk_key + *offset * chunk_shape); 41 | } 42 | } 43 | 44 | // Now we generate mesh vertices for each chunk. 45 | for chunk_key in chunk_keys_to_update.into_iter() { 46 | // It's crucial that we pad the chunk so we have access to adjacent points during meshing. 47 | let padded_chunk_extent = padded_height_map_chunk_extent( 48 | &map.indexer.extent_for_chunk_with_min(chunk_key) 49 | ); 50 | let mut padded_chunk = Array2x1::fill(padded_chunk_extent, 0.0); 51 | copy_extent(&padded_chunk_extent, &map.lod_view(0), &mut padded_chunk); 52 | 53 | let mut hm_buffer = HeightMapMeshBuffer::default(); 54 | triangulate_height_map(&padded_chunk, &padded_chunk_extent, &mut hm_buffer); 55 | // Do something with the mesh output... 
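    // For example (a minimal sketch; `upload_chunk_mesh` is a hypothetical function and we
    // assume the buffer exposes its `PosNormMesh` as `hm_buffer.mesh`), you might skip empty
    // chunks and hand the vertex data to your renderer:
    //
    //     if !hm_buffer.mesh.is_empty() {
    //         upload_chunk_mesh(chunk_key, &hm_buffer.mesh);
    //     }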
56 | } 57 | ``` 58 | 59 | All of the meshing algorithms are generic enough to work with an array wrapped in a `TransformMap`. 60 | 61 | ```rust 62 | # use building_blocks_core::prelude::*; 63 | # use building_blocks_storage::prelude::*; 64 | # use building_blocks_mesh::*; 65 | # 66 | struct OtherHeight(f32); 67 | 68 | impl Height for OtherHeight { 69 | fn height(&self) -> f32 { self.0 } 70 | } 71 | 72 | let extent = Extent2i::from_min_and_shape(PointN([0; 2]), PointN([50; 2])); 73 | let array = Array2x1::fill(extent, 0.0); 74 | let tfm_array = TransformMap::new(&array, |h: f32| OtherHeight(h)); 75 | let mut hm_buffer = HeightMapMeshBuffer::default(); 76 | triangulate_height_map(&tfm_array, &extent, &mut hm_buffer); 77 | ``` 78 | -------------------------------------------------------------------------------- /crates/building_blocks_mesh/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::type_complexity, 3 | clippy::needless_collect, 4 | clippy::too_many_arguments 5 | )] 6 | #![deny( 7 | rust_2018_compatibility, 8 | rust_2018_idioms, 9 | nonstandard_style, 10 | unused, 11 | future_incompatible 12 | )] 13 | #![warn(clippy::doc_markdown)] 14 | #![doc = include_str!("crate_doc.md")] 15 | 16 | mod greedy_quads; 17 | mod height_map; 18 | mod quad; 19 | mod surface_nets; 20 | 21 | pub use greedy_quads::*; 22 | pub use height_map::*; 23 | pub use quad::*; 24 | pub use surface_nets::*; 25 | 26 | use std::convert::TryInto; 27 | 28 | #[derive(Clone, Default)] 29 | pub struct PosNormMesh { 30 | pub positions: Vec<[f32; 3]>, 31 | /// Surface normal vectors. Not guaranteed to be normalized. 32 | pub normals: Vec<[f32; 3]>, 33 | /// All of the triangles in the mesh, wound counter-clockwise (right-hand rule). 34 | pub indices: Vec, 35 | } 36 | 37 | impl PosNormMesh { 38 | pub fn is_empty(&self) -> bool { 39 | self.indices.is_empty() 40 | } 41 | 42 | pub fn clear(&mut self) { 43 | self.positions.clear(); 44 | self.normals.clear(); 45 | self.indices.clear(); 46 | } 47 | 48 | pub fn append(&mut self, other: &mut Self) { 49 | let n: u32 = self.positions.len().try_into().unwrap(); 50 | 51 | self.positions.append(&mut other.positions); 52 | self.normals.append(&mut other.normals); 53 | 54 | self.indices.extend(other.indices.drain(..).map(|i| n + i)); 55 | } 56 | 57 | /// Create a new mesh with equivalent triangles such that no vertex is shared by any two triangles. 58 | /// 59 | /// Also computes a normal for each triangle using the cross product. The pre-existing normals are not used. 
60 | pub fn process_for_flat_shading(&self) -> PosNormMesh { 61 | let indices_len = self.indices.len(); 62 | let mut mesh = PosNormMesh { 63 | positions: Vec::with_capacity(indices_len), 64 | normals: Vec::with_capacity(indices_len), 65 | indices: Vec::new(), 66 | }; 67 | 68 | for triangle_i in self.indices.chunks(3) { 69 | let p1 = self.positions[triangle_i[0] as usize]; 70 | let p2 = self.positions[triangle_i[1] as usize]; 71 | let p3 = self.positions[triangle_i[2] as usize]; 72 | 73 | let u = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]; 74 | let v = [p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]; 75 | 76 | let n = [ 77 | u[1] * v[2] - u[2] * v[1], 78 | u[2] * v[0] - u[0] * v[2], 79 | u[0] * v[1] - u[1] * v[0], 80 | ]; 81 | 82 | mesh.positions.push(p1); 83 | mesh.positions.push(p2); 84 | mesh.positions.push(p3); 85 | 86 | mesh.normals.push(n); 87 | mesh.normals.push(n); 88 | mesh.normals.push(n); 89 | } 90 | 91 | mesh 92 | } 93 | } 94 | 95 | #[derive(Clone, Default)] 96 | pub struct PosNormTexMesh { 97 | pub positions: Vec<[f32; 3]>, 98 | /// Surface normal vectors. Not guaranteed to be normalized. 99 | pub normals: Vec<[f32; 3]>, 100 | /// Texture coordinates, AKA UVs. 101 | pub tex_coords: Vec<[f32; 2]>, 102 | /// All of the triangles in the mesh, wound counter-clockwise (right-hand rule). 103 | pub indices: Vec, 104 | } 105 | 106 | impl PosNormTexMesh { 107 | pub fn is_empty(&self) -> bool { 108 | self.indices.is_empty() 109 | } 110 | 111 | pub fn clear(&mut self) { 112 | self.positions.clear(); 113 | self.normals.clear(); 114 | self.tex_coords.clear(); 115 | self.indices.clear(); 116 | } 117 | } 118 | 119 | pub trait IsOpaque { 120 | /// Returns `true` if light cannot pass through this voxel. 121 | fn is_opaque(&self) -> bool; 122 | } 123 | -------------------------------------------------------------------------------- /crates/building_blocks_search/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "building_blocks_search" 3 | version = "0.7.0" 4 | edition = "2018" 5 | authors = ["Duncan "] 6 | license = "MIT" 7 | repository = "https://github.com/bonsairobo/building-blocks" 8 | keywords = ["voxel"] 9 | 10 | description = "Search algorithms for voxel data." 11 | 12 | [package.metadata.docs.rs] 13 | all-features = true 14 | 15 | [features] 16 | default = [] 17 | 18 | ncollide = ["nalgebra", "ncollide3d", "building_blocks_core/nalgebra"] 19 | 20 | [dependencies] 21 | indexmap = "1.5" 22 | pathfinding = "2.1" 23 | 24 | # Optional, feature-gated 25 | nalgebra = { version = "0.28", optional = true } 26 | ncollide3d = { version = "0.31", optional = true } 27 | 28 | building_blocks_core = { path = "../building_blocks_core", version = "0.7.0", default-features = false } 29 | building_blocks_storage = { path = "../building_blocks_storage", version = "0.7.0", default-features = false } 30 | 31 | [dev-dependencies] 32 | criterion = "0.3" 33 | # Common code for tests and examples. 
34 | utilities = { path = "../utilities" } 35 | 36 | [[bench]] 37 | name = "find_surface" 38 | harness = false 39 | 40 | [[bench]] 41 | name = "flood_fill" 42 | harness = false 43 | -------------------------------------------------------------------------------- /crates/building_blocks_search/benches/find_surface.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_search::find_surface_points; 2 | use building_blocks_storage::IsEmpty; 3 | 4 | use utilities::data_sets::sphere_bit_array; 5 | 6 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 7 | 8 | fn sphere_surface(c: &mut Criterion) { 9 | let mut group = c.benchmark_group("sphere_surface"); 10 | for array_edge_length in [8, 16, 32].iter() { 11 | group.bench_with_input( 12 | BenchmarkId::from_parameter(array_edge_length), 13 | array_edge_length, 14 | |b, &array_edge_length| { 15 | b.iter_with_setup( 16 | || sphere_bit_array(array_edge_length, Voxel(true), Voxel(false)).0, 17 | |map| find_surface_points(&map, &map.extent().padded(-1)), 18 | ); 19 | }, 20 | ); 21 | } 22 | group.finish(); 23 | } 24 | 25 | criterion_group!(benches, sphere_surface); 26 | criterion_main!(benches); 27 | 28 | #[derive(Clone, Copy)] 29 | struct Voxel(bool); 30 | 31 | impl IsEmpty for Voxel { 32 | fn is_empty(&self) -> bool { 33 | !self.0 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /crates/building_blocks_search/benches/flood_fill.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_search::von_neumann_flood_fill3; 3 | use building_blocks_storage::prelude::*; 4 | 5 | use utilities::data_sets::sphere_bit_array; 6 | 7 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 8 | 9 | fn flood_fill_sphere(c: &mut Criterion) { 10 | let background_color = Color(0); 11 | let old_color = Color(1); 12 | let new_color = Color(2); 13 | 14 | let mut group = c.benchmark_group("flood_fill_sphere"); 15 | for array_edge_length in [16, 32, 64].iter() { 16 | group.bench_with_input( 17 | BenchmarkId::from_parameter(array_edge_length), 18 | array_edge_length, 19 | |b, &array_edge_length| { 20 | b.iter_with_setup( 21 | || { 22 | ( 23 | sphere_bit_array(array_edge_length, old_color, background_color).0, 24 | Point3i::ZERO, 25 | ) 26 | }, 27 | |(mut map, seed)| { 28 | let extent = *map.extent(); 29 | let visitor = |p: Point3i| { 30 | if map.get(p) != old_color { 31 | return false; 32 | } 33 | 34 | *map.get_mut(p) = new_color; 35 | 36 | true 37 | }; 38 | von_neumann_flood_fill3(extent, seed, visitor); 39 | }, 40 | ); 41 | }, 42 | ); 43 | } 44 | group.finish(); 45 | } 46 | 47 | criterion_group!(benches, flood_fill_sphere); 48 | criterion_main!(benches); 49 | 50 | #[derive(Clone, Copy, Eq, PartialEq)] 51 | struct Color(u8); 52 | -------------------------------------------------------------------------------- /crates/building_blocks_search/src/collision.rs: -------------------------------------------------------------------------------- 1 | pub mod ball; 2 | pub mod ray; 3 | 4 | pub use ball::*; 5 | pub use ray::*; 6 | 7 | use building_blocks_core::prelude::*; 8 | 9 | use nalgebra as na; 10 | 11 | /// The result of a collision query against an `OctreeDbvt`. 12 | #[derive(Clone, Debug)] 13 | pub struct VoxelImpact { 14 | /// The voxel point. 15 | pub point: Point3i, 16 | /// The impact type, which depends on the query. 
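    /// For example, ray casts use `ncollide3d`'s `RayIntersection<f32>` here (see `VoxelRayImpact` in `collision/ray.rs`).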
17 | pub impact: I, 18 | } 19 | 20 | fn impact_with_leaf_octant( 21 | octant: &Octant, 22 | contact: &na::Point3, 23 | octant_normal: &na::Vector3, 24 | ) -> Point3i { 25 | if octant.edge_length() == 1 { 26 | octant.minimum() 27 | } else { 28 | // Octant is not a single voxel, so we need to calculate which voxel in the 29 | // octant was hit. 30 | // 31 | // Maybe converting the intersection coordinates to integers will not always 32 | // land in the correct voxel. It should help to nudge the point along the 33 | // intersection normal by some amount less than 1.0. 34 | const NUDGE_AMOUNT: f32 = 0.25; 35 | let nudged_p = contact - NUDGE_AMOUNT * octant_normal; 36 | 37 | Point3f::from(nudged_p).in_voxel() 38 | } 39 | } 40 | 41 | #[cfg(test)] 42 | mod test_util { 43 | use crate::OctreeDbvt; 44 | 45 | use building_blocks_core::prelude::*; 46 | use building_blocks_storage::prelude::*; 47 | 48 | pub fn bvt_with_voxels_filled(fill_points: &[Point3i]) -> OctreeDbvt { 49 | let extent = Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(16)); 50 | let mut voxels = Array3x1::fill(extent, Voxel(false)); 51 | for &p in fill_points.iter() { 52 | *voxels.get_mut(p) = Voxel(true); 53 | } 54 | 55 | let octree = OctreeSet::from_array3(&voxels, *voxels.extent()); 56 | let mut bvt = OctreeDbvt::default(); 57 | let key = 0; // unimportant 58 | bvt.insert(key, octree); 59 | 60 | bvt 61 | } 62 | 63 | pub fn bvt_with_all_voxels_filled() -> OctreeDbvt { 64 | let extent = Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(16)); 65 | let voxels = Array3x1::fill(extent, Voxel(true)); 66 | 67 | let octree = OctreeSet::from_array3(&voxels, *voxels.extent()); 68 | let mut bvt = OctreeDbvt::default(); 69 | let key = 0; // unimportant 70 | bvt.insert(key, octree); 71 | 72 | bvt 73 | } 74 | 75 | #[derive(Clone)] 76 | pub struct Voxel(bool); 77 | 78 | impl IsEmpty for Voxel { 79 | fn is_empty(&self) -> bool { 80 | !self.0 81 | } 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /crates/building_blocks_search/src/collision/ray.rs: -------------------------------------------------------------------------------- 1 | use super::impact_with_leaf_octant; 2 | 3 | use crate::{OctreeDbvt, OctreeDbvtVisitor, VoxelImpact}; 4 | 5 | use building_blocks_core::prelude::*; 6 | use building_blocks_storage::prelude::VisitStatus; 7 | 8 | use core::hash::Hash; 9 | use nalgebra::Isometry3; 10 | use ncollide3d::{ 11 | bounding_volume::AABB, 12 | query::{Ray, RayCast, RayIntersection}, 13 | }; 14 | 15 | /// The impact of a ray with an `OctreeDbvt`. 16 | pub type VoxelRayImpact = VoxelImpact>; 17 | 18 | /// Casts a ray and returns the coordinates of the first voxel that intersects the ray. Voxels are modeled as axis-aligned 19 | /// bounding boxes (AABBs). 20 | /// 21 | /// `ray.dir` is the velocity vector of the ray, and any collisions that would occur after `max_toi` will not be considered. 22 | /// 23 | /// `predicate` can be used to filter voxels by returning `false`. 
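/// Returns `None` when no voxel is hit within `max_toi`, or when every hit voxel is rejected by `predicate`.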
24 | pub fn cast_ray_at_voxels( 25 | octree: &OctreeDbvt, 26 | ray: Ray, 27 | max_toi: f32, 28 | predicate: impl Fn(Point3i) -> bool, 29 | ) -> Option 30 | where 31 | K: Eq + Hash, 32 | { 33 | let mut visitor = VoxelRayCast::new(ray, max_toi, predicate); 34 | octree.visit(&mut visitor); 35 | 36 | visitor.earliest_impact 37 | } 38 | 39 | struct VoxelRayCast { 40 | earliest_impact: Option>>, 41 | ray: Ray, 42 | max_toi: f32, 43 | predicate: F, 44 | } 45 | 46 | impl VoxelRayCast { 47 | fn new(ray: Ray, max_toi: f32, predicate: F) -> Self { 48 | Self { 49 | earliest_impact: None, 50 | ray, 51 | max_toi, 52 | predicate, 53 | } 54 | } 55 | 56 | fn earliest_toi(&self) -> f32 { 57 | self.earliest_impact 58 | .as_ref() 59 | .map(|i| i.impact.toi) 60 | .unwrap_or(std::f32::INFINITY) 61 | } 62 | } 63 | 64 | impl OctreeDbvtVisitor for VoxelRayCast 65 | where 66 | F: Fn(Point3i) -> bool, 67 | { 68 | fn visit(&mut self, aabb: &AABB, octant: Option<&Octant>, is_leaf: bool) -> VisitStatus { 69 | let solid = true; 70 | if let Some(toi) = aabb.toi_with_ray(&Isometry3::identity(), &self.ray, self.max_toi, solid) 71 | { 72 | if toi < self.earliest_toi() { 73 | if is_leaf { 74 | // This calculation is more expensive than just TOI, so we only do it for leaves. 75 | let impact = aabb 76 | .toi_and_normal_with_ray( 77 | &Isometry3::identity(), 78 | &self.ray, 79 | self.max_toi, 80 | true, 81 | ) 82 | .unwrap(); 83 | 84 | let octant = octant.expect("All leaves are octants"); 85 | let point = impact_with_leaf_octant( 86 | octant, 87 | &self.ray.point_at(impact.toi), 88 | &impact.normal, 89 | ); 90 | 91 | if (self.predicate)(point) { 92 | self.earliest_impact = Some(VoxelImpact { point, impact }); 93 | } 94 | } 95 | 96 | VisitStatus::Continue 97 | } else { 98 | // The TOI with any voxels in this octant can't be earliest. 99 | VisitStatus::Stop 100 | } 101 | } else { 102 | // There's no impact with any voxels in this octant. 103 | VisitStatus::Stop 104 | } 105 | } 106 | } 107 | 108 | // ████████╗███████╗███████╗████████╗ 109 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 110 | // ██║ █████╗ ███████╗ ██║ 111 | // ██║ ██╔══╝ ╚════██║ ██║ 112 | // ██║ ███████╗███████║ ██║ 113 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 114 | 115 | #[cfg(test)] 116 | mod tests { 117 | use super::*; 118 | use crate::collision::test_util::*; 119 | 120 | use nalgebra as na; 121 | 122 | #[test] 123 | fn raycast_hits_expected_voxel() { 124 | let bvt = bvt_with_voxels_filled(&[PointN([0, 0, 0]), PointN([0, 15, 0])]); 125 | 126 | // Cast rays at the corners. 127 | 128 | let start = na::Point3::new(-1.0, -1.0, -1.0); 129 | 130 | let ray = Ray::new(start, na::Point3::new(0.5, 0.5, 0.5) - start); 131 | let result = cast_ray_at_voxels(&bvt, ray, std::f32::INFINITY, |_| true).unwrap(); 132 | assert_eq!(result.point, PointN([0, 0, 0])); 133 | 134 | let ray = Ray::new(start, na::Point3::new(0.0, 15.5, 0.0) - start); 135 | let result = cast_ray_at_voxels(&bvt, ray, std::f32::INFINITY, |_| true).unwrap(); 136 | assert_eq!(result.point, PointN([0, 15, 0])); 137 | 138 | // Cast into the middle where we shouldn't hit anything. 
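        // Only (0, 0, 0) and (0, 15, 0) are filled above, so a ray aimed at (0.0, 3.0, 0.0) passes through empty space.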
139 | 140 | let ray = Ray::new(start, na::Point3::new(0.0, 3.0, 0.0) - start); 141 | let result = cast_ray_at_voxels(&bvt, ray, std::f32::INFINITY, |_| true); 142 | assert!(result.is_none()); 143 | } 144 | 145 | #[test] 146 | fn raycast_hits_expected_voxel_for_collapsed_leaf() { 147 | let bvt = bvt_with_all_voxels_filled(); 148 | 149 | let start = na::Point3::new(-1.0, -1.0, -1.0); 150 | let ray = Ray::new(start, na::Point3::new(0.5, 0.5, 0.5) - start); 151 | let result = cast_ray_at_voxels(&bvt, ray, std::f32::INFINITY, |_| true).unwrap(); 152 | assert_eq!(result.point, PointN([0, 0, 0])); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /crates/building_blocks_search/src/find_surface.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::{dev_prelude::*, IsEmpty}; 3 | 4 | /// Returns the "surface points" i.e. those points that are non-empty and Von-Neumann-adjacent to an empty point. Since this 5 | /// algorithm does adjacency checks for all points in `extent`, you must ensure that those points are within the bounds of 6 | /// `map`. 7 | pub fn find_surface_points( 8 | map: &Map, 9 | extent: &ExtentN, 10 | ) -> (Vec>, Vec) 11 | where 12 | Map: IndexedArray 13 | + ForEach, Stride), Item = T> 14 | + GetUnchecked, 15 | T: IsEmpty, 16 | PointN: IntegerPoint, 17 | ExtentN: std::fmt::Debug, 18 | Local: Copy, 19 | { 20 | assert!( 21 | extent.is_subset_of(map.extent()), 22 | "{:?} does not contain {:?}; would cause access out-of-bounds", 23 | map.extent(), 24 | extent 25 | ); 26 | 27 | // Precompute the strides for adjacency checks. 28 | let vn_offsets = Local::localize_points_slice(&PointN::von_neumann_offsets()); 29 | let mut vn_strides = vec![Stride(0); vn_offsets.len()]; 30 | map.strides_from_local_points(&vn_offsets, &mut vn_strides); 31 | 32 | let mut surface_points = Vec::new(); 33 | let mut surface_strides = Vec::new(); 34 | map.for_each(extent, |(p, s), value| { 35 | if value.is_empty() { 36 | return; 37 | } 38 | 39 | for vn_stride in vn_strides.iter() { 40 | if unsafe { map.get_unchecked(s + *vn_stride).is_empty() } { 41 | surface_points.push(p); 42 | surface_strides.push(s); 43 | break; 44 | } 45 | } 46 | }); 47 | 48 | (surface_points, surface_strides) 49 | } 50 | 51 | // ████████╗███████╗███████╗████████╗███████╗ 52 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝██╔════╝ 53 | // ██║ █████╗ ███████╗ ██║ ███████╗ 54 | // ██║ ██╔══╝ ╚════██║ ██║ ╚════██║ 55 | // ██║ ███████╗███████║ ██║ ███████║ 56 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚══════╝ 57 | 58 | #[cfg(test)] 59 | mod test { 60 | use super::*; 61 | 62 | use core::hash::Hash; 63 | use std::collections::HashSet; 64 | use std::fmt::Debug; 65 | use std::iter::FromIterator; 66 | 67 | #[derive(Clone)] 68 | struct Voxel(bool); 69 | 70 | impl IsEmpty for Voxel { 71 | fn is_empty(&self) -> bool { 72 | !self.0 73 | } 74 | } 75 | 76 | #[test] 77 | fn find_surface_points_cube_side_length_3() { 78 | let mut map = Array3x1::fill( 79 | Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(5)), 80 | Voxel(false), 81 | ); 82 | 83 | let solid_extent = Extent3i::from_min_and_shape(Point3i::fill(1), Point3i::fill(3)); 84 | map.for_each_mut(&solid_extent, |_s: Stride, value| *value = Voxel(true)); 85 | 86 | // Also set one point on the boundary for an edge case, since it can't be considered, as not 87 | // all of its neighbors exist. 
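        // This boundary voxel lies outside `solid_extent`, so the query below never visits it and it should not
        // appear in the results.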
88 | *map.get_mut(Point3i::ZERO) = Voxel(true); 89 | 90 | let (surface_points, _surface_strides) = find_surface_points(&map, &solid_extent); 91 | 92 | // Should exclude the center point. 93 | let center = Point3i::fill(2); 94 | let expected_surface_points = solid_extent 95 | .iter_points() 96 | .filter(|p| *p != center) 97 | .collect(); 98 | assert_elements_eq(&surface_points, &expected_surface_points); 99 | } 100 | 101 | fn assert_elements_eq(v1: &Vec, v2: &Vec) { 102 | let set1: HashSet = HashSet::from_iter(v1.iter().cloned()); 103 | let set2: HashSet = HashSet::from_iter(v2.iter().cloned()); 104 | assert_eq!(set1, set2); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /crates/building_blocks_search/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::type_complexity, 3 | clippy::needless_collect, 4 | clippy::too_many_arguments 5 | )] 6 | #![deny( 7 | rust_2018_compatibility, 8 | rust_2018_idioms, 9 | nonstandard_style, 10 | unused, 11 | future_incompatible 12 | )] 13 | #![warn(clippy::doc_markdown)] 14 | 15 | mod find_surface; 16 | mod flood_fill; 17 | mod grid_ray_traversal; 18 | mod pathfinding; 19 | 20 | pub use self::pathfinding::*; 21 | pub use find_surface::*; 22 | pub use flood_fill::*; 23 | pub use grid_ray_traversal::*; 24 | 25 | #[cfg(feature = "ncollide")] 26 | pub mod collision; 27 | #[cfg(feature = "ncollide")] 28 | pub use collision::*; 29 | 30 | #[cfg(feature = "ncollide")] 31 | pub mod octree_dbvt; 32 | #[cfg(feature = "ncollide")] 33 | pub use octree_dbvt::*; 34 | 35 | #[cfg(feature = "ncollide")] 36 | pub use ncollide3d; 37 | -------------------------------------------------------------------------------- /crates/building_blocks_search/src/octree_dbvt.rs: -------------------------------------------------------------------------------- 1 | //! The analog of `ncollide3d::DBVT` for voxel octrees. 2 | //! 3 | //! This structure works well in tandem with a `ChunkTree3`, where an `Octree` can be generated from a chunk and subsequently 4 | //! placed into the `OctreeDbvt`. 5 | 6 | use building_blocks_core::prelude::*; 7 | use building_blocks_storage::dev_prelude::{ 8 | OctreeNode, OctreeSet, OctreeVisitor, SmallKeyHashMap, VisitStatus, 9 | }; 10 | 11 | use core::hash::Hash; 12 | use ncollide3d::{ 13 | bounding_volume::AABB, 14 | partitioning::{self as nc_part, DBVTLeaf, DBVTLeafId, BVH, DBVT}, 15 | }; 16 | 17 | /// An ncollide `DBVT` containing `OctreeSet`s. This turns the bounded `OctreeSet` into an unbounded acceleration structure. You 18 | /// may use whatever key type `K` to uniquely identify the octrees. 19 | pub struct OctreeDbvt { 20 | dbvt: DBVT>, 21 | leaf_ids: SmallKeyHashMap, 22 | } 23 | 24 | impl Default for OctreeDbvt { 25 | fn default() -> Self { 26 | Self { 27 | dbvt: DBVT::new(), 28 | leaf_ids: Default::default(), 29 | } 30 | } 31 | } 32 | 33 | impl OctreeDbvt 34 | where 35 | K: Eq + Hash, 36 | { 37 | /// Inserts the octree, replacing any old octree at `key` and returning it. 38 | pub fn insert(&mut self, key: K, octree: OctreeSet) -> Option { 39 | let aabb = octant_aabb(&octree.octant()); 40 | let new_leaf_id = self.dbvt.insert(DBVTLeaf::new(aabb, octree)); 41 | 42 | self.leaf_ids 43 | .insert(key, new_leaf_id) 44 | .map(|old_leaf_id| self.dbvt.remove(old_leaf_id).data) 45 | } 46 | 47 | /// Remove the octree at `key`. 
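    // A minimal usage sketch for this structure (illustrative; the key, extent, and `Voxel` type are arbitrary choices):
    //
    //     let extent = Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(16));
    //     let voxels = Array3x1::fill(extent, Voxel(true)); // any `T: IsEmpty`
    //     let octree = OctreeSet::from_array3(&voxels, *voxels.extent());
    //     let mut dbvt = OctreeDbvt::default();
    //     dbvt.insert("chunk_0", octree);
    //     // Collision queries then go through `dbvt.visit(&mut some_visitor)`.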
48 | pub fn remove(&mut self, key: &K) -> Option { 49 | self.leaf_ids 50 | .remove(key) 51 | .map(|leaf_id| self.dbvt.remove(leaf_id).data) 52 | } 53 | 54 | /// Get a reference to the `OctreeSet` at `key`. 55 | pub fn get(&self, key: &K) -> Option<&OctreeSet> { 56 | self.leaf_ids 57 | .get(key) 58 | .and_then(|leaf_id| self.dbvt.get(*leaf_id).map(|leaf| &leaf.data)) 59 | } 60 | 61 | /// Returns `true` iff there is an `OctreeSet` for `key`. 62 | pub fn contains_key(&self, key: &K) -> bool { 63 | self.leaf_ids.contains_key(key) 64 | } 65 | 66 | /// Visit every bounding volume (AABB) in the DBVT. This is a heterogeneous tree, meaning that not all nodes have the same 67 | /// representation. Upper nodes simply store a bounding volume (AABB), while octree nodes will provide both a bounding 68 | /// volume and an `Octant`, which is completely full for leaf nodes. 69 | pub fn visit(&self, visitor: &mut impl OctreeDbvtVisitor) { 70 | self.dbvt.visit(&mut DbvtVisitorImpl(visitor)); 71 | } 72 | } 73 | 74 | struct DbvtVisitorImpl<'a, V>(&'a mut V); 75 | 76 | impl<'a, V> OctreeVisitor for DbvtVisitorImpl<'a, V> 77 | where 78 | V: OctreeDbvtVisitor, 79 | { 80 | fn visit_octant(&mut self, node: &OctreeNode) -> VisitStatus { 81 | let aabb = octant_aabb(node.octant()); 82 | 83 | self.0.visit(&aabb, Some(node.octant()), node.is_full()) 84 | } 85 | } 86 | 87 | impl<'a, V> nc_part::Visitor> for DbvtVisitorImpl<'a, V> 88 | where 89 | V: OctreeDbvtVisitor, 90 | { 91 | fn visit(&mut self, aabb: &AABB, octree: Option<&OctreeSet>) -> nc_part::VisitStatus { 92 | let status = if let Some(octree) = octree { 93 | octree.visit_branches_and_fat_leaves_in_preorder(self) 94 | } else { 95 | self.0.visit(aabb, None, false) 96 | }; 97 | 98 | match status { 99 | VisitStatus::Continue => nc_part::VisitStatus::Continue, 100 | VisitStatus::Stop => nc_part::VisitStatus::Stop, 101 | VisitStatus::ExitEarly => nc_part::VisitStatus::ExitEarly, 102 | } 103 | } 104 | } 105 | 106 | pub trait OctreeDbvtVisitor { 107 | /// `octant` is only `Some` when traversing an `Octree`. Otherwise, you are traversing an upper-level internal node. 108 | fn visit(&mut self, aabb: &AABB, octant: Option<&Octant>, is_full: bool) -> VisitStatus; 109 | } 110 | 111 | /// Returns the axis-aligned bounding box that bounds `octant`. 112 | pub fn octant_aabb(octant: &Octant) -> AABB { 113 | let aabb_min = Point3f::from(octant.minimum()).into(); 114 | let aabb_max = Point3f::from(octant.minimum() + Point3i::fill(octant.edge_length())).into(); 115 | 116 | AABB::new(aabb_min, aabb_max) 117 | } 118 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "building_blocks_storage" 3 | version = "0.7.0" 4 | edition = "2018" 5 | authors = ["Duncan "] 6 | license = "MIT" 7 | repository = "https://github.com/bonsairobo/building-blocks" 8 | keywords = ["voxel", "mesh"] 9 | 10 | description = "Efficient storage for maps on sparse or dense, 2D and 3D integer lattices." 
11 | 12 | [package.metadata.docs.rs] 13 | all-features = true 14 | 15 | [dependencies] 16 | ahash = { version = "0.7", features = ["serde"] } 17 | auto_impl = "0.4" 18 | bytemuck = "1.7" 19 | either = "1.6" 20 | float-ord = "0.3" 21 | futures = "0.3" 22 | slab = "0.4" 23 | thread_local = "1.1" 24 | 25 | building_blocks_core = { path = "../building_blocks_core", version = "0.7.0", default-features = false } 26 | 27 | # Optional, feature-gated. 28 | bincode = { version = "1.3", optional = true } 29 | serde = { version = "1.0", features = ["derive"], optional = true } 30 | dot_vox = { version = "4.1", optional = true } 31 | image = { version = "0.23", optional = true } 32 | lz4 = { version = "1.23", optional = true } 33 | sled = { git = "https://github.com/spacejam/sled", rev = "a0d51f2", optional = true } 34 | sled-snapshots = { git = "https://github.com/bonsairobo/sled-snapshots", rev = "45445c35", optional = true } 35 | snap = { version = "1.0", optional = true } 36 | tracing = { version = "0.1", features = ["release_max_level_info"], optional = true } 37 | vox-format = { version = "0.1", optional = true } 38 | 39 | [dev-dependencies] 40 | criterion = "0.3" 41 | pretty_assertions = "0.7" 42 | rand = "0.8" 43 | 44 | # Common code for tests and examples. 45 | utilities = { path = "../utilities" } 46 | 47 | [features] 48 | default = [] 49 | # This can be renamed to `vox-format`, once `namespaces-features`[1] is stabilized. 50 | # [1] https://github.com/rust-lang/cargo/issues/5565 51 | vox-format-1 = ["vox-format", "building_blocks_core/vox-format"] 52 | 53 | [[bench]] 54 | name = "accessors" 55 | harness = false 56 | 57 | [[bench]] 58 | name = "sampling" 59 | harness = false 60 | 61 | [[bench]] 62 | name = "compression" 63 | harness = false 64 | required-features = ["bincode", "lz4", "snap", "serde"] 65 | 66 | [[bench]] 67 | name = "octree_set" 68 | harness = false 69 | 70 | [[bench]] 71 | name = "database" 72 | harness = false 73 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/benches/compression.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::prelude::*; 3 | 4 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 5 | 6 | fn decompress_array_with_bincode_lz4(c: &mut Criterion) { 7 | let mut group = c.benchmark_group("decompress_array_with_bincode_lz4"); 8 | for size in ARRAY_SIZES.iter() { 9 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 10 | b.iter_with_setup( 11 | || BincodeCompression::new(Lz4 { level: 10 }).compress(&set_up_array(size)), 12 | |compressed_array| { 13 | compressed_array.decompress(); 14 | }, 15 | ); 16 | }); 17 | } 18 | group.finish(); 19 | } 20 | 21 | fn decompress_array_with_fast_lz4(c: &mut Criterion) { 22 | let mut group = c.benchmark_group("decompress_array_with_fast_lz4"); 23 | for size in ARRAY_SIZES.iter() { 24 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 25 | b.iter_with_setup( 26 | || { 27 | FastArrayCompressionNx1::from_bytes_compression(Lz4 { level: 10 }) 28 | .compress(&set_up_array(size)) 29 | }, 30 | |compressed_array| { 31 | compressed_array.decompress(); 32 | }, 33 | ); 34 | }); 35 | } 36 | group.finish(); 37 | } 38 | 39 | fn compress_array_with_fast_lz4(c: &mut Criterion) { 40 | let mut group = c.benchmark_group("compress_array_with_fast_lz4"); 41 | for size in ARRAY_SIZES.iter() { 42 | 
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 43 | b.iter_with_setup( 44 | || set_up_array(size), 45 | |array| { 46 | FastArrayCompressionNx1::from_bytes_compression(Lz4 { level: 10 }) 47 | .compress(&array) 48 | }, 49 | ); 50 | }); 51 | } 52 | group.finish(); 53 | } 54 | 55 | fn decompress_array_with_bincode_snappy(c: &mut Criterion) { 56 | let mut group = c.benchmark_group("decompress_array_with_bincode_snappy"); 57 | for size in ARRAY_SIZES.iter() { 58 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 59 | b.iter_with_setup( 60 | || BincodeCompression::new(Snappy).compress(&set_up_array(size)), 61 | |compressed_array| { 62 | compressed_array.decompress(); 63 | }, 64 | ); 65 | }); 66 | } 67 | group.finish(); 68 | } 69 | 70 | fn decompress_array_with_fast_snappy(c: &mut Criterion) { 71 | let mut group = c.benchmark_group("decompress_array_with_fast_snappy"); 72 | for size in ARRAY_SIZES.iter() { 73 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 74 | b.iter_with_setup( 75 | || { 76 | FastArrayCompressionNx1::from_bytes_compression(Snappy) 77 | .compress(&set_up_array(size)) 78 | }, 79 | |compressed_array| { 80 | compressed_array.decompress(); 81 | }, 82 | ); 83 | }); 84 | } 85 | group.finish(); 86 | } 87 | 88 | criterion_group!( 89 | lz4_benches, 90 | decompress_array_with_fast_lz4, 91 | compress_array_with_fast_lz4, 92 | decompress_array_with_bincode_lz4 93 | ); 94 | criterion_group!( 95 | snappy_benches, 96 | decompress_array_with_bincode_snappy, 97 | decompress_array_with_fast_snappy, 98 | ); 99 | criterion_main!(lz4_benches, snappy_benches); 100 | 101 | const ARRAY_SIZES: [i32; 3] = [16, 32, 64]; 102 | 103 | fn set_up_array(size: i32) -> Array3x1 { 104 | let array_extent = Extent3::from_min_and_shape(Point3i::ZERO, Point3i::fill(size)); 105 | 106 | // Might be tough to compress this. 
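    // The sum of the three modulo terms only takes values 0..=6 and repeats with period 3 along each axis, so the
    // data is periodic but not constant.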
107 | Array3x1::fill_with(array_extent, |p: Point3i| p.x() % 3 + p.y() % 3 + p.z() % 3) 108 | } 109 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/benches/database.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::{ 3 | access_traits::*, 4 | database::{ChunkDb3, Delta, ReadableChunkDb}, 5 | prelude::{ 6 | ChunkKey, ChunkTreeBuilder, ChunkTreeBuilder3x1, ChunkTreeConfig, FastArrayCompressionNx1, 7 | FromBytesCompression, Lz4, 8 | }, 9 | }; 10 | 11 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 12 | 13 | fn db_read_all_chunks(c: &mut Criterion) { 14 | let mut group = c.benchmark_group("db_read_all_chunks"); 15 | 16 | for map_chunks in [1, 2, 4, 8].iter() { 17 | group.bench_with_input( 18 | BenchmarkId::from_parameter(map_chunks), 19 | map_chunks, 20 | |b, &map_chunks| { 21 | b.iter_with_setup( 22 | || { 23 | let chunk_exponent = 4; 24 | let chunk_shape = Point3i::fill(1 << chunk_exponent); 25 | 26 | let builder = ChunkTreeBuilder3x1::new(ChunkTreeConfig { 27 | chunk_shape, 28 | ambient_value: 1, 29 | root_lod: 0, 30 | }); 31 | let mut map = builder.build_with_hash_map_storage(); 32 | 33 | let map_extent = 34 | Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(map_chunks)) 35 | * chunk_shape; 36 | map.lod_view_mut(0) 37 | .for_each_mut(&map_extent, |p: Point3i, d| { 38 | *d = p.x() % 3 + p.y() % 3 + p.z() % 3 39 | }); 40 | 41 | let db = sled::Config::default().temporary(true).open().unwrap(); 42 | let tree = db.open_tree("test").unwrap(); 43 | let chunk_db = ChunkDb3::new_with_compression( 44 | tree, 45 | FastArrayCompressionNx1::from_bytes_compression(Lz4 { level: 10 }), 46 | ); 47 | 48 | let mut batch = chunk_db.start_delta_batch(); 49 | futures::executor::block_on( 50 | batch.add_and_compress_deltas( 51 | map.take_storages().pop().unwrap().into_iter().filter_map( 52 | |(k, v)| { 53 | v.user_chunk.map(|u| Delta::Insert(ChunkKey::new(0, k), u)) 54 | }, 55 | ), 56 | ), 57 | ); 58 | 59 | chunk_db.apply_deltas(batch.build()).unwrap(); 60 | futures::executor::block_on(chunk_db.flush()).unwrap(); 61 | 62 | chunk_db 63 | }, 64 | |chunk_db| { 65 | let _result = chunk_db.read_all_chunks::<[i32; 3]>(0).unwrap(); 66 | }, 67 | ); 68 | }, 69 | ); 70 | } 71 | group.finish(); 72 | } 73 | 74 | criterion_group!(benches, db_read_all_chunks); 75 | criterion_main!(benches); 76 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/benches/octree_set.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::{ 3 | octree_set::{OctreeNode, OctreeSet, VisitStatus}, 4 | prelude::*, 5 | IsEmpty, 6 | }; 7 | use utilities::data_sets::sphere_bit_array; 8 | 9 | use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; 10 | 11 | fn octree_from_array3_sphere(c: &mut Criterion) { 12 | let mut group = c.benchmark_group("octree_from_array3_sphere"); 13 | for power in [4, 5, 6].iter() { 14 | let edge_len = 1 << *power; 15 | group.bench_with_input( 16 | BenchmarkId::from_parameter(edge_len), 17 | &edge_len, 18 | |b, &edge_len| { 19 | b.iter_with_setup( 20 | || sphere_bit_array(edge_len, Voxel(true), Voxel(false)).0, 21 | |map| OctreeSet::from_array3(&map, *map.extent()), 22 | ); 23 | }, 24 | ); 25 | } 26 | group.finish(); 27 | } 
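// The benchmark below builds the octree from a completely solid array, which the octree can presumably represent with
// collapsed "full" octants rather than one node per voxel (compare the collapsed-leaf ray cast test in `collision/ray.rs`).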
28 | 29 | fn octree_from_array3_full(c: &mut Criterion) { 30 | let mut group = c.benchmark_group("octree_from_array3_full"); 31 | for power in [4, 5, 6].iter() { 32 | let edge_len = 1 << *power; 33 | group.bench_with_input( 34 | BenchmarkId::from_parameter(edge_len), 35 | &edge_len, 36 | |b, &edge_len| { 37 | b.iter_with_setup( 38 | || { 39 | Array3x1::fill( 40 | Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(edge_len)), 41 | Voxel(true), 42 | ) 43 | }, 44 | |map| OctreeSet::from_array3(&map, *map.extent()), 45 | ); 46 | }, 47 | ); 48 | } 49 | group.finish(); 50 | } 51 | 52 | fn octree_visit_branches_and_fat_leaves_of_sphere(c: &mut Criterion) { 53 | let mut group = c.benchmark_group("octree_visit_branches_and_fat_leaves_of_sphere"); 54 | for power in [4, 5, 6].iter() { 55 | let edge_len = 1 << *power; 56 | group.bench_with_input( 57 | BenchmarkId::from_parameter(edge_len), 58 | &edge_len, 59 | |b, &edge_len| { 60 | b.iter_with_setup( 61 | || { 62 | let map = sphere_bit_array(edge_len, Voxel(true), Voxel(false)).0; 63 | 64 | OctreeSet::from_array3(&map, *map.extent()) 65 | }, 66 | |octree| { 67 | octree.visit_branches_and_fat_leaves_in_preorder( 68 | &mut |node: &OctreeNode| { 69 | black_box(node); 70 | 71 | VisitStatus::Continue 72 | }, 73 | ) 74 | }, 75 | ); 76 | }, 77 | ); 78 | } 79 | group.finish(); 80 | } 81 | 82 | fn octree_visit_branch_and_leaf_nodes_of_sphere(c: &mut Criterion) { 83 | let mut group = c.benchmark_group("octree_visit_branch_and_leaf_nodes_of_sphere"); 84 | for power in [4, 5, 6].iter() { 85 | let edge_len = 1 << *power; 86 | group.bench_with_input( 87 | BenchmarkId::from_parameter(edge_len), 88 | &edge_len, 89 | |b, &edge_len| { 90 | b.iter_with_setup( 91 | || { 92 | let map = sphere_bit_array(edge_len, Voxel(true), Voxel(false)).0; 93 | 94 | OctreeSet::from_array3(&map, *map.extent()) 95 | }, 96 | |octree| { 97 | let mut queue = vec![octree.root_node()]; 98 | while !queue.is_empty() { 99 | if let Some(node) = queue.pop().unwrap() { 100 | black_box(&node); 101 | if !node.is_full() { 102 | for octant in 0..8 { 103 | queue.push(octree.get_child(&node, octant)); 104 | } 105 | } 106 | } 107 | } 108 | }, 109 | ); 110 | }, 111 | ); 112 | } 113 | group.finish(); 114 | } 115 | 116 | criterion_group!( 117 | benches, 118 | octree_from_array3_sphere, 119 | octree_from_array3_full, 120 | octree_visit_branches_and_fat_leaves_of_sphere, 121 | octree_visit_branch_and_leaf_nodes_of_sphere 122 | ); 123 | criterion_main!(benches); 124 | 125 | #[derive(Clone, Copy)] 126 | struct Voxel(bool); 127 | 128 | impl IsEmpty for Voxel { 129 | fn is_empty(&self) -> bool { 130 | !self.0 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/benches/sampling.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::prelude::{ 3 | Array3x1, ChunkDownsampler, ChunkTreeBuilder, ChunkTreeBuilder3x1, ChunkTreeConfig, FillExtent, 4 | Local, PointDownsampler, Sd8, SdfMeanDownsampler, 5 | }; 6 | 7 | use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; 8 | 9 | fn point_downsample3(c: &mut Criterion) { 10 | let mut group = c.benchmark_group("point_downsample3"); 11 | for size in CHUNK_SIZES.iter() { 12 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 13 | b.iter_with_setup( 14 | || { 15 | let chunk_shape = Point3i::fill(size); 16 | let extent = 
Extent3i::from_min_and_shape(Point3i::ZERO, chunk_shape); 17 | let src = Array3x1::fill(extent, 1); 18 | let dst = Array3x1::fill(extent, 0); 19 | 20 | (src, dst, chunk_shape) 21 | }, 22 | |(src, mut dst, chunk_shape)| { 23 | PointDownsampler.downsample(&src, &mut dst, Local(chunk_shape / 2)); 24 | black_box(dst); 25 | }, 26 | ); 27 | }); 28 | } 29 | group.finish(); 30 | } 31 | 32 | fn sdf_mean_downsample3(c: &mut Criterion) { 33 | let mut group = c.benchmark_group("sdf_mean_downsample3"); 34 | for size in CHUNK_SIZES.iter() { 35 | group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { 36 | b.iter_with_setup( 37 | || { 38 | let chunk_shape = Point3i::fill(size); 39 | let extent = Extent3i::from_min_and_shape(Point3i::ZERO, chunk_shape); 40 | let src = Array3x1::fill(extent, Sd8::ONE); 41 | let dst = Array3x1::fill(extent, Sd8(0)); 42 | 43 | (src, dst, chunk_shape) 44 | }, 45 | |(src, mut dst, chunk_shape)| { 46 | SdfMeanDownsampler.downsample(&src, &mut dst, Local(chunk_shape / 2)); 47 | black_box(dst); 48 | }, 49 | ); 50 | }); 51 | } 52 | group.finish(); 53 | } 54 | 55 | fn sdf_mean_downsample_chunk_map(c: &mut Criterion) { 56 | let mut group = c.benchmark_group("sdf_mean_downsample_chunk_map"); 57 | 58 | for map_chunks in [1, 2, 4, 8].iter() { 59 | group.bench_with_input( 60 | BenchmarkId::from_parameter(map_chunks), 61 | map_chunks, 62 | |b, &map_chunks| { 63 | b.iter_with_setup( 64 | || { 65 | let chunk_exponent = 4; 66 | let chunk_shape = Point3i::fill(1 << chunk_exponent); 67 | 68 | let builder = ChunkTreeBuilder3x1::new(ChunkTreeConfig { 69 | chunk_shape, 70 | ambient_value: Sd8::ONE, 71 | root_lod: 5, 72 | }); 73 | let mut map = builder.build_with_hash_map_storage(); 74 | 75 | let map_extent = 76 | Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(map_chunks)) 77 | * chunk_shape; 78 | map.lod_view_mut(0).fill_extent(&map_extent, Sd8::NEG_ONE); 79 | 80 | (map, map_extent) 81 | }, 82 | |(mut map, map_extent)| { 83 | map.downsample_extent_into_self(&SdfMeanDownsampler, 0, 5, map_extent) 84 | }, 85 | ); 86 | }, 87 | ); 88 | } 89 | group.finish(); 90 | } 91 | 92 | criterion_group!( 93 | benches, 94 | point_downsample3, 95 | sdf_mean_downsample3, 96 | sdf_mean_downsample_chunk_map 97 | ); 98 | criterion_main!(benches); 99 | 100 | const CHUNK_SIZES: [i32; 3] = [16, 32, 64]; 101 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/channels.rs: -------------------------------------------------------------------------------- 1 | pub mod channel; 2 | pub mod compression; 3 | pub mod multichannel; 4 | 5 | pub use channel::*; 6 | pub use compression::*; 7 | pub use multichannel::*; 8 | 9 | use crate::multi_ptr::MultiMutPtr; 10 | 11 | /// Implemented by any tuple of `Channel`s to indicate the types of data being stored. 12 | pub trait Channels { 13 | type Data; 14 | type Ptr: MultiMutPtr; 15 | type UninitSelf: UninitChannels; 16 | } 17 | 18 | /// Converts a tuple of channels into a tuple of slices. 19 | pub trait Slices<'a> { 20 | type Target; 21 | 22 | fn slices(&'a self) -> Self::Target; 23 | } 24 | 25 | /// Converts a tuple of channels into a tuple of mutable slices. 
26 | pub trait SlicesMut<'a> { 27 | type Target; 28 | 29 | fn slices_mut(&'a mut self) -> Self::Target; 30 | } 31 | 32 | pub trait CopySlices<'a> { 33 | type Src; 34 | 35 | fn copy_slices(&mut self, src: Self::Src); 36 | } 37 | 38 | /// Converts a tuple of channels that own their data into a tuple of channels that borrow their data. 39 | pub trait BorrowChannels<'a> { 40 | type Borrowed; 41 | 42 | fn borrow(&'a self) -> Self::Borrowed; 43 | } 44 | 45 | /// Converts a tuple of channels that own their data into a tuple of channels that mutably borrow their data. 46 | pub trait BorrowChannelsMut<'a> { 47 | type Borrowed; 48 | 49 | fn borrow_mut(&'a mut self) -> Self::Borrowed; 50 | } 51 | 52 | pub trait ResetChannels: Channels { 53 | fn reset_values(&mut self, value: Self::Data); 54 | } 55 | 56 | pub trait FillChannels: Channels { 57 | fn fill(length: usize, value: Self::Data) -> Self; 58 | } 59 | 60 | pub trait UninitChannels: Channels { 61 | type InitSelf; 62 | 63 | /// # Safety 64 | /// Elements should not be read until they are initialized. 65 | unsafe fn maybe_uninit(size: usize) -> Self; 66 | 67 | /// # Safety 68 | /// All elements of the channel must be initialized. 69 | unsafe fn assume_init(self) -> Self::InitSelf; 70 | } 71 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/channels/compression.rs: -------------------------------------------------------------------------------- 1 | use crate::dev_prelude::{BytesCompression, Channel, Compression, FromBytesCompression}; 2 | 3 | use bytemuck::{bytes_of, bytes_of_mut, cast_slice, cast_slice_mut, Pod}; 4 | use std::{io, mem::MaybeUninit}; 5 | 6 | /// Compresses a tuple of `Channel`s into a tuple of `FastCompressedChannel`s. 7 | pub struct FastChannelsCompression { 8 | bytes_compression: By, 9 | marker: std::marker::PhantomData, 10 | } 11 | 12 | impl Clone for FastChannelsCompression 13 | where 14 | By: Clone, 15 | { 16 | fn clone(&self) -> Self { 17 | Self { 18 | bytes_compression: self.bytes_compression.clone(), 19 | marker: Default::default(), 20 | } 21 | } 22 | } 23 | 24 | impl Copy for FastChannelsCompression where By: Copy {} 25 | 26 | impl FastChannelsCompression { 27 | pub fn new(bytes_compression: By) -> Self { 28 | Self { 29 | bytes_compression, 30 | marker: Default::default(), 31 | } 32 | } 33 | 34 | pub fn bytes_compression(&self) -> &By { 35 | &self.bytes_compression 36 | } 37 | } 38 | 39 | impl FromBytesCompression for FastChannelsCompression { 40 | fn from_bytes_compression(bytes_compression: By) -> Self { 41 | Self::new(bytes_compression) 42 | } 43 | } 44 | 45 | impl Compression for FastChannelsCompression> 46 | where 47 | By: BytesCompression, 48 | T: Pod, 49 | Store: AsRef<[T]>, 50 | { 51 | type Data = Channel; 52 | 53 | // Compress the map using some `B: BytesCompression`. 54 | // 55 | // WARNING: For performance, this reinterprets the inner vector as a byte slice without accounting for endianness. This is 56 | // not compatible across platforms. 57 | fn compress_to_writer( 58 | &self, 59 | data: &Self::Data, 60 | mut compressed_bytes: impl io::Write, 61 | ) -> io::Result<()> { 62 | // Start with the number of values in the channel so we can allocate that up front during decompression. 63 | compressed_bytes.write_all(bytes_of(&data.store().len()))?; 64 | 65 | // Compress the values. 
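        // Resulting stream layout: the native-endian `usize` length header written above, followed by the compressed
        // payload. `decompress_from_reader` below reads the two parts back in the same order.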
66 | self.bytes_compression 67 | .compress_bytes(cast_slice(data.store()), compressed_bytes) 68 | } 69 | 70 | fn decompress_from_reader(mut compressed_bytes: impl io::Read) -> io::Result { 71 | // Extract the number of values in the original channel. 72 | let mut num_values = 0usize; 73 | compressed_bytes.read_exact(bytes_of_mut(&mut num_values))?; 74 | 75 | // Allocate the vector with element type T so the alignment is correct. T: Pod implies T: Zeroable so this is sound. 76 | let mut decompressed_values: Vec = 77 | vec![unsafe { MaybeUninit::zeroed().assume_init() }; num_values]; 78 | 79 | // Decompress the values by consuming the rest of the bytes. 80 | By::decompress_bytes( 81 | compressed_bytes, 82 | cast_slice_mut(decompressed_values.as_mut_slice()), 83 | )?; 84 | 85 | Ok(Channel::new(decompressed_values.into_boxed_slice())) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/coords.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::num::Zero; 2 | use building_blocks_core::prelude::{ConstZero, PointN}; 3 | use core::ops::{Add, AddAssign, Deref, Mul, Sub, SubAssign}; 4 | 5 | /// Map-local coordinates. 6 | /// 7 | /// Most commonly, you will index a lattice map with a `PointN`, which is assumed to be in global 8 | /// coordinates. `Local` only applies to lattice maps where a point must first be translated from 9 | /// global coordinates into map-local coordinates before indexing with `Get>`. 10 | #[derive(Debug, Eq, PartialEq)] 11 | pub struct Local(pub PointN); 12 | 13 | /// Map-local coordinates, wrapping a `Point2i`. 14 | pub type Local2i = Local<[i32; 2]>; 15 | /// Map-local coordinates, wrapping a `Point3i`. 16 | pub type Local3i = Local<[i32; 3]>; 17 | 18 | impl Clone for Local 19 | where 20 | PointN: Clone, 21 | { 22 | fn clone(&self) -> Self { 23 | Local(self.0.clone()) 24 | } 25 | } 26 | impl Copy for Local where PointN: Copy {} 27 | 28 | impl Local { 29 | /// Wraps all of the `points` using the `Local` constructor. 30 | #[inline] 31 | pub fn localize_points_slice(points: &[PointN]) -> Vec> 32 | where 33 | PointN: Clone, 34 | { 35 | points.iter().cloned().map(Local).collect() 36 | } 37 | 38 | /// Wraps all of the `points` using the `Local` constructor. 39 | #[inline] 40 | pub fn localize_points_array(points: &[PointN; LEN]) -> [Local; LEN] 41 | where 42 | PointN: ConstZero, 43 | { 44 | let mut locals = [Local(PointN::ZERO); LEN]; 45 | for (l, p) in locals.iter_mut().zip(points.iter()) { 46 | *l = Local(*p); 47 | } 48 | 49 | locals 50 | } 51 | } 52 | 53 | impl Deref for Local { 54 | type Target = PointN; 55 | 56 | #[inline] 57 | fn deref(&self) -> &Self::Target { 58 | &self.0 59 | } 60 | } 61 | 62 | /// The most efficient coordinates for slice-backed lattice maps. A single number that translates directly to a slice offset. 63 | #[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)] 64 | pub struct Stride(pub usize); 65 | 66 | impl Zero for Stride { 67 | #[inline] 68 | fn zero() -> Self { 69 | Stride(0) 70 | } 71 | 72 | #[inline] 73 | fn is_zero(&self) -> bool { 74 | self.0 == 0 75 | } 76 | } 77 | 78 | impl Add for Stride { 79 | type Output = Self; 80 | 81 | #[inline] 82 | fn add(self, rhs: Self) -> Self::Output { 83 | // Wraps for negative point offsets. 
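        // (A negative local offset is stored as a large two's-complement `usize`, so wrapping addition lands on the
        // intended smaller index and avoids debug-mode overflow panics.)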
84 | Self(self.0.wrapping_add(rhs.0)) 85 | } 86 | } 87 | 88 | impl Sub for Stride { 89 | type Output = Self; 90 | 91 | #[inline] 92 | fn sub(self, rhs: Self) -> Self::Output { 93 | // Wraps for negative point offsets. 94 | Self(self.0.wrapping_sub(rhs.0)) 95 | } 96 | } 97 | 98 | impl Mul for Stride { 99 | type Output = Self; 100 | 101 | #[inline] 102 | fn mul(self, rhs: usize) -> Self::Output { 103 | Self(self.0.wrapping_mul(rhs)) 104 | } 105 | } 106 | 107 | impl AddAssign for Stride { 108 | #[inline] 109 | fn add_assign(&mut self, rhs: Self) { 110 | *self = *self + rhs; 111 | } 112 | } 113 | 114 | impl SubAssign for Stride { 115 | #[inline] 116 | fn sub_assign(&mut self, rhs: Self) { 117 | *self = *self - rhs; 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/dot_vox_conversions.rs: -------------------------------------------------------------------------------- 1 | use crate::{dot_vox_conversions::VoxColor, prelude::*}; 2 | 3 | pub use dot_vox; 4 | 5 | use building_blocks_core::prelude::*; 6 | 7 | use dot_vox::*; 8 | 9 | impl Array3x1 { 10 | pub fn decode_vox(vox_data: &DotVoxData, model_index: usize) -> Self { 11 | let Model { 12 | size: Size { x, y, z }, 13 | voxels, 14 | } = &vox_data.models[model_index]; 15 | let shape = PointN([*x as i32, *y as i32, *z as i32]); 16 | let extent = Extent3i::from_min_and_shape(PointN([0, 0, 0]), shape); 17 | let mut map = Array3x1::fill(extent, VoxColor::Empty); 18 | for Voxel { x, y, z, i } in voxels.iter() { 19 | let point = PointN([*x as i32, *y as i32, *z as i32]); 20 | *map.get_mut(point) = VoxColor::Color(*i); 21 | } 22 | 23 | map 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod for_each2; 3 | #[macro_use] 4 | mod for_each3; 5 | 6 | mod lock_step; 7 | mod single_array; 8 | mod stride_iter; 9 | 10 | pub use lock_step::*; 11 | pub use single_array::*; 12 | 13 | pub(crate) use for_each2::*; 14 | pub(crate) use for_each3::*; 15 | pub(crate) use stride_iter::*; 16 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each/for_each2.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | 3 | /// Steps a generic 2D iterator `iter` through some "extent," for some interpretation of an extent determined by the iterator. 4 | /// 5 | /// The visitor `f` will see every point in `extent`, as well as whatever coordinates `iter` would like to associated with 6 | /// that point. 7 | #[inline] 8 | pub fn for_each2(mut iter: I, extent: &Extent2i, mut f: impl FnMut(Point2i, I::Coords)) 9 | where 10 | I: Iter2, 11 | { 12 | let min = extent.minimum; 13 | let lub = extent.least_upper_bound(); 14 | iter.start_y(); 15 | for y in min.y()..lub.y() { 16 | iter.start_x(); 17 | for x in min.x()..lub.x() { 18 | f(PointN([x, y]), iter.coords()); 19 | iter.incr_x(); 20 | } 21 | iter.incr_y(); 22 | } 23 | } 24 | 25 | pub trait Iter2 { 26 | type Coords; 27 | 28 | fn coords(&self) -> Self::Coords; 29 | 30 | fn start_y(&mut self); 31 | fn start_x(&mut self); 32 | 33 | fn incr_x(&mut self); 34 | fn incr_y(&mut self); 35 | } 36 | 37 | macro_rules! 
impl_iter2_for_tuple { 38 | ( $( $var:ident : $t:ident ),+ ) => { 39 | impl<$($t),+> Iter2 for ($($t,)+) 40 | where 41 | $($t: Iter2),+ 42 | { 43 | type Coords = ($($t::Coords,)+); 44 | 45 | #[inline] 46 | fn coords(&self) -> Self::Coords { 47 | let ($($var,)+) = self; 48 | 49 | ($($var.coords(),)+) 50 | } 51 | 52 | #[inline] 53 | fn start_y(&mut self) { 54 | let ($($var,)+) = self; 55 | $( $var.start_y(); )+ 56 | } 57 | #[inline] 58 | fn start_x(&mut self) { 59 | let ($($var,)+) = self; 60 | $( $var.start_x(); )+ 61 | } 62 | 63 | #[inline] 64 | fn incr_x(&mut self) { 65 | let ($($var,)+) = self; 66 | $( $var.incr_x(); )+ 67 | } 68 | #[inline] 69 | fn incr_y(&mut self) { 70 | let ($($var,)+) = self; 71 | $( $var.incr_y(); )+ 72 | } 73 | } 74 | }; 75 | } 76 | 77 | impl_iter2_for_tuple! { a: A } 78 | impl_iter2_for_tuple! { a: A, b: B } 79 | impl_iter2_for_tuple! { a: A, b: B, c: C } 80 | impl_iter2_for_tuple! { a: A, b: B, c: C, d: D } 81 | impl_iter2_for_tuple! { a: A, b: B, c: C, d: D, e: E } 82 | impl_iter2_for_tuple! { a: A, b: B, c: C, d: D, e: E, f: F } 83 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each/for_each3.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | 3 | /// Steps a generic 3D iterator `iter` through some "extent," for some interpretation of an extent determined by the iterator. 4 | /// 5 | /// The visitor `f` will see every point in `extent`, as well as whatever coordinates `iter` would like to associated with 6 | /// that point. 7 | #[inline] 8 | pub fn for_each3(mut iter: I, extent: &Extent3i, mut f: impl FnMut(Point3i, I::Coords)) 9 | where 10 | I: Iter3, 11 | { 12 | let min = extent.minimum; 13 | let lub = extent.least_upper_bound(); 14 | iter.start_z(); 15 | for z in min.z()..lub.z() { 16 | iter.start_y(); 17 | for y in min.y()..lub.y() { 18 | iter.start_x(); 19 | for x in min.x()..lub.x() { 20 | f(PointN([x, y, z]), iter.coords()); 21 | iter.incr_x(); 22 | } 23 | iter.incr_y(); 24 | } 25 | iter.incr_z(); 26 | } 27 | } 28 | 29 | pub trait Iter3 { 30 | type Coords; 31 | 32 | fn coords(&self) -> Self::Coords; 33 | 34 | fn start_z(&mut self); 35 | fn start_y(&mut self); 36 | fn start_x(&mut self); 37 | 38 | fn incr_x(&mut self); 39 | fn incr_y(&mut self); 40 | fn incr_z(&mut self); 41 | } 42 | 43 | macro_rules! impl_iter3_for_tuple { 44 | ( $( $var:ident : $t:ident ),+ ) => { 45 | impl<$($t),+> Iter3 for ($($t,)+) 46 | where 47 | $($t: Iter3),+ 48 | { 49 | type Coords = ($($t::Coords,)+); 50 | 51 | #[inline] 52 | fn coords(&self) -> Self::Coords { 53 | let ($($var,)+) = self; 54 | 55 | ($($var.coords(),)+) 56 | } 57 | 58 | #[inline] 59 | fn start_z(&mut self) { 60 | let ($($var,)+) = self; 61 | $( $var.start_z(); )+ 62 | } 63 | #[inline] 64 | fn start_y(&mut self) { 65 | let ($($var,)+) = self; 66 | $( $var.start_y(); )+ 67 | } 68 | #[inline] 69 | fn start_x(&mut self) { 70 | let ($($var,)+) = self; 71 | $( $var.start_x(); )+ 72 | } 73 | 74 | #[inline] 75 | fn incr_x(&mut self) { 76 | let ($($var,)+) = self; 77 | $( $var.incr_x(); )+ 78 | } 79 | #[inline] 80 | fn incr_y(&mut self) { 81 | let ($($var,)+) = self; 82 | $( $var.incr_y(); )+ 83 | } 84 | #[inline] 85 | fn incr_z(&mut self) { 86 | let ($($var,)+) = self; 87 | $( $var.incr_z(); )+ 88 | } 89 | } 90 | }; 91 | } 92 | 93 | impl_iter3_for_tuple! { a: A } 94 | impl_iter3_for_tuple! { a: A, b: B } 95 | impl_iter3_for_tuple! 
{ a: A, b: B, c: C } 96 | impl_iter3_for_tuple! { a: A, b: B, c: C, d: D } 97 | impl_iter3_for_tuple! { a: A, b: B, c: C, d: D, e: E } 98 | impl_iter3_for_tuple! { a: A, b: B, c: C, d: D, e: E, f: F } 99 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each/lock_step.rs: -------------------------------------------------------------------------------- 1 | use crate::array::{ArrayIndexer, ArrayStrideIter, Local}; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | /// All information required to do strided iteration over two arrays in lock step. 6 | /// 7 | /// This means that the same extent will be iterated for both arrays, but each array may interpret that extent differently. For 8 | /// example, one array might have a different step size or local origin than the other, causing it to visit different points 9 | /// than the actual `iter_extent`. For this reason, `iter_extent` should only be used as a shared reference point. 10 | #[derive(Clone)] 11 | pub struct LockStepArrayForEach { 12 | pub(crate) iter_extent: ExtentN, 13 | pub(crate) iter1: ArrayStrideIter, 14 | pub(crate) iter2: ArrayStrideIter, 15 | } 16 | 17 | pub type LockStepArrayForEach2 = LockStepArrayForEach<[i32; 2]>; 18 | pub type LockStepArrayForEach3 = LockStepArrayForEach<[i32; 3]>; 19 | 20 | impl LockStepArrayForEach 21 | where 22 | N: ArrayIndexer, 23 | PointN: IntegerPoint, 24 | { 25 | pub fn new(iter_extent: ExtentN, iter1: ArrayStrideIter, iter2: ArrayStrideIter) -> Self { 26 | Self { 27 | iter_extent, 28 | iter1, 29 | iter2, 30 | } 31 | } 32 | 33 | pub fn new_global_unchecked( 34 | iter_extent: ExtentN, 35 | array1_extent: ExtentN, 36 | array2_extent: ExtentN, 37 | ) -> Self { 38 | // Translate to local coordinates. 39 | let origin1 = iter_extent.minimum - array1_extent.minimum; 40 | let origin2 = iter_extent.minimum - array2_extent.minimum; 41 | 42 | let iter1 = N::make_stride_iter(array1_extent.shape, Local(origin1), PointN::ONES); 43 | let iter2 = N::make_stride_iter(array2_extent.shape, Local(origin2), PointN::ONES); 44 | 45 | Self::new(iter_extent, iter1, iter2) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each/single_array.rs: -------------------------------------------------------------------------------- 1 | use crate::array::{ArrayIndexer, ArrayStrideIter, Local, Stride}; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | /// All information required to do strided iteration over an extent of a single array. 6 | #[derive(Clone)] 7 | pub struct ArrayForEach { 8 | pub(crate) iter_extent: ExtentN, 9 | pub(crate) iter: ArrayStrideIter, 10 | } 11 | 12 | /// A 2D `ArrayForEach`. 13 | pub type Array2ForEach = ArrayForEach<[i32; 2]>; 14 | /// A 3D `ArrayForEach`. 15 | pub type Array3ForEach = ArrayForEach<[i32; 3]>; 16 | 17 | impl ArrayForEach 18 | where 19 | N: ArrayIndexer, 20 | PointN: IntegerPoint, 21 | { 22 | #[inline] 23 | pub fn new_local_unchecked( 24 | array_shape: PointN, 25 | origin: Local, 26 | iter_extent: ExtentN, 27 | ) -> Self { 28 | Self { 29 | iter_extent, 30 | iter: N::make_stride_iter(array_shape, origin, PointN::ONES), 31 | } 32 | } 33 | 34 | #[inline] 35 | pub fn new_local(array_shape: PointN, iter_extent: ExtentN) -> Self { 36 | // Make sure we don't index out of array bounds. 
37 | let iter_extent = 38 | iter_extent.intersection(&ExtentN::from_min_and_shape(PointN::ZERO, array_shape)); 39 | 40 | Self::new_local_unchecked(array_shape, Local(iter_extent.minimum), iter_extent) 41 | } 42 | 43 | #[inline] 44 | pub fn new_global_unchecked(array_extent: ExtentN, iter_extent: ExtentN) -> Self { 45 | // Translate to local coordinates. 46 | let origin = Local(iter_extent.minimum - array_extent.minimum); 47 | 48 | Self { 49 | iter_extent, 50 | iter: N::make_stride_iter(array_extent.shape, origin, PointN::ONES), 51 | } 52 | } 53 | 54 | #[inline] 55 | pub fn new_global(array_extent: ExtentN, iter_extent: ExtentN) -> Self { 56 | // Make sure we don't index out of array bounds. 57 | let iter_extent = iter_extent.intersection(&array_extent); 58 | 59 | Self::new_global_unchecked(array_extent, iter_extent) 60 | } 61 | } 62 | 63 | impl ArrayForEach 64 | where 65 | N: ArrayIndexer, 66 | { 67 | pub fn for_each(self, f: impl FnMut(PointN, Stride)) { 68 | N::for_each(self, f) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/for_each/stride_iter.rs: -------------------------------------------------------------------------------- 1 | use crate::array::{Iter2, Iter3, Local2i, Local3i, Stride}; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | /// Used for both 2D and 3D strided iteration. (Z only used for 3D). 6 | #[derive(Clone)] 7 | pub struct ArrayStrideIter { 8 | x_stride: usize, 9 | y_stride: usize, 10 | z_stride: usize, 11 | x_start: usize, 12 | y_start: usize, 13 | z_start: usize, 14 | x_i: usize, 15 | y_i: usize, 16 | z_i: usize, 17 | } 18 | 19 | impl ArrayStrideIter { 20 | pub fn new_2d(array_shape: Point2i, origin: Local2i, step: Point2i) -> Self { 21 | debug_assert!(array_shape >= Point2i::ONES); 22 | debug_assert!(origin.0 >= Point2i::ZERO); 23 | debug_assert!(step >= Point2i::ONES); 24 | 25 | let mut x_stride = 1usize; 26 | let mut y_stride = array_shape.x() as usize; 27 | 28 | let x_start = x_stride * origin.0.x() as usize; 29 | let y_start = y_stride * origin.0.y() as usize; 30 | 31 | x_stride *= step.x() as usize; 32 | y_stride *= step.y() as usize; 33 | 34 | Self { 35 | x_stride, 36 | y_stride, 37 | z_stride: 0, 38 | x_start, 39 | y_start, 40 | z_start: 0, 41 | x_i: 0, 42 | y_i: 0, 43 | z_i: 0, 44 | } 45 | } 46 | 47 | pub fn new_3d(array_shape: Point3i, origin: Local3i, step: Point3i) -> Self { 48 | debug_assert!(array_shape >= Point3i::ONES); 49 | debug_assert!(origin.0 >= Point3i::ZERO); 50 | debug_assert!(step >= Point3i::ONES); 51 | 52 | let mut x_stride = 1usize; 53 | let mut y_stride = array_shape.x() as usize; 54 | let mut z_stride = (array_shape.y() * array_shape.x()) as usize; 55 | 56 | let x_start = x_stride * origin.0.x() as usize; 57 | let y_start = y_stride * origin.0.y() as usize; 58 | let z_start = z_stride * origin.0.z() as usize; 59 | 60 | x_stride *= step.x() as usize; 61 | y_stride *= step.y() as usize; 62 | z_stride *= step.z() as usize; 63 | 64 | Self { 65 | x_stride, 66 | y_stride, 67 | z_stride, 68 | x_start, 69 | y_start, 70 | z_start, 71 | x_i: 0, 72 | y_i: 0, 73 | z_i: 0, 74 | } 75 | } 76 | } 77 | 78 | impl Iter2 for ArrayStrideIter { 79 | type Coords = Stride; 80 | 81 | #[inline] 82 | fn coords(&self) -> Self::Coords { 83 | Stride(self.x_i) 84 | } 85 | 86 | #[inline] 87 | fn start_y(&mut self) { 88 | self.y_i = self.y_start; 89 | } 90 | #[inline] 91 | fn start_x(&mut self) { 92 | self.x_i = self.y_i + self.x_start; 93 | } 94 | 95 | #[inline] 96 | fn 
incr_x(&mut self) { 97 | self.x_i += self.x_stride; 98 | } 99 | #[inline] 100 | fn incr_y(&mut self) { 101 | self.y_i += self.y_stride; 102 | } 103 | } 104 | 105 | impl Iter3 for ArrayStrideIter { 106 | type Coords = Stride; 107 | 108 | #[inline] 109 | fn coords(&self) -> Self::Coords { 110 | Stride(self.x_i) 111 | } 112 | 113 | #[inline] 114 | fn start_z(&mut self) { 115 | self.z_i = self.z_start; 116 | } 117 | #[inline] 118 | fn start_y(&mut self) { 119 | self.y_i = self.z_i + self.y_start; 120 | } 121 | #[inline] 122 | fn start_x(&mut self) { 123 | self.x_i = self.y_i + self.x_start; 124 | } 125 | 126 | #[inline] 127 | fn incr_x(&mut self) { 128 | self.x_i += self.x_stride; 129 | } 130 | #[inline] 131 | fn incr_y(&mut self) { 132 | self.y_i += self.y_stride; 133 | } 134 | #[inline] 135 | fn incr_z(&mut self) { 136 | self.z_i += self.z_stride; 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/image_conversions.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::*; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | use core::mem::MaybeUninit; 6 | use image::GenericImageView; 7 | 8 | impl From<&Im> for Array2x1<::Pixel> 9 | where 10 | Im: GenericImageView, 11 | { 12 | fn from(image: &Im) -> Self { 13 | let shape = PointN([image.width() as i32, image.height() as i32]); 14 | let extent = Extent2i::from_min_and_shape(Point2i::ZERO, shape); 15 | let mut map: Array2x1::Pixel>> = 16 | unsafe { Array2x1::maybe_uninit(extent) }; 17 | for (x, y, pixel) in image.pixels() { 18 | let point = PointN([x as i32, y as i32]); 19 | unsafe { 20 | map.get_mut(Local(point)).as_mut_ptr().write(pixel); 21 | } 22 | } 23 | 24 | unsafe { map.assume_init() } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/array/indexer.rs: -------------------------------------------------------------------------------- 1 | use crate::array::{ 2 | for_each2, for_each3, Array2ForEach, Array3ForEach, ArrayForEach, ArrayStrideIter, Local, 3 | Local2i, Local3i, LockStepArrayForEach, LockStepArrayForEach2, LockStepArrayForEach3, Stride, 4 | }; 5 | 6 | use building_blocks_core::prelude::*; 7 | 8 | pub trait ArrayIndexer { 9 | fn stride_from_local_point(shape: PointN, point: Local) -> Stride; 10 | 11 | fn make_stride_iter( 12 | array_shape: PointN, 13 | origin: Local, 14 | step: PointN, 15 | ) -> ArrayStrideIter; 16 | 17 | fn for_each(for_each: ArrayForEach, f: impl FnMut(PointN, Stride)); 18 | 19 | fn for_each_lockstep_unchecked( 20 | for_each: LockStepArrayForEach, 21 | f: impl FnMut(PointN, (Stride, Stride)), 22 | ); 23 | 24 | #[inline] 25 | fn strides_from_local_points(shape: PointN, points: &[Local], strides: &mut [Stride]) 26 | where 27 | PointN: Copy, 28 | { 29 | for (i, p) in points.iter().enumerate() { 30 | strides[i] = Self::stride_from_local_point(shape, *p); 31 | } 32 | } 33 | } 34 | 35 | impl ArrayIndexer<[i32; 2]> for [i32; 2] { 36 | #[inline] 37 | fn stride_from_local_point(s: Point2i, p: Local2i) -> Stride { 38 | Stride((p.y() * s.x() + p.x()) as usize) 39 | } 40 | 41 | #[inline] 42 | fn make_stride_iter(array_shape: Point2i, origin: Local2i, step: Point2i) -> ArrayStrideIter { 43 | ArrayStrideIter::new_2d(array_shape, origin, step) 44 | } 45 | 46 | #[inline] 47 | fn for_each(for_each: Array2ForEach, f: impl FnMut(Point2i, Stride)) { 48 | let Array2ForEach { iter_extent, iter } = 
for_each; 49 | for_each2(iter, &iter_extent, f); 50 | } 51 | 52 | #[inline] 53 | fn for_each_lockstep_unchecked( 54 | for_each: LockStepArrayForEach2, 55 | f: impl FnMut(Point2i, (Stride, Stride)), 56 | ) { 57 | let LockStepArrayForEach2 { 58 | iter_extent, 59 | iter1, 60 | iter2, 61 | } = for_each; 62 | for_each2((iter1, iter2), &iter_extent, f); 63 | } 64 | } 65 | 66 | impl ArrayIndexer<[i32; 3]> for [i32; 3] { 67 | #[inline] 68 | fn stride_from_local_point(s: Point3i, p: Local3i) -> Stride { 69 | Stride((p.z() * s.y() * s.x() + p.y() * s.x() + p.x()) as usize) 70 | } 71 | 72 | #[inline] 73 | fn make_stride_iter(array_shape: Point3i, origin: Local3i, step: Point3i) -> ArrayStrideIter { 74 | ArrayStrideIter::new_3d(array_shape, origin, step) 75 | } 76 | 77 | #[inline] 78 | fn for_each(for_each: Array3ForEach, f: impl FnMut(Point3i, Stride)) { 79 | let Array3ForEach { iter_extent, iter } = for_each; 80 | for_each3(iter, &iter_extent, f); 81 | } 82 | 83 | #[inline] 84 | fn for_each_lockstep_unchecked( 85 | for_each: LockStepArrayForEach3, 86 | f: impl FnMut(Point3i, (Stride, Stride)), 87 | ) { 88 | let LockStepArrayForEach3 { 89 | iter_extent, 90 | iter1, 91 | iter2, 92 | } = for_each; 93 | for_each3((iter1, iter2), &iter_extent, f) 94 | } 95 | } 96 | 97 | /// When a lattice map implements `IndexedArray`, that means there is some underlying array with the location and shape dictated 98 | /// by the extent. 99 | /// 100 | /// For the sake of generic impls, if the same map also implements `Get*`, it must use the same data layout as `Array`. 101 | pub trait IndexedArray { 102 | type Indexer: ArrayIndexer; 103 | 104 | fn extent(&self) -> &ExtentN; 105 | 106 | #[inline] 107 | fn stride_from_local_point(&self, p: Local) -> Stride 108 | where 109 | PointN: Copy, 110 | { 111 | Self::Indexer::stride_from_local_point(self.extent().shape, p) 112 | } 113 | 114 | #[inline] 115 | fn strides_from_local_points(&self, points: &[Local], strides: &mut [Stride]) 116 | where 117 | PointN: Copy, 118 | { 119 | Self::Indexer::strides_from_local_points(self.extent().shape, points, strides) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/bitset.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU8, Ordering}; 2 | 3 | #[cfg(feature = "serde")] 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Clone, Copy, Debug, Default)] 7 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 8 | pub struct Bitset8 { 9 | pub bits: u8, 10 | } 11 | 12 | impl Bitset8 { 13 | #[inline] 14 | pub fn bit_is_set(&self, bit: u8) -> bool { 15 | self.bits & (1 << bit) != 0 16 | } 17 | 18 | #[inline] 19 | pub fn any(&self) -> bool { 20 | self.bits != 0 21 | } 22 | 23 | #[inline] 24 | pub fn none(&self) -> bool { 25 | self.bits == 0 26 | } 27 | 28 | #[inline] 29 | pub fn all(&self) -> bool { 30 | self.bits == 0xFF 31 | } 32 | 33 | #[inline] 34 | pub fn set_all(&mut self) { 35 | self.bits = 0xFF; 36 | } 37 | 38 | #[inline] 39 | pub fn set_bit(&mut self, bit: u8) { 40 | self.bits |= 1 << bit; 41 | } 42 | 43 | #[inline] 44 | pub fn unset_bit(&mut self, bit: u8) { 45 | self.bits &= !(1 << bit); 46 | } 47 | } 48 | 49 | #[derive(Debug, Default)] 50 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 51 | pub struct AtomicBitset8 { 52 | pub bits: AtomicU8, 53 | } 54 | 55 | impl Clone for AtomicBitset8 { 56 | fn clone(&self) -> Self { 57 | Self { 58 | bits: 
AtomicU8::new(self.load()), 59 | } 60 | } 61 | } 62 | 63 | // PERF: relax the memory ordering? 64 | impl AtomicBitset8 { 65 | #[inline] 66 | pub fn bit_is_set(&self, bit: u8) -> bool { 67 | self.load() & (1 << bit) != 0 68 | } 69 | 70 | #[inline] 71 | pub fn any(&self) -> bool { 72 | self.load() != 0 73 | } 74 | 75 | #[inline] 76 | pub fn all(&self) -> bool { 77 | self.load() == 0xFF 78 | } 79 | 80 | #[inline] 81 | pub fn set_bit(&self, bit: u8) { 82 | self.bits.fetch_or(1 << bit, Ordering::SeqCst); 83 | } 84 | 85 | #[inline] 86 | pub fn unset_bit(&self, bit: u8) { 87 | self.bits.fetch_and(!(1 << bit), Ordering::SeqCst); 88 | } 89 | 90 | #[inline] 91 | pub fn fetch_and_unset_bit(&self, bit: u8) -> bool { 92 | let mask = 1 << bit; 93 | self.bits.fetch_and(!mask, Ordering::SeqCst) & mask != 0 94 | } 95 | 96 | #[inline] 97 | pub fn fetch_and_set_bit(&self, bit: u8) -> bool { 98 | let mask = 1 << bit; 99 | self.bits.fetch_or(mask, Ordering::SeqCst) & mask != 0 100 | } 101 | 102 | fn load(&self) -> u8 { 103 | self.bits.load(Ordering::SeqCst) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/caching.rs: -------------------------------------------------------------------------------- 1 | mod local_cache; 2 | mod lru_cache; 3 | 4 | pub use local_cache::*; 5 | pub use lru_cache::*; 6 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/caching/local_cache.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | use std::cell::UnsafeCell; 3 | use std::collections::HashMap; 4 | use std::pin::Pin; 5 | 6 | /// A cache with a very specific niche. When reading from shared, two-tier storage, if you miss the cache and need to fetch from 7 | /// the cold tier, then you also need a place to store the fetched data. Rather than doing interior mutation of the storage, 8 | /// which requires synchronization, the fetched data can be stored in a thread-local cache, the `LocalCache`. 9 | /// 10 | /// # Safety 11 | /// 12 | /// We guarantee in these APIs that all references returned are valid until `LocalCache::drain_iter` is called, even as new 13 | /// values are added to the map. The invariants are: 14 | /// 1. Once a value is placed here, it will never get dropped or moved until calling `drain_iter` or `delete`. 15 | /// 2. Callers of `delete` must take precautions to ensure no one is borrowing the deleted data. 16 | /// 3. Returned references must be dropped before calling `drain_iter` (since it borrows self mutably). 17 | /// 4. The values are placed into `Pin>` so the memory address is guaranteed stable. 18 | pub struct LocalCache { 19 | store: UnsafeCell>, H>>, 20 | } 21 | 22 | impl Default for LocalCache 23 | where 24 | H: Default, 25 | { 26 | fn default() -> Self { 27 | Self::new() 28 | } 29 | } 30 | 31 | impl LocalCache 32 | where 33 | H: Default, 34 | { 35 | pub fn new() -> Self { 36 | LocalCache { 37 | store: UnsafeCell::new(HashMap::with_hasher(Default::default())), 38 | } 39 | } 40 | } 41 | 42 | impl LocalCache 43 | where 44 | K: Eq + Hash, 45 | H: Default + BuildHasher, 46 | { 47 | pub fn is_empty(&self) -> bool { 48 | self.len() == 0 49 | } 50 | 51 | pub fn len(&self) -> usize { 52 | let store = unsafe { &*self.store.get() }; 53 | store.len() 54 | } 55 | 56 | /// Fetch the value for `key`. 
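    // A minimal usage sketch (illustrative; `fetch_chunk` stands in for whatever cold-tier read you have, and the
    // type-parameter order `<K, V, H>` is as implied by the struct definition above):
    //
    //     use std::collections::hash_map::RandomState;
    //
    //     let cache: LocalCache<u64, String, RandomState> = LocalCache::new();
    //     let value = cache.get_or_insert_with(42, || fetch_chunk(42));
    //     // `value` remains valid while more entries are inserted, until `drain_iter` is called.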
57 | pub fn get(&self, key: K) -> Option<&V> { 58 | let store = unsafe { &*self.store.get() }; 59 | store.get(&key).map(|v| &**v) 60 | } 61 | 62 | /// Fetch the value for `key`. If it's not here, call `f` to fetch it. 63 | pub fn get_or_insert_with(&self, key: K, f: impl FnOnce() -> V) -> &V { 64 | let mut_store = unsafe { &mut *self.store.get() }; 65 | 66 | mut_store.entry(key).or_insert_with(|| Box::pin(f())) 67 | } 68 | 69 | /// Deletes the value at `key`. 70 | /// 71 | /// # Safety 72 | /// This is only safe if you know that no one is currently borrowing the value at `key`. 73 | pub unsafe fn delete(&self, key: &K) { 74 | let mut_store = &mut *self.store.get(); 75 | mut_store.remove(key); 76 | } 77 | 78 | /// Consume and iterate over all (key, value) pairs. 79 | pub fn drain_iter(&mut self) -> impl '_ + Iterator { 80 | self.store 81 | .get_mut() 82 | .drain() 83 | .map(|(k, v)| (k, unsafe { *Pin::into_inner_unchecked(v) })) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/chunk_tree/builder.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | array::FillChannels, 3 | dev_prelude::{Array, Channel, ChunkStorage, ChunkTree, HashMapChunkTree, SmallKeyHashMap}, 4 | }; 5 | 6 | use building_blocks_core::{point_traits::IntegerPoint, ExtentN, PointN}; 7 | 8 | use core::hash::Hash; 9 | 10 | #[cfg(feature = "serde")] 11 | use serde::{Deserialize, Serialize}; 12 | 13 | /// Constant parameters required to construct a [`ChunkTreeBuilder`]. 14 | #[derive(Clone, Copy)] 15 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 16 | pub struct ChunkTreeConfig { 17 | /// The shape of every chunk. 18 | pub chunk_shape: PointN, 19 | /// The voxel value taken in regions where chunks are vacant. 20 | pub ambient_value: T, 21 | /// The level of detail of root nodes. This implies there are `root_lod + 1` levels of detail, where level 0 (leaves of the 22 | /// tree) has the highest sample rate. 23 | pub root_lod: u8, 24 | } 25 | 26 | /// An object that knows how to construct chunks for a `ChunkTree`. 27 | pub trait ChunkTreeBuilder: Sized { 28 | type Chunk; 29 | 30 | fn config(&self) -> &ChunkTreeConfig; 31 | 32 | /// Construct a new chunk with entirely ambient values. 33 | fn new_ambient(&self, extent: ExtentN) -> Self::Chunk; 34 | 35 | #[inline] 36 | fn chunk_shape(&self) -> PointN 37 | where 38 | PointN: Clone, 39 | { 40 | self.config().chunk_shape.clone() 41 | } 42 | 43 | #[inline] 44 | fn ambient_value(&self) -> T 45 | where 46 | T: Clone, 47 | { 48 | self.config().ambient_value.clone() 49 | } 50 | 51 | #[inline] 52 | fn root_lod(&self) -> u8 { 53 | self.config().root_lod 54 | } 55 | 56 | #[inline] 57 | fn num_lods(&self) -> u8 { 58 | self.root_lod() + 1 59 | } 60 | 61 | /// Create a new `ChunkTree` with the given `storage` which must implement both `ChunkReadStorage` and `ChunkWriteStorage`. 62 | fn build_with_storage( 63 | self, 64 | storage_factory: impl Fn() -> Store, 65 | ) -> ChunkTree 66 | where 67 | PointN: IntegerPoint, 68 | T: Clone, 69 | Store: ChunkStorage, 70 | { 71 | let storages = (0..self.num_lods()).map(|_| storage_factory()).collect(); 72 | ChunkTree::new(self, storages) 73 | } 74 | 75 | /// Create a new `ChunkTree` using a `SmallKeyHashMap` as the chunk storage. 
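/// A hedged sketch of typical use via the `ChunkTreeBuilder3x1` alias (the chunk shape,
/// ambient value, and LOD count below are arbitrary illustration values):
///
/// ```ignore
/// let builder = ChunkTreeBuilder3x1::new(ChunkTreeConfig {
///     chunk_shape: Point3i::fill(16),
///     ambient_value: 0u16,
///     root_lod: 0,
/// });
/// let mut tree = builder.build_with_hash_map_storage();
/// ```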
76 | fn build_with_hash_map_storage(self) -> HashMapChunkTree 77 | where 78 | PointN: Hash + IntegerPoint, 79 | T: Clone, 80 | { 81 | Self::build_with_storage(self, SmallKeyHashMap::default) 82 | } 83 | } 84 | 85 | /// A `ChunkTreeBuilder` for `Array` chunks. 86 | #[derive(Clone, Copy)] 87 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 88 | pub struct ChunkTreeBuilderNxM { 89 | pub config: ChunkTreeConfig, 90 | marker: std::marker::PhantomData, 91 | } 92 | 93 | impl ChunkTreeBuilderNxM { 94 | pub const fn new(config: ChunkTreeConfig) -> Self { 95 | Self { 96 | config, 97 | marker: std::marker::PhantomData, 98 | } 99 | } 100 | } 101 | 102 | macro_rules! builder_type_alias { 103 | ($name:ident, $dim:ty, $( $chan:ident ),+ ) => { 104 | pub type $name<$( $chan ),+> = ChunkTreeBuilderNxM<$dim, ($($chan),+), ($(Channel<$chan>),+)>; 105 | }; 106 | } 107 | 108 | pub mod multichannel_aliases { 109 | use super::*; 110 | 111 | /// A `ChunkTreeBuilder` for `ArrayNx1` chunks. 112 | pub type ChunkTreeBuilderNx1 = ChunkTreeBuilderNxM>; 113 | 114 | /// A `ChunkTreeBuilder` for `Array2x1` chunks. 115 | pub type ChunkTreeBuilder2x1 = ChunkTreeBuilderNxM<[i32; 2], A, Channel>; 116 | builder_type_alias!(ChunkTreeBuilder2x2, [i32; 2], A, B); 117 | builder_type_alias!(ChunkTreeBuilder2x3, [i32; 2], A, B, C); 118 | builder_type_alias!(ChunkTreeBuilder2x4, [i32; 2], A, B, C, D); 119 | builder_type_alias!(ChunkTreeBuilder2x5, [i32; 2], A, B, C, D, E); 120 | builder_type_alias!(ChunkTreeBuilder2x6, [i32; 2], A, B, C, D, E, F); 121 | 122 | /// A `ChunkTreeBuilder` for `Array3x1` chunks. 123 | pub type ChunkTreeBuilder3x1 = ChunkTreeBuilderNxM<[i32; 3], A, Channel>; 124 | builder_type_alias!(ChunkTreeBuilder3x2, [i32; 3], A, B); 125 | builder_type_alias!(ChunkTreeBuilder3x3, [i32; 3], A, B, C); 126 | builder_type_alias!(ChunkTreeBuilder3x4, [i32; 3], A, B, C, D); 127 | builder_type_alias!(ChunkTreeBuilder3x5, [i32; 3], A, B, C, D, E); 128 | builder_type_alias!(ChunkTreeBuilder3x6, [i32; 3], A, B, C, D, E, F); 129 | } 130 | 131 | pub use multichannel_aliases::*; 132 | 133 | impl ChunkTreeBuilder for ChunkTreeBuilderNxM 134 | where 135 | PointN: IntegerPoint, 136 | T: Clone, 137 | Chan: FillChannels, 138 | { 139 | type Chunk = Array; 140 | 141 | fn config(&self) -> &ChunkTreeConfig { 142 | &self.config 143 | } 144 | 145 | fn new_ambient(&self, extent: ExtentN) -> Self::Chunk { 146 | Array::fill(extent, self.ambient_value()) 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/chunk_tree/sampling/point.rs: -------------------------------------------------------------------------------- 1 | use super::chunk_downsample_for_each; 2 | use crate::{ 3 | array::ArrayIndexer, 4 | dev_prelude::{ChunkDownsampler, GetMutUnchecked, GetUnchecked, IndexedArray, Local, Stride}, 5 | }; 6 | 7 | use building_blocks_core::prelude::*; 8 | 9 | /// A `ChunkDownsampler` that just selects a single point from each `2x2x2` region, ignoring the rest. 10 | pub struct PointDownsampler; 11 | 12 | impl ChunkDownsampler for PointDownsampler 13 | where 14 | N: ArrayIndexer, 15 | PointN: IntegerPoint, 16 | Src: GetUnchecked + IndexedArray, 17 | Dst: for<'r> GetMutUnchecked<'r, Stride, Item = &'r mut T>, 18 | { 19 | fn downsample(&self, src_chunk: &Src, dst_chunk: &mut Dst, dst_min: Local) { 20 | let chunk_shape = src_chunk.extent().shape; // Doesn't matter which chunk we choose, they should have the same shape. 
21 | let for_each = chunk_downsample_for_each(chunk_shape, dst_min); 22 | N::for_each_lockstep_unchecked(for_each, |_p, (s_dst, s_src)| unsafe { 23 | *dst_chunk.get_mut_unchecked(s_dst) = src_chunk.get_unchecked(s_src); 24 | }); 25 | } 26 | } 27 | 28 | // ████████╗███████╗███████╗████████╗ 29 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 30 | // ██║ █████╗ ███████╗ ██║ 31 | // ██║ ██╔══╝ ╚════██║ ██║ 32 | // ██║ ███████╗███████║ ██║ 33 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 34 | 35 | #[cfg(test)] 36 | mod test { 37 | use super::*; 38 | use crate::prelude::{Array3x1, ForEach}; 39 | 40 | #[test] 41 | fn point_downsample_only_ones() { 42 | let step = 2; 43 | 44 | let chunk_extent = Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(16)); 45 | 46 | // Make an array where only points with components divisible by 2 have value 1. These are exactly the points that will 47 | // be sampled. 48 | let src_chunk = Array3x1::fill_with(chunk_extent, |p| { 49 | if p.x() % step == 0 && p.y() % step == 0 && p.z() % step == 0 { 50 | 1 51 | } else { 52 | 0 53 | } 54 | }); 55 | 56 | let mut dst_chunk = Array3x1::fill(chunk_extent, 0); 57 | let dst_min = Local(Point3i::ZERO); 58 | PointDownsampler.downsample(&src_chunk, &mut dst_chunk, dst_min); 59 | 60 | let dst_extent = chunk_extent >> 1; 61 | dst_chunk.for_each(&dst_extent, |p: Point3i, x| assert_eq!(x, 1, "p = {:?}", p)); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/chunk_tree/sampling/sdf_mean.rs: -------------------------------------------------------------------------------- 1 | use super::chunk_downsample_for_each; 2 | use crate::{ 3 | array::{ArrayForEach, ArrayIndexer}, 4 | dev_prelude::{ChunkDownsampler, IndexedArray, Local, Stride}, 5 | prelude::{GetMutUnchecked, GetUnchecked}, 6 | }; 7 | 8 | use building_blocks_core::prelude::*; 9 | 10 | /// A `ChunkDownsampler` that takes the mean of each `2x2x2` region of a signed distance field. It also renormalizes the values 11 | /// to lie in the range `[-1.0, 1.0]`. 12 | pub struct SdfMeanDownsampler; 13 | 14 | impl ChunkDownsampler for SdfMeanDownsampler 15 | where 16 | N: ArrayIndexer, 17 | PointN: IntegerPoint, 18 | ArrayForEach: Clone, 19 | T: From, 20 | f32: From, 21 | Src: GetUnchecked + IndexedArray, 22 | Dst: for<'r> GetMutUnchecked<'r, Stride, Item = &'r mut T> + IndexedArray, 23 | { 24 | fn downsample(&self, src_chunk: &Src, dst_chunk: &mut Dst, dst_min: Local) { 25 | let chunk_shape = src_chunk.extent().shape; // Doesn't matter which chunk we choose, they should have the same shape. 26 | 27 | let src_shape_per_point = PointN::fill(2); 28 | 29 | let kernel_for_each = ArrayForEach::new_local_unchecked( 30 | chunk_shape, 31 | Local(PointN::ZERO), 32 | ExtentN::from_min_and_shape(PointN::ZERO, src_shape_per_point), 33 | ); 34 | 35 | // Not only do we get the mean signed distance value by dividing by the volume, but we also re-normalize by dividing 36 | // by the scale factor (the ratio between voxel edge lengths at the different resolutions). 
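// E.g. in 3D the kernel covers 2 * 2 * 2 = 8 source samples, so `rescale = 1.0 / 16.0`:
// dividing by 8 takes the mean, and the extra factor of 2 renormalizes distances to the
// coarser LOD's (doubled) voxel edge length.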
37 | let rescale = 1.0 / (2 * src_shape_per_point.volume()) as f32; 38 | 39 | let for_each = chunk_downsample_for_each(chunk_shape, dst_min); 40 | N::for_each_lockstep_unchecked(for_each, |_p, (s_dst, s_src)| { 41 | let mut sum = 0.0; 42 | N::for_each(kernel_for_each.clone(), |_p, neighbor_offset| { 43 | sum += f32::from(unsafe { src_chunk.get_unchecked(s_src + neighbor_offset) }); 44 | }); 45 | unsafe { 46 | *dst_chunk.get_mut_unchecked(s_dst) = T::from(rescale * sum); 47 | } 48 | }); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/chunk_tree/storage.rs: -------------------------------------------------------------------------------- 1 | pub mod compressible; 2 | pub mod hash_map; 3 | 4 | pub use compressible::*; 5 | pub use hash_map::*; 6 | 7 | use super::{ChunkNode, NodeState}; 8 | 9 | use building_blocks_core::prelude::*; 10 | 11 | use auto_impl::auto_impl; 12 | use either::Either; 13 | 14 | /// Methods for reading and writing chunk nodes from/to storage. 15 | /// 16 | /// Depending on the implementation, any method that fetches a `Self::Chunk` might trigger decompression. 17 | pub trait ChunkStorage { 18 | /// The data stored for an occupied [`ChunkNode`]. This should probably implement [`UserChunk`](super::UserChunk). 19 | type Chunk; 20 | 21 | /// The "cold" representation of a chunk, e.g. it may be compressed or live somewhere else entirely. 22 | /// 23 | /// Simple implementations of `ChunkStorage` can just have `ColdChunk = Chunk`. 24 | type ColdChunk; 25 | 26 | /// Inserts `node` at `key` and returns the previous raw node. 27 | fn insert_node( 28 | &mut self, 29 | key: PointN, 30 | node: ChunkNode, 31 | ) -> Option>>; 32 | 33 | /// Borrow the node at `key`. 34 | fn get_node(&self, key: PointN) -> Option<&ChunkNode>; 35 | 36 | /// Borrow the node at `key` in its raw representation. 37 | fn get_raw_node( 38 | &self, 39 | key: PointN, 40 | ) -> Option<(&NodeState, Either, &Self::ColdChunk>)>; 41 | 42 | /// Borrow the node state at `key`. The returned `bool` is `true` iff this node has data. 43 | fn get_node_state(&self, key: PointN) -> Option<(&NodeState, bool)>; 44 | 45 | /// Mutably borrow the node at `key`. Iff `drop_chunk` is `true`, then the chunk on the node is dropped. 46 | fn get_mut_node( 47 | &mut self, 48 | key: PointN, 49 | drop_chunk: bool, 50 | ) -> Option<&mut ChunkNode>; 51 | 52 | /// Mutably borrow the node at `key`. If it doesn't exist, insert the return value of `create_node`. 53 | /// 54 | /// Iff `drop_chunk` is `true`, then the node's chunk is dropped before being returned. This can be used to prevent 55 | /// decompression when the chunk isn't needed. 56 | /// 57 | /// Returns a `bool` indicating if the node had a chunk, for convenience. 58 | fn get_mut_node_or_insert_with( 59 | &mut self, 60 | key: PointN, 61 | drop_chunk: bool, 62 | create_node: impl FnOnce() -> ChunkNode, 63 | ) -> (&mut ChunkNode, bool); 64 | 65 | /// Mutably borrow the node state at `key`. The returned `bool` is `true` iff this node has data. 66 | fn get_mut_node_state(&mut self, key: PointN) -> Option<(&mut NodeState, bool)>; 67 | 68 | /// Mutably borrow the node state at `key`. If it doesn't exist, insert the return value of `create_node`. The returned 69 | /// `bool` is `true` iff this node has data. 
70 | fn get_mut_node_state_or_insert_with( 71 | &mut self, 72 | key: PointN, 73 | create_node: impl FnOnce() -> ChunkNode, 74 | ) -> (&mut NodeState, bool); 75 | 76 | /// Removes and returns the node at `key`. 77 | fn pop_node(&mut self, key: PointN, drop_chunk: bool) -> Option>; 78 | 79 | /// Removes and returns the raw node at `key`. 80 | fn pop_raw_node( 81 | &mut self, 82 | key: PointN, 83 | ) -> Option>>; 84 | } 85 | 86 | #[auto_impl(&, &mut)] 87 | pub trait IterChunkKeys<'a, N> 88 | where 89 | PointN: 'a, 90 | { 91 | type Iter: Iterator>; 92 | 93 | fn chunk_keys(&'a self) -> Self::Iter; 94 | } 95 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/compression.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(feature = "serde", feature = "bincode"))] 2 | mod compressed_bincode; 3 | #[cfg(feature = "lz4")] 4 | mod lz4_compression; 5 | #[cfg(feature = "snap")] 6 | mod snappy_compression; 7 | 8 | #[cfg(all(feature = "serde", feature = "bincode"))] 9 | pub use compressed_bincode::BincodeCompression; 10 | #[cfg(feature = "lz4")] 11 | pub use lz4_compression::Lz4; 12 | #[cfg(feature = "snap")] 13 | pub use snappy_compression::Snappy; 14 | 15 | use std::io; 16 | 17 | #[cfg(feature = "serde")] 18 | use serde::{Deserialize, Serialize}; 19 | 20 | /// An algorithm for: 21 | /// 1. compressing a specific type `Data` into raw bytes 22 | /// 2. decompressing raw bytes back into `Data` 23 | pub trait Compression: Sized { 24 | type Data; 25 | 26 | fn compress_to_writer( 27 | &self, 28 | data: &Self::Data, 29 | compressed_bytes: impl io::Write, 30 | ) -> io::Result<()>; 31 | 32 | fn decompress_from_reader(compressed_bytes: impl io::Read) -> io::Result; 33 | 34 | /// To preserve type information. prefer this method over `compress_to_writer`. 35 | fn compress(&self, data: &Self::Data) -> Compressed { 36 | Compressed::new(self, data) 37 | } 38 | } 39 | 40 | pub trait FromBytesCompression { 41 | fn from_bytes_compression(bytes_compression: B) -> Self; 42 | } 43 | 44 | /// A wrapper for bytes from compression algorithm `A`. This is slightly safer than manually calling `decompress` on any byte 45 | /// slice, since it remembers the original data type. 46 | #[derive(Clone)] 47 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 48 | pub struct Compressed { 49 | pub compressed_bytes: Vec, 50 | marker: std::marker::PhantomData, 51 | } 52 | 53 | impl Compressed 54 | where 55 | A: Compression, 56 | { 57 | pub fn new(compression: &A, data: &A::Data) -> Self { 58 | let mut compressed_bytes = Vec::new(); 59 | A::compress_to_writer(compression, data, &mut compressed_bytes).unwrap(); 60 | 61 | Self { 62 | compressed_bytes, 63 | marker: Default::default(), 64 | } 65 | } 66 | 67 | pub fn decompress(&self) -> A::Data { 68 | A::decompress_from_reader(self.compressed_bytes.as_slice()).unwrap() 69 | } 70 | 71 | pub fn take_bytes(self) -> Vec { 72 | self.compressed_bytes 73 | } 74 | } 75 | 76 | /// A compression algorithm that reads a stream of bytes. 
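/// A hedged sketch of a round trip with the (feature-gated) `Snappy` implementation; any other
/// `BytesCompression` impl is used the same way:
///
/// ```ignore
/// let bytes: Vec<u8> = (0u8..100).collect();
/// let mut compressed = Vec::new();
/// Snappy.compress_bytes(bytes.as_slice(), &mut compressed).unwrap();
/// let mut decompressed = Vec::new();
/// Snappy::decompress_bytes(compressed.as_slice(), &mut decompressed).unwrap();
/// assert_eq!(bytes, decompressed);
/// ```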
77 | pub trait BytesCompression { 78 | fn compress_bytes( 79 | &self, 80 | bytes: impl io::Read, 81 | compressed_bytes: impl io::Write, 82 | ) -> io::Result<()>; 83 | 84 | fn decompress_bytes(compressed_bytes: impl io::Read, bytes: impl io::Write) -> io::Result<()>; 85 | } 86 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/compression/compressed_bincode.rs: -------------------------------------------------------------------------------- 1 | use super::{BytesCompression, Compression}; 2 | 3 | use serde::{de::DeserializeOwned, Serialize}; 4 | use std::io; 5 | 6 | /// Run some compression algorithm `A` after bincode serializing a type `T`. This provides a decent 7 | /// default compression for any serializable type. 8 | pub struct BincodeCompression { 9 | pub compression: A, 10 | marker: std::marker::PhantomData, 11 | } 12 | 13 | impl Clone for BincodeCompression 14 | where 15 | A: Clone, 16 | { 17 | fn clone(&self) -> Self { 18 | Self { 19 | compression: self.compression.clone(), 20 | marker: Default::default(), 21 | } 22 | } 23 | } 24 | 25 | impl Copy for BincodeCompression where A: Copy {} 26 | 27 | impl BincodeCompression { 28 | pub fn new(compression: A) -> Self { 29 | Self { 30 | compression, 31 | marker: Default::default(), 32 | } 33 | } 34 | } 35 | 36 | impl Compression for BincodeCompression 37 | where 38 | T: DeserializeOwned + Serialize, 39 | A: BytesCompression, 40 | { 41 | type Data = T; 42 | 43 | fn compress_to_writer( 44 | &self, 45 | data: &Self::Data, 46 | compressed_bytes: impl io::Write, 47 | ) -> io::Result<()> { 48 | self.compression.compress_bytes( 49 | bincode::serialize(data).unwrap().as_slice(), 50 | compressed_bytes, 51 | ) 52 | } 53 | 54 | fn decompress_from_reader(compressed_bytes: impl io::Read) -> io::Result { 55 | let mut decompressed_bytes = Vec::new(); 56 | A::decompress_bytes(compressed_bytes, &mut decompressed_bytes)?; 57 | 58 | Ok(bincode::deserialize(&decompressed_bytes).unwrap()) 59 | } 60 | } 61 | 62 | // ████████╗███████╗███████╗████████╗ 63 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 64 | // ██║ █████╗ ███████╗ ██║ 65 | // ██║ ██╔══╝ ╚════██║ ██║ 66 | // ██║ ███████╗███████║ ██║ 67 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 68 | 69 | #[cfg(all(test, feature = "snap"))] 70 | mod tests { 71 | use super::*; 72 | use crate::prelude::Snappy; 73 | use serde::Deserialize; 74 | 75 | #[derive(Clone, Debug, Eq, Deserialize, Serialize, PartialEq)] 76 | struct Foo(Vec); 77 | 78 | #[test] 79 | fn compress_and_decompress_serializable_type() { 80 | let foo = Foo((0u8..100).collect()); 81 | 82 | let compression = BincodeCompression::new(Snappy); 83 | let compressed = compression.compress(&foo); 84 | let decompressed_foo = compressed.decompress(); 85 | 86 | assert_eq!(foo, decompressed_foo); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/compression/lz4_compression.rs: -------------------------------------------------------------------------------- 1 | use super::BytesCompression; 2 | 3 | use std::io; 4 | 5 | #[cfg(feature = "serde")] 6 | use serde::{Deserialize, Serialize}; 7 | 8 | /// The [LZ4 compression algorithm](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)). 9 | #[derive(Clone, Copy, Debug)] 10 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 11 | pub struct Lz4 { 12 | /// The compression level, from 0 to 10. 0 is fastest and least aggressive. 10 is slowest and 13 | /// most aggressive. 
14 | pub level: u32, 15 | } 16 | 17 | impl BytesCompression for Lz4 { 18 | fn compress_bytes( 19 | &self, 20 | mut bytes: impl io::Read, 21 | compressed_bytes: impl io::Write, 22 | ) -> io::Result<()> { 23 | let mut encoder = lz4::EncoderBuilder::new() 24 | .level(self.level) 25 | .build(compressed_bytes)?; 26 | io::copy(&mut bytes, &mut encoder)?; 27 | let (_output, result) = encoder.finish(); 28 | 29 | result 30 | } 31 | 32 | fn decompress_bytes( 33 | compressed_bytes: impl io::Read, 34 | mut bytes: impl io::Write, 35 | ) -> io::Result<()> { 36 | let mut decoder = lz4::Decoder::new(compressed_bytes)?; 37 | io::copy(&mut decoder, &mut bytes)?; 38 | Ok(()) 39 | } 40 | } 41 | 42 | // ████████╗███████╗███████╗████████╗ 43 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 44 | // ██║ █████╗ ███████╗ ██║ 45 | // ██║ ██╔══╝ ╚════██║ ██║ 46 | // ██║ ███████╗███████║ ██║ 47 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 48 | 49 | #[cfg(test)] 50 | mod tests { 51 | use super::*; 52 | 53 | #[test] 54 | fn compress_and_decompress_serializable_type() { 55 | let bytes: Vec = (0u8..100).collect(); 56 | 57 | let mut compressed_bytes = Vec::new(); 58 | Lz4 { level: 10 } 59 | .compress_bytes(bytes.as_slice(), &mut compressed_bytes) 60 | .unwrap(); 61 | let mut decompressed_bytes = Vec::new(); 62 | Lz4::decompress_bytes(compressed_bytes.as_slice(), &mut decompressed_bytes).unwrap(); 63 | 64 | assert_eq!(bytes, decompressed_bytes); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/compression/snappy_compression.rs: -------------------------------------------------------------------------------- 1 | use super::BytesCompression; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use std::io; 5 | 6 | /// The [Snappy compression algorithm](https://en.wikipedia.org/wiki/Snappy_(compression)). 7 | /// Uses a pure Rust implementation, making it suitable for use with the WASM target. 
8 | #[derive(Clone, Copy, Debug, Deserialize, Serialize)] 9 | pub struct Snappy; 10 | 11 | impl BytesCompression for Snappy { 12 | fn compress_bytes( 13 | &self, 14 | mut bytes: impl io::Read, 15 | compressed_bytes: impl io::Write, 16 | ) -> io::Result<()> { 17 | let mut encoder = snap::write::FrameEncoder::new(compressed_bytes); 18 | io::copy(&mut bytes, &mut encoder)?; 19 | encoder.into_inner().expect("failed to flush the writer"); 20 | Ok(()) 21 | } 22 | 23 | fn decompress_bytes( 24 | compressed_bytes: impl io::Read, 25 | mut bytes: impl io::Write, 26 | ) -> io::Result<()> { 27 | let mut decoder = snap::read::FrameDecoder::new(compressed_bytes); 28 | io::copy(&mut decoder, &mut bytes)?; 29 | Ok(()) 30 | } 31 | } 32 | 33 | // ████████╗███████╗███████╗████████╗ 34 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 35 | // ██║ █████╗ ███████╗ ██║ 36 | // ██║ ██╔══╝ ╚════██║ ██║ 37 | // ██║ ███████╗███████║ ██║ 38 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 39 | 40 | #[cfg(test)] 41 | mod tests { 42 | use super::*; 43 | 44 | #[test] 45 | fn compress_and_decompress_serializable_type() { 46 | let bytes: Vec = (0u8..100).collect(); 47 | 48 | let mut compressed_bytes = Vec::new(); 49 | Snappy 50 | .compress_bytes(bytes.as_slice(), &mut compressed_bytes) 51 | .unwrap(); 52 | let mut decompressed_bytes = Vec::new(); 53 | Snappy::decompress_bytes(compressed_bytes.as_slice(), &mut decompressed_bytes).unwrap(); 54 | 55 | assert_eq!(bytes, decompressed_bytes); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/crate_doc.md: -------------------------------------------------------------------------------- 1 | Various types of storage and indexing for voxels in 2 or 3 dimensions. 2 | 3 | The core storage types are: 4 | 5 | - [`Array`](self::array): N-dimensional, single resolution, bounded, 6 | dense array 7 | - [`ChunkTree`](self::chunk_tree): N-dimensional, multiple resolution, 8 | unbounded, sparse array 9 | - Backed by generic chunk storage, with [`SmallKeyHashMap`] or 10 | [`CompressibleChunkStorage`](self::chunk_tree::CompressibleChunkStorage) implementations 11 | - [`ChunkDb`](self::database::ChunkDb): A persistent database for chunks backed by `sled`. 12 | 13 | Then there are "meta" lattice maps that provide some extra utility: 14 | 15 | - [`TransformMap`](self::transform_map::TransformMap): a wrapper of any kind of 16 | lattice map that performs an arbitrary transformation 17 | - [`Func`](self::func::Func): some lattice map traits are implemented for 18 | closures (like SDFs) 19 | 20 | If you need to store signed distance values in your voxels, consider using the 21 | [`Sd8`](self::signed_distance::Sd8) and [`Sd16`](self::signed_distance::Sd16) 22 | fixed-precision types which implement the 23 | [`SignedDistance`](self::signed_distance::SignedDistance) trait required for 24 | smooth meshing. 
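
As a hedged, minimal sketch (arbitrary shapes and values; see the module docs above for the
details), the two core storage types are constructed like this:

```rust
use building_blocks_core::prelude::*;
use building_blocks_storage::prelude::*;

// A dense, bounded array: every point in the extent stores a value.
let extent = Extent3i::from_min_and_shape(Point3i::ZERO, Point3i::fill(32));
let dense = Array3x1::fill(extent, 0.0f32);

// A sparse, unbounded chunk tree with 16^3 chunks and a single level of detail.
let builder = ChunkTreeBuilder3x1::new(ChunkTreeConfig {
    chunk_shape: Point3i::fill(16),
    ambient_value: 0.0f32,
    root_lod: 0,
});
let mut tree = builder.build_with_hash_map_storage();
```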
25 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/database.rs: -------------------------------------------------------------------------------- 1 | mod chunk_db; 2 | mod delta_batch; 3 | mod key; 4 | mod read_result; 5 | mod readable; 6 | 7 | #[cfg(feature = "sled-snapshots")] 8 | mod versioned_chunk_db; 9 | 10 | pub use chunk_db::*; 11 | pub use delta_batch::*; 12 | pub use key::*; 13 | pub use read_result::*; 14 | pub use readable::*; 15 | 16 | #[cfg(feature = "sled-snapshots")] 17 | pub use versioned_chunk_db::*; 18 | 19 | pub use sled; 20 | 21 | #[cfg(feature = "sled-snapshots")] 22 | pub use sled_snapshots; 23 | 24 | pub enum Delta { 25 | Insert(K, V), 26 | Remove(K), 27 | } 28 | 29 | impl Delta { 30 | fn key(&self) -> &K { 31 | match self { 32 | Self::Insert(k, _) => k, 33 | Self::Remove(k) => k, 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/database/delta_batch.rs: -------------------------------------------------------------------------------- 1 | use crate::dev_prelude::{ChunkKey, Compression, DatabaseKey, Delta}; 2 | 3 | use futures::future::join_all; 4 | use sled::IVec; 5 | use std::borrow::Borrow; 6 | 7 | /// Creates a [`DeltaBatch`]. This handles sorting the deltas in Morton order and compressing the chunk data. 8 | pub struct DeltaBatchBuilder { 9 | raw_deltas: Vec>, 10 | compression: Compr, 11 | marker: std::marker::PhantomData, 12 | } 13 | 14 | impl DeltaBatchBuilder { 15 | pub fn new(compression: Compr) -> Self { 16 | Self { 17 | raw_deltas: Default::default(), 18 | compression, 19 | marker: Default::default(), 20 | } 21 | } 22 | } 23 | 24 | impl DeltaBatchBuilder 25 | where 26 | ChunkKey: DatabaseKey, 27 | { 28 | pub fn add_raw_deltas(&mut self, deltas: impl Iterator, IVec>>) { 29 | self.raw_deltas.extend(deltas.map(|delta| match delta { 30 | Delta::Insert(k, v) => Delta::Insert(ChunkKey::::into_ord_key(k), v), 31 | Delta::Remove(k) => Delta::Remove(ChunkKey::::into_ord_key(k)), 32 | })); 33 | } 34 | 35 | /// Compresses `deltas` concurrently and adds them to the batch. 36 | pub async fn add_and_compress_deltas( 37 | &mut self, 38 | deltas: impl Iterator, Data>>, 39 | ) where 40 | Compr: Compression + Copy, 41 | Data: Borrow, 42 | { 43 | // Compress all of the chunks in parallel. 44 | let compression = self.compression; 45 | let mut raw_deltas: Vec<_> = join_all(deltas.map(|delta| async move { 46 | match delta { 47 | Delta::Insert(k, v) => Delta::Insert( 48 | ChunkKey::::into_ord_key(k), 49 | // PERF: IVec will copy the bytes instead of moving, because it needs to also allocate room for an internal 50 | // header 51 | IVec::from(compression.compress(v.borrow()).take_bytes()), 52 | ), 53 | Delta::Remove(k) => Delta::Remove(ChunkKey::::into_ord_key(k)), 54 | } 55 | })) 56 | .await; 57 | self.raw_deltas.append(&mut raw_deltas); 58 | } 59 | 60 | /// Sorts the deltas by Morton key and converts them to `IVec` key-value pairs for `sled`. 61 | pub fn build(mut self) -> DeltaBatch 62 | where 63 | K: Copy + Ord, 64 | { 65 | // Sort them by the Ord key. 
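// Because the Ord key is (lod, Morton code), this groups spatially adjacent chunks into
// contiguous key ranges, which the orthant/extent range scans in `ReadableChunkDb` rely on.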
66 | self.raw_deltas.sort_by_key(|delta| *delta.key()); 67 | 68 | let deltas: Vec<_> = self 69 | .raw_deltas 70 | .into_iter() 71 | .map(|delta| match delta { 72 | Delta::Insert(k, v) => Delta::Insert( 73 | IVec::from(ChunkKey::::ord_key_to_be_bytes(k).as_ref()), 74 | v, 75 | ), 76 | Delta::Remove(k) => { 77 | Delta::Remove(IVec::from(ChunkKey::::ord_key_to_be_bytes(k).as_ref())) 78 | } 79 | }) 80 | .collect(); 81 | 82 | DeltaBatch { deltas } 83 | } 84 | } 85 | 86 | /// A set of [Delta]s to be atomically applied to a [`ChunkDb`](super::ChunkDb) or 87 | /// [`VersionedChunkDb`](super::VersionedChunkDb). 88 | /// 89 | /// Can be created with a [`DeltaBatchBuilder`]. 90 | #[derive(Default)] 91 | pub struct DeltaBatch { 92 | pub(crate) deltas: Vec>, 93 | } 94 | 95 | impl From for sled::Batch { 96 | fn from(batch: DeltaBatch) -> Self { 97 | let mut new_batch = sled::Batch::default(); 98 | for delta in batch.deltas.into_iter() { 99 | match delta { 100 | Delta::Insert(key_bytes, chunk_bytes) => { 101 | new_batch.insert(key_bytes.as_ref(), chunk_bytes); 102 | } 103 | Delta::Remove(key_bytes) => new_batch.remove(key_bytes.as_ref()), 104 | } 105 | } 106 | new_batch 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/database/key.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::{ChunkKey, ChunkKey2, ChunkKey3}; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | use core::ops::{Bound, RangeInclusive}; 6 | 7 | pub trait DatabaseKey { 8 | type OrdKey: Copy + Ord; 9 | type KeyBytes: AsRef<[u8]>; 10 | 11 | fn into_ord_key(self) -> Self::OrdKey; 12 | fn from_ord_key(key: Self::OrdKey) -> Self; 13 | 14 | fn ord_key_to_be_bytes(key: Self::OrdKey) -> Self::KeyBytes; 15 | fn ord_key_from_be_bytes(bytes: &[u8]) -> Self::OrdKey; 16 | 17 | fn orthant_range(lod: u8, orthant: Orthant) -> RangeInclusive; 18 | 19 | fn min_key(lod: u8) -> Self::OrdKey; 20 | fn max_key(lod: u8) -> Self::OrdKey; 21 | 22 | fn full_range(lod: u8) -> RangeInclusive { 23 | Self::min_key(lod)..=Self::max_key(lod) 24 | } 25 | } 26 | 27 | impl DatabaseKey<[i32; 2]> for ChunkKey2 { 28 | type OrdKey = (u8, Morton2); 29 | 30 | // 1 for LOD and 8 for the morton code. 
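// The layout is `[lod, morton_be[0..8]]`; big-endian bytes make lexicographic byte order in
// the database agree with the `(lod, Morton2)` ordering used for range scans.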
31 | type KeyBytes = [u8; 9]; 32 | 33 | #[inline] 34 | fn into_ord_key(self) -> Self::OrdKey { 35 | (self.lod, Morton2::from(self.minimum)) 36 | } 37 | 38 | #[inline] 39 | fn from_ord_key((lod, morton): Self::OrdKey) -> Self { 40 | ChunkKey::new(lod, Point2i::from(morton)) 41 | } 42 | 43 | #[inline] 44 | fn ord_key_to_be_bytes((lod, morton): Self::OrdKey) -> Self::KeyBytes { 45 | let mut bytes = [0; 9]; 46 | bytes[0] = lod; 47 | bytes[1..].copy_from_slice(&morton.0.to_be_bytes()); 48 | bytes 49 | } 50 | 51 | #[inline] 52 | fn ord_key_from_be_bytes(bytes: &[u8]) -> Self::OrdKey { 53 | let lod = bytes[0]; 54 | let mut morton_bytes = [0; 8]; 55 | morton_bytes.copy_from_slice(&bytes[1..]); 56 | let morton_int = u64::from_be_bytes(morton_bytes); 57 | (lod, Morton2(morton_int)) 58 | } 59 | 60 | #[inline] 61 | fn orthant_range(lod: u8, quad: Quadrant) -> RangeInclusive { 62 | let extent = Extent2i::from(quad); 63 | let min_morton = Morton2::from(extent.minimum); 64 | let max_morton = Morton2::from(extent.max()); 65 | (lod, min_morton)..=(lod, max_morton) 66 | } 67 | 68 | #[inline] 69 | fn min_key(lod: u8) -> Self::OrdKey { 70 | (lod, Morton2::from(Point2i::MIN)) 71 | } 72 | 73 | #[inline] 74 | fn max_key(lod: u8) -> Self::OrdKey { 75 | (lod, Morton2::from(Point2i::MAX)) 76 | } 77 | } 78 | 79 | impl DatabaseKey<[i32; 3]> for ChunkKey3 { 80 | type OrdKey = (u8, Morton3); 81 | 82 | // 1 for LOD and 12 for the morton code. Although a `Morton3` uses a u128, it only actually uses the least significant 96 83 | // bits (12 bytes). 84 | type KeyBytes = [u8; 13]; 85 | 86 | #[inline] 87 | fn into_ord_key(self) -> Self::OrdKey { 88 | (self.lod, Morton3::from(self.minimum)) 89 | } 90 | 91 | #[inline] 92 | fn from_ord_key((lod, morton): Self::OrdKey) -> Self { 93 | ChunkKey::new(lod, Point3i::from(morton)) 94 | } 95 | 96 | #[inline] 97 | fn ord_key_to_be_bytes((lod, morton): Self::OrdKey) -> Self::KeyBytes { 98 | let mut bytes = [0; 13]; 99 | bytes[0] = lod; 100 | bytes[1..].copy_from_slice(&morton.0.to_be_bytes()[4..]); 101 | bytes 102 | } 103 | 104 | #[inline] 105 | fn ord_key_from_be_bytes(bytes: &[u8]) -> Self::OrdKey { 106 | let lod = bytes[0]; 107 | // The most significant 4 bytes of the u128 are not used. 
108 | let mut morton_bytes = [0; 16]; 109 | morton_bytes[4..16].copy_from_slice(&bytes[1..]); 110 | let morton_int = u128::from_be_bytes(morton_bytes); 111 | (lod, Morton3(morton_int)) 112 | } 113 | 114 | #[inline] 115 | fn orthant_range(lod: u8, octant: Octant) -> RangeInclusive { 116 | let extent = Extent3i::from(octant); 117 | let min_morton = Morton3::from(extent.minimum); 118 | let max_morton = Morton3::from(extent.max()); 119 | (lod, min_morton)..=(lod, max_morton) 120 | } 121 | 122 | #[inline] 123 | fn min_key(lod: u8) -> Self::OrdKey { 124 | (lod, Morton3::from(Point3i::MIN)) 125 | } 126 | 127 | #[inline] 128 | fn max_key(lod: u8) -> Self::OrdKey { 129 | (lod, Morton3::from(Point3i::MAX)) 130 | } 131 | } 132 | 133 | // TODO: replace this when https://github.com/rust-lang/rust/issues/86026 is stabilized 134 | pub(crate) fn map_bound(b: Bound<&X>, f: impl FnOnce(&X) -> Y) -> Bound { 135 | match b { 136 | Bound::Excluded(x) => Bound::Excluded(f(x)), 137 | Bound::Included(x) => Bound::Included(f(x)), 138 | Bound::Unbounded => Bound::Unbounded, 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/database/read_result.rs: -------------------------------------------------------------------------------- 1 | use crate::dev_prelude::{ChunkKey, Compression, DatabaseKey}; 2 | 3 | use futures::future::join_all; 4 | use sled::IVec; 5 | 6 | /// A wrapper around key-value pairs read from a `ChunkDb`. 7 | pub struct ReadResult { 8 | pub(crate) key_value_pairs: Vec<(IVec, IVec)>, 9 | marker: std::marker::PhantomData, 10 | } 11 | 12 | impl Default for ReadResult { 13 | fn default() -> Self { 14 | Self::new(Default::default()) 15 | } 16 | } 17 | 18 | impl ReadResult { 19 | pub(crate) fn new(key_value_pairs: Vec<(IVec, IVec)>) -> Self { 20 | Self { 21 | key_value_pairs, 22 | marker: Default::default(), 23 | } 24 | } 25 | 26 | pub(crate) fn append(&mut self, mut result: Self) { 27 | self.key_value_pairs.append(&mut result.key_value_pairs) 28 | } 29 | 30 | /// Take the key-value pairs where keys and values are left in a raw byte format. 31 | pub fn take_with_raw_key_values(self) -> Vec<(IVec, IVec)> { 32 | self.key_value_pairs 33 | } 34 | 35 | /// Take the key-value pairs where values are left in a raw byte format. 36 | pub fn take_with_raw_values(self) -> impl Iterator, IVec)> 37 | where 38 | ChunkKey: DatabaseKey, 39 | { 40 | self.key_value_pairs.into_iter().map(|(k, v)| { 41 | ( 42 | ChunkKey::from_ord_key(ChunkKey::ord_key_from_be_bytes(&k)), 43 | v, 44 | ) 45 | }) 46 | } 47 | 48 | /// Concurrently decompress all values, calling `chunk_rx` on each key-value pair. 
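/// A hedged sketch (`read_result` is a hypothetical value returned by one of the read methods;
/// assumes an async executor and that `Compr` matches the compression used when the chunks
/// were written):
///
/// ```ignore
/// let mut chunks = Vec::new();
/// read_result.decompress(|key, chunk| chunks.push((key, chunk))).await;
/// ```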
49 | pub async fn decompress(self, mut chunk_rx: F) 50 | where 51 | ChunkKey: DatabaseKey, 52 | Compr: Compression, 53 | F: FnMut(ChunkKey, Compr::Data), 54 | { 55 | for batch in self.key_value_pairs.chunks(16) { 56 | for (chunk_key, chunk) in 57 | join_all(batch.iter().map(|(key, compressed_chunk)| async move { 58 | let ord_key = ChunkKey::::ord_key_from_be_bytes(key.as_ref()); 59 | let chunk_key = ChunkKey::::from_ord_key(ord_key); 60 | 61 | let chunk = Compr::decompress_from_reader(compressed_chunk.as_ref()).unwrap(); 62 | 63 | (chunk_key, chunk) 64 | })) 65 | .await 66 | { 67 | chunk_rx(chunk_key, chunk); 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/database/readable.rs: -------------------------------------------------------------------------------- 1 | use super::{key::map_bound, DatabaseKey, ReadResult}; 2 | 3 | use crate::prelude::ChunkKey; 4 | 5 | use building_blocks_core::{orthants_covering_extent, prelude::*}; 6 | 7 | use core::ops::RangeBounds; 8 | 9 | /// Shared behavior for chunk databases, i.e. those that are keyed on `ChunkKey`. 10 | pub trait ReadableChunkDb { 11 | type Compr; 12 | 13 | fn data_tree(&self) -> &sled::Tree; 14 | 15 | /// Scans the given orthant for chunks. Because chunk keys are stored in Morton order, the chunks in any orthant are 16 | /// guaranteed to be contiguous. 17 | /// 18 | /// The `orthant` is expected in voxel units, not chunk units. 19 | fn read_chunks_in_orthant( 20 | &self, 21 | lod: u8, 22 | orthant: Orthant, 23 | ) -> sled::Result> 24 | where 25 | ChunkKey: DatabaseKey, 26 | { 27 | let range = ChunkKey::::orthant_range(lod, orthant); 28 | self.read_morton_range(range) 29 | } 30 | 31 | /// This is like `read_chunks_in_orthant`, but it works for the given `extent`. Since Morton order only guarantees 32 | /// contiguity within a single `Orthant`, we should not naively scan from the Morton of `extent.minimum` to `extent.max()`. 33 | /// Rather, we scan a set of `Orthant`s that covers `extent`. This covering is *at least* sufficient to cover the extent, 34 | /// and it gets more exact as `orthant_exponent` (log2 of the side length) gets smaller. However, for exactness, you must 35 | /// necessarily do more scans. 36 | fn read_orthants_covering_extent( 37 | &self, 38 | lod: u8, 39 | orthant_exponent: i32, 40 | extent: ExtentN, 41 | ) -> sled::Result> 42 | where 43 | PointN: IntegerPoint, 44 | ChunkKey: DatabaseKey, 45 | { 46 | // PERF: more parallelism? 47 | let mut result = ReadResult::default(); 48 | for orthant in orthants_covering_extent(extent, orthant_exponent) { 49 | result.append(self.read_chunks_in_orthant(lod, orthant)?); 50 | } 51 | Ok(result) 52 | } 53 | 54 | /// Reads all chunks in the given `lod`. 55 | fn read_all_chunks(&self, lod: u8) -> sled::Result> 56 | where 57 | ChunkKey: DatabaseKey, 58 | { 59 | self.read_morton_range(ChunkKey::::full_range(lod)) 60 | } 61 | 62 | /// Reads all chunks in the given `range` of Morton codes. 
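/// A hedged sketch (`db` is a hypothetical `ChunkDb3`); reading every LOD0 chunk this way is
/// equivalent to `read_all_chunks(0)` above:
///
/// ```ignore
/// let result = db.read_morton_range(ChunkKey3::full_range(0))?;
/// ```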
63 | fn read_morton_range(&self, range: R) -> sled::Result> 64 | where 65 | ChunkKey: DatabaseKey, 66 | R: RangeBounds< as DatabaseKey>::OrdKey>, 67 | { 68 | let key_range_start = map_bound(range.start_bound(), |k| ChunkKey::ord_key_to_be_bytes(*k)); 69 | let key_range_end = map_bound(range.end_bound(), |k| ChunkKey::ord_key_to_be_bytes(*k)); 70 | let key_value_pairs = self 71 | .data_tree() 72 | .range((key_range_start, key_range_end)) 73 | .collect::, _>>()?; 74 | Ok(ReadResult::new(key_value_pairs)) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/dot_vox_conversions.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::*; 2 | 3 | pub use dot_vox; 4 | 5 | use building_blocks_core::prelude::*; 6 | 7 | use dot_vox::*; 8 | 9 | #[derive(Copy, Clone, Eq, PartialEq)] 10 | pub enum VoxColor { 11 | Color(u8), 12 | Empty, 13 | } 14 | 15 | impl IsEmpty for VoxColor { 16 | fn is_empty(&self) -> bool { 17 | matches!(self, VoxColor::Empty) 18 | } 19 | } 20 | 21 | pub fn encode_vox(map: &Map, map_extent: Extent3i) -> DotVoxData 22 | where 23 | Map: Get, 24 | { 25 | let shape = map_extent.shape; 26 | let vox_extent = map_extent - map_extent.minimum; 27 | 28 | // VOX coordinates are limited to u8. 29 | assert!(shape <= Point3i::fill(std::u8::MAX as i32)); 30 | 31 | let size = dot_vox::Size { 32 | x: shape.x() as u32, 33 | y: shape.y() as u32, 34 | z: shape.z() as u32, 35 | }; 36 | 37 | let mut voxels = Vec::new(); 38 | for (vox_p, map_p) in vox_extent.iter_points().zip(map_extent.iter_points()) { 39 | if let VoxColor::Color(i) = map.get(map_p) { 40 | voxels.push(dot_vox::Voxel { 41 | x: vox_p.x() as u8, 42 | y: vox_p.y() as u8, 43 | z: vox_p.z() as u8, 44 | i, 45 | }); 46 | } 47 | } 48 | 49 | let model = dot_vox::Model { size, voxels }; 50 | 51 | DotVoxData { 52 | version: 150, 53 | models: vec![model], 54 | palette: Vec::new(), 55 | materials: Vec::new(), 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/func.rs: -------------------------------------------------------------------------------- 1 | //! Lattice map access traits implemented for functions and closures. 2 | //! 3 | //! This is particularly useful for sampling from signed-distance fields. 4 | //! 5 | //! ``` 6 | //! use building_blocks_core::prelude::*; 7 | //! use building_blocks_storage::prelude::*; 8 | //! 9 | //! let sample_extent = Extent3i::from_min_and_max(Point3i::fill(-15), Point3i::fill(15)); 10 | //! let mut sampled_sphere = Array3x1::fill(sample_extent, 0.0); 11 | //! 12 | //! 
copy_extent(&sample_extent, &Func(|p: Point3i| (p.dot(p) - 10) as f32), &mut sampled_sphere); 13 | //!``` 14 | 15 | use crate::prelude::{ForEach, Get, ReadExtent}; 16 | 17 | use building_blocks_core::prelude::*; 18 | 19 | use core::iter::{once, Once}; 20 | 21 | pub struct Func(pub F); 22 | 23 | impl Get for Func 24 | where 25 | F: Fn(Coord) -> T, 26 | { 27 | type Item = T; 28 | 29 | fn get(&self, c: Coord) -> T { 30 | (self.0)(c) 31 | } 32 | } 33 | 34 | impl ForEach> for Func 35 | where 36 | F: Fn(PointN) -> T, 37 | PointN: IntegerPoint, 38 | { 39 | type Item = T; 40 | 41 | fn for_each(&self, extent: &ExtentN, mut f: impl FnMut(PointN, Self::Item)) { 42 | for p in extent.iter_points() { 43 | f(p, (self.0)(p)) 44 | } 45 | } 46 | } 47 | 48 | impl<'a, F, N, T> ReadExtent<'a, N> for Func 49 | where 50 | F: 'a + Fn(PointN) -> T, 51 | PointN: IntegerPoint, 52 | { 53 | type Src = &'a F; 54 | type SrcIter = Once<(ExtentN, Self::Src)>; 55 | 56 | fn read_extent(&'a self, extent: &ExtentN) -> Self::SrcIter { 57 | once((*extent, &self.0)) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/image_conversions.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::*; 2 | 3 | pub use image; 4 | 5 | use building_blocks_core::prelude::*; 6 | 7 | use image::{ImageBuffer, Pixel}; 8 | 9 | pub fn encode_image( 10 | map: &Map, 11 | map_extent: Extent2i, 12 | ) -> ImageBuffer::Subpixel>> 13 | where 14 | T: Into
<P>
, 15 | Map: Get, 16 | P: Pixel + 'static, 17 | { 18 | let img_extent = map_extent - map_extent.minimum; 19 | let shape = img_extent.shape; 20 | assert!(shape.x() > 0); 21 | assert!(shape.y() > 0); 22 | let (width, height) = (shape.x() as u32, shape.y() as u32); 23 | 24 | let mut img = ImageBuffer::new(width, height); 25 | for (map_p, img_p) in map_extent.iter_points().zip(img_extent.iter_points()) { 26 | let pixel = map.get(map_p).into(); 27 | *img.get_pixel_mut(img_p.x() as u32, img_p.y() as u32) = pixel; 28 | } 29 | 30 | img 31 | } 32 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | clippy::type_complexity, 3 | clippy::needless_collect, 4 | clippy::too_many_arguments 5 | )] 6 | #![deny( 7 | rust_2018_compatibility, 8 | rust_2018_idioms, 9 | nonstandard_style, 10 | future_incompatible 11 | )] 12 | #![warn(clippy::doc_markdown)] 13 | #![doc = include_str!("crate_doc.md")] 14 | 15 | #[macro_use] 16 | pub mod access_traits; 17 | pub mod array; 18 | pub mod bitset; 19 | pub mod caching; 20 | pub mod chunk_tree; 21 | pub mod compression; 22 | pub mod func; 23 | #[doc(hidden)] 24 | pub mod multi_ptr; 25 | pub mod octree_set; 26 | pub mod signed_distance; 27 | pub mod transform_map; 28 | 29 | #[cfg(feature = "sled")] 30 | pub mod database; 31 | 32 | /// Used in many generic algorithms to check if a voxel is considered empty. 33 | pub trait IsEmpty { 34 | fn is_empty(&self) -> bool; 35 | } 36 | 37 | impl IsEmpty for bool { 38 | fn is_empty(&self) -> bool { 39 | !*self 40 | } 41 | } 42 | 43 | // Hash types to use for small keys like `PointN`. 44 | pub type SmallKeyHashMap = ahash::AHashMap; 45 | pub type SmallKeyHashSet = ahash::AHashSet; 46 | pub type SmallKeyBuildHasher = ahash::RandomState; 47 | 48 | #[doc(hidden)] 49 | pub mod prelude { 50 | pub use super::{ 51 | array::{IndexedArray, Local, Stride}, 52 | bitset::{AtomicBitset8, Bitset8}, 53 | chunk_tree::{ 54 | chunk_bounding_sphere, clipmap_chunks_intersecting_sphere, 55 | clipmap_new_chunks_intersecting_sphere, AmbientExtent, ChunkDownsampler, ChunkIndexer2, 56 | ChunkIndexer3, ChunkKey, ChunkKey2, ChunkKey3, ChunkNode, ChunkStorage, 57 | ChunkTreeBuilder, ChunkTreeConfig, ChunkUnits, ClipmapSlot2, ClipmapSlot3, 58 | IterChunkKeys, LodChange2, LodChange3, NodeState, PointDownsampler, SdfMeanDownsampler, 59 | UserChunk, 60 | }, 61 | compression::{BytesCompression, Compressed, Compression, FromBytesCompression}, 62 | func::Func, 63 | octree_set::{OctreeNode, OctreeSet, OctreeVisitor, VisitStatus}, 64 | signed_distance::{Sd16, Sd8, SignedDistance}, 65 | transform_map::TransformMap, 66 | IsEmpty, 67 | }; 68 | 69 | pub use super::access_traits::*; 70 | pub use super::array::compression::multichannel_aliases::*; 71 | pub use super::array::multichannel_aliases::*; 72 | pub use super::chunk_tree::builder::multichannel_aliases::*; 73 | pub use super::chunk_tree::storage::compressible::multichannel_aliases::*; 74 | pub use super::chunk_tree::storage::hash_map::multichannel_aliases::*; 75 | 76 | #[cfg(all(feature = "serde", feature = "bincode"))] 77 | pub use super::compression::BincodeCompression; 78 | #[cfg(feature = "lz4")] 79 | pub use super::compression::Lz4; 80 | #[cfg(feature = "snap")] 81 | pub use super::compression::Snappy; 82 | #[cfg(feature = "sled")] 83 | pub use super::database::{ 84 | ChunkDb, ChunkDb2, ChunkDb3, Delta, DeltaBatch, DeltaBatchBuilder, ReadResult, 
85 | ReadableChunkDb, 86 | }; 87 | #[cfg(feature = "sled-snapshots")] 88 | pub use super::database::{VersionedChunkDb, VersionedChunkDb2, VersionedChunkDb3}; 89 | } 90 | 91 | /// Includes all of `prelude` plus the extra-generic types and internal traits used for library development. 92 | #[doc(hidden)] 93 | pub mod dev_prelude { 94 | pub use super::prelude::*; 95 | 96 | pub use super::{ 97 | array::{ 98 | channels::{Channel, Channels, FastChannelsCompression}, 99 | compression::FastArrayCompression, 100 | Array, IndexedArray, 101 | }, 102 | chunk_tree::{ 103 | ChunkIndexer, ChunkTree, ChunkTree2, ChunkTree3, ChunkTreeBuilderNxM, 104 | CompressibleChunkStorage, HashMapChunkTree, 105 | }, 106 | SmallKeyHashMap, SmallKeyHashSet, 107 | }; 108 | 109 | #[cfg(feature = "sled")] 110 | pub use super::database::DatabaseKey; 111 | } 112 | 113 | #[cfg(feature = "dot_vox")] 114 | pub mod dot_vox_conversions; 115 | #[cfg(feature = "image")] 116 | pub mod image_conversions; 117 | #[cfg(feature = "vox-format")] 118 | pub mod vox_format; 119 | 120 | #[cfg(test)] 121 | mod test_utilities; 122 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/multi_ptr.rs: -------------------------------------------------------------------------------- 1 | use core::mem::MaybeUninit; 2 | 3 | /// Used for variadic conversion from `&(A, B, ...)` to `(&A, &B, ...)`. 4 | pub trait MultiRef<'a> { 5 | type Data; 6 | 7 | fn from_data_ref(data_ref: &'a Self::Data) -> Self; 8 | } 9 | 10 | impl<'a, T> MultiRef<'a> for &'a T { 11 | type Data = T; 12 | 13 | #[inline] 14 | fn from_data_ref(data_ref: &'a Self::Data) -> Self { 15 | data_ref 16 | } 17 | } 18 | 19 | /// Used for variadic conversion from `(*mut A, *mut B, ...)` to `(&'a mut A, &'a mut B, ...)`. 20 | pub trait IntoMultiMut<'a> { 21 | type MultiMut; 22 | 23 | fn into_multi_mut(self) -> Self::MultiMut; 24 | } 25 | 26 | impl<'a, T> IntoMultiMut<'a> for *mut T 27 | where 28 | T: 'a, 29 | { 30 | type MultiMut = &'a mut T; 31 | 32 | #[inline] 33 | fn into_multi_mut(self) -> Self::MultiMut { 34 | unsafe { &mut *self } 35 | } 36 | } 37 | 38 | /// Used for variadic conversion from `(*mut MaybeUninit, *mut MaybeUninit, ...)` to `(*mut A, *mut B)`. 39 | pub trait IntoMultiMutPtr { 40 | type Data; 41 | type Ptr: MultiMutPtr; 42 | 43 | /// # Safety 44 | /// `Self::Ptr` is intended to contain `*mut` pointers, so this carries the same safety concerns as any such pointer. 45 | unsafe fn into_multi_mut_ptr(self) -> Self::Ptr; 46 | } 47 | 48 | impl IntoMultiMutPtr for *mut MaybeUninit { 49 | type Data = T; 50 | type Ptr = *mut T; 51 | 52 | #[inline] 53 | unsafe fn into_multi_mut_ptr(self) -> Self::Ptr { 54 | (&mut *self).as_mut_ptr() 55 | } 56 | } 57 | 58 | /// Used for variadic copying of source data `(A, B, ...)` to destination pointers `(*mut A, *mut B, ...)`. 59 | pub trait MultiMutPtr { 60 | type Data; 61 | 62 | /// # Safety 63 | /// `self` is intended to contain `*mut` pointers, so this carries the same safety concerns as any such pointer. 64 | unsafe fn write(self, data: Self::Data); 65 | } 66 | 67 | impl MultiMutPtr for *mut T { 68 | type Data = T; 69 | 70 | #[inline] 71 | unsafe fn write(self, data: Self::Data) { 72 | self.write(data); 73 | } 74 | } 75 | 76 | macro_rules! 
impl_tuple { 77 | ( $( $var1:ident, $var2:ident : $t:ident ),+ ) => { 78 | impl<'a, $($t),+> MultiRef<'a> for ($($t,)+) 79 | where 80 | $($t: 'a + MultiRef<'a>,)+ 81 | { 82 | type Data = ($($t::Data,)+); 83 | 84 | #[inline] 85 | fn from_data_ref(data_ref: &'a Self::Data) -> Self { 86 | let ($($var1,)+) = data_ref; 87 | 88 | ($($t::from_data_ref($var1),)+) 89 | } 90 | } 91 | 92 | impl<'a, $($t),+> IntoMultiMut<'a> for ($(*mut $t,)+) 93 | where 94 | $($t: 'a,)+ 95 | { 96 | type MultiMut = ($(&'a mut $t,)+); 97 | 98 | #[inline] 99 | fn into_multi_mut(self) -> Self::MultiMut { 100 | let ($($var1,)+) = self; 101 | 102 | unsafe { ($(&mut *$var1,)+) } 103 | } 104 | } 105 | 106 | impl<$($t),+> IntoMultiMutPtr for ($($t,)+) 107 | where 108 | $($t: IntoMultiMutPtr),+ 109 | { 110 | type Data = ($($t::Data,)+); 111 | type Ptr = ($($t::Ptr,)+); 112 | 113 | #[inline] 114 | unsafe fn into_multi_mut_ptr(self) -> Self::Ptr { 115 | let ($($var1,)+) = self; 116 | 117 | ($($var1.into_multi_mut_ptr(),)+) 118 | } 119 | } 120 | 121 | impl<$($t),+> MultiMutPtr for ($($t,)+) 122 | where 123 | $($t: MultiMutPtr,)+ 124 | { 125 | type Data = ($($t::Data,)+); 126 | 127 | #[inline] 128 | unsafe fn write(self, data: Self::Data) { 129 | let ($($var1,)+) = self; 130 | let ($($var2,)+) = data; 131 | 132 | $( $var1.write($var2); )+ 133 | } 134 | } 135 | }; 136 | } 137 | 138 | impl_tuple! { a1, a2: A } 139 | impl_tuple! { a1, a2: A, b1, b2: B } 140 | impl_tuple! { a1, a2: A, b1, b2: B, c1, c2: C } 141 | impl_tuple! { a1, a2: A, b1, b2: B, c1, c2: C, d1, d2: D } 142 | impl_tuple! { a1, a2: A, b1, b2: B, c1, c2: C, d1, d2: D, e1, e2: E } 143 | impl_tuple! { a1, a2: A, b1, b2: B, c1, c2: C, d1, d2: D, e1, e2: E, f1, f2: F } 144 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/signed_distance.rs: -------------------------------------------------------------------------------- 1 | use bytemuck::{Pod, Zeroable}; 2 | 3 | #[cfg(feature = "serde")] 4 | use serde::{Deserialize, Serialize}; 5 | 6 | pub trait SignedDistance: Into { 7 | fn is_negative(&self) -> bool; 8 | } 9 | 10 | impl SignedDistance for f32 { 11 | #[inline] 12 | fn is_negative(&self) -> bool { 13 | *self < 0.0 14 | } 15 | } 16 | 17 | /// A signed distance value in the range `[-1.0, 1.0]` with 8 bits of precision. 18 | #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] 19 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 20 | pub struct Sd8(pub i8); 21 | /// A signed distance value in the range `[-1.0, 1.0]` with 16 bits of precision. 
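/// A hedged sketch of the `f32` round trip; quantization error is bounded by `PRECISION`:
///
/// ```ignore
/// let q = Sd16::from(-0.25f32);
/// let back = f32::from(q);
/// assert!((back - (-0.25)).abs() <= Sd16::PRECISION);
/// ```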
22 | #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] 23 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 24 | pub struct Sd16(pub i16); 25 | 26 | unsafe impl Zeroable for Sd8 {} 27 | unsafe impl Pod for Sd8 {} 28 | 29 | unsafe impl Zeroable for Sd16 {} 30 | unsafe impl Pod for Sd16 {} 31 | 32 | impl Sd8 { 33 | pub const RESOLUTION: f32 = std::i8::MAX as f32; 34 | pub const PRECISION: f32 = 1.0 / Self::RESOLUTION; 35 | pub const NEG_ONE: Self = Self(-std::i8::MAX); 36 | pub const ONE: Self = Self(std::i8::MAX); 37 | } 38 | 39 | impl Sd16 { 40 | pub const RESOLUTION: f32 = std::i16::MAX as f32; 41 | pub const PRECISION: f32 = 1.0 / Self::RESOLUTION; 42 | pub const NEG_ONE: Self = Self(-std::i16::MAX); 43 | pub const ONE: Self = Self(std::i16::MAX); 44 | } 45 | 46 | impl From for f32 { 47 | #[inline] 48 | fn from(s: Sd8) -> f32 { 49 | s.0 as f32 * Sd8::PRECISION 50 | } 51 | } 52 | impl From for Sd8 { 53 | #[inline] 54 | fn from(s: f32) -> Self { 55 | Sd8((Self::RESOLUTION * s.min(1.0).max(-1.0)) as i8) 56 | } 57 | } 58 | impl SignedDistance for Sd8 { 59 | #[inline] 60 | fn is_negative(&self) -> bool { 61 | self.0 < 0 62 | } 63 | } 64 | 65 | impl From for f32 { 66 | #[inline] 67 | fn from(s: Sd16) -> f32 { 68 | s.0 as f32 * Sd16::PRECISION 69 | } 70 | } 71 | impl From for Sd16 { 72 | #[inline] 73 | fn from(s: f32) -> Self { 74 | Sd16((Self::RESOLUTION * s.min(1.0).max(-1.0)) as i16) 75 | } 76 | } 77 | impl SignedDistance for Sd16 { 78 | #[inline] 79 | fn is_negative(&self) -> bool { 80 | self.0 < 0 81 | } 82 | } 83 | 84 | // ████████╗███████╗███████╗████████╗ 85 | // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ 86 | // ██║ █████╗ ███████╗ ██║ 87 | // ██║ ██╔══╝ ╚════██║ ██║ 88 | // ██║ ███████╗███████║ ██║ 89 | // ╚═╝ ╚══════╝╚══════╝ ╚═╝ 90 | 91 | #[cfg(test)] 92 | mod tests { 93 | use super::*; 94 | 95 | #[test] 96 | fn sd8_boundary_conversions() { 97 | assert_eq!(-1.0, f32::from(Sd8::NEG_ONE)); 98 | assert_eq!(1.0, f32::from(Sd8::ONE)); 99 | assert_eq!(0.0, f32::from(Sd8(0))); 100 | 101 | assert_eq!(Sd8::NEG_ONE, Sd8::from(-1.0)); 102 | assert_eq!(Sd8::ONE, Sd8::from(1.0)); 103 | assert_eq!(Sd8(0), Sd8::from(0.0)); 104 | } 105 | 106 | #[test] 107 | fn sd16_boundary_conversions() { 108 | assert_eq!(-1.0, f32::from(Sd16::NEG_ONE)); 109 | assert_eq!(1.0, f32::from(Sd16::ONE)); 110 | assert_eq!(0.0, f32::from(Sd16(0))); 111 | 112 | assert_eq!(Sd16::NEG_ONE, Sd16::from(-1.0)); 113 | assert_eq!(Sd16::ONE, Sd16::from(1.0)); 114 | assert_eq!(Sd16(0), Sd16::from(0.0)); 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /crates/building_blocks_storage/src/test_utilities.rs: -------------------------------------------------------------------------------- 1 | use crate::prelude::Array3x1; 2 | 3 | use building_blocks_core::prelude::*; 4 | 5 | // TODO: it would be nice if all crates could share this module, but it causes this issue: 6 | // https://github.com/rust-lang/cargo/issues/6765 7 | 8 | pub fn sphere_bit_array( 9 | array_edge_length: i32, 10 | inner_value: T, 11 | outer_value: T, 12 | ) -> (Array3x1, i32) 13 | where 14 | T: Copy, 15 | { 16 | let array_radius = array_edge_length / 2; 17 | let sphere_radius = array_radius - 1; 18 | let array_extent = Extent3i::from_min_and_shape( 19 | Point3i::fill(-array_radius), 20 | Point3i::fill(array_edge_length), 21 | ); 22 | 23 | let map = Array3x1::fill_with(array_extent, |p| { 24 | if p.norm() < sphere_radius as f32 { 25 | inner_value 26 | } else { 27 | outer_value 28 | } 29 | }); 30 | 
31 | (map, sphere_radius) 32 | } 33 | -------------------------------------------------------------------------------- /crates/utilities/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "utilities" 3 | version = "0.1.0" 4 | authors = ["Duncan "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | building_blocks_core = { path = "../../crates/building_blocks_core", default-features = false } 9 | building_blocks_storage = { path = "../../crates/building_blocks_storage", default-features = false } 10 | 11 | # Only needed for examples. 12 | simdnoise = { version = "3.1", optional = true } 13 | -------------------------------------------------------------------------------- /crates/utilities/src/data_sets.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::prelude::Array3x1; 3 | 4 | // TODO: it would be nice if all crates could share this module, but it causes this issue: 5 | // https://github.com/rust-lang/cargo/issues/6765 6 | 7 | pub fn sphere_bit_array( 8 | array_edge_length: i32, 9 | inner_value: T, 10 | outer_value: T, 11 | ) -> (Array3x1, i32) 12 | where 13 | T: Copy, 14 | { 15 | let array_radius = array_edge_length / 2; 16 | let sphere_radius = array_radius - 1; 17 | let array_extent = Extent3i::from_min_and_shape( 18 | Point3i::fill(-array_radius), 19 | Point3i::fill(array_edge_length), 20 | ); 21 | 22 | let map = Array3x1::fill_with(array_extent, |p| { 23 | if p.norm() < sphere_radius as f32 { 24 | inner_value 25 | } else { 26 | outer_value 27 | } 28 | }); 29 | 30 | (map, sphere_radius) 31 | } 32 | -------------------------------------------------------------------------------- /crates/utilities/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny( 2 | rust_2018_compatibility, 3 | rust_2018_idioms, 4 | nonstandard_style, 5 | unused, 6 | future_incompatible 7 | )] 8 | #![warn(clippy::doc_markdown)] 9 | pub mod data_sets; 10 | pub mod test; 11 | 12 | #[cfg(feature = "simdnoise")] 13 | pub mod noise; 14 | -------------------------------------------------------------------------------- /crates/utilities/src/noise.rs: -------------------------------------------------------------------------------- 1 | use building_blocks_core::prelude::*; 2 | use building_blocks_storage::prelude::{Array2x1, Array3x1}; 3 | use simdnoise::NoiseBuilder; 4 | 5 | pub fn noise_array2(extent: Extent2i, freq: f32, seed: i32, octaves: u8) -> Array2x1 { 6 | let min = Point2f::from(extent.minimum); 7 | let (noise, _min_val, _max_val) = NoiseBuilder::fbm_2d_offset( 8 | min.x(), 9 | extent.shape.x() as usize, 10 | min.y(), 11 | extent.shape.y() as usize, 12 | ) 13 | .with_freq(freq) 14 | .with_seed(seed) 15 | .with_octaves(octaves) 16 | .generate(); 17 | 18 | Array2x1::new_one_channel(extent, noise.into_boxed_slice()) 19 | } 20 | 21 | pub fn noise_array3(extent: Extent3i, freq: f32, seed: i32, octaves: u8) -> Array3x1 { 22 | let min = Point3f::from(extent.minimum); 23 | let (noise, _min_val, _max_val) = NoiseBuilder::fbm_3d_offset( 24 | min.x(), 25 | extent.shape.x() as usize, 26 | min.y(), 27 | extent.shape.y() as usize, 28 | min.z(), 29 | extent.shape.z() as usize, 30 | ) 31 | .with_freq(freq) 32 | .with_seed(seed) 33 | .with_octaves(octaves) 34 | .generate(); 35 | 36 | Array3x1::new_one_channel(extent, noise.into_boxed_slice()) 37 | } 38 | 
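// A hedged usage sketch (arbitrary parameters): sample a 64^3 region of FBM noise centered on
// the origin; the result is a dense `Array3x1<f32>` ready for meshing or chunked storage.
//
//     let extent = Extent3i::from_min_and_shape(Point3i::fill(-32), Point3i::fill(64));
//     let noise = noise_array3(extent, 0.05, 1234, 5);
//     assert_eq!(noise.extent().shape, Point3i::fill(64));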
-------------------------------------------------------------------------------- /crates/utilities/src/test.rs: --------------------------------------------------------------------------------
1 | /// Escapes the capturing of output from tests so it's visible even when the test succeeds.
2 | pub fn test_print(message: &str) {
3 |     use std::io::Write;
4 | 
5 |     std::io::stdout()
6 |         .lock()
7 |         .write_all(message.as_bytes())
8 |         .unwrap();
9 | }
10 | 
-------------------------------------------------------------------------------- /examples/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "building-blocks-examples"
3 | version = "0.7.0"
4 | edition = "2018"
5 | authors = ["Duncan "]
6 | description = "Examples for the building-blocks crate."
7 | license = "MIT"
8 | 
9 | [profile]
10 | dev = { opt-level = 2 }
11 | release = { lto = "thin" }
12 | bench = { lto = "thin" }
13 | 
14 | [features]
15 | trace = ["building-blocks/trace", "bevy_utilities/trace"]
16 | 
17 | [dev-dependencies]
18 | futures-lite = "1.12"
19 | ron = "0.6"
20 | serde = "1.0"
21 | structopt = "0.3"
22 | 
23 | bevy_utilities = { path = "bevy_utilities" }
24 | building-blocks = { path = "..", features = ["glam", "mesh", "sdfu"] }
25 | utilities = { path = "../crates/utilities", features = ["simdnoise"] }
26 | 
27 | [[example]]
28 | name = "mesh_showcase"
29 | path = "mesh_showcase/mesh_showcase.rs"
30 | 
31 | [[example]]
32 | name = "sdf_mesh"
33 | path = "sdf_mesh/sdf_mesh.rs"
34 | 
35 | [[example]]
36 | name = "quad_mesh_uvs"
37 | path = "quad_mesh_uvs/quad_mesh_uvs.rs"
38 | 
39 | [[example]]
40 | name = "array_texture_materials"
41 | path = "array_texture_materials/array_texture_materials.rs"
42 | 
43 | [[example]]
44 | name = "lod_terrain"
45 | path = "lod_terrain/lod_terrain.rs"
46 | 
-------------------------------------------------------------------------------- /examples/README.md: --------------------------------------------------------------------------------
1 | # Examples
2 | 
3 | The `examples/` directory is not in the `building-blocks` cargo workspace, so to run an example, you need to `cd examples`
4 | before running `cargo run --example foo`.
5 | 
6 | All examples use the Bevy engine. This list is roughly ordered by complexity.
7 | 
8 | ## SDF Mesh
9 | 
10 | A simple example of how to generate a mesh from signed distance voxels.
11 | 
12 | ```sh
13 | cargo run --example sdf_mesh
14 | ```
15 | 
16 | ![SDF Mesh](/examples/screenshots/sdf_mesh.png)
17 | 
18 | ## Mesh Showcase
19 | 
20 | A showcase of all the meshing algorithms, fetching samples from a `ChunkTree`.
21 | 
22 | ```sh
23 | cargo run --example mesh_showcase
24 | ```
25 | 
26 | Controls:
27 | 
28 | - Left and right arrow keys cycle through shapes
29 | - W key to toggle wireframes
30 | - F key to toggle flat shading
31 | 
32 | ![Mesh Showcase](/examples/screenshots/mesh_showcase.gif)
33 | 
34 | ## Quad Mesh UVs
35 | 
36 | A `greedy_quads` mesh of a cube with UV coordinates mapped on all faces. This is useful for seeing that textures are oriented
37 | correctly.
38 | 
39 | ```sh
40 | cargo run --example quad_mesh_uvs
41 | ```
42 | 
43 | ![Quad Mesh UVs](/examples/screenshots/quad_mesh_uvs.png)
44 | 
45 | ## Array Texture Materials
46 | 
47 | Shows how to use an "array texture" to give each type of voxel a different material.
48 | 
49 | ```sh
50 | cargo run --example array_texture_materials
51 | ```
52 | 
53 | ![Array Texture Materials](/examples/screenshots/array_texture_materials.png)
54 | 
55 | ## LOD Terrain
56 | 
57 | A larger-scale example of terrain generated with 3D fractional Brownian motion
58 | ([SIMDnoise](https://docs.rs/simdnoise/3.1.6/simdnoise/) crate). The further chunks are from the camera, the more they get
59 | downsampled. Chunk meshes are dynamically generated as the camera moves.
60 | 
61 | You can run with either a blocky map or a smooth map.
62 | 
63 | ```sh
64 | cargo run --example lod_terrain blocky
65 | cargo run --example lod_terrain smooth
66 | ```
67 | 
68 | Controls:
69 | 
70 | - WASD to move the camera in the XZ plane
71 | - Shift and spacebar to move the camera on the Y axis
72 | - Ctrl to sprint
73 | - Mouse to rotate the camera
74 | 
75 | ![LOD Terrain](/examples/screenshots/lod_terrain.png)
76 | 
77 | ## Official Related Projects
78 | 
79 | - [feldspar](https://github.com/bonsairobo/feldspar): A smooth voxel plugin for Bevy Engine
80 | - [feldspar-editor](https://github.com/bonsairobo/feldspar-editor): A map editor for feldspar
81 | 
82 | ## Community Projects Using building-blocks
83 | 
84 | - [Colonize](https://github.com/indiv0/colonize): A 3D web game similar to Dwarf Fortress
85 | - [Counterproduction](https://github.com/Counterproduction-game/Counterproduction): A game about competitive spaceship building
86 | - [Minkraft](https://github.com/superdump/minkraft): A Minecraft clone
87 | 
-------------------------------------------------------------------------------- /examples/array_texture_materials/camera_rotation.rs: --------------------------------------------------------------------------------
1 | use bevy_utilities::bevy::prelude::*;
2 | 
3 | pub struct CameraRotationState {
4 |     camera: Entity,
5 | }
6 | 
7 | impl CameraRotationState {
8 |     pub fn new(camera: Entity) -> Self {
9 |         Self { camera }
10 |     }
11 | }
12 | 
13 | pub fn camera_rotation_system(
14 |     state: Res<CameraRotationState>,
15 |     time: Res