├── Cargo.toml
├── README.md
├── img
├── Npix0.jpg
├── Npix1.jpg
├── Npix10.jpg
├── Npix11.jpg
├── Npix2.jpg
├── Npix3.jpg
├── Npix4.jpg
├── Npix5.jpg
├── Npix6.jpg
├── Npix7.jpg
├── Npix8.jpg
└── Npix9.jpg
├── index.html
├── package.json
├── screenshot.png
├── src
├── angle.rs
├── lib.rs
├── main.rs
├── math.rs
├── projection.rs
├── render_desc.rs
├── shaders
│ ├── allsky.frag
│ └── allsky.vert
├── texture.rs
├── time.rs
├── triangulation.rs
└── vertex.rs
└── vite.config.ts
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "wgpu-sky-rendering"
3 | version = "0.1.0"
4 | authors = ["Matthieu Baumann "]
5 | edition = "2018"
6 |
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 | [dependencies]
9 | image = "0.23.14"
10 | winit = { version = "0.29", features = ["rwh_05"]}
11 | env_logger = "0.10"
12 | log = "0.4"
13 | futures = "0.3"
14 | bytemuck = { version = "1.4", features = [ "derive" ] }
15 | cgmath = "0.18.0"
16 | num-traits = "0.2.14"
17 | byte-slice-cast = "1.0.0"
18 | wgpu = { version = "22.1.0", features = [ "glsl" ] }
19 | naga = "22.1.0"
20 | pollster = "0.3"
21 |
22 | [lib]
23 | crate-type = ["cdylib", "rlib"]
24 |
25 | #[build-dependencies]
26 | #anyhow = "1.0"
27 | #fs_extra = "1.2.0"
28 | #glob = "0.3"
29 | #shaderc = "0.8"
30 |
31 | # used to generate SPIR-V for the Web target
32 | #[target.'cfg(target_arch = "wasm32")'.dependencies.naga]
33 | #git = "https://github.com/gfx-rs/naga"
34 | #tag = "gfx-25"
35 | #features = ["wgsl-in", "spv-out"]
36 |
37 | [target.'cfg(target_arch = "wasm32")'.dependencies]
38 | wasm-bindgen = "0.2.73" # remember to change version in wiki as well
39 | console_error_panic_hook = "0.1.6"
40 | console_log = "1.0"
41 | wgpu = { version = "22.1.0", features = ["webgl", "glsl"]}
42 | web-sys = { version = "0.3", features = [
43 | "Document",
44 | "Window",
45 | "Element",
46 | "HtmlCanvasElement",
47 | "Performance"
48 | ]}
49 | js-sys = "0.3.50"
50 | wasm-bindgen-futures = "0.4.30"
51 | getrandom = { version = "0.2", features = ["js"] }
52 |
53 | [profile.release]
54 | opt-level = "z"
55 | debug = false
56 | debug-assertions = false
57 | overflow-checks = false
58 | lto = true
59 | panic = "abort"
60 | incremental = false
61 | codegen-units = 16
62 | rpath = false
63 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # All sky rendering using WebGPU
2 |
3 | ## Live demo [here](https://bmatthieu3.github.io/sky_rendering_wgpu/index.html)
4 |
5 | ## What is it?
6 |
7 | An example showing the rendering of allsky projections using wgpu-rs.
8 | Allsky projections are widely used to represent the Earth or the sky on a 2D plane.
9 | This method does the projection per-pixel and runs in realtime.
10 |
11 | ![Aitoff projection rendering of the sky](./screenshot.png)
12 |
13 | Supported projections are:
14 | * [Aitoff](https://en.wikipedia.org/wiki/Aitoff_projection)
15 | * [Mercator](https://en.wikipedia.org/wiki/Mercator_projection)
16 | * [Mollweide](https://en.wikipedia.org/wiki/Mollweide_projection)
17 | * [Azimuthal equidistant](https://en.wikipedia.org/wiki/Azimuthal_equidistant_projection)
18 | * [Gnomonic](https://en.wikipedia.org/wiki/Gnomonic_projection)
19 |
20 | See this [link](https://en.wikipedia.org/wiki/List_of_map_projections) for information about map projections.
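To make the "per-pixel" idea concrete, here is a minimal, self-contained Rust sketch. It is an illustration, not the code path the renderer actually runs: it mirrors the math of `Aitoff::clip_to_world_space` in `src/projection.rs` and the screen-to-clip conversion done in `generate_position` (`src/lib.rs`), but the helper name `aitoff_clip_to_world` and the standalone `main` are made up for the example. In the real code the mapping is generic over the `Projection` trait and the resulting sky directions are what the fragment shader ultimately uses to sample the HEALPix tile texture.

```rust
// Minimal standalone sketch of the per-pixel approach: for every pixel of a
// square target, go from screen space to clip space, then invert the Aitoff
// projection to get the unit vector on the sky sphere that the pixel looks at.

use std::f32::consts::PI;

/// Inverse Aitoff projection: clip-space (x, y) in [-1, 1]^2 -> unit direction.
/// Returns `None` for pixels falling outside the projection ellipse.
fn aitoff_clip_to_world(x: f32, y: f32) -> Option<[f32; 3]> {
    // The Aitoff map is bounded by an ellipse with semi-axes (1, 0.5).
    if x * x + 4.0 * y * y >= 1.0 {
        return None;
    }
    let u = x * PI * 0.5;
    let v = y * PI;
    let c = (u * u + v * v).sqrt();
    let (phi, mut theta) = if c != 0.0 {
        ((v * c.sin() / c).asin(), (u * c.sin()).atan2(c * c.cos()))
    } else {
        (v.asin(), u.atan())
    };
    theta *= 2.0;
    // Spherical (theta, phi) -> Cartesian unit vector.
    Some([theta.sin() * phi.cos(), phi.sin(), theta.cos() * phi.cos()])
}

fn main() {
    // 512 matches the tile/texture size used elsewhere in the repository.
    let size = 512_u32;
    let mut inside = 0_u32;
    for py in 0..size {
        for px in 0..size {
            // Screen space -> clip space in [-1, 1], as in `generate_position`.
            let cx = 2.0 * (px as f32 / size as f32) - 1.0;
            let cy = 2.0 * (py as f32 / size as f32) - 1.0;
            if aitoff_clip_to_world(cx, cy).is_some() {
                inside += 1;
            }
        }
    }
    // Roughly PI / 8 of the [-1, 1]^2 square falls inside the Aitoff ellipse.
    println!(
        "{:.1}% of the pixels map onto the sky",
        100.0 * inside as f32 / (size * size) as f32
    );
}
```

Running the sketch prints the fraction of pixels that actually land on the sky, about 39% for Aitoff, since the projection fills an ellipse of area π/2 inside a square of area 4.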
21 | 22 | ## How it works 23 | 24 | If you want more precision about the algorithm and techniques used: 25 | * Read this [article](https://bmatthieu3.github.io/posts/allskyrendering/) I wrote on my personal blog. 26 | * See my talk at ADASS 2020 at this [link](https://www.youtube.com/watch?v=TILtJOiiRoc). 27 | 28 | ### Test as a native application 29 | 30 | ```sh 31 | # Run 32 | $ cargo run --release 33 | ``` 34 | 35 | ### Test on the browser (WIP) 36 | 37 | 1. Install [wasm-pack](https://rustwasm.github.io/wasm-pack/installer/) 38 | 39 | 2. Install dev dependencies (e.g. vite) 40 | 41 | ```sh 42 | npm install 43 | ``` 44 | 45 | 3. Test the web (WASM) version 46 | 47 | ```sh 48 | npm run demo 49 | ``` 50 | -------------------------------------------------------------------------------- /img/Npix0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix0.jpg -------------------------------------------------------------------------------- /img/Npix1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix1.jpg -------------------------------------------------------------------------------- /img/Npix10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix10.jpg -------------------------------------------------------------------------------- /img/Npix11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix11.jpg -------------------------------------------------------------------------------- /img/Npix2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix2.jpg -------------------------------------------------------------------------------- /img/Npix3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix3.jpg -------------------------------------------------------------------------------- /img/Npix4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix4.jpg -------------------------------------------------------------------------------- /img/Npix5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix5.jpg -------------------------------------------------------------------------------- /img/Npix6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix6.jpg -------------------------------------------------------------------------------- /img/Npix7.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix7.jpg -------------------------------------------------------------------------------- /img/Npix8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix8.jpg -------------------------------------------------------------------------------- /img/Npix9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/img/Npix9.jpg -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Learn WGPU 9 | 19 | 20 | 21 | 22 |

WebGPU demo of a sky raytracer

23 |

This displays an unresolved rendering of the DSS survey images

24 |

Commands
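The key bindings handled in `src/lib.rs`: the Left/Right arrow keys switch between the projections, Enter switches to borderless fullscreen, and Escape quits (native build only).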

25 | 29 | 30 | 31 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "wgpu-sky-rendering", 3 | "version": "1.0.0", 4 | "description": "A simple sky rendering example featuring wgpu basic setup", 5 | "main": "pkg/wgpu-sky-rendering.js", 6 | "scripts": { 7 | "wasm-dev": "wasm-pack build --target web", 8 | "wasm": "wasm-pack build --target web --release", 9 | "build": "npm run wasm && vite build", 10 | "dev": "cargo watch -s \"npm run wasm-dev\"", 11 | "serve": "vite", 12 | "demo": "npm run wasm && vite", 13 | "test": "echo \"Error: no test specified\" && exit 1" 14 | }, 15 | "author": "", 16 | "license": "ISC", 17 | "dependencies": { 18 | "vite": "^5.4.2", 19 | "vite-plugin-top-level-await": "^1.4.4", 20 | "vite-plugin-wasm": "^3.3.0", 21 | "vite-plugin-wasm-pack": "^0.1.12" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bmatthieu3/wgpu-sky-rendering/ba99e150d87bdf6a07bd6b0ad4b678c05fc482c5/screenshot.png -------------------------------------------------------------------------------- /src/angle.rs: -------------------------------------------------------------------------------- 1 | use cgmath::BaseFloat; 2 | // ArcDeg wrapper structure 3 | #[derive(Clone, Copy)] 4 | #[allow(dead_code)] 5 | pub struct ArcDeg(pub T); 6 | 7 | impl ArcDeg 8 | where 9 | T: BaseFloat, 10 | { 11 | #[allow(dead_code)] 12 | fn get_frac_minutes(&self) -> ArcMin { 13 | let deg = *self; 14 | 15 | let frac = deg.fract(); 16 | let minutes_per_degree = T::from(60_f32).unwrap(); 17 | ArcMin(frac * minutes_per_degree) 18 | } 19 | 20 | #[allow(dead_code)] 21 | fn truncate(&mut self) { 22 | *self = Self((*self).trunc()); 23 | } 24 | /*fn round(&mut self) { 25 | *self = Self((*self).round()); 26 | }*/ 27 | } 28 | 29 | use cgmath::{Deg, Rad}; 30 | // Convert a Rad to an ArcDeg 31 | impl From> for ArcDeg 32 | where 33 | T: BaseFloat, 34 | { 35 | fn from(angle: Rad) -> Self { 36 | let deg: Deg = angle.into(); 37 | ArcDeg(deg.0) 38 | } 39 | } 40 | // Convert an ArcMin to a Rad 41 | impl From> for Rad 42 | where 43 | T: BaseFloat, 44 | { 45 | fn from(degrees: ArcDeg) -> Self { 46 | let deg = Deg(*degrees); 47 | deg.into() 48 | } 49 | } 50 | 51 | use core::ops::Deref; 52 | impl Deref for ArcDeg 53 | where 54 | T: BaseFloat, 55 | { 56 | type Target = T; 57 | 58 | fn deref(&'_ self) -> &'_ Self::Target { 59 | &self.0 60 | } 61 | } 62 | 63 | // ArcMin wrapper structure 64 | #[derive(Clone, Copy)] 65 | #[allow(dead_code)] 66 | pub struct ArcMin(pub T); 67 | 68 | impl ArcMin 69 | where 70 | T: BaseFloat, 71 | { 72 | #[allow(dead_code)] 73 | fn get_frac_seconds(&self) -> ArcSec { 74 | let min = *self; 75 | 76 | let frac = min.fract(); 77 | let seconds_per_minute = T::from(60_f32).unwrap(); 78 | ArcSec(seconds_per_minute * frac) 79 | } 80 | 81 | /*fn truncate(&mut self) { 82 | *self = Self((*self).trunc()); 83 | }*/ 84 | } 85 | 86 | // Convert a Rad to an ArcMin 87 | impl From> for ArcMin 88 | where 89 | T: BaseFloat, 90 | { 91 | fn from(angle: Rad) -> Self { 92 | let deg: Deg = angle.into(); 93 | 94 | // There is 60 minutes in one degree 95 | let minutes_per_degree = T::from(60_f32).unwrap(); 96 | let minutes = deg.0 * minutes_per_degree; 97 | ArcMin(minutes) 98 | } 99 | } 100 | // Convert an ArcMin 
to a Rad 101 | impl From> for Rad 102 | where 103 | T: BaseFloat, 104 | { 105 | fn from(minutes: ArcMin) -> Self { 106 | let minutes_per_degree = T::from(60_f32).unwrap(); 107 | let deg: Deg = Deg(*minutes / minutes_per_degree); 108 | 109 | deg.into() 110 | } 111 | } 112 | 113 | impl Deref for ArcMin 114 | where 115 | T: BaseFloat, 116 | { 117 | type Target = T; 118 | 119 | fn deref(&'_ self) -> &'_ Self::Target { 120 | &self.0 121 | } 122 | } 123 | 124 | // ArcSec wrapper structure 125 | 126 | #[derive(Clone, Copy)] 127 | pub struct ArcSec(pub T); 128 | 129 | impl ArcSec 130 | where 131 | T: BaseFloat, 132 | { 133 | fn _truncate(&mut self) { 134 | *self = Self((*self).trunc()); 135 | } 136 | } 137 | 138 | impl From> for ArcSec 139 | where 140 | T: BaseFloat, 141 | { 142 | fn from(angle: Rad) -> Self { 143 | let deg: Deg = angle.into(); 144 | 145 | // There is 3600 seconds in one degree 146 | let seconds_per_degree = T::from(3600_f32).unwrap(); 147 | let seconds = deg.0 * seconds_per_degree; 148 | ArcSec(seconds) 149 | } 150 | } 151 | // Convert an ArcMin to a Rad 152 | impl From> for Rad 153 | where 154 | T: BaseFloat, 155 | { 156 | fn from(seconds: ArcSec) -> Self { 157 | let seconds_per_degree = T::from(3600_f32).unwrap(); 158 | let deg: Deg = Deg(seconds.0 / seconds_per_degree); 159 | 160 | deg.into() 161 | } 162 | } 163 | 164 | impl Deref for ArcSec 165 | where 166 | T: BaseFloat, 167 | { 168 | type Target = T; 169 | 170 | fn deref(&'_ self) -> &'_ Self::Target { 171 | &self.0 172 | } 173 | } 174 | 175 | #[derive(Clone, Copy, Debug, Eq, Hash)] 176 | #[repr(C)] 177 | pub struct Angle(pub S); 178 | impl Angle 179 | where 180 | S: BaseFloat, 181 | { 182 | pub fn to_radians(&self) -> S { 183 | self.0 184 | } 185 | 186 | pub fn new>>(angle: T) -> Angle { 187 | let radians: Rad = angle.into(); 188 | Angle(radians.0) 189 | } 190 | 191 | pub fn cos(&self) -> S { 192 | self.0.cos() 193 | } 194 | 195 | pub fn sin(&self) -> S { 196 | self.0.sin() 197 | } 198 | 199 | pub fn tan(&self) -> S { 200 | self.0.tan() 201 | } 202 | 203 | pub fn asin(self) -> S { 204 | self.0.asin() 205 | } 206 | 207 | pub fn acos(self) -> S { 208 | self.0.acos() 209 | } 210 | 211 | pub fn atan(self) -> S { 212 | self.0.atan() 213 | } 214 | 215 | pub fn atan2(self, other: Self) -> S { 216 | self.0.atan2(other.0) 217 | } 218 | 219 | pub fn floor(self) -> Self { 220 | Angle(self.0.floor()) 221 | } 222 | 223 | pub fn ceil(self) -> Self { 224 | Angle(self.0.ceil()) 225 | } 226 | 227 | pub fn round(self) -> Self { 228 | Angle(self.0.round()) 229 | } 230 | 231 | pub fn trunc(self) -> Self { 232 | Angle(self.0.trunc()) 233 | } 234 | 235 | pub fn fract(self) -> S { 236 | self.0.fract() 237 | } 238 | 239 | pub fn abs(self) -> Self { 240 | Angle(self.0.abs()) 241 | } 242 | 243 | pub fn max(self, other: Self) -> Self { 244 | Angle(self.0.max(other.0)) 245 | } 246 | 247 | pub fn min(self, other: Self) -> Self { 248 | Angle(self.0.min(other.0)) 249 | } 250 | 251 | pub fn min_value() -> Self { 252 | Angle(S::min_value()) 253 | } 254 | pub fn max_value() -> Self { 255 | Angle(S::max_value()) 256 | } 257 | } 258 | 259 | // Convert from and to Rad 260 | impl From> for Angle 261 | where 262 | S: BaseFloat, 263 | { 264 | fn from(rad: Rad) -> Self { 265 | Angle(rad.0) 266 | } 267 | } 268 | impl From> for Rad 269 | where 270 | S: BaseFloat, 271 | { 272 | fn from(angle: Angle) -> Self { 273 | Rad(angle.0) 274 | } 275 | } 276 | /* 277 | trait AngleUnit: Into> 278 | where 279 | S: BaseFloat 280 | {} 281 | 282 | impl AngleUnit for ArcSec {} 283 | 
*/ 284 | impl PartialEq for Angle 285 | where 286 | S: BaseFloat, 287 | T: Into> + Clone + Copy, 288 | { 289 | fn eq(&self, other: &T) -> bool { 290 | let angle: Angle = (*other).into(); 291 | angle.0 == self.0 292 | } 293 | } 294 | 295 | use std::cmp::PartialOrd; 296 | impl PartialOrd for Angle 297 | where 298 | S: BaseFloat, 299 | T: Into> + Clone + Copy, 300 | { 301 | fn partial_cmp(&self, other: &T) -> Option { 302 | let angle: Angle = (*other).into(); 303 | self.0.partial_cmp(&angle.0) 304 | } 305 | } 306 | 307 | // Convert from and to ArcDeg 308 | impl From> for Angle 309 | where 310 | S: BaseFloat, 311 | { 312 | fn from(deg: ArcDeg) -> Self { 313 | let rad: Rad = deg.into(); 314 | Angle(rad.0) 315 | } 316 | } 317 | impl From> for ArcDeg 318 | where 319 | S: BaseFloat, 320 | { 321 | fn from(angle: Angle) -> Self { 322 | let rad: Rad = angle.into(); 323 | let deg: Deg = rad.into(); 324 | ArcDeg(deg.0) 325 | } 326 | } 327 | 328 | // Convert from ArcMin 329 | impl From> for Angle 330 | where 331 | S: BaseFloat, 332 | { 333 | fn from(min: ArcMin) -> Self { 334 | let rad: Rad = min.into(); 335 | Angle(rad.0) 336 | } 337 | } 338 | // Convert from ArcSec 339 | impl From> for Angle 340 | where 341 | S: BaseFloat, 342 | { 343 | fn from(sec: ArcSec) -> Self { 344 | let rad: Rad = sec.into(); 345 | Angle(rad.0) 346 | } 347 | } 348 | /* 349 | impl PartialEq for Angle 350 | where 351 | S: BaseFloat + !AngleUnit, 352 | { 353 | fn eq(&self, other: &S) -> bool { 354 | self.0 == *other 355 | } 356 | } 357 | */ 358 | use std::cmp::Ordering; 359 | /*impl PartialOrd for Angle 360 | where 361 | S: BaseFloat, 362 | { 363 | fn partial_cmp(&self, other: &S) -> Option { 364 | self.0.partial_cmp(other) 365 | } 366 | }*/ 367 | 368 | use std::ops::Div; 369 | impl Div for Angle 370 | where 371 | S: BaseFloat, 372 | { 373 | type Output = Self; 374 | 375 | fn div(self, rhs: Self) -> Self::Output { 376 | let angle = self.0 / rhs.0; 377 | Angle(angle) 378 | } 379 | } 380 | impl Div for Angle 381 | where 382 | S: BaseFloat, 383 | { 384 | type Output = Self; 385 | 386 | fn div(self, rhs: S) -> Self::Output { 387 | let angle = self.0 / rhs; 388 | Angle(angle) 389 | } 390 | } 391 | 392 | use std::ops::Mul; 393 | impl Mul for Angle 394 | where 395 | S: BaseFloat, 396 | { 397 | type Output = Self; 398 | 399 | fn mul(self, rhs: Self) -> Self::Output { 400 | let angle = self.0 * rhs.0; 401 | Angle(angle) 402 | } 403 | } 404 | impl Mul for Angle 405 | where 406 | S: BaseFloat, 407 | { 408 | type Output = Self; 409 | 410 | fn mul(self, rhs: S) -> Self::Output { 411 | let angle = self.0 * rhs; 412 | Angle(angle) 413 | } 414 | } 415 | 416 | use std::ops::Sub; 417 | impl Sub for Angle 418 | where 419 | S: BaseFloat, 420 | { 421 | type Output = Self; 422 | 423 | fn sub(self, other: Self) -> Self::Output { 424 | let angle = self.0 - other.0; 425 | Angle(angle) 426 | } 427 | } 428 | impl Sub for Angle 429 | where 430 | S: BaseFloat, 431 | { 432 | type Output = Self; 433 | 434 | fn sub(self, other: S) -> Self::Output { 435 | let angle = self.0 - other; 436 | Angle(angle) 437 | } 438 | } 439 | 440 | use std::ops::Add; 441 | impl Add for Angle 442 | where 443 | S: BaseFloat, 444 | { 445 | type Output = Self; 446 | 447 | fn add(self, other: Self) -> Self::Output { 448 | let angle = self.0 + other.0; 449 | Angle(angle) 450 | } 451 | } 452 | impl Add for Angle 453 | where 454 | S: BaseFloat, 455 | { 456 | type Output = Self; 457 | 458 | fn add(self, other: S) -> Self::Output { 459 | let angle = self.0 + other; 460 | Angle(angle) 461 | } 
462 | } 463 | 464 | use std::ops::AddAssign; 465 | impl AddAssign for Angle 466 | where 467 | S: BaseFloat, 468 | { 469 | fn add_assign(&mut self, other: S) { 470 | *self = *self + other; 471 | } 472 | } 473 | impl AddAssign> for Angle 474 | where 475 | S: BaseFloat, 476 | { 477 | fn add_assign(&mut self, other: Angle) { 478 | *self = *self + other; 479 | } 480 | } 481 | 482 | use std::ops::SubAssign; 483 | impl SubAssign for Angle 484 | where 485 | S: BaseFloat, 486 | { 487 | fn sub_assign(&mut self, other: S) { 488 | *self = *self - other; 489 | } 490 | } 491 | impl SubAssign> for Angle 492 | where 493 | S: BaseFloat, 494 | { 495 | fn sub_assign(&mut self, other: Angle) { 496 | *self = *self - other; 497 | } 498 | } 499 | 500 | use std::ops::Rem; 501 | impl Rem for Angle 502 | where 503 | S: BaseFloat, 504 | { 505 | type Output = Self; 506 | 507 | fn rem(self, other: Self) -> Self::Output { 508 | let angle = self.0 % other.0; 509 | Angle(angle) 510 | } 511 | } 512 | 513 | use std::ops::Neg; 514 | impl Neg for Angle 515 | where 516 | S: BaseFloat, 517 | { 518 | type Output = Self; 519 | fn neg(self) -> Self::Output { 520 | Angle(-self.0) 521 | } 522 | } 523 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate byte_slice_cast; 2 | 3 | #[cfg(target_arch="wasm32")] 4 | use wasm_bindgen::prelude::*; 5 | #[cfg(target_arch="wasm32")] 6 | extern crate console_error_panic_hook; 7 | 8 | use std::iter; 9 | 10 | use wgpu::util::DeviceExt; 11 | use winit::{ 12 | event::*, 13 | event_loop::EventLoop, 14 | window::{Window, WindowBuilder}, 15 | }; 16 | use winit::keyboard::PhysicalKey; 17 | use winit::keyboard::KeyCode; 18 | use winit::window::Fullscreen; 19 | mod texture; 20 | mod vertex; 21 | mod time; 22 | 23 | use time::Clock; 24 | use vertex::Vertex; 25 | use texture::Texture; 26 | use crate::math::Vec4; 27 | const NUM_PROJECTIONS: i32 = 6; 28 | 29 | struct State<'a> { 30 | surface: wgpu::Surface<'a>, 31 | device: wgpu::Device, 32 | queue: wgpu::Queue, 33 | config: wgpu::SurfaceConfiguration, 34 | size: winit::dpi::PhysicalSize, 35 | 36 | // The window must be declared after the surface so 37 | // it gets dropped after it as the surface contains 38 | // unsafe references to the window's resources. 
39 | window: &'a Window, 40 | 41 | render_pipeline: wgpu::RenderPipeline, 42 | vertex_buffer: wgpu::Buffer, 43 | index_buffer: wgpu::Buffer, 44 | num_indices: u32, 45 | 46 | map_texture: texture::Texture, 47 | texture_bind_group_layout: wgpu::BindGroupLayout, 48 | diffuse_bind_group: wgpu::BindGroup, 49 | 50 | // uniforms 51 | rot_mat_buf: wgpu::Buffer, 52 | window_size_buf: wgpu::Buffer, 53 | 54 | clock: Clock, 55 | } 56 | 57 | mod angle; 58 | mod math; 59 | mod projection; 60 | mod triangulation; 61 | use crate::projection::*; 62 | use crate::triangulation::Triangulation; 63 | use math::Vec2; 64 | use crate::math::Vec3; 65 | fn generate_position>(size: u32) -> Vec { 66 | let (w, h) = (size as f32, size as f32); 67 | let mut data = vec![]; 68 | 69 | for y in 0..(h as u32) { 70 | for x in 0..(w as u32) { 71 | let xy = Vec2::new(x, y); 72 | let clip_xy = Vec2::new( 73 | 2.0 * ((xy.x as f32) / (w as f32)) - 1.0, 74 | 2.0 * ((xy.y as f32) / (h as f32)) - 1.0, 75 | ); 76 | if let Some(pos) = P::clip_to_world_space(&clip_xy) { 77 | let pos = Vec3::new( 78 | pos.x * 0.5 + 0.5, 79 | pos.y * 0.5 + 0.5, 80 | pos.z * 0.5 + 0.5, 81 | ); 82 | 83 | data.extend(&[(pos.x * 256.0) as u8, (pos.y * 256.0) as u8, (pos.z * 256.0) as u8, 255]); 84 | } else { 85 | data.extend(&[255, 255, 255, 255]); 86 | } 87 | } 88 | } 89 | 90 | data 91 | } 92 | 93 | pub fn create_position_texture>( 94 | device: &wgpu::Device, 95 | queue: &wgpu::Queue, 96 | size: u32, 97 | ) -> Texture { 98 | let texels = generate_position::

(size); 99 | let bytes = texels.as_slice(); 100 | 101 | let dimensions = (size, size, 1); 102 | let num_bytes_per_pixel = 4; 103 | Texture::from_raw_bytes::(&device, &queue, Some(bytes), dimensions, num_bytes_per_pixel, "position") 104 | } 105 | 106 | use crate::math::Mat4; 107 | impl<'a> State<'a> { 108 | async fn new(window: &'a Window) -> Self { 109 | let size = window.inner_size(); 110 | 111 | // The instance is a handle to our GPU 112 | // BackendBit::all => Vulkan + Metal + DX12 + Browser WebGPU 113 | let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { 114 | #[cfg(not(target_arch="wasm32"))] 115 | backends: wgpu::Backends::PRIMARY, 116 | #[cfg(target_arch="wasm32")] 117 | backends: wgpu::Backends::GL, 118 | ..Default::default() 119 | }); 120 | 121 | let surface = instance.create_surface(window).unwrap(); 122 | 123 | let adapter = instance 124 | .request_adapter(&wgpu::RequestAdapterOptions { 125 | power_preference: wgpu::PowerPreference::default(), 126 | compatible_surface: Some(&surface), 127 | force_fallback_adapter: false, 128 | }) 129 | .await 130 | .unwrap(); 131 | 132 | let (device, queue) = adapter 133 | .request_device( 134 | &wgpu::DeviceDescriptor { 135 | required_features: wgpu::Features::empty(), 136 | // favor performane over the memory usage 137 | memory_hints: Default::default(), 138 | // WebGL doesn't support all of wgpu's features, so if 139 | // we're building for the web, we'll have to disable some. 140 | required_limits: if cfg!(target_arch = "wasm32") { 141 | wgpu::Limits { 142 | max_texture_dimension_3d: 512, 143 | ..wgpu::Limits::downlevel_webgl2_defaults() 144 | } 145 | } else { 146 | wgpu::Limits::default() 147 | }, 148 | label: None, 149 | }, 150 | None, // Trace path 151 | ) 152 | .await 153 | .unwrap(); 154 | 155 | let surface_caps = surface.get_capabilities(&adapter); 156 | // Shader code in this tutorial assumes an sRGB surface texture. Using a different 157 | // one will result in all the colors coming out darker. If you want to support non 158 | // sRGB surfaces, you'll need to account for that when drawing to the frame. 
159 | let surface_format = surface_caps.formats.iter() 160 | .find(|f| f.is_srgb()) 161 | .copied() 162 | .unwrap_or(surface_caps.formats[0]); 163 | let config = wgpu::SurfaceConfiguration { 164 | usage: wgpu::TextureUsages::RENDER_ATTACHMENT, 165 | format: surface_format, 166 | width: size.width, 167 | height: size.height, 168 | present_mode: surface_caps.present_modes[0], 169 | alpha_mode: surface_caps.alpha_modes[0], 170 | view_formats: vec![surface_format.add_srgb_suffix()], 171 | desired_maximum_frame_latency: 2, 172 | }; 173 | 174 | /*let bytes = include_bytes!("../img/map.png"); 175 | let img = image::load_from_memory(bytes).unwrap(); 176 | let map_texture = texture::Texture::from_image(&device, &queue, &img, "map.png");*/ 177 | 178 | let map_texture = Texture::from_raw_bytes::( 179 | &device, 180 | &queue, 181 | None, 182 | (512, 512, 12), 183 | 4, 184 | "base HEALPix cells" 185 | ); 186 | 187 | let tiles = [ 188 | include_bytes!("../img/Npix0.jpg").to_vec(), 189 | include_bytes!("../img/Npix1.jpg").to_vec(), 190 | include_bytes!("../img/Npix2.jpg").to_vec(), 191 | include_bytes!("../img/Npix3.jpg").to_vec(), 192 | include_bytes!("../img/Npix4.jpg").to_vec(), 193 | include_bytes!("../img/Npix5.jpg").to_vec(), 194 | include_bytes!("../img/Npix6.jpg").to_vec(), 195 | include_bytes!("../img/Npix7.jpg").to_vec(), 196 | include_bytes!("../img/Npix8.jpg").to_vec(), 197 | include_bytes!("../img/Npix9.jpg").to_vec(), 198 | include_bytes!("../img/Npix10.jpg").to_vec(), 199 | include_bytes!("../img/Npix11.jpg").to_vec() 200 | ]; 201 | 202 | for (idx, tile_bytes) in tiles.iter().enumerate() { 203 | let rgba_tile = image::load_from_memory(&tile_bytes).unwrap().to_rgba8(); 204 | map_texture.write_data( 205 | &queue, 206 | (0, 0, idx as u32), 207 | &rgba_tile, 208 | (512, 512, 1) 209 | ); 210 | } 211 | 212 | // Uniform buffer 213 | let rot_mat_buf = device.create_buffer(&wgpu::BufferDescriptor { 214 | label: Some("rot matrix uniform"), 215 | size: 64, 216 | usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, 217 | mapped_at_creation: false, 218 | }); 219 | 220 | let window_size_buf = device.create_buffer(&wgpu::BufferDescriptor { 221 | label: Some("window size uniform"), 222 | size: 16, 223 | usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, 224 | mapped_at_creation: false, 225 | }); 226 | 227 | let texture_bind_group_layout = 228 | device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { 229 | entries: &[ 230 | wgpu::BindGroupLayoutEntry { 231 | binding: 0, 232 | visibility: wgpu::ShaderStages::FRAGMENT, 233 | ty: wgpu::BindingType::Texture { 234 | multisampled: false, 235 | view_dimension: wgpu::TextureViewDimension::D3, 236 | sample_type: wgpu::TextureSampleType::Float { filterable: true }, 237 | }, 238 | count: None, 239 | }, 240 | wgpu::BindGroupLayoutEntry { 241 | binding: 1, 242 | visibility: wgpu::ShaderStages::FRAGMENT, 243 | ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), 244 | count: None, 245 | }, 246 | // rot matrix uniform 247 | wgpu::BindGroupLayoutEntry { 248 | binding: 2, 249 | visibility: wgpu::ShaderStages::FRAGMENT, 250 | ty: wgpu::BindingType::Buffer { 251 | ty: wgpu::BufferBindingType::Uniform, 252 | has_dynamic_offset: false, 253 | min_binding_size: wgpu::BufferSize::new( 254 | std::mem::size_of::>() as _, 255 | ), 256 | }, 257 | count: None, 258 | }, 259 | // window size uniform 260 | wgpu::BindGroupLayoutEntry { 261 | binding: 3, 262 | visibility: wgpu::ShaderStages::VERTEX, 263 | ty: 
wgpu::BindingType::Buffer { 264 | ty: wgpu::BufferBindingType::Uniform, 265 | has_dynamic_offset: false, 266 | min_binding_size: wgpu::BufferSize::new( 267 | std::mem::size_of::>() as wgpu::BufferAddress, 268 | ), 269 | }, 270 | count: None, 271 | }, 272 | ], 273 | label: Some("texture_bind_group_layout"), 274 | }); 275 | 276 | let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { 277 | layout: &texture_bind_group_layout, 278 | entries: &[ 279 | wgpu::BindGroupEntry { 280 | binding: 0, 281 | resource: wgpu::BindingResource::TextureView(&map_texture.view), 282 | }, 283 | wgpu::BindGroupEntry { 284 | binding: 1, 285 | resource: wgpu::BindingResource::Sampler(&map_texture.sampler), 286 | }, 287 | wgpu::BindGroupEntry { 288 | binding: 2, 289 | resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding { 290 | buffer: &rot_mat_buf, 291 | offset: 0, 292 | size: wgpu::BufferSize::new( 293 | std::mem::size_of::>() as wgpu::BufferAddress 294 | ), 295 | }), 296 | }, 297 | wgpu::BindGroupEntry { 298 | binding: 3, 299 | resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding { 300 | buffer: &window_size_buf, 301 | offset: 0, 302 | size: wgpu::BufferSize::new( 303 | 16 304 | ), 305 | }), 306 | }, 307 | ], 308 | label: Some("diffuse_bind_group"), 309 | }); 310 | 311 | // uniform buffer 312 | let vs_shader = 313 | device.create_shader_module(wgpu::ShaderModuleDescriptor { 314 | label: Some("allsky vert shader"), 315 | source: wgpu::ShaderSource::Glsl { 316 | shader: include_str!("shaders/allsky.vert").into(), 317 | stage: naga::ShaderStage::Vertex, 318 | defines: Default::default() 319 | } 320 | }); 321 | let fs_shader = 322 | device.create_shader_module(wgpu::ShaderModuleDescriptor { 323 | label: Some("allsky frag shader"), 324 | source: wgpu::ShaderSource::Glsl { 325 | shader: include_str!("shaders/allsky.frag").into(), 326 | stage: naga::ShaderStage::Fragment, 327 | defines: Default::default() 328 | }, 329 | }); 330 | 331 | let render_pipeline_layout = 332 | device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { 333 | label: Some("Render Pipeline Layout"), 334 | bind_group_layouts: &[&texture_bind_group_layout], 335 | push_constant_ranges: &[], 336 | }); 337 | 338 | let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { 339 | label: Some("Render Pipeline"), 340 | layout: Some(&render_pipeline_layout), 341 | vertex: wgpu::VertexState { 342 | module: &vs_shader, 343 | entry_point: "main", 344 | compilation_options: Default::default(), 345 | buffers: &[Vertex::desc()], 346 | }, 347 | fragment: Some(wgpu::FragmentState { 348 | module: &fs_shader, 349 | entry_point: "main", 350 | compilation_options: Default::default(), 351 | targets: &[Some(wgpu::ColorTargetState { 352 | format: config.format, 353 | blend: Some(wgpu::BlendState::REPLACE), 354 | write_mask: wgpu::ColorWrites::ALL, 355 | })], 356 | }), 357 | primitive: wgpu::PrimitiveState { 358 | topology: wgpu::PrimitiveTopology::TriangleList, 359 | strip_index_format: None, 360 | front_face: wgpu::FrontFace::Ccw, 361 | cull_mode: Some(wgpu::Face::Back), 362 | // Setting this to anything other than Fill requires Features::NON_FILL_POLYGON_MODE 363 | polygon_mode: wgpu::PolygonMode::Fill, 364 | // Requires Features::DEPTH_CLIP_CONTROL 365 | unclipped_depth: false, 366 | // Requires Features::CONSERVATIVE_RASTERIZATION 367 | conservative: false, 368 | }, 369 | depth_stencil: None, 370 | multisample: wgpu::MultisampleState { 371 | count: 1, 372 | mask: !0, 373 | alpha_to_coverage_enabled: 
false, 374 | }, 375 | multiview: None, // 5. 376 | cache: None, // 6. 377 | }); 378 | 379 | let (vertices, indices) = Triangulation::create::(); 380 | 381 | let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { 382 | label: Some("Vertex Buffer"), 383 | contents: bytemuck::cast_slice(&vertices), 384 | usage: wgpu::BufferUsages::VERTEX, 385 | }); 386 | let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { 387 | label: Some("Index Buffer"), 388 | contents: bytemuck::cast_slice(&indices), 389 | usage: wgpu::BufferUsages::INDEX, 390 | }); 391 | let num_indices = indices.len() as u32; 392 | 393 | let clock = Clock::now(); 394 | let mut app = Self { 395 | surface, 396 | device, 397 | queue, 398 | config, 399 | size, 400 | window, 401 | render_pipeline, 402 | vertex_buffer, 403 | index_buffer, 404 | num_indices, 405 | 406 | map_texture, 407 | 408 | texture_bind_group_layout, 409 | diffuse_bind_group, 410 | 411 | // uniforms 412 | window_size_buf, 413 | rot_mat_buf, 414 | clock, 415 | }; 416 | app.resize::(size); 417 | 418 | app 419 | } 420 | 421 | fn resize>(&mut self, mut new_size: winit::dpi::PhysicalSize) { 422 | if new_size.width > 0 && new_size.height > 0 { 423 | #[cfg(target_arch="wasm32")] { 424 | new_size.width = new_size.width.min(wgpu::Limits::downlevel_webgl2_defaults().max_texture_dimension_2d); 425 | new_size.height = new_size.height.min(wgpu::Limits::downlevel_webgl2_defaults().max_texture_dimension_2d); 426 | } 427 | 428 | self.size = new_size; 429 | self.config.width = new_size.width; 430 | self.config.height = new_size.height; 431 | self.surface.configure(&self.device, &self.config); 432 | } 433 | 434 | let ndc = P::compute_ndc_to_clip_factor(self.size.width as f32, self.size.height as f32); 435 | self.queue.write_buffer( 436 | &self.window_size_buf, 437 | 0, 438 | bytemuck::bytes_of(&[ndc.x, ndc.y, 0.0, 0.0]), 439 | ); 440 | } 441 | 442 | #[allow(unused_variables)] 443 | fn input(&mut self, event: &WindowEvent) -> bool { 444 | false 445 | } 446 | 447 | fn update(&mut self) { 448 | let elapsed = self.clock.elapsed_as_secs(); 449 | 450 | let rot = Mat4::from_angle_y(cgmath::Rad(elapsed)); 451 | let rot: &[[f32; 4]; 4] = rot.as_ref(); 452 | 453 | self.queue 454 | .write_buffer(&self.rot_mat_buf, 0, bytemuck::bytes_of(rot)); 455 | } 456 | 457 | fn set_projection(&mut self, idx: usize) { 458 | // Update the vertex and index buffers 459 | let (vertices, indices) = match idx { 460 | 0 => Triangulation::create::(), 461 | 1 => Triangulation::create::(), 462 | 2 => Triangulation::create::(), 463 | 3 => Triangulation::create::(), 464 | 4 => Triangulation::create::(), 465 | 5 => Triangulation::create::(), 466 | _ => unimplemented!(), 467 | }; 468 | 469 | self.vertex_buffer = self 470 | .device 471 | .create_buffer_init(&wgpu::util::BufferInitDescriptor { 472 | label: Some("Vertex Buffer"), 473 | contents: bytemuck::cast_slice(&vertices), 474 | usage: wgpu::BufferUsages::VERTEX, 475 | }); 476 | self.index_buffer = self 477 | .device 478 | .create_buffer_init(&wgpu::util::BufferInitDescriptor { 479 | label: Some("Index Buffer"), 480 | contents: bytemuck::cast_slice(&indices), 481 | usage: wgpu::BufferUsages::INDEX, 482 | }); 483 | self.num_indices = indices.len() as u32; 484 | 485 | // Update the uniforms 486 | let aspect = match idx { 487 | 0 => { 488 | Aitoff::compute_ndc_to_clip_factor(self.size.width as f32, self.size.height as f32) 489 | } 490 | 1 => Ortho::compute_ndc_to_clip_factor(self.size.width as f32, self.size.height as f32), 
491 | 2 => Mollweide::compute_ndc_to_clip_factor( 492 | self.size.width as f32, 493 | self.size.height as f32, 494 | ), 495 | 3 => Mercator::compute_ndc_to_clip_factor( 496 | self.size.width as f32, 497 | self.size.height as f32, 498 | ), 499 | 4 => AzimuthalEquidistant::compute_ndc_to_clip_factor( 500 | self.size.width as f32, 501 | self.size.height as f32, 502 | ), 503 | 5 => Gnomonic::compute_ndc_to_clip_factor( 504 | self.size.width as f32, 505 | self.size.height as f32, 506 | ), 507 | _ => unimplemented!(), 508 | }; 509 | self.queue.write_buffer( 510 | &self.window_size_buf, 511 | 0, 512 | bytemuck::bytes_of(&[aspect.x, aspect.y, 0.0, 0.0]), 513 | ); 514 | 515 | // Update the bind group with the texture position from the current projection 516 | self.diffuse_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor { 517 | layout: &self.texture_bind_group_layout, 518 | entries: &[ 519 | wgpu::BindGroupEntry { 520 | binding: 0, 521 | resource: wgpu::BindingResource::TextureView(&self.map_texture.view), 522 | }, 523 | wgpu::BindGroupEntry { 524 | binding: 1, 525 | resource: wgpu::BindingResource::Sampler(&self.map_texture.sampler), 526 | }, 527 | wgpu::BindGroupEntry { 528 | binding: 2, 529 | resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding { 530 | buffer: &self.rot_mat_buf, 531 | offset: 0, 532 | size: wgpu::BufferSize::new( 533 | std::mem::size_of::>() as wgpu::BufferAddress 534 | ), 535 | }), 536 | }, 537 | wgpu::BindGroupEntry { 538 | binding: 3, 539 | resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding { 540 | buffer: &self.window_size_buf, 541 | offset: 0, 542 | size: wgpu::BufferSize::new( 543 | 16 544 | ), 545 | }), 546 | }, 547 | ], 548 | label: Some("diffuse_bind_group"), 549 | }); 550 | } 551 | 552 | fn render(&mut self) -> Result<(), wgpu::SurfaceError> { 553 | let size = self.window.inner_size(); 554 | if size.width == 0 || size.height == 0 { 555 | return Ok(()) 556 | } 557 | 558 | if let Ok(frame) = self.surface.get_current_texture() { 559 | let view = frame.texture.create_view(&wgpu::TextureViewDescriptor { 560 | format: Some(self.config.format.add_srgb_suffix()), 561 | ..Default::default() 562 | }); 563 | 564 | let mut encoder = self 565 | .device 566 | .create_command_encoder(&wgpu::CommandEncoderDescriptor { 567 | label: Some("Render Encoder"), 568 | }); 569 | 570 | { 571 | let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { 572 | label: Some("Render Pass"), 573 | color_attachments: &[Some(wgpu::RenderPassColorAttachment { 574 | view: &view, 575 | resolve_target: None, 576 | ops: wgpu::Operations { 577 | load: wgpu::LoadOp::Clear(wgpu::Color { 578 | r: 0.01, 579 | g: 0.01, 580 | b: 0.01, 581 | a: 1.0, 582 | }), 583 | store: wgpu::StoreOp::Store, 584 | }, 585 | })], 586 | depth_stencil_attachment: None, 587 | occlusion_query_set: None, 588 | timestamp_writes: None, 589 | }); 590 | 591 | render_pass.set_pipeline(&self.render_pipeline); 592 | render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]); 593 | render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); 594 | render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32); 595 | render_pass.draw_indexed(0..self.num_indices, 0, 0..1); 596 | } 597 | 598 | self.queue.submit(iter::once(encoder.finish())); 599 | frame.present(); 600 | } 601 | 602 | Ok(()) 603 | } 604 | } 605 | 606 | #[cfg_attr(target_arch="wasm32", wasm_bindgen(start))] 607 | pub async fn run() { 608 | #[cfg(target_arch = "wasm32")] 609 | 
std::panic::set_hook(Box::new(console_error_panic_hook::hook)); 610 | #[cfg(target_arch = "wasm32")] 611 | console_log::init_with_level(log::Level::Warn).expect("Couldn't initialize logger"); 612 | #[cfg(not(target_arch = "wasm32"))] 613 | env_logger::init(); 614 | 615 | let event_loop = EventLoop::new().unwrap(); 616 | let mut builder = WindowBuilder::new(); 617 | 618 | #[cfg(target_arch = "wasm32")] 619 | { 620 | use wasm_bindgen::JsCast; 621 | use winit::platform::web::WindowBuilderExtWebSys; 622 | let canvas = web_sys::window() 623 | .unwrap() 624 | .document() 625 | .unwrap() 626 | .get_element_by_id("canvas") 627 | .unwrap() 628 | .dyn_into::() 629 | .unwrap(); 630 | 631 | builder = builder.with_canvas(Some(canvas)); 632 | } 633 | let window = builder.with_title("allsky projections") 634 | .build(&event_loop).unwrap(); 635 | 636 | // Winit prevents sizing with CSS, so we have to set 637 | // the size manually when on web. 638 | #[cfg(target_arch = "wasm32")] 639 | { 640 | use winit::dpi::LogicalSize; 641 | let _ = window.request_inner_size(LogicalSize::new(768, 512)); 642 | } 643 | 644 | let mut state = State::new(&window).await; 645 | 646 | let mut count: i32 = 0; 647 | 648 | event_loop.run(move |event, control_flow| { 649 | match event { 650 | Event::WindowEvent { 651 | ref event, 652 | window_id, 653 | } if window_id == state.window.id() => { 654 | if !state.input(event) { 655 | match event { 656 | #[cfg(not(target_arch="wasm32"))] 657 | WindowEvent::CloseRequested 658 | | WindowEvent::KeyboardInput { 659 | event: 660 | KeyEvent { 661 | state: ElementState::Pressed, 662 | physical_key: PhysicalKey::Code(KeyCode::Escape), 663 | .. 664 | }, 665 | .. 666 | } => control_flow.exit(), 667 | WindowEvent::KeyboardInput { 668 | event: 669 | KeyEvent { 670 | state: ElementState::Pressed, 671 | physical_key: PhysicalKey::Code(KeyCode::ArrowLeft), 672 | .. 673 | }, 674 | .. 675 | } => { 676 | count += 1; 677 | count %= NUM_PROJECTIONS; 678 | 679 | state.set_projection(count as usize); 680 | }, 681 | WindowEvent::KeyboardInput { 682 | event: 683 | KeyEvent { 684 | state: ElementState::Pressed, 685 | physical_key: PhysicalKey::Code(KeyCode::Enter), 686 | .. 687 | }, 688 | .. 689 | } => { 690 | // toggle fullscreen 691 | state.window.set_fullscreen(Some(Fullscreen::Borderless(None))); 692 | }, 693 | WindowEvent::KeyboardInput { 694 | event: 695 | KeyEvent { 696 | state: ElementState::Pressed, 697 | physical_key: PhysicalKey::Code(KeyCode::ArrowRight), 698 | .. 699 | }, 700 | .. 
701 | } => { 702 | count -= 1; 703 | if count < 0 { 704 | count += NUM_PROJECTIONS; 705 | } 706 | count %= NUM_PROJECTIONS; 707 | 708 | state.set_projection(count as usize); 709 | }, 710 | WindowEvent::Resized(physical_size) => match count { 711 | 0 => state.resize::(*physical_size), 712 | 1 => state.resize::(*physical_size), 713 | 2 => state.resize::(*physical_size), 714 | 3 => state.resize::(*physical_size), 715 | 4 => state.resize::(*physical_size), 716 | 5 => state.resize::(*physical_size), 717 | _ => unimplemented!(), 718 | }, 719 | WindowEvent::RedrawRequested => { 720 | state.update(); 721 | match state.render() { 722 | Ok(_) => {} 723 | // Reconfigure the surface if lost 724 | Err(wgpu::SurfaceError::Lost) => match count { 725 | 0 => state.resize::(state.size), 726 | 1 => state.resize::(state.size), 727 | 2 => state.resize::(state.size), 728 | 3 => state.resize::(state.size), 729 | 4 => state.resize::(state.size), 730 | 5 => state.resize::(state.size), 731 | _ => unimplemented!(), 732 | }, 733 | // The system is out of memory, we should probably quit 734 | Err(wgpu::SurfaceError::OutOfMemory) => control_flow.exit(), 735 | // All other errors (Outdated, Timeout) should be resolved by the next frame 736 | Err(e) => { eprintln!("{}", e); }, 737 | } 738 | } 739 | /*WindowEvent::ScaleFactorChanged { scale_factor, inner_size_writer } => { 740 | state.inner_size 741 | // new_inner_size is &mut so w have to dereference it twice 742 | match count { 743 | 0 => state.resize::(**new_inner_size), 744 | 1 => state.resize::(**new_inner_size), 745 | 2 => state.resize::(**new_inner_size), 746 | 3 => state.resize::(**new_inner_size), 747 | 4 => state.resize::(**new_inner_size), 748 | 5 => state.resize::(**new_inner_size), 749 | _ => unimplemented!(), 750 | } 751 | }*/ 752 | _ => {} 753 | } 754 | } 755 | } 756 | // ... at the end of the WindowEvent block 757 | Event::AboutToWait => { 758 | // RedrawRequested will only trigger once unless we manually 759 | // request it. 
760 | state.window.request_redraw(); 761 | } 762 | _ => {} 763 | } 764 | }).unwrap(); 765 | } 766 | 767 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use wgpu_sky_rendering::run; 2 | fn main() { 3 | pollster::block_on(run()); 4 | } 5 | -------------------------------------------------------------------------------- /src/math.rs: -------------------------------------------------------------------------------- 1 | use crate::angle::Angle; 2 | 3 | use cgmath::BaseFloat; 4 | 5 | #[allow(dead_code)] 6 | pub type Vec2 = cgmath::Vector2; 7 | #[allow(dead_code)] 8 | pub type Vec3 = cgmath::Vector3; 9 | #[allow(dead_code)] 10 | pub type Vec4 = cgmath::Vector4; 11 | 12 | #[allow(dead_code)] 13 | pub type Mat4 = cgmath::Matrix4; 14 | #[allow(dead_code)] 15 | pub type Mat3 = cgmath::Matrix3; 16 | 17 | use cgmath::num_traits::FloatConst; 18 | pub trait Float: cgmath::BaseFloat + FloatConst {} 19 | impl Float for f32 {} 20 | impl Float for f64 {} 21 | 22 | #[allow(dead_code)] 23 | #[inline] 24 | pub fn xyz_to_radec(v: &cgmath::Vector3) -> (Angle, Angle) { 25 | let lon = Angle(v.x.atan2(v.z)); 26 | let lat = Angle(v.y.atan2((v.x * v.x + v.z * v.z).sqrt())); 27 | 28 | (lon, lat) 29 | } 30 | #[allow(dead_code)] 31 | #[inline] 32 | pub fn xyzw_to_radec(v: &cgmath::Vector4) -> (Angle, Angle) { 33 | let lon = Angle(v.x.atan2(v.z)); 34 | let lat = Angle(v.y.atan2((v.x * v.x + v.z * v.z).sqrt())); 35 | 36 | (lon, lat) 37 | } 38 | 39 | #[inline] 40 | #[allow(dead_code)] 41 | pub fn radec_to_xyzw(theta: Angle, delta: Angle) -> Vec4 { 42 | let (d_s, d_c) = delta.to_radians().sin_cos(); 43 | let (t_s, t_c) = theta.to_radians().sin_cos(); 44 | 45 | Vec4::::new(d_c * t_s, d_s, d_c * t_c, S::one()) 46 | } 47 | 48 | #[inline] 49 | #[allow(dead_code)] 50 | pub fn radec_to_xyz(theta: Angle, delta: Angle) -> Vec3 { 51 | let (d_s, d_c) = delta.to_radians().sin_cos(); 52 | let (t_s, t_c) = theta.to_radians().sin_cos(); 53 | 54 | Vec3::::new(d_c * t_s, d_s, d_c * t_c) 55 | } 56 | 57 | #[inline] 58 | pub fn asinc_positive(mut x: T) -> T { 59 | assert!(x >= T::zero()); 60 | if x > T::from(1.0e-4).unwrap() { 61 | x.asin() / x 62 | } else { 63 | // If a is mall, use Taylor expension of asin(a) / a 64 | // a = 1e-4 => a^4 = 1.e-16 65 | x *= x; 66 | let nine = T::from(9.0).unwrap(); 67 | let twenty = T::from(20.0).unwrap(); 68 | let six = T::from(6.0).unwrap(); 69 | 70 | T::one() + x * (T::one() + x * nine / twenty) / six 71 | } 72 | } 73 | 74 | #[inline] 75 | pub fn sinc_positive(mut x: T) -> T { 76 | assert!(x >= T::zero()); 77 | if x > T::from(1.0e-4).unwrap() { 78 | x.sin() / x 79 | } else { 80 | // If a is mall, use Taylor expension of asin(a) / a 81 | // a = 1e-4 => a^4 = 1.e-16 82 | x *= x; 83 | let _nine = T::from(9.0).unwrap(); 84 | let twenty = T::from(20.0).unwrap(); 85 | let six = T::from(6.0).unwrap(); 86 | T::one() - x * (T::one() - x / twenty) / six 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/projection.rs: -------------------------------------------------------------------------------- 1 | // Screen space: pixels space between 2 | // * x_px in [0, width-1] 3 | // * y_px in [0, height-1] 4 | 5 | // Homogeneous space 6 | // * x_h in [-1, 1] 7 | // * y_h in [-1, 1] 8 | 9 | use crate::math::Float; 10 | 11 | use cgmath::Vector4; 12 | pub trait Projection { 13 | /// World to screen space projection 14 | fn 
world_to_normalized_device_space( 15 | pos_world_space: &Vector4, 16 | width_screen: T, 17 | height_screen: T, 18 | ) -> Option> { 19 | if let Some(pos_clip_space) = Self::world_to_clip_space(pos_world_space) { 20 | let ndc_to_clip = Self::compute_ndc_to_clip_factor(width_screen, height_screen); 21 | 22 | let pos_normalized_device = Vector2::new( 23 | pos_clip_space.x / (ndc_to_clip.x), 24 | pos_clip_space.y / (ndc_to_clip.y), 25 | ); 26 | Some(pos_normalized_device) 27 | } else { 28 | None 29 | } 30 | } 31 | 32 | /// Perform a clip to the world space deprojection 33 | /// 34 | /// # Arguments 35 | /// 36 | /// * ``pos_clip_space`` - The position in the clipping space (orthonorlized space) 37 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option>; 38 | /// World to the clipping space deprojection 39 | /// 40 | /// # Arguments 41 | /// 42 | /// * ``pos_world_space`` - The position in the world space 43 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option>; 44 | 45 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool; 46 | 47 | fn is_front_of_camera(pos_world_space: &Vector4) -> bool; 48 | 49 | fn compute_ndc_to_clip_factor(width: T, height: T) -> Vector2; 50 | 51 | fn solve_along_abscissa(y: T) -> Option<(T, T)>; 52 | fn solve_along_ordinate(x: T) -> Option<(T, T)>; 53 | } 54 | 55 | pub struct Aitoff; 56 | pub struct Mollweide; 57 | pub struct Ortho; 58 | pub struct AzimuthalEquidistant; 59 | pub struct Gnomonic; 60 | pub struct Mercator; 61 | 62 | use crate::math; 63 | use cgmath::Vector2; 64 | 65 | impl Projection for Aitoff 66 | where 67 | T: Float, 68 | { 69 | fn compute_ndc_to_clip_factor(width: T, height: T) -> Vector2 { 70 | if width > T::from(2.0).unwrap() * height { 71 | // reduce width 72 | Vector2::new( 73 | T::from(2.0).unwrap() * height / width, 74 | T::from(2.0).unwrap(), 75 | ) 76 | } else { 77 | // reduce height 78 | Vector2::new(T::one(), width / height) 79 | } 80 | } 81 | 82 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 83 | // Semi-major axis length 84 | let a = T::one(); 85 | // Semi-minor axis length 86 | let b = T::one() * T::from(0.5).unwrap(); 87 | 88 | let a2 = a * a; 89 | let b2 = b * b; 90 | let px2 = pos_clip_space.x * pos_clip_space.x; 91 | let py2 = pos_clip_space.y * pos_clip_space.y; 92 | 93 | (px2 * b2 + py2 * a2) < a2 * b2 94 | } 95 | 96 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 97 | let t = T::from(1e-3).unwrap(); 98 | if y.abs() > T::from(0.5).unwrap() { 99 | None 100 | } else { 101 | let x = (T::one() - T::from(4.0).unwrap() * y * y).sqrt(); 102 | Some((-x + t, x - t)) 103 | } 104 | } 105 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 106 | let t = T::from(1e-3).unwrap(); 107 | if x.abs() > T::one() { 108 | None 109 | } else { 110 | let y = (T::one() - x * x).sqrt() * T::from(0.5).unwrap(); 111 | Some((-y + t, y - t)) 112 | } 113 | } 114 | 115 | /// View to world space transformation 116 | /// 117 | /// This returns a normalized vector along its first 3 dimensions. 118 | /// Its fourth component is set to 1. 
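    /// For example, the clip-space origin `(0.0, 0.0)` maps to the world-space
    /// vector `(0.0, 0.0, 1.0, 1.0)`, i.e. the direction at the (unrotated) center of the view.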
119 | /// 120 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 121 | /// 122 | /// # Arguments 123 | /// 124 | /// * `x` - in normalized device coordinates between [-1; 1] 125 | /// * `y` - in normalized device coordinates between [-1; 1] 126 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 127 | if Self::is_included_inside_projection(&pos_clip_space) { 128 | let u = pos_clip_space.x * T::PI() * T::from(0.5).unwrap(); 129 | let v = pos_clip_space.y * T::PI(); 130 | //da uv a lat/lon 131 | let c = (v * v + u * u).sqrt(); 132 | 133 | let (phi, mut theta) = if c != T::zero() { 134 | let phi = (v * c.sin() / c).asin(); 135 | let theta = (u * c.sin()).atan2(c * c.cos()); 136 | (phi, theta) 137 | } else { 138 | let phi = v.asin(); 139 | let theta = u.atan(); 140 | (phi, theta) 141 | }; 142 | theta *= T::from(2.0).unwrap(); 143 | 144 | let pos_world_space = cgmath::Vector4::new( 145 | theta.sin() * phi.cos(), 146 | phi.sin(), 147 | theta.cos() * phi.cos(), 148 | T::one(), 149 | ); 150 | 151 | Some(pos_world_space) 152 | } else { 153 | None 154 | } 155 | } 156 | 157 | /// World to screen space transformation 158 | /// X is between [-1, 1] 159 | /// Y is between [-0.5, 0.5] 160 | /// 161 | /// # Arguments 162 | /// 163 | /// * `pos_world_space` - Position in the world space. Must be a normalized vector 164 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option> { 165 | // X in [-1, 1] 166 | // Y in [-1/2; 1/2] and scaled by the screen width/height ratio 167 | //return vec3(X / PI, aspect * Y / PI, 0.f); 168 | 169 | //let pos_world_space = pos_world_space; 170 | 171 | let xyz = pos_world_space.truncate(); 172 | let (theta, delta) = math::xyz_to_radec(&xyz); 173 | 174 | let theta_by_two = theta / T::from(2.0).unwrap(); 175 | 176 | let alpha = (delta.0.cos() * theta_by_two.0.cos()).acos(); 177 | let inv_sinc_alpha = if alpha < T::from(1e-3).unwrap() { 178 | T::one() 179 | } else { 180 | alpha / alpha.sin() 181 | }; 182 | 183 | // The minus is an astronomical convention. 
184 | // longitudes are increasing from right to left 185 | let x = T::from(2.0).unwrap() * inv_sinc_alpha * delta.0.cos() * theta_by_two.0.sin(); 186 | let y = inv_sinc_alpha * delta.0.sin(); 187 | 188 | Some(Vector2::new(x / T::PI(), y / T::PI())) 189 | } 190 | 191 | fn is_front_of_camera(_pos_world_space: &Vector4) -> bool { 192 | // 2D projections always faces the camera 193 | true 194 | } 195 | } 196 | 197 | use crate::angle::Angle; 198 | impl Projection for Ortho 199 | where 200 | T: Float, 201 | { 202 | fn compute_ndc_to_clip_factor(width: T, height: T) -> Vector2 { 203 | if width > height { 204 | // reduce width 205 | Vector2::new(height / width, T::one()) 206 | } else { 207 | // reduce height 208 | Vector2::new(T::one(), width / height) 209 | } 210 | } 211 | 212 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 213 | let px2 = pos_clip_space.x * pos_clip_space.x; 214 | let py2 = pos_clip_space.y * pos_clip_space.y; 215 | 216 | (px2 + py2) < T::one() 217 | } 218 | 219 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 220 | if y.abs() > T::one() { 221 | None 222 | } else { 223 | let t = T::from(1e-3).unwrap(); 224 | let x = (T::one() - y * y).sqrt(); 225 | Some((-x + t, x - t)) 226 | } 227 | } 228 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 229 | if x.abs() > T::one() { 230 | None 231 | } else { 232 | let t = T::from(1e-3).unwrap(); 233 | 234 | let y = (T::one() - x * x).sqrt(); 235 | Some((-y + t, y - t)) 236 | } 237 | } 238 | 239 | /// View to world space transformation 240 | /// 241 | /// This returns a normalized vector along its first 3 dimensions. 242 | /// Its fourth component is set to 1. 243 | /// 244 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 245 | /// 246 | /// # Arguments 247 | /// 248 | /// * `x` - in normalized device coordinates between [-1; 1] 249 | /// * `y` - in normalized device coordinates between [-1; 1] 250 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 251 | let xw_2 = 252 | T::one() - pos_clip_space.x * pos_clip_space.x - pos_clip_space.y * pos_clip_space.y; 253 | if xw_2 > T::zero() { 254 | let pos_world_space = 255 | cgmath::Vector4::new(pos_clip_space.x, pos_clip_space.y, xw_2.sqrt(), T::one()); 256 | 257 | Some(pos_world_space) 258 | } else { 259 | // Out of the sphere 260 | None 261 | } 262 | } 263 | 264 | /// World to screen space transformation 265 | /// 266 | /// # Arguments 267 | /// 268 | /// * `pos_world_space` - Position in the world space. 
Must be a normalized vector 269 | fn world_to_clip_space(pos_world_space: &cgmath::Vector4) -> Option> { 270 | if pos_world_space.z < T::zero() { 271 | None 272 | } else { 273 | Some(Vector2::new(pos_world_space.x, pos_world_space.y)) 274 | } 275 | } 276 | 277 | fn is_front_of_camera(pos_world_space: &Vector4) -> bool { 278 | pos_world_space.z > T::zero() 279 | } 280 | } 281 | 282 | impl Projection for Mollweide 283 | where 284 | T: Float, 285 | { 286 | fn compute_ndc_to_clip_factor(width: T, height: T) -> Vector2 { 287 | if width > T::from(2.0).unwrap() * height { 288 | // reduce width 289 | Vector2::new( 290 | T::from(2.0).unwrap() * height / width, 291 | T::from(2.0).unwrap(), 292 | ) 293 | } else { 294 | // reduce height 295 | Vector2::new(T::one(), width / height) 296 | } 297 | } 298 | 299 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 300 | // Semi-major axis length 301 | let a = T::one(); 302 | // Semi-minor axis length 303 | let b = T::from(0.5).unwrap(); 304 | 305 | let a2 = a * a; 306 | let b2 = b * b; 307 | let px2 = pos_clip_space.x * pos_clip_space.x; 308 | let py2 = pos_clip_space.y * pos_clip_space.y; 309 | 310 | (px2 * b2 + py2 * a2) < a2 * b2 311 | } 312 | 313 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 314 | if y.abs() > T::from(0.5).unwrap() { 315 | None 316 | } else { 317 | let x = (T::one() - T::from(4.0).unwrap() * y * y).sqrt(); 318 | Some((-x + T::from(1e-3).unwrap(), x - T::from(1e-3).unwrap())) 319 | } 320 | } 321 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 322 | if x.abs() > T::one() { 323 | None 324 | } else { 325 | let y = (T::one() - x * x).sqrt() * T::from(0.5).unwrap(); 326 | let t = T::from(1e-3).unwrap(); 327 | 328 | Some((-y + t, y - t)) 329 | } 330 | } 331 | 332 | /// View to world space transformation 333 | /// 334 | /// This returns a normalized vector along its first 3 dimensions. 335 | /// Its fourth component is set to 1. 336 | /// 337 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 338 | /// 339 | /// # Arguments 340 | /// 341 | /// * `x` - in normalized device coordinates between [-1; 1] 342 | /// * `y` - in normalized device coordinates between [-1; 1] 343 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 344 | if Self::is_included_inside_projection(&pos_clip_space) { 345 | let y2 = pos_clip_space.y * pos_clip_space.y; 346 | let four = T::from(4.0).unwrap(); 347 | let two = T::from(2.0).unwrap(); 348 | let k = (T::one() - four * y2).sqrt(); 349 | 350 | let theta = T::PI() * pos_clip_space.x / k; 351 | let delta = ((two * (two * pos_clip_space.y).asin() + four * pos_clip_space.y * k) 352 | / T::PI()) 353 | .asin(); 354 | 355 | // The minus is an astronomical convention. 356 | // longitudes are increasing from right to left 357 | let pos_world_space = cgmath::Vector4::new( 358 | theta.sin() * delta.cos(), 359 | delta.sin(), 360 | theta.cos() * delta.cos(), 361 | T::one(), 362 | ); 363 | 364 | Some(pos_world_space) 365 | } else { 366 | None 367 | } 368 | } 369 | 370 | /// World to screen space transformation 371 | /// X is between [-1, 1] 372 | /// Y is between [-0.5, 0.5] 373 | /// 374 | /// # Arguments 375 | /// 376 | /// * `pos_world_space` - Position in the world space. 
Must be a normalized vector 377 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option> { 378 | // X in [-1, 1] 379 | // Y in [-1/2; 1/2] and scaled by the screen width/height ratio 380 | let epsilon = T::from(1e-12).unwrap(); 381 | let max_iter = 10; 382 | 383 | let xyz = pos_world_space.truncate(); 384 | let (lon, lat) = math::xyz_to_radec(&xyz); 385 | let cst = T::PI() * lat.sin(); 386 | 387 | let mut theta = lat.0; 388 | let mut f = theta + theta.sin() - cst; 389 | 390 | let mut k = 0; 391 | while f.abs() > epsilon && k < max_iter { 392 | theta -= f / (T::one() + theta.cos()); 393 | f = theta + theta.sin() - cst; 394 | 395 | k += 1; 396 | } 397 | 398 | theta /= T::from(2.0).unwrap(); 399 | 400 | // The minus is an astronomical convention. 401 | // longitudes are increasing from right to left 402 | let x = (lon.0 / T::PI()) * theta.cos(); 403 | let y = T::from(0.5).unwrap() * theta.sin(); 404 | 405 | Some(Vector2::new(x, y)) 406 | } 407 | 408 | fn is_front_of_camera(_pos_world_space: &Vector4) -> bool { 409 | // 2D projections always faces the camera 410 | true 411 | } 412 | } 413 | 414 | impl Projection for AzimuthalEquidistant 415 | where 416 | T: Float, 417 | { 418 | fn compute_ndc_to_clip_factor(width: T, height: T) -> Vector2 { 419 | if width > height { 420 | // reduce width 421 | Vector2::new(height / width, T::one()) 422 | } else { 423 | // reduce height 424 | Vector2::new(T::one(), width / height) 425 | } 426 | } 427 | 428 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 429 | let px2 = pos_clip_space.x * pos_clip_space.x; 430 | let py2 = pos_clip_space.y * pos_clip_space.y; 431 | 432 | (px2 + py2) < T::one() 433 | } 434 | 435 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 436 | if y.abs() > T::one() { 437 | None 438 | } else { 439 | let x = (T::one() - y * y).sqrt(); 440 | let t = T::from(1e-3).unwrap(); 441 | 442 | Some((-x + t, x - t)) 443 | } 444 | } 445 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 446 | if x.abs() > T::one() { 447 | None 448 | } else { 449 | let y = (T::one() - x * x).sqrt(); 450 | let t = T::from(1e-3).unwrap(); 451 | 452 | Some((-y + t, y - t)) 453 | } 454 | } 455 | 456 | /// View to world space transformation 457 | /// 458 | /// This returns a normalized vector along its first 3 dimensions. 459 | /// Its fourth component is set to 1. 460 | /// 461 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 462 | /// 463 | /// # Arguments 464 | /// 465 | /// * `x` - in normalized device coordinates between [-1; 1] 466 | /// * `y` - in normalized device coordinates between [-1; 1] 467 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 468 | // r <= pi 469 | let x = pos_clip_space.x * T::PI(); 470 | let y = pos_clip_space.y * T::PI(); 471 | let mut r = (x * x + y * y).sqrt(); 472 | if r > T::PI() { 473 | None 474 | } else { 475 | let z = r.cos(); 476 | r = math::sinc_positive(r); 477 | 478 | let pos_world_space = Vector4::new(-x * r, y * r, z, T::one()); 479 | 480 | Some(pos_world_space) 481 | } 482 | } 483 | 484 | /// World to screen space transformation 485 | /// 486 | /// # Arguments 487 | /// 488 | /// * `pos_world_space` - Position in the world space. Must be a normalized vector 489 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option> { 490 | if pos_world_space.z > -T::one() { 491 | // Distance in the Euclidean plane (xy) 492 | // Angular distance is acos(x), but for small separation, asin(r) 493 | // is more accurate. 
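// For a unit vector, r = sqrt(x^2 + y^2) = sin(theta) and z = cos(theta), where
// theta is the angular distance to the projection centre (0, 0, 1). The code
// below rescales (x, y) by theta / r so that the projected radius equals theta:
// asin(r) / r on the front hemisphere (z > 0, theta < PI/2) and acos(z) / r on
// the back one.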
494 | let mut r = (pos_world_space.x * pos_world_space.x 495 | + pos_world_space.y * pos_world_space.y) 496 | .sqrt(); 497 | if pos_world_space.z > T::zero() { 498 | // Angular distance < PI/2, angular distance = asin(r) 499 | r = math::asinc_positive::(r); 500 | } else { 501 | // Angular distance > PI/2, angular distance = acos(x) 502 | r = pos_world_space.z.acos() / r; 503 | } 504 | let x = pos_world_space.x * r; 505 | let y = pos_world_space.y * r; 506 | 507 | Some(Vector2::new(x / T::PI(), y / T::PI())) 508 | } else { 509 | Some(Vector2::new(T::one(), T::zero())) 510 | } 511 | } 512 | 513 | fn is_front_of_camera(_pos_world_space: &Vector4) -> bool { 514 | // 2D projections always faces the camera 515 | true 516 | } 517 | } 518 | 519 | impl Projection for Gnomonic 520 | where 521 | T: Float, 522 | { 523 | fn compute_ndc_to_clip_factor(_width: T, _height: T) -> Vector2 { 524 | Vector2::new(T::one(), T::one()) 525 | } 526 | 527 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 528 | let px = pos_clip_space.x; 529 | let py = pos_clip_space.y; 530 | 531 | px > -T::one() && px < T::one() && py > -T::one() && py < T::one() 532 | } 533 | 534 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 535 | if y.abs() > T::one() { 536 | None 537 | } else { 538 | let t = T::from(1e-3).unwrap(); 539 | 540 | Some((-T::one() + t, T::one() - t)) 541 | } 542 | } 543 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 544 | if x.abs() > T::one() { 545 | None 546 | } else { 547 | let t = T::from(1e-3).unwrap(); 548 | 549 | Some((-T::one() + t, T::one() - t)) 550 | } 551 | } 552 | 553 | /// View to world space transformation 554 | /// 555 | /// This returns a normalized vector along its first 3 dimensions. 556 | /// Its fourth component is set to 1. 557 | /// 558 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 559 | /// 560 | /// # Arguments 561 | /// 562 | /// * `x` - in normalized device coordinates between [-1; 1] 563 | /// * `y` - in normalized device coordinates between [-1; 1] 564 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 565 | //if pos_clip_space.x * pos_clip_space.x + pos_clip_space.y * pos_clip_space.y >= 1.0 { 566 | // None 567 | //} else { 568 | let x_2d = pos_clip_space.x * T::PI(); 569 | let y_2d = pos_clip_space.y * T::PI(); 570 | let r = x_2d * x_2d + y_2d * y_2d; 571 | 572 | let z = (T::one() + r).sqrt(); 573 | let pos_world_space = Vector4::new(z * x_2d, z * y_2d, z, T::one()); 574 | 575 | Some(pos_world_space) 576 | } 577 | 578 | /// World to screen space transformation 579 | /// 580 | /// # Arguments 581 | /// 582 | /// * `pos_world_space` - Position in the world space. 
Must be a normalized vector 583 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option> { 584 | if pos_world_space.z <= T::from(1e-2).unwrap() { 585 | // Back hemisphere (z < 0) + diverges near z=0 586 | None 587 | } else { 588 | let pos_clip_space = Vector2::new( 589 | (pos_world_space.x / pos_world_space.z) / T::PI(), 590 | (pos_world_space.y / pos_world_space.z) / T::PI(), 591 | ); 592 | Some(pos_clip_space) 593 | } 594 | } 595 | 596 | fn is_front_of_camera(pos_world_space: &Vector4) -> bool { 597 | // 2D projections always faces the camera 598 | pos_world_space.z >= T::from(1e-2).unwrap() 599 | } 600 | } 601 | 602 | impl Projection for Mercator 603 | where 604 | T: Float, 605 | { 606 | fn compute_ndc_to_clip_factor(_width: T, _height: T) -> Vector2 { 607 | Vector2::new(T::one(), T::from(2.0).unwrap()) 608 | } 609 | 610 | fn is_included_inside_projection(pos_clip_space: &Vector2) -> bool { 611 | let px = pos_clip_space.x; 612 | let py = pos_clip_space.y; 613 | 614 | px > -T::one() && px < T::one() && py > -T::one() && py < T::one() 615 | } 616 | 617 | fn solve_along_abscissa(y: T) -> Option<(T, T)> { 618 | if y.abs() > T::one() { 619 | None 620 | } else { 621 | let t = T::from(1e-3).unwrap(); 622 | 623 | Some((-T::one() + t, T::one() - t)) 624 | } 625 | } 626 | fn solve_along_ordinate(x: T) -> Option<(T, T)> { 627 | if x.abs() > T::one() { 628 | None 629 | } else { 630 | let t = T::from(1e-3).unwrap(); 631 | Some((-T::one() + t, T::one() - t)) 632 | } 633 | } 634 | 635 | /// View to world space transformation 636 | /// 637 | /// This returns a normalized vector along its first 3 dimensions. 638 | /// Its fourth component is set to 1. 639 | /// 640 | /// The Aitoff projection maps screen coordinates from [-pi; pi] x [-pi/2; pi/2] 641 | /// 642 | /// # Arguments 643 | /// 644 | /// * `x` - in normalized device coordinates between [-1; 1] 645 | /// * `y` - in normalized device coordinates between [-1; 1] 646 | fn clip_to_world_space(pos_clip_space: &Vector2) -> Option> { 647 | let theta = pos_clip_space.x * T::PI(); 648 | let delta = (pos_clip_space.y.sinh()).atan() * T::PI(); 649 | 650 | let pos_world_space = math::radec_to_xyzw(Angle(theta), Angle(delta)); 651 | 652 | Some(pos_world_space) 653 | } 654 | 655 | /// World to screen space transformation 656 | /// 657 | /// # Arguments 658 | /// 659 | /// * `pos_world_space` - Position in the world space. 
Must be a normalized vector 660 | fn world_to_clip_space(pos_world_space: &Vector4) -> Option> { 661 | let (theta, delta) = math::xyzw_to_radec(&pos_world_space); 662 | 663 | Some(Vector2::new( 664 | theta.0 / T::PI(), 665 | ((delta.0 / T::PI()).tan()).asinh() as T, 666 | )) 667 | } 668 | 669 | fn is_front_of_camera(_pos_world_space: &Vector4) -> bool { 670 | // 2D projections always faces the camera 671 | true 672 | } 673 | } 674 | 675 | mod tests { 676 | 677 | #[test] 678 | fn generate_maps() { 679 | use super::*; 680 | use cgmath::InnerSpace; 681 | use cgmath::Vector2; 682 | use image::{Rgba, RgbaImage}; 683 | fn generate_projection_map>(filename: &str) { 684 | let (w, h) = (1024.0, 1024.0); 685 | let mut img = RgbaImage::new(w as u32, h as u32); 686 | for x in 0..(w as u32) { 687 | for y in 0..(h as u32) { 688 | let xy = Vector2::new(x, y); 689 | let clip_xy = Vector2::new( 690 | 2.0 * ((xy.x as f32) / (w as f32)) - 1.0, 691 | 2.0 * ((xy.y as f32) / (h as f32)) - 1.0, 692 | ); 693 | let rgb = if let Some(pos) = P::clip_to_world_space(&clip_xy) { 694 | let pos = pos.truncate().normalize(); 695 | Rgba([ 696 | ((pos.x * 0.5 + 0.5) * 256.0) as u8, 697 | ((pos.y * 0.5 + 0.5) * 256.0) as u8, 698 | ((pos.z * 0.5 + 0.5) * 256.0) as u8, 699 | 255, 700 | ]) 701 | } else { 702 | Rgba([255, 255, 255, 255]) 703 | }; 704 | 705 | img.put_pixel(x as u32, y as u32, rgb); 706 | } 707 | } 708 | img.save(filename).unwrap(); 709 | } 710 | 711 | generate_projection_map::("./img/aitoff2.png"); 712 | /*generate_projection_map::("./img/tan.png"); 713 | generate_projection_map::("./img/arc.png"); 714 | generate_projection_map::("./img/mollweide.png"); 715 | generate_projection_map::("./img/mercator.png"); 716 | generate_projection_map::("./img/sinus.png");*/ 717 | } 718 | } 719 | -------------------------------------------------------------------------------- /src/render_desc.rs: -------------------------------------------------------------------------------- 1 | use crate::texture::Texture; 2 | use crate::vertex::Vertex; 3 | 4 | pub struct TexturedObjectRenderDesc; 5 | impl TexturedObjectRenderDesc { 6 | pub fn initialize( 7 | // The device for which the pipeline is created for 8 | device: &wgpu::Device, 9 | sc_desc: &wgpu::SwapChainDescriptor, 10 | 11 | bind_group_layouts: &[&wgpu::BindGroupLayout], 12 | ) -> wgpu::RenderPipeline { 13 | let vs_module = device.create_shader_module(&wgpu::include_spirv!("shaders/allsky.vert.spv")); 14 | let fs_module = device.create_shader_module(&wgpu::include_spirv!("shaders/allsky.frag.spv")); 15 | let render_pipeline_layout = device.create_pipeline_layout( 16 | &wgpu::PipelineLayoutDescriptor { 17 | label: Some("Render to color attachment"), 18 | bind_group_layouts: bind_group_layouts, 19 | push_constant_ranges: &[], 20 | } 21 | ); 22 | let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { 23 | label: Some("Render to color attachment pipeline"), 24 | // The "layout" is what uniforms will be needed. 25 | layout: Some(&render_pipeline_layout), 26 | // Vertex shader and input buffers 27 | vertex: wgpu::VertexState { 28 | module: &vs_module, 29 | entry_point: "main", 30 | // Layout of our vertices. This should match the structs 31 | // which are uploaded to the GPU. This should also be 32 | // ensured by tagging on either a `#[repr(C)]` onto a 33 | // struct, or a `#[repr(transparent)]` if it only contains 34 | // one item, which is itself `repr(C)`. 
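// For this pipeline, `Vertex::desc()` (src/vertex.rs) describes a 20-byte vertex
// with two attributes: `ndc: [f32; 2]` at offset 0 -> shader location 0 (`a_ndc`
// in allsky.vert) and `xyz: [f32; 3]` at offset 8 -> shader location 1 (`a_xyz`).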
35 | buffers: &[ 36 | Vertex::desc() 37 | ], 38 | }, 39 | // Fragment shader and output targets 40 | fragment: Some(wgpu::FragmentState { 41 | module: &fs_module, 42 | entry_point: "main", 43 | // Describes how the colour will be interpolated 44 | // and assigned to the output attachment. 45 | targets: &[sc_desc.format.into()], 46 | }), 47 | // How the triangles will be rasterized. This is more important 48 | // for the terrain because of the beneath-the water shot. 49 | // This is also dependent on how the triangles are being generated. 50 | primitive: wgpu::PrimitiveState { 51 | // What kind of data are we passing in? 52 | topology: wgpu::PrimitiveTopology::TriangleList, 53 | front_face: wgpu::FrontFace::Ccw, 54 | ..Default::default() 55 | }, 56 | // Describes how us writing to the depth/stencil buffer 57 | // will work. Since this is water, we need to read from the 58 | // depth buffer both as a texture in the shader, and as an 59 | // input attachment to do depth-testing. We don't write, so 60 | // depth_write_enabled is set to false. This is called 61 | // RODS or read-only depth stencil. 62 | depth_stencil: Some(wgpu::DepthStencilState { 63 | format: wgpu::TextureFormat::Depth24PlusStencil8, 64 | depth_write_enabled: true, 65 | depth_compare: wgpu::CompareFunction::Less, 66 | stencil: wgpu::StencilState::default(), 67 | bias: wgpu::DepthBiasState::default(), 68 | }), 69 | // depth_stencil: None, 70 | // No multisampling is used. 71 | multisample: wgpu::MultisampleState::default(), 72 | }); 73 | 74 | pipeline 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/shaders/allsky.frag: -------------------------------------------------------------------------------- 1 | // shader.frag 2 | #version 440 3 | 4 | layout(location=0) in vec2 pos_cs; 5 | layout(location=0) out vec4 f_color; 6 | layout(location=1) in vec3 pos_xyz; 7 | 8 | layout(set = 0, binding = 0) uniform texture3D t_map; 9 | layout(set = 0, binding = 1) uniform sampler s_map; 10 | layout(set = 0, binding = 2) 11 | uniform RotationMatrix { 12 | mat4 rot; 13 | }; 14 | const float TWICE_PI = 6.28318530718f; 15 | const float PI = 3.141592653589793f; 16 | const float FOUR_OVER_PI = 1.27323954474f; 17 | const float TRANSITION_Z = 0.66666666666f; 18 | const float TRANSITION_Z_INV = 1.5f; 19 | 20 | int quarter(vec2 p) { 21 | int x_neg = int(p.x < 0.0f); 22 | int y_neg = int(p.y < 0.0f); 23 | int q = (x_neg + y_neg) | (y_neg << 1); 24 | return q; 25 | } 26 | 27 | float xpm1(vec2 p) { 28 | bool x_neg = (p.x < 0.0f); 29 | //debug_assert!(x_neg <= 1); 30 | bool y_neg = (p.y < 0.0f); 31 | //debug_assert!(y_neg <= 1); 32 | // The purpose it to have the same numerical precision for each base cell 33 | // by avoiding subtraction by 1 or 3 or 5 or 7 34 | float lon = atan(abs(p.y), abs(p.x)); 35 | //debug_assert!(0.0 <= lon && lon <= PI / 2.0); 36 | float x02 = lon * FOUR_OVER_PI; 37 | //debug_assert!(0.0 <= x02 && x02 <= 2.0); 38 | if (x_neg != y_neg) { // Could be replaced by a sign copy from (x_neg ^ y_neg) << 32 39 | return 1.0f - x02; 40 | } else { 41 | return x02 - 1.0f; 42 | } 43 | } 44 | 45 | float one_minus_z_pos(vec3 p) { 46 | //debug_assert!(z > 0.0); 47 | float d2 = dot(p.xy, p.xy); // z = sqrt(1 - d2) AND sqrt(1 - x) = 1 - x / 2 - x^2 / 8 - x^3 / 16 - 5 x^4/128 - 7 * x^5/256 48 | 49 | if (d2 < 1e-1f) { // <=> dec > 84.27 deg 50 | return d2 * (0.5f + d2 * (0.125f + d2 * (0.0625f + d2 * (0.0390625f + d2 * 0.02734375f)))); 51 | } 52 | return 1.0f - p.z; 53 | } 54 | 55 | float 
one_minus_z_neg(vec3 p) { 56 | //debug_assert!(z < 0.0); 57 | float d2 = dot(p.xy, p.xy); // z = sqrt(1 - d2) AND sqrt(1 - x) = 1 - x / 2 - x^2 / 8 - x^3 / 16 - 5 x^4/128 - 7 * x^5/256 58 | if (d2 < 1e-1f) { // <=> dec < -84.27 deg 59 | // 0.5 * d2 + 0.125 * d2 * d2 60 | return d2 * (0.5f + d2 * (0.125f + d2 * (0.0625f + d2 * (0.0390625f + d2 * 0.02734375f)))); 61 | } 62 | return p.z + 1.0f; 63 | } 64 | 65 | // Z-Order curve projection. 66 | int ij2z(int i, int j) { 67 | int i1 = i | (j << 16); 68 | 69 | int j1 = (i1 ^ (i1 >> 8)) & 0x0000FF00; 70 | int i2 = i1 ^ j1 ^ (j1 << 8); 71 | 72 | int j2 = (i2 ^ (i2 >> 4)) & 0x00F000F0; 73 | int i3 = i2 ^ j2 ^ (j2 << 4); 74 | 75 | int j3 = (i3 ^ (i3 >> 2)) & 0x0C0C0C0C; 76 | int i4 = i3 ^ j3 ^ (j3 << 2); 77 | 78 | int j4 = (i4 ^ (i4 >> 1)) & 0x22222222; 79 | int i5 = i4 ^ j4 ^ (j4 << 1); 80 | 81 | return i5; 82 | } 83 | 84 | struct HashDxDy { 85 | int idx; 86 | float dx; 87 | float dy; 88 | }; 89 | 90 | // Returns the cell number (hash value) associated with the given position on the unit sphere, 91 | // together with the offset `(dx, dy)` on the Euclidean plane of the projected position with 92 | // respect to the origin of the cell (South vertex). 93 | // # Inputs: 94 | // - `depth` in `[0, 14]` (so that and HEALPix cell number can be stored on an unsigned integer) 95 | // - `x`: in `[-1.0, 1.0]` 96 | // - `y`: in `[-1.0, 1.0]` 97 | // - `z`: in `[-1.0, 1.0]` 98 | // # Output 99 | // - the cell number (hash value) associated with the given position on the unit sphere, 100 | // in `[0, 12*nside^2[` 101 | // - `dx`: the positional offset $\in [0, 1[$ along the south-to-east axis 102 | // - `dy`: the positional offset $\in [0, 1[$ along the south-to-west axis 103 | // # WARNING 104 | // - The function assumes, without checking, that the input vector is a unit vector 105 | // (hence `x^2 + y^2 + z^2 = 1`) !! 106 | // - Operations being made on simple precision float, the precision is lower than `~0.2 arcsec` only!! 107 | // - At depth 13, the precision on `(dx, dy)` is better than `(1/512, 1/512)`, i.e. 2e-3. 108 | HashDxDy hash_with_dxdy(int depth, vec3 p) { 109 | //assert!(depth <= 14); 110 | //assert!(-1.0 <= x && x <= 1.0); 111 | //assert!(-1.0 <= y && y <= 1.0); 112 | //assert!(-1.0 <= z && z <= 1.0); 113 | //debug_assert!(1.0 - (x * x + y * y + z * z) < 1e-5); 114 | // A f32 mantissa contains 23 bits. 115 | // - it basically means that when storing (x, y) coordinates, 116 | // we can go as deep as depth 24 (or maybe 25) 117 | 118 | int nside = 1 << depth; 119 | float half_nside = float(nside) * 0.5f; 120 | 121 | float x_pm1 = xpm1(p.xy); 122 | int q = quarter(p.xy); 123 | 124 | int d0h = 0; 125 | vec2 p_proj = vec2(0.f); 126 | if (p.z > TRANSITION_Z) { 127 | // North polar cap, Collignon projection. 
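// sqrt(3 * (1 - z)) goes from 0 at the pole (z = 1) to 1 at the transition
// latitude (z = TRANSITION_Z = 2/3), so the longitude offset x_pm1 shrinks
// as the position approaches the pole.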
128 | // - set the origin to (PI/4, 0) 129 | float sqrt_3_one_min_z = sqrt(3.0f * one_minus_z_pos(p)); 130 | p_proj = vec2(x_pm1 * sqrt_3_one_min_z, 2.0f - sqrt_3_one_min_z); 131 | d0h = q; 132 | } else if (p.z < -TRANSITION_Z) { 133 | // South polar cap, Collignon projection 134 | // - set the origin to (PI/4, -PI/2) 135 | float sqrt_3_one_min_z = sqrt(3.0f * one_minus_z_neg(p)); 136 | p_proj = vec2(x_pm1 * sqrt_3_one_min_z, sqrt_3_one_min_z); 137 | d0h = q + 8; 138 | } else { 139 | // Equatorial region, Cylindrical equal area projection 140 | // - set the origin to (PI/4, 0) if q = 2 141 | // - set the origin to (PI/4, -PI/2) if q = 0 142 | // - set the origin to (0, -TRANSITION_LAT) if q = 3 143 | // - set the origin to (PI/2, -TRANSITION_LAT) if q = 1 144 | // let zero_or_one = (x_cea as u8) & 1; 145 | float y_pm1 = p.z * TRANSITION_Z_INV; 146 | // |\2/| 147 | // .3X1. 148 | // |/0\| 149 | int q01 = int(x_pm1 > y_pm1); // 0/1 150 | //debug_assert!(q01 == 0 || q01 == 1); 151 | int q12 = int(x_pm1 >= -y_pm1); // 0\1 152 | //debug_assert!(q12 == 0 || q12 == 1); 153 | int q03 = 1 - q12; // 1\0 154 | //let q13 = q01 ^ q12; debug_assert!(q13 == 0 || q13 == 1); 155 | int q1 = q01 & q12; // = 1 if q1, 0 else 156 | //debug_assert!( q1 == 0 || q1 == 1); 157 | // x: xcea - 0 if q3 | xcea - 2 if q1 | xcea - 1 if q0 or q2 158 | //let x_proj = x_pm1 - ((q01 + q12) as i8 - 1) as f32; 159 | // y: y - 0 if q2 | y - 1 if q1 or q3 | y - 2 if q0 160 | //let y_proj = y_pm1 + (q01 + q03) as f32; 161 | p_proj = vec2( 162 | x_pm1 - float(q01 + q12 - 1), 163 | y_pm1 + float(q01 + q03) 164 | ); 165 | // d0h: +8 if q0 | +4 if q3 | +5 if q1 166 | d0h = ((q01 + q03) << 2) + ((q + q1) & 3); 167 | } 168 | 169 | // Coords inside the base cell 170 | float x = (half_nside * (p_proj.x + p_proj.y)); 171 | float y = (half_nside * (p_proj.y - p_proj.x)); 172 | int i = int(x); 173 | int j = int(y); 174 | 175 | return HashDxDy( 176 | (d0h << (depth << 1)) | ij2z(i, j), 177 | x - float(i), 178 | y - float(j) 179 | ); 180 | } 181 | 182 | vec4 get_color(vec3 pos) { 183 | HashDxDy result = hash_with_dxdy(0, pos.zxy); 184 | int idx = result.idx; 185 | vec2 uv = vec2(result.dx, result.dy); 186 | 187 | vec3 tq = vec3(uv.y, uv.x, ((float(idx) + 0.5)/12.0)); 188 | 189 | return texture(sampler3D(t_map, s_map), tq); 190 | } 191 | 192 | void main() { 193 | // Retrieve the position from the texture 194 | //vec3 pos_ws = normalize((texture(sampler2D(t_world_pos, s_world_pos), pos_cs).xyz - 0.5) * 2.0); 195 | vec3 pos_ws = normalize(pos_xyz); 196 | // Rotate it 197 | vec3 rotated_p = vec3(rot * vec4(pos_ws, 1.0)); 198 | 199 | f_color = get_color(rotated_p); 200 | //f_color = vec4(rotated_p, 1.0); 201 | } 202 | -------------------------------------------------------------------------------- /src/shaders/allsky.vert: -------------------------------------------------------------------------------- 1 | // shader.vert 2 | #version 440 3 | 4 | layout(location=0) in vec2 a_ndc; 5 | layout(location=1) in vec3 a_xyz; 6 | 7 | layout(location=0) out vec2 pos_cs; 8 | layout(location=1) out vec3 pos_xyz; 9 | 10 | layout(set = 0, binding = 3) 11 | uniform Window { 12 | vec4 size; 13 | }; 14 | 15 | void main() { 16 | gl_Position = vec4(vec2(a_ndc.x * size.x, a_ndc.y * size.y), 0.0, 1.0); 17 | pos_cs = a_ndc*0.5 + 0.5; 18 | pos_xyz = a_xyz; 19 | } -------------------------------------------------------------------------------- /src/texture.rs: -------------------------------------------------------------------------------- 1 | use image::GenericImageView; 
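// The fragment shader (allsky.frag) samples `t_map` as a `sampler3D` whose depth
// coordinate is (idx + 0.5) / 12.0, i.e. one layer per HEALPix base cell. A
// hypothetical way to build such a 12-layer texture with the helper below
// (`tiles`, `side` and the label are illustrative names, not part of this crate):
//
//     let side = 512u32;                                       // tile width/height (illustrative)
//     let tiles = vec![0u8; (side * side * 12 * 4) as usize];  // 12 RGBA tiles, concatenated
//     let t_map = Texture::from_raw_bytes::<u8>(
//         &device,
//         &queue,
//         Some(&tiles[..]),
//         (side, side, 12), // (width, height, depth_or_array_layers)
//         4,                // bytes per RGBA8 pixel
//         "allsky tile map",
//     );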
2 | 3 | pub struct Texture { 4 | pub texture: wgpu::Texture, 5 | pub view: wgpu::TextureView, 6 | pub sampler: wgpu::Sampler, 7 | num_bytes_per_pixel: usize, 8 | } 9 | use byte_slice_cast::*; 10 | 11 | pub trait TextureFormat: ToByteSlice { 12 | const WGPU_FORMAT: wgpu::TextureFormat; 13 | } 14 | 15 | impl TextureFormat for f32 { 16 | const WGPU_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba32Float; 17 | } 18 | impl TextureFormat for u8 { 19 | const WGPU_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb; 20 | } 21 | impl Texture { 22 | fn from_bytes_rgba( 23 | device: &wgpu::Device, 24 | queue: &wgpu::Queue, 25 | format: wgpu::TextureFormat, 26 | rgba: Option<&[u8]>, 27 | dimensions: (u32, u32, u32), 28 | num_bytes_per_pixel: usize, 29 | label: &str, 30 | ) -> Self { 31 | let dimension = if dimensions.2 == 1 { 32 | wgpu::TextureDimension::D2 33 | } else { 34 | wgpu::TextureDimension::D3 35 | }; 36 | let texture = device.create_texture(&wgpu::TextureDescriptor { 37 | label: Some(label), 38 | // All textures are stored as 3D, we represent our 2D texture 39 | // by setting depth to 1. 40 | size: wgpu::Extent3d { 41 | width: dimensions.0, 42 | height: dimensions.1, 43 | depth_or_array_layers: dimensions.2, 44 | }, 45 | mip_level_count: 1, 46 | sample_count: 1, 47 | dimension, 48 | format, 49 | // TEXTURE_BINDING tells wgpu that we want to use this texture in shaders 50 | // COPY_DST means that we want to copy data to this texture 51 | usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, 52 | view_formats: &[], 53 | }); 54 | 55 | let view = texture.create_view(&wgpu::TextureViewDescriptor::default()); 56 | let sampler = device.create_sampler(&wgpu::SamplerDescriptor { 57 | address_mode_u: wgpu::AddressMode::Repeat, 58 | address_mode_v: wgpu::AddressMode::Repeat, 59 | address_mode_w: wgpu::AddressMode::Repeat, 60 | mag_filter: wgpu::FilterMode::Linear, 61 | min_filter: wgpu::FilterMode::Nearest, 62 | mipmap_filter: wgpu::FilterMode::Nearest, 63 | ..Default::default() 64 | }); 65 | 66 | let texture = Self { 67 | texture, 68 | view, 69 | sampler, 70 | num_bytes_per_pixel, 71 | }; 72 | 73 | if let Some(rgba) = rgba { 74 | texture.write_data(queue, (0, 0, 0), rgba, dimensions); 75 | } 76 | 77 | texture 78 | } 79 | 80 | pub fn write_data( 81 | &self, 82 | queue: &wgpu::Queue, 83 | origin: (u32, u32, u32), 84 | data: &[u8], 85 | dimensions: (u32, u32, u32), 86 | ) { 87 | let size = wgpu::Extent3d { 88 | width: dimensions.0, 89 | height: dimensions.1, 90 | depth_or_array_layers: dimensions.2, 91 | }; 92 | queue.write_texture( 93 | wgpu::ImageCopyTexture { 94 | texture: &self.texture, 95 | mip_level: 0, 96 | origin: wgpu::Origin3d { 97 | x: origin.0, 98 | y: origin.1, 99 | z: origin.2, 100 | }, 101 | aspect: wgpu::TextureAspect::All, 102 | }, 103 | data, 104 | wgpu::ImageDataLayout { 105 | offset: 0, 106 | bytes_per_row: Some((self.num_bytes_per_pixel as u32) * dimensions.0), 107 | rows_per_image: Some(dimensions.1), 108 | }, 109 | size, 110 | ); 111 | } 112 | 113 | pub fn from_raw_bytes( 114 | device: &wgpu::Device, 115 | queue: &wgpu::Queue, 116 | bytes: Option<&[T]>, 117 | dimensions: (u32, u32, u32), 118 | num_bytes_per_pixel: usize, 119 | label: &str, 120 | ) -> Self { 121 | let bytes = bytes.and_then(|bytes| Some(T::to_byte_slice(bytes))); 122 | Self::from_bytes_rgba( 123 | device, 124 | queue, 125 | T::WGPU_FORMAT, 126 | bytes, 127 | dimensions, 128 | num_bytes_per_pixel, 129 | label, 130 | ) 131 | } 132 | 133 | // rgba images 134 | pub fn 
from_image( 135 | device: &wgpu::Device, 136 | queue: &wgpu::Queue, 137 | img: &image::DynamicImage, 138 | label: &str, 139 | ) -> Self { 140 | let rgba = img.as_rgba8().unwrap(); 141 | let dimensions = img.dimensions(); 142 | 143 | Self::from_bytes_rgba( 144 | device, 145 | queue, 146 | wgpu::TextureFormat::Rgba8UnormSrgb, 147 | Some(rgba), 148 | (dimensions.0, dimensions.1, 1), 149 | 4, 150 | label, 151 | ) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /src/time.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_arch = "wasm32"))] 2 | use std::time::Instant; 3 | 4 | pub(crate) struct Clock { 5 | #[cfg(not(target_arch = "wasm32"))] 6 | instant: std::time::Instant, 7 | #[cfg(target_arch = "wasm32")] 8 | start: f32, 9 | } 10 | 11 | impl Clock { 12 | pub(crate) fn now() -> Self { 13 | #[cfg(not(target_arch = "wasm32"))] 14 | { 15 | let instant = Instant::now(); 16 | Self { instant } 17 | } 18 | #[cfg(target_arch = "wasm32")] 19 | { 20 | let window = web_sys::window().expect("should have a window in this context"); 21 | let performance = window 22 | .performance() 23 | .expect("performance should be available"); 24 | let start = performance.now() as f32; 25 | 26 | Self { start } 27 | } 28 | } 29 | 30 | pub(crate) fn elapsed_as_secs(&self) -> f32 { 31 | #[cfg(target_arch = "wasm32")] 32 | { 33 | let window = web_sys::window().expect("should have a window in this context"); 34 | let performance = window 35 | .performance() 36 | .expect("performance should be available"); 37 | (performance.now() as f32 - self.start) / 1000.0 38 | } 39 | #[cfg(not(target_arch = "wasm32"))] 40 | self.instant.elapsed().as_secs_f32() 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /src/triangulation.rs: -------------------------------------------------------------------------------- 1 | use cgmath::Vector2; 2 | 3 | pub struct Triangulation {} 4 | use crate::math::Float; 5 | struct Face { 6 | min: Vector2, 7 | max: Vector2, 8 | } 9 | 10 | #[derive(Clone, Copy)] 11 | pub enum Direction { 12 | BottomLeft, 13 | BottomRight, 14 | TopLeft, 15 | TopRight, 16 | } 17 | impl Face 18 | where 19 | T: Float, 20 | { 21 | fn new(min: Vector2, max: Vector2) -> Face { 22 | Face { min, max } 23 | } 24 | 25 | fn split(self) -> [Face; 4] { 26 | let bl = self.get_child(Direction::BottomLeft); 27 | let br = self.get_child(Direction::BottomRight); 28 | let tr = self.get_child(Direction::TopRight); 29 | let tl = self.get_child(Direction::TopLeft); 30 | 31 | [bl, br, tr, tl] 32 | } 33 | 34 | fn get_farthest_vertex(&self) -> (Vector2, Direction) { 35 | let x_neg = self.min.x < T::zero(); 36 | let y_neg = self.min.y < T::zero(); 37 | 38 | if x_neg && y_neg { 39 | // bottom-left 40 | (self.min, Direction::BottomLeft) 41 | } else if !x_neg && !y_neg { 42 | // top-right 43 | (self.max, Direction::TopRight) 44 | } else if !x_neg && y_neg { 45 | // bottom-right 46 | (Vector2::new(self.max.x, self.min.y), Direction::BottomRight) 47 | } else { 48 | // top-left 49 | (Vector2::new(self.min.x, self.max.y), Direction::TopLeft) 50 | } 51 | } 52 | 53 | fn get_nearest_dir(&self) -> Direction { 54 | let x_neg = self.min.x < T::zero(); 55 | let y_neg = self.min.y < T::zero(); 56 | 57 | if x_neg && y_neg { 58 | // bottom-left 59 | Direction::TopRight 60 | } else if !x_neg && !y_neg { 61 | // top-right 62 | Direction::BottomLeft 63 | } else if !x_neg && y_neg { 64 | // bottom-right 65 | 
Direction::TopLeft 66 | } else { 67 | // top-left 68 | Direction::BottomRight 69 | } 70 | } 71 | 72 | fn get_nearest_vertex(&self) -> Vector2 { 73 | let x_neg = self.min.x < T::zero(); 74 | let y_neg = self.min.y < T::zero(); 75 | 76 | if x_neg && y_neg { 77 | // bottom-left 78 | self.max 79 | } else if !x_neg && !y_neg { 80 | // top-right 81 | self.min 82 | } else if !x_neg && y_neg { 83 | // bottom-right 84 | Vector2::new(self.min.x, self.max.y) 85 | } else { 86 | // top-left 87 | Vector2::new(self.max.x, self.min.y) 88 | } 89 | } 90 | 91 | fn get_vertex(&self, d: Direction) -> Vector2 { 92 | match d { 93 | Direction::BottomLeft => self.min, 94 | Direction::BottomRight => Vector2::new(self.max.x, self.min.y), 95 | Direction::TopLeft => Vector2::new(self.min.x, self.max.y), 96 | Direction::TopRight => self.max, 97 | } 98 | } 99 | 100 | pub fn add( 101 | &self, 102 | vertices: &mut Vec>, 103 | idx: &mut Vec, 104 | dir_farthest_vertex: Direction, 105 | ) { 106 | let bl = self.get_vertex(Direction::BottomLeft); 107 | let br = self.get_vertex(Direction::BottomRight); 108 | let tr = self.get_vertex(Direction::TopRight); 109 | let tl = self.get_vertex(Direction::TopLeft); 110 | 111 | let off_idx = vertices.len() as u32; 112 | 113 | // push the 4 vertices 114 | vertices.push(bl); 115 | vertices.push(br); 116 | vertices.push(tr); 117 | vertices.push(tl); 118 | 119 | match dir_farthest_vertex { 120 | Direction::TopLeft | Direction::BottomRight => { 121 | // push the 6 indexes 122 | idx.extend( 123 | [ 124 | off_idx, 125 | off_idx + 1, 126 | off_idx + 3, 127 | off_idx + 1, 128 | off_idx + 2, 129 | off_idx + 3, 130 | ] 131 | .iter(), 132 | ); 133 | // LINES drawing 134 | /*idx.extend([ 135 | off_idx, 136 | off_idx + 1, 137 | off_idx + 1, 138 | off_idx + 3, 139 | off_idx + 3, 140 | off_idx, 141 | 142 | off_idx + 1, 143 | off_idx + 2, 144 | off_idx + 2, 145 | off_idx + 3, 146 | off_idx + 3, 147 | off_idx + 1, 148 | ].iter());*/ 149 | } 150 | _ => { 151 | // push the 6 indexes 152 | idx.extend( 153 | [ 154 | off_idx, 155 | off_idx + 1, 156 | off_idx + 2, 157 | off_idx, 158 | off_idx + 2, 159 | off_idx + 3, 160 | ] 161 | .iter(), 162 | ); 163 | // LINES drawing 164 | /*idx.extend([ 165 | off_idx, 166 | off_idx + 1, 167 | off_idx + 1, 168 | off_idx + 2, 169 | off_idx + 2, 170 | off_idx, 171 | 172 | off_idx, 173 | off_idx + 2, 174 | off_idx + 2, 175 | off_idx + 3, 176 | off_idx + 3, 177 | off_idx, 178 | ].iter());*/ 179 | } 180 | } 181 | } 182 | 183 | fn add_triangle( 184 | &self, 185 | p: &[Vector2; 3], 186 | vertices: &mut Vec>, 187 | idx: &mut Vec, 188 | ) { 189 | let off_idx = vertices.len() as u32; 190 | 191 | // push the 4 vertices 192 | vertices.push(p[0]); 193 | vertices.push(p[1]); 194 | vertices.push(p[2]); 195 | 196 | // push the 6 indexes 197 | idx.extend([off_idx, off_idx + 1, off_idx + 2].iter()); 198 | // LINES drawing 199 | /*idx.extend([ 200 | off_idx, 201 | off_idx + 1, 202 | off_idx + 1, 203 | off_idx + 2, 204 | off_idx + 2, 205 | off_idx, 206 | ].iter());*/ 207 | } 208 | 209 | pub fn get_child(&self, d: Direction) -> Self { 210 | let center = (self.min + self.max) * T::from(0.5).unwrap(); 211 | let (min, max) = match d { 212 | Direction::BottomLeft => { 213 | let min = self.min; 214 | let max = center; 215 | (min, max) 216 | } 217 | Direction::BottomRight => { 218 | let min = Vector2::new(center.x, self.min.y); 219 | let max = Vector2::new(self.max.x, center.y); 220 | (min, max) 221 | } 222 | Direction::TopLeft => { 223 | let min = Vector2::new(self.min.x, center.y); 224 | let max = 
Vector2::new(center.x, self.max.y); 225 | (min, max) 226 | } 227 | Direction::TopRight => { 228 | let min = center; 229 | let max = self.max; 230 | (min, max) 231 | } 232 | }; 233 | 234 | Face { min, max } 235 | } 236 | } 237 | 238 | fn recursive_triangulation>( 239 | face: &Face, 240 | vertices: &mut Vec>, 241 | idx: &mut Vec, 242 | depth: u8, 243 | first: &mut bool, 244 | ) { 245 | let (farthest_vertex, dir_farthest_vertex) = face.get_farthest_vertex(); 246 | if depth > 0 { 247 | // Look if the square is totally included in the projection 248 | if P::is_included_inside_projection(&farthest_vertex) && depth < 1 { 249 | face.add(vertices, idx, dir_farthest_vertex); 250 | // If not check if is traversed by the border of the projection 251 | } else { 252 | let nearest_vertex = face.get_nearest_vertex(); 253 | if P::is_included_inside_projection(&nearest_vertex) { 254 | // The nearest is included and the farthest not, 255 | // so let's subdivide the cell 256 | // subdivision 257 | // top-left 258 | recursive_triangulation::( 259 | &face.get_child(Direction::TopLeft), 260 | vertices, 261 | idx, 262 | depth - 1, 263 | first, 264 | ); 265 | // top-right 266 | recursive_triangulation::( 267 | &face.get_child(Direction::TopRight), 268 | vertices, 269 | idx, 270 | depth - 1, 271 | first, 272 | ); 273 | // bottom-left 274 | recursive_triangulation::( 275 | &face.get_child(Direction::BottomLeft), 276 | vertices, 277 | idx, 278 | depth - 1, 279 | first, 280 | ); 281 | // bottom-right 282 | recursive_triangulation::( 283 | &face.get_child(Direction::BottomRight), 284 | vertices, 285 | idx, 286 | depth - 1, 287 | first, 288 | ); 289 | } 290 | } 291 | } else { 292 | if P::is_included_inside_projection(&farthest_vertex) { 293 | face.add(vertices, idx, dir_farthest_vertex); 294 | 295 | return; 296 | } 297 | 298 | match face.get_nearest_dir() { 299 | // x < 0 && y < 0 300 | Direction::TopRight => { 301 | let tr = face.get_vertex(Direction::TopRight); 302 | if !P::is_included_inside_projection(&tr) { 303 | return; 304 | } 305 | 306 | let tl = face.get_vertex(Direction::TopLeft); 307 | let br = face.get_vertex(Direction::BottomRight); 308 | 309 | if !P::is_included_inside_projection(&tl) && !P::is_included_inside_projection(&br) 310 | { 311 | let (x1, _) = P::solve_along_abscissa(tl.y).unwrap(); 312 | let (y1, _) = P::solve_along_ordinate(br.x).unwrap(); 313 | 314 | let tl_r = Vector2::new(x1, tl.y); 315 | let br_r = Vector2::new(br.x, y1); 316 | face.add_triangle(&[tl_r, br_r, tr], vertices, idx); 317 | } else if P::is_included_inside_projection(&tl) 318 | && !P::is_included_inside_projection(&br) 319 | { 320 | let (y1, _) = P::solve_along_ordinate(br.x).unwrap(); 321 | let (y2, _) = P::solve_along_ordinate(tl.x).unwrap(); 322 | 323 | face.add_triangle(&[tl, Vector2::new(tl.x, y2), tr], vertices, idx); 324 | face.add_triangle( 325 | &[Vector2::new(tl.x, y2), Vector2::new(br.x, y1), tr], 326 | vertices, 327 | idx, 328 | ); 329 | } else if !P::is_included_inside_projection(&tl) 330 | && P::is_included_inside_projection(&br) 331 | { 332 | let (x1, _) = P::solve_along_abscissa(tr.y).unwrap(); 333 | let (x2, _) = P::solve_along_abscissa(br.y).unwrap(); 334 | 335 | face.add_triangle( 336 | &[Vector2::new(x1, tr.y), Vector2::new(x2, br.y), tr], 337 | vertices, 338 | idx, 339 | ); 340 | face.add_triangle(&[Vector2::new(x2, br.y), br, tr], vertices, idx); 341 | } else if P::is_included_inside_projection(&tl) 342 | && P::is_included_inside_projection(&br) 343 | { 344 | let (y1, _) = 
P::solve_along_ordinate(tl.x).unwrap(); 345 | let (x2, _) = P::solve_along_abscissa(br.y).unwrap(); 346 | 347 | let u = Vector2::new(tl.x, y1); 348 | let v = Vector2::new(x2, br.y); 349 | face.add_triangle(&[tl, u, tr], vertices, idx); 350 | face.add_triangle(&[tr, u, v], vertices, idx); 351 | face.add_triangle(&[v, br, tr], vertices, idx); 352 | } 353 | } 354 | // x > 0 && y > 0 355 | Direction::BottomLeft => { 356 | let bl = face.get_vertex(Direction::BottomLeft); 357 | if !P::is_included_inside_projection(&bl) { 358 | return; 359 | } 360 | 361 | let tl = face.get_vertex(Direction::TopLeft); 362 | let br = face.get_vertex(Direction::BottomRight); 363 | 364 | if !P::is_included_inside_projection(&tl) && !P::is_included_inside_projection(&br) 365 | { 366 | let (_, x2) = P::solve_along_abscissa(br.y).unwrap(); 367 | let (_, y2) = P::solve_along_ordinate(tl.x).unwrap(); 368 | 369 | let u = Vector2::new(x2, br.y); 370 | let v = Vector2::new(tl.x, y2); 371 | face.add_triangle(&[u, v, bl], vertices, idx); 372 | } else if P::is_included_inside_projection(&tl) 373 | && !P::is_included_inside_projection(&br) 374 | { 375 | let (_, x1) = P::solve_along_abscissa(tl.y).unwrap(); 376 | let (_, x2) = P::solve_along_abscissa(br.y).unwrap(); 377 | 378 | let u = Vector2::new(x1, tl.y); 379 | let v = Vector2::new(x2, br.y); 380 | 381 | face.add_triangle(&[tl, bl, u], vertices, idx); 382 | face.add_triangle(&[u, bl, v], vertices, idx); 383 | } else if !P::is_included_inside_projection(&tl) 384 | && P::is_included_inside_projection(&br) 385 | { 386 | let (_, y1) = P::solve_along_ordinate(tl.x).unwrap(); 387 | let (_, y2) = P::solve_along_ordinate(br.x).unwrap(); 388 | 389 | let u = Vector2::new(tl.x, y1); 390 | let v = Vector2::new(br.x, y2); 391 | 392 | face.add_triangle(&[u, bl, v], vertices, idx); 393 | face.add_triangle(&[v, bl, br], vertices, idx); 394 | } else if P::is_included_inside_projection(&tl) 395 | && P::is_included_inside_projection(&br) 396 | { 397 | let (_, x1) = P::solve_along_abscissa(tl.y).unwrap(); 398 | let (_, y2) = P::solve_along_ordinate(br.x).unwrap(); 399 | 400 | let u = Vector2::new(x1, tl.y); 401 | let v = Vector2::new(br.x, y2); 402 | 403 | face.add_triangle(&[tl, bl, u], vertices, idx); 404 | face.add_triangle(&[v, u, bl], vertices, idx); 405 | face.add_triangle(&[v, bl, br], vertices, idx); 406 | } 407 | } 408 | // x > 0 && y < 0 409 | Direction::TopLeft => { 410 | let tl = face.get_vertex(Direction::TopLeft); 411 | if !P::is_included_inside_projection(&tl) { 412 | return; 413 | } 414 | 415 | let tr = face.get_vertex(Direction::TopRight); 416 | let bl = face.get_vertex(Direction::BottomLeft); 417 | 418 | if !P::is_included_inside_projection(&bl) && !P::is_included_inside_projection(&tr) 419 | { 420 | let (y1, _) = P::solve_along_ordinate(bl.x).unwrap(); 421 | let (_, x2) = P::solve_along_abscissa(tr.y).unwrap(); 422 | 423 | let u = Vector2::new(bl.x, y1); 424 | let v = Vector2::new(x2, tr.y); 425 | face.add_triangle(&[u, v, tl], vertices, idx); 426 | } else if P::is_included_inside_projection(&bl) 427 | && !P::is_included_inside_projection(&tr) 428 | { 429 | let (_, x1) = P::solve_along_abscissa(bl.y).unwrap(); 430 | let (_, x2) = P::solve_along_abscissa(tr.y).unwrap(); 431 | 432 | let u = Vector2::new(x1, bl.y); 433 | let v = Vector2::new(x2, tr.y); 434 | 435 | face.add_triangle(&[tl, bl, u], vertices, idx); 436 | face.add_triangle(&[tl, u, v], vertices, idx); 437 | } else if !P::is_included_inside_projection(&bl) 438 | && P::is_included_inside_projection(&tr) 439 | { 440 | 
let (y1, _) = P::solve_along_ordinate(bl.x).unwrap(); 441 | let (y2, _) = P::solve_along_ordinate(tr.x).unwrap(); 442 | 443 | let u = Vector2::new(bl.x, y1); 444 | let v = Vector2::new(tr.x, y2); 445 | 446 | face.add_triangle(&[tl, u, v], vertices, idx); 447 | face.add_triangle(&[tl, v, tr], vertices, idx); 448 | } else if P::is_included_inside_projection(&bl) 449 | && P::is_included_inside_projection(&tr) 450 | { 451 | let (_, x1) = P::solve_along_abscissa(bl.y).unwrap(); 452 | let (y2, _) = P::solve_along_ordinate(tr.x).unwrap(); 453 | 454 | let u = Vector2::new(x1, bl.y); 455 | let v = Vector2::new(tr.x, y2); 456 | 457 | face.add_triangle(&[tl, bl, u], vertices, idx); 458 | face.add_triangle(&[tl, u, v], vertices, idx); 459 | face.add_triangle(&[tl, v, tr], vertices, idx); 460 | } 461 | } 462 | // x < 0 && y > 0 463 | Direction::BottomRight => { 464 | let br = face.get_vertex(Direction::BottomRight); 465 | if !P::is_included_inside_projection(&br) { 466 | return; 467 | } 468 | 469 | let tr = face.get_vertex(Direction::TopRight); 470 | let bl = face.get_vertex(Direction::BottomLeft); 471 | 472 | if !P::is_included_inside_projection(&bl) && !P::is_included_inside_projection(&tr) 473 | { 474 | let (x2, _) = P::solve_along_abscissa(bl.y).unwrap(); 475 | let (_, y1) = P::solve_along_ordinate(tr.x).unwrap(); 476 | 477 | let u = Vector2::new(x2, bl.y); 478 | let v = Vector2::new(tr.x, y1); 479 | face.add_triangle(&[u, br, v], vertices, idx); 480 | } else if P::is_included_inside_projection(&bl) 481 | && !P::is_included_inside_projection(&tr) 482 | { 483 | let (_, y1) = P::solve_along_ordinate(bl.x).unwrap(); 484 | let (_, y2) = P::solve_along_ordinate(tr.x).unwrap(); 485 | 486 | let u = Vector2::new(bl.x, y1); 487 | let v = Vector2::new(tr.x, y2); 488 | 489 | face.add_triangle(&[u, bl, br], vertices, idx); 490 | face.add_triangle(&[u, br, v], vertices, idx); 491 | } else if !P::is_included_inside_projection(&bl) 492 | && P::is_included_inside_projection(&tr) 493 | { 494 | let (x1, _) = P::solve_along_abscissa(bl.y).unwrap(); 495 | let (x2, _) = P::solve_along_abscissa(tr.y).unwrap(); 496 | 497 | let u = Vector2::new(x1, bl.y); 498 | let v = Vector2::new(x2, tr.y); 499 | 500 | face.add_triangle(&[u, br, v], vertices, idx); 501 | face.add_triangle(&[v, br, tr], vertices, idx); 502 | } else if P::is_included_inside_projection(&bl) 503 | && P::is_included_inside_projection(&tr) 504 | { 505 | let (_, y1) = P::solve_along_ordinate(bl.x).unwrap(); 506 | let (x2, _) = P::solve_along_abscissa(tr.y).unwrap(); 507 | 508 | let u = Vector2::new(bl.x, y1); 509 | let v = Vector2::new(x2, tr.y); 510 | 511 | face.add_triangle(&[bl, br, u], vertices, idx); 512 | face.add_triangle(&[u, br, v], vertices, idx); 513 | face.add_triangle(&[v, br, tr], vertices, idx); 514 | } 515 | } 516 | } 517 | } 518 | } 519 | use crate::projection::Projection; 520 | use crate::Vertex; 521 | impl Triangulation { 522 | pub fn create>() -> (Vec, Vec) { 523 | let (mut vertices, mut idx) = (Vec::new(), Vec::new()); 524 | 525 | let root = Face::new(Vector2::new(-1.0, -1.0), Vector2::new(1.0, 1.0)); 526 | let children = root.split(); 527 | 528 | let depth = 6; 529 | let mut first = false; 530 | recursive_triangulation::(&children[0], &mut vertices, &mut idx, depth, &mut first); 531 | recursive_triangulation::(&children[1], &mut vertices, &mut idx, depth, &mut first); 532 | recursive_triangulation::(&children[2], &mut vertices, &mut idx, depth, &mut first); 533 | recursive_triangulation::(&children[3], &mut vertices, &mut idx, depth, &mut 
first); 534 | 535 | let vertices: Vec = vertices 536 | .into_iter() 537 | .map(|v| { 538 | let xyz = P::clip_to_world_space(&v).unwrap(); 539 | Vertex { 540 | ndc: [v.x, v.y], 541 | xyz: [xyz.x, xyz.y, xyz.z], 542 | } 543 | }) 544 | .collect(); 545 | 546 | (vertices, idx) 547 | } 548 | } 549 | -------------------------------------------------------------------------------- /src/vertex.rs: -------------------------------------------------------------------------------- 1 | #[repr(C)] 2 | #[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)] 3 | pub struct Vertex { 4 | pub ndc: [f32; 2], 5 | pub xyz: [f32; 3], 6 | } 7 | impl Vertex { 8 | pub fn desc() -> wgpu::VertexBufferLayout<'static> { 9 | wgpu::VertexBufferLayout { 10 | array_stride: std::mem::size_of::() as wgpu::BufferAddress, 11 | step_mode: wgpu::VertexStepMode::Vertex, 12 | attributes: &[ 13 | // 3. 14 | wgpu::VertexAttribute { 15 | offset: 0, 16 | shader_location: 0, 17 | format: wgpu::VertexFormat::Float32x2, 18 | }, 19 | wgpu::VertexAttribute { 20 | offset: 2 * std::mem::size_of::() as u64, 21 | shader_location: 1, 22 | format: wgpu::VertexFormat::Float32x3, 23 | }, 24 | ], 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /vite.config.ts: -------------------------------------------------------------------------------- 1 | /// 2 | import * as path from 'path' 3 | import {resolve} from 'path' 4 | import {defineConfig} from 'vite'; 5 | // For wasm inclusion 6 | import wasm from "vite-plugin-wasm"; 7 | import topLevelAwait from "vite-plugin-top-level-await"; 8 | // For wasm generated by wasm-pack 9 | import wasmPack from 'vite-plugin-wasm-pack'; 10 | 11 | export default defineConfig({ 12 | build: { 13 | minify: 'esbuild', 14 | lib: { 15 | // Could also be a dictionary or array of multiple entry points 16 | entry: resolve(__dirname, 'pkg/wgpu_sky_rendering.js'), 17 | name: 'wgpu_sky_rendering', 18 | formats: ["umd", "es"], 19 | // the proper extensions will be added 20 | fileName: 'wgpu_sky_rendering', 21 | }, 22 | //rollupOptions: {}, 23 | format: "es", 24 | //target: ["es2015", "chrome58", "edge18", "firefox57", "node12", "safari11"], 25 | //target: ["es2015"], 26 | // Relative to the root 27 | outDir: resolve(__dirname, 'dist'), 28 | }, 29 | //publicDir: resolve(__dirname, 'src/img'), 30 | plugins: [ 31 | wasm(), 32 | wasmPack(__dirname), 33 | topLevelAwait(), 34 | ], 35 | resolve: { 36 | alias: [ 37 | {find: '@', replacement: path.resolve(__dirname, '/src')}, 38 | {find: '#', replacement: path.resolve(__dirname, '/tests/unit')}, 39 | {find: '$', replacement: path.resolve(__dirname, '/tests/e2e')} 40 | ], 41 | }, 42 | test: { 43 | globals: true, 44 | environment: 'happy-dom', 45 | include: [ 46 | 'tests/unit/**/*.{test,spec}.{js,ts}' 47 | ], 48 | deps: { 49 | inline: ['pkg'], 50 | }, 51 | }, 52 | server: { 53 | open: 'index.html', 54 | }, 55 | }); --------------------------------------------------------------------------------