├── .gitignore
├── README.md
├── compute.js
├── image.png
├── index.html
├── main.js
├── output.js
├── scene.js
├── style.css
└── video.mp4

/.gitignore:
--------------------------------------------------------------------------------
.DS_Store

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# WebGPU Path Tracing

A simple path tracer powered by WebGPU compute shaders. [Try the demo!](https://iamferm.in/webgpu-path-tracing)

![screenshot](./image.png)

## Path tracer? WebGPU?

Path tracing is a computer graphics rendering technique that creates realistic images by simulating how light interacts with the objects in a virtual scene: it models the physical behavior of light as it travels through the scene, bouncing from surface to surface.

This implementation uses WebGPU compute shaders. WebGPU is an API that exposes the capabilities of modern GPU hardware, allowing accelerated graphics and compute in web applications.
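At its core, the technique is a short loop. Below is a minimal JavaScript sketch of tracing a single path — hypothetical helpers like `worldHit` and `randomHemisphereDir` stand in for the real WGSL implementation in `compute.js`:

```js
// Per-channel RGB product of two colors.
const mul = (a, b) => a.map((x, i) => x * b[i]);

// Conceptual sketch: trace one light path for one pixel sample.
function tracePath(ray, maxBounces) {
  let throughput = [1, 1, 1]; // fraction of light surviving the bounces so far
  for (let depth = 0; depth < maxBounces; depth++) {
    const hit = worldHit(ray);  // hypothetical: closest intersection, or null
    if (!hit) return [0, 0, 0]; // the ray escaped into the (black) background
    if (hit.material.emissive) {
      // the path reached a light: return its emission scaled by the throughput
      return mul(throughput, hit.material.emission);
    }
    throughput = mul(throughput, hit.material.color); // the surface absorbs some light
    ray = { origin: hit.point, direction: randomHemisphereDir(hit.normal) };
  }
  return [0, 0, 0]; // the path never reached a light
}
```

Averaging many such samples per pixel converges towards the final image; the shader in `compute.js` additionally samples the light source directly to reduce noise.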
## What it does NOT have (yet)

* Any materials other than Lambertian diffuse and metal
* Bounding volume hierarchies
* More than one light source
* A denoising stage
* Motion blur
* Textures
* Fog

## How To Use

You can try the demo [here](https://iamferm.in/webgpu-path-tracing).

If you prefer to run it locally, you can just clone the repository and use your favourite web server; it doesn't have any dependencies!
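For example, with Python installed (any static file server works):

```sh
git clone https://github.com/ferminLR/webgpu-path-tracing.git
cd webgpu-path-tracing
python3 -m http.server 8000
# then open http://localhost:8000 in a WebGPU-capable browser
```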
## You may also like...

* [Raytracing in one weekend](https://raytracing.github.io/)
* [WebGL Path Tracing](https://github.com/evanw/webgl-path-tracing)

--------------------------------------------------------------------------------
/compute.js:
--------------------------------------------------------------------------------
// shaders where the path tracing magic happens

const computeWGSL = `
@group(0) @binding(0) var outputTex : texture_storage_2d<rgba8unorm, write>;
@group(0) @binding(1) var inputTex : texture_2d<f32>;
@group(0) @binding(2) var<storage, read> vertex : array<vec3f>;
@group(0) @binding(3) var<storage, read> index : array<vec3u>;
@group(0) @binding(4) var<storage, read> meshes : array<Mesh>;
@group(0) @binding(5) var<storage, read> materials : array<Material>;
@group(0) @binding(6) var<uniform> uniforms : Uniforms;

struct Mesh {
  vi : u32, // first vertex
  fi : u32, // first face
  nv : u32, // total vertices
  nf : u32, // total faces
}

struct Material {
  color : vec4f,
  emission : vec4f,
  metallic : f32,
  roughness : f32,
}

struct Ray {
  origin : vec3f,
  direction : vec3f,
}

struct HitRecord {
  hit : bool,
  point : vec3f,
  normal : vec3f,
  material : Material,
  t : f32,
}

struct Uniforms {
  seed : f32,
  weight : f32,
  cam_azimuth : f32,
  cam_elevation : f32,
  bounces : u32,
  samples : u32,
};

var<private> seed : f32;
var<private> pixel : vec2f;

// tiny hash-based PRNG, seeded per pixel and advanced on every call
fn random() -> f32 {
  let result = fract(sin(seed / 100.0 * dot(pixel, vec2(12.9898, 78.233))) * 43758.5453);
  seed += 1.0;
  return result;
}

fn v2random() -> vec2f {
  let r1 = random();
  let r2 = random();
  return vec2f(r1, r2);
}

fn random_in_unit_disk() -> vec2f {
  let r1 = random()*2.0-1.0;
  let r2 = random()*2.0-1.0;
  return vec2f(r1, r2);
}

// note: this actually samples a cube, a cheap approximation that is good enough here
fn random_in_unit_sphere() -> vec3f {
  let r1 = random()*2.0-1.0;
  let r2 = random()*2.0-1.0;
  let r3 = random()*2.0-1.0;
  return vec3f(r1, r2, r3);
}

fn mesh_random_point(mesh : Mesh, pdf : ptr<function, f32>) -> vec3f {
  // get a random triangle, should be weighted by the triangle areas!
  let trg = min(u32(f32(mesh.nf) * random()), mesh.nf-1) + mesh.fi;

  let vi = index[trg];
  let v0 = vertex[vi[0]];
  let v1 = vertex[vi[1]];
  let v2 = vertex[vi[2]];

  // uniform barycentric coordinates
  var u = random();
  var v = random();
  if (u + v > 1.0) { // fold samples that fall outside the triangle back inside
    u = 1.0 - u;
    v = 1.0 - v;
  }
  let w = 1.0 - u - v;

  // here we are again assuming all the triangles have the same area
  let trg_area = length(cross(v1 - v0, v2 - v0)) * 0.5;
  *pdf = 1.0/(f32(mesh.nf)*trg_area);

  return v0*u + v1*v + v2*w;
}

fn ray_at(r : Ray, t : f32) -> vec3f {
  return r.origin + r.direction * t;
}

// Möller–Trumbore ray-triangle intersection algorithm
// from http://www.graphics.cornell.edu/pubs/1997/MT97.pdf
const EPSILON : f32 = 0.000001;
fn triangle_hit(r : Ray, v0 : vec3f, v1 : vec3f, v2 : vec3f, t : ptr<function, f32>) -> bool {

  let e1 = v1 - v0;
  let e2 = v2 - v0;
  let p = cross(r.direction, e2);
  let det = dot(e1, p);

  // check if ray is parallel to triangle
  if (abs(det) < EPSILON) { return false; }

  // calculate barycentric coordinate u
  let inv_det = 1.0 / det;
  let s = r.origin - v0; // called T in the paper; renamed to avoid confusion with *t
  let u = inv_det * dot(s, p);

  if (u < 0.0 || u > 1.0) { return false; }

  // calculate barycentric coordinate v
  let q = cross(s, e1);
  let v = inv_det * dot(r.direction, q);

  if (v < 0.0 || u + v > 1.0) { return false; }

  // distance from the ray origin to the hit point
  *t = inv_det * dot(e2, q);
  if (*t < EPSILON) { return false; }

  // backface culling
  if (dot(cross(e1, e2), r.direction) > 0.0) { return false; }

  return true;
}

fn world_hit(r : Ray) -> HitRecord {

  var hit_rec : HitRecord;
  hit_rec.hit = false;
  var t = 100000000.0;
  var closest_hit = t;

  // loop through all the meshes in the scene
  for(var m = 0; m < 6; m++){

    let mesh = meshes[m];
    // loop through all the triangles in each mesh
    for(var i = mesh.fi; i < mesh.fi+mesh.nf; i++){

      let vi = index[i];
      let v0 = vertex[vi[0]];
      let v1 = vertex[vi[1]];
      let v2 = vertex[vi[2]];
      let hit_bool = triangle_hit(r, v0, v1, v2, &t);

      // we have to return the closest hit to the ray origin
      if(hit_bool && t < closest_hit) {
        closest_hit = t;
        hit_rec.hit = true;
        hit_rec.t = t;
        hit_rec.normal = normalize(cross(v1 - v0, v2 - v0));
        hit_rec.point = ray_at(r, t) + hit_rec.normal*EPSILON;
        hit_rec.material = materials[m];
      }
    }
  }

  return hit_rec;
}

fn ray_color(r : Ray) -> vec3f {

  var depth = 0u;
  var color = vec3(0.0, 0.0, 0.0); // background color
  var ray = r;
  var hit_result = world_hit(ray);

  var final_color = vec3(0.0, 0.0, 0.0); // background at first
  var bounced_color = vec3(1.0, 1.0, 1.0);

  // recursion is not allowed in WGSL, so bounce iteratively
  while(depth < uniforms.bounces+1 && hit_result.hit){

    // if the ray hits an emissive material, return its emission directly
    if (hit_result.material.emission.a > 0.0) {
      final_color = hit_result.material.emission.rgb;
      break;

    } else if (hit_result.material.metallic >= random()) {
      let hit_point = hit_result.point;
      ray.origin = hit_point;
      ray.direction = reflect(ray.direction, hit_result.normal);

      // surface roughness
      ray.direction += random_in_unit_sphere()*hit_result.material.roughness;
      ray.direction = normalize(ray.direction);

      bounced_color *= hit_result.material.color.rgb;
      depth++;

      hit_result = world_hit(ray);

    } else {
      let hit_point = hit_result.point;

      // bias towards lights
      var light_pdf = 1.0;
      let light_point = mesh_random_point(meshes[3], &light_pdf);
      let lh = light_point - hit_point;

      var shadow_ray : Ray;
      shadow_ray.origin = hit_point;
      shadow_ray.direction = normalize(lh);

      var shadow_hit = world_hit(shadow_ray);

      if (shadow_hit.material.emission.a > 0.0 && random() > 0.5) {
        final_color = (1/light_pdf)
          * 1/(pow(shadow_hit.t, 2))
          * shadow_hit.material.emission.rgb
          * abs(dot(shadow_hit.normal, shadow_ray.direction))
          * hit_result.material.color.rgb
          * abs(dot(hit_result.normal, shadow_ray.direction));
        break;

      } else {
        // scatter
        ray.origin = hit_point;
        ray.direction = normalize(hit_result.normal + random_in_unit_sphere());

        bounced_color *= hit_result.material.color.rgb;
        depth++;

        hit_result = world_hit(ray);
      }
    }
  }

  color = final_color*bounced_color;
  return color;
}

@compute @workgroup_size(8, 8, 1)
fn compute_main(@builtin(global_invocation_id) GlobalInvocationID: vec3u) {

  // set the private vars
  let pos = GlobalInvocationID.xy;
  pixel = vec2f(pos)/512.0;
  seed = uniforms.seed; // initial seed

  // setup camera
  var ray : Ray;
  let camera_center = vec3(0.0, 0.0, 1.0796);
  var color = vec4(0.0, 0.0, 0.0, 1.0);
  var samples = uniforms.samples;

  // camera rotation
  let camera_rot_y = mat3x3(
    cos(uniforms.cam_azimuth), 0.0, sin(uniforms.cam_azimuth),
    0.0, 1.0, 0.0,
    -sin(uniforms.cam_azimuth), 0.0, cos(uniforms.cam_azimuth),
  );
  let camera_rot_x = mat3x3(
    1.0, 0.0, 0.0,
    0.0, cos(uniforms.cam_elevation), -sin(uniforms.cam_elevation),
    0.0, sin(uniforms.cam_elevation), cos(uniforms.cam_elevation),
  );
  let camera_matrix = camera_rot_y * camera_rot_x;

  // (reconstructed: the original tail of this file was lost; the loop below
  // follows the setup above and the ping-pong accumulation in main.js)
  // repeat each pixel "samples" times
  for(var i = 0u; i < samples; i++){

    // antialiasing: jitter the sample position inside its pixel
    let jitter = (v2random() - 0.5)/512.0;
    let uv = pixel + jitter;

    // shoot a ray from the (rotated) camera through the image plane at z = 0
    let target = vec3(uv.x - 0.5, 0.5 - uv.y, 0.0);
    ray.origin = camera_matrix * camera_center;
    ray.direction = camera_matrix * normalize(target - camera_center);

    color = vec4(color.rgb + ray_color(ray), 1.0);
  }
  color = vec4(color.rgb / f32(samples), 1.0);

  // blend this pass with the accumulated passes in the input texture;
  // uniforms.weight = 1/step keeps a running average across frames
  let accumulated = textureLoad(inputTex, pos, 0);
  textureStore(outputTex, pos, mix(accumulated, color, uniforms.weight));
}
`
export default computeWGSL;

--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
<!doctype html>
<!-- markup reconstructed: the original tags were lost in extraction, only the text survived -->
<html lang="en">

<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>WebGPU Path Tracing</title>
  <link rel="stylesheet" href="./style.css">
  <script type="module" src="./main.js"></script>
</head>
<body>
  <h1>WebGPU Path Tracing</h1>
  <h3>A simple path tracer powered by WebGPU compute shaders</h3>

  <div id="viewport">
    <canvas width="512" height="512"></canvas>
  </div>

  <p class="center">Click and drag to move the camera.</p>

  <p>Path tracing is a computer graphics rendering technique that creates realistic images by simulating how light interacts with the objects in a virtual scene: it models the physical behavior of light as it travels through the scene, bouncing from surface to surface.</p>

  <p>This implementation uses WebGPU compute shaders. WebGPU is an API that exposes the capabilities of modern GPU hardware, allowing accelerated graphics and compute in web applications.</p>

  <p>Source code available at <a href="https://github.com/ferminLR/webgpu-path-tracing">GitHub</a>.</p>

  <p>Made by <a href="https://iamferm.in">Fermin Lozano</a></p>
</body>

</html>

--------------------------------------------------------------------------------
/main.js:
--------------------------------------------------------------------------------
import outputWGSL from './output.js';
import computeWGSL from './compute.js';
import scene from './scene.js';

// when WebGPU is not available, show a video instead
const showVideo = (error) => {
  const disclaimer = document.createElement("div");
  disclaimer.id = "error";
  disclaimer.innerHTML = "⚠️ " + error + "<br> 🎥 This is a video recording of the scene.";

  const video = document.createElement("video");
  video.src = "./video.mp4";
  video.poster = "./image.png";
  video.autoplay = true;
  video.loop = true;
  video.muted = true;
  video.height = 512;
  video.width = 512;
  video.setAttribute("playsinline", "playsinline");

  const viewport = document.querySelector("#viewport");
  viewport.append(disclaimer);
  viewport.append(video);
  canvas.style.display = "none";
}

// Initialize WebGPU context
const canvas = document.querySelector("canvas");
if (!navigator.gpu) {
  const e = "This browser does not support WebGPU.";
  showVideo(e);
  throw new Error(e);
}

const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
  const e = "Your GPU does not support WebGPU.";
  showVideo(e);
  throw new Error(e);
}

const device = await adapter.requestDevice();

const context = canvas.getContext("webgpu");
const canvasFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
  device: device,
  format: canvasFormat,
});

// Setup the output render pipeline
const outputShaderModule = device.createShaderModule({
  label: "Output shader",
  code: outputWGSL
});

const renderOutputPipeline = device.createRenderPipeline({
  label: "Output render pipeline",
  layout: 'auto',
  vertex: {
    module: outputShaderModule,
    entryPoint: "vert_main",
  },
  fragment: {
    module: outputShaderModule,
    entryPoint: "frag_main",
    targets: [{
      format: canvasFormat
    }]
  },
});

const sampler = device.createSampler({
  magFilter: 'linear',
  minFilter: 'linear',
});

// Two textures for ping pong swap to accumulate compute passes
const textureA = device.createTexture({
  size: {
    width: 512,
    height: 512,
  },
  format: 'rgba8unorm',
  usage:
    GPUTextureUsage.COPY_DST |
    GPUTextureUsage.STORAGE_BINDING |
    GPUTextureUsage.TEXTURE_BINDING,
});

const textureB = device.createTexture({
  size: {
    width: 512,
    height: 512,
  },
  format: 'rgba8unorm',
  usage:
    GPUTextureUsage.COPY_DST |
    GPUTextureUsage.STORAGE_BINDING |
    GPUTextureUsage.TEXTURE_BINDING,
});

// Two bind groups to render the last accumulated compute pass
const renderOutputBindGroup = [
  device.createBindGroup({
    layout: renderOutputPipeline.getBindGroupLayout(0),
    entries: [
      {
        binding: 0,
        resource: sampler,
      },
      {
        binding: 1,
        resource: textureA.createView(),
      },
    ],
  }),
  device.createBindGroup({
    layout: renderOutputPipeline.getBindGroupLayout(0),
    entries: [
      {
        binding: 0,
        resource: sampler,
      },
      {
        binding: 1,
        resource: textureB.createView(),
      },
    ],
  }),
];

// Setup the compute pipeline
const computeShaderModule = device.createShaderModule({
  label: "Compute shader",
  code: computeWGSL
});

const computePipeline = device.createComputePipeline({
  label: "Compute pipeline",
  layout: 'auto',
  compute: {
    module: computeShaderModule,
    entryPoint: "compute_main",
  }
});

// Populate the GPU buffers from imported scene data
const vertexBuffer = device.createBuffer({
  label: "vertex buffer",
  size: scene.vertexArray.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(vertexBuffer, 0, scene.vertexArray);

const indexBuffer = device.createBuffer({
  label: "index buffer",
  size: scene.indexArray.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(indexBuffer, 0, scene.indexArray);

const meshBuffer = device.createBuffer({
  label: "mesh buffer",
  size: scene.meshArray.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(meshBuffer, 0, scene.meshArray);

const materialBuffer = device.createBuffer({
  label: "material buffer",
  size: scene.materialArray.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(materialBuffer, 0, scene.materialArray);

// Compute shader uniforms
const computeUniformsArray = new ArrayBuffer(24);
const computeUniformsFloat = new Float32Array(computeUniformsArray, 0, 4);
const computeUniformsUint = new Uint32Array(computeUniformsArray, 16, 2);

computeUniformsFloat[0] = 100.0; // seed
computeUniformsFloat[1] = 1.0;   // weight
computeUniformsFloat[2] = 0.0;   // cam_azimuth
computeUniformsFloat[3] = 0.0;   // cam_elevation
computeUniformsUint[0] = 1;      // bounces
computeUniformsUint[1] = 1;      // samples

const computeUniformsBuffer = device.createBuffer({
  label: "Compute uniforms",
  size: computeUniformsArray.byteLength,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(computeUniformsBuffer, 0, computeUniformsArray);

// Two bind groups to accumulate compute passes
const computeBindGroup = [
  device.createBindGroup({
    layout: computePipeline.getBindGroupLayout(0),
    entries: [
      {
        binding: 0,
        resource: textureA.createView(),
      },
      {
        binding: 1,
        resource: textureB.createView(),
      },
      {
        binding: 2,
        resource: { buffer: vertexBuffer },
      },
      {
        binding: 3,
        resource: { buffer: indexBuffer }
      },
      {
        binding: 4,
        resource: { buffer: meshBuffer }
      },
      {
        binding: 5,
        resource: { buffer: materialBuffer }
      },
      {
        binding: 6,
        resource: { buffer: computeUniformsBuffer },
      },
    ],
  }),
  device.createBindGroup({
    layout: computePipeline.getBindGroupLayout(0),
    entries: [
      {
        binding: 0,
        resource: textureB.createView(),
      },
      {
        binding: 1,
        resource: textureA.createView(),
      },
      {
        binding: 2,
        resource: { buffer: vertexBuffer },
      },
      {
        binding: 3,
        resource: { buffer: indexBuffer }
      },
      {
        binding: 4,
        resource: { buffer: meshBuffer }
      },
      {
        binding: 5,
        resource: { buffer: materialBuffer }
      },
      {
        binding: 6,
        resource: { buffer: computeUniformsBuffer },
      },
    ],
  }),
]

let initialSeed = 100.0;
let step = 0;
let cameraAzimuth = 0.0;
let cameraElevation = 0.0;
let requestId;

const renderLoop = () => {

  if (step > 100) return; // stop passes after 100 steps

  const encoder = device.createCommandEncoder();

  // Do the compute
  const computePass = encoder.beginComputePass();
  computePass.setPipeline(computePipeline);
  computePass.setBindGroup(0, computeBindGroup[step%2]);
  computePass.dispatchWorkgroups(64, 64);
  computePass.end();

  // Output render
  const pass = encoder.beginRenderPass({
    colorAttachments: [{
      view: context.getCurrentTexture().createView(),
      loadOp: "clear",
      clearValue: { r: 0, g: 0, b: 0, a: 1 },
      storeOp: "store",
    }]
  });
  pass.setPipeline(renderOutputPipeline);
  pass.setBindGroup(0, renderOutputBindGroup[step%2]);
  pass.draw(6, 1);
  pass.end();

  // Update uniforms buffer
  initialSeed += 0.01;
  computeUniformsFloat[0] = initialSeed;
  computeUniformsFloat[1] = 1.0/++step;
  computeUniformsFloat[2] = cameraAzimuth;
  computeUniformsFloat[3] = cameraElevation;

  // when moving the camera, to improve responsiveness
  // reduce samples and bounces to the minimum
  if (pointerMoving){
    computeUniformsUint[0] = 1; // bounces
    computeUniformsUint[1] = 1; // samples
  }else{
    computeUniformsUint[0] = 4;
    computeUniformsUint[1] = 5;
  }

  device.queue.writeBuffer(computeUniformsBuffer, 0, computeUniformsArray);

  // Submit the command buffer
  device.queue.submit([encoder.finish()]);

  // just one pass when moving the camera
  if (pointerMoving) return;

  requestId = requestAnimationFrame(renderLoop);
}

requestId = requestAnimationFrame(renderLoop);

// Camera orbit controls
let pointerPrevX = 0, pointerPrevY = 0;
let pointerMoving = false;

const onPointerMove = (e) => {
  e.preventDefault();
  e = typeof(e.touches) != 'undefined' ? e.touches[0] : e;

  cameraAzimuth += (e.clientX - pointerPrevX) * Math.PI/180;
  cameraElevation += (e.clientY - pointerPrevY) * Math.PI/180;
  pointerPrevX = e.clientX;
  pointerPrevY = e.clientY;

  // reset renderloop
  step = 0;
  if(requestId) cancelAnimationFrame(requestId);
  requestId = requestAnimationFrame(renderLoop);
}

// mobile touch events
canvas.addEventListener('touchmove', onPointerMove);
canvas.addEventListener('touchstart', (e) => {
  pointerPrevX = e.touches[0].clientX;
  pointerPrevY = e.touches[0].clientY;
  pointerMoving = true;
});

canvas.addEventListener('touchend', (e) => {
  pointerMoving = false;
  requestId = requestAnimationFrame(renderLoop);
});

// desktop mouse events
canvas.addEventListener('mousedown', (e) => {
  pointerPrevX = e.clientX;
  pointerPrevY = e.clientY;
  canvas.addEventListener('mousemove', onPointerMove);
  pointerMoving = true;
});

addEventListener('mouseup', () => {
  canvas.removeEventListener( 'mousemove', onPointerMove );
  pointerMoving = false;
  requestId = requestAnimationFrame(renderLoop);
});

--------------------------------------------------------------------------------
/output.js:
--------------------------------------------------------------------------------
// shaders to render a texture in the viewport

const outputWGSL = `
@group(0) @binding(0) var mySampler : sampler;
@group(0) @binding(1) var myTexture : texture_2d<f32>;

struct VertexOutput {
  @builtin(position) Position : vec4f,
  @location(0) fragUV : vec2f,
}

@vertex
fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
  // two triangles covering the whole viewport
  var pos = array<vec2f, 6>(
    vec2( 1.0,  1.0),
    vec2( 1.0, -1.0),
    vec2(-1.0, -1.0),
    vec2( 1.0,  1.0),
    vec2(-1.0, -1.0),
    vec2(-1.0,  1.0),
  );

  var uv = array<vec2f, 6>(
    vec2(1.0, 1.0),
    vec2(1.0, 0.0),
    vec2(0.0, 0.0),
    vec2(1.0, 1.0),
    vec2(0.0, 0.0),
    vec2(0.0, 1.0),
  );

  var output : VertexOutput;
  output.Position = vec4(pos[VertexIndex], 0.0, 1.0);
  output.fragUV = uv[VertexIndex];
  return output;
}

@fragment
fn frag_main(@location(0) fragUV : vec2f) -> @location(0) vec4f {
  let result = textureSample(myTexture, mySampler, fragUV);
  return result;
}
`
export default outputWGSL;

--------------------------------------------------------------------------------
/scene.js:
--------------------------------------------------------------------------------
// cornell box scene, saved as a bunch of javascript typed arrays

const vertexArray = new Float32Array(4*36); // 36 vertices, 4 floats each (xyz + padding)
const indexArray = new Uint32Array(4*36);   // 36 faces, 4 uints each (3 indices + padding)

var totalVertices = 0;
const addVertex = (x,y,z) => {
  vertexArray[totalVertices++] = x;
  vertexArray[totalVertices++] = y;
  vertexArray[totalVertices++] = z;
  vertexArray[totalVertices++] = 0.0;
}

var totalIndices = 0;
const addFace = (v0, v1, v2) => {
  indexArray[totalIndices++] = v0-1;
  indexArray[totalIndices++] = v1-1;
  indexArray[totalIndices++] = v2-1;
  indexArray[totalIndices++] = 0;
}

const meshArray = new Uint32Array(4*6);
const materialArray = new Float32Array(12*6); // needed padding
var meshArrayIndex = 0;
var materialArrayIndex = 0;
let vi, fi;

function color(r,g,b,a){
  this.r = r;
  this.g = g;
  this.b = b;
  this.a = a;
}

function material(color, emission, metallic, roughness){
  this.color = color;
  this.emission = emission;
  this.metallic = metallic;
  this.roughness = roughness;
}

const pushMaterial = (mat) => {
  materialArray[materialArrayIndex++] = mat.color.r;
  materialArray[materialArrayIndex++] = mat.color.g;
  materialArray[materialArrayIndex++] = mat.color.b;
  materialArray[materialArrayIndex++] = mat.color.a;
  materialArray[materialArrayIndex++] = mat.emission.r;
  materialArray[materialArrayIndex++] = mat.emission.g;
  materialArray[materialArrayIndex++] = mat.emission.b;
  materialArray[materialArrayIndex++] = mat.emission.a;
  materialArray[materialArrayIndex++] = mat.metallic;
  materialArray[materialArrayIndex++] = mat.roughness;
  materialArray[materialArrayIndex++] = 0.0;
  materialArray[materialArrayIndex++] = 0.0;
}

// Materials
const transparentBlack = new color(0.0, 0.0, 0.0, 0.0);
const gray = new color(0.73, 0.73, 0.73, 1.0);
const red = new color(0.65, 0.05, 0.05, 1.0);
const green = new color(0.12, 0.45, 0.15, 1.0);
const light = new color(15.0, 15.0, 15.0, 1.0);

const grayMaterial = new material(gray, transparentBlack, 0.0, 0.0);
const metalMaterial = new material(gray, transparentBlack, 1.0, 0.0);
const lightMaterial = new material(gray, light, 0.0, 0.0);
const redMaterial = new material(red, transparentBlack, 0.0, 0.0);
const greenMaterial = new material(green, transparentBlack, 0.0, 0.0);

// Floor, back wall and ceiling
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(-0.274799, -0.273000, 0.279600);
addVertex(0.278000, -0.273000, 0.279600);
addVertex(0.278000, -0.273000, -0.279600);
addVertex(-0.271599, -0.273000, -0.279600);
addVertex(-0.277999, 0.275800, 0.279600);
addVertex(-0.277999, 0.275800, -0.279600);
addVertex(0.278000, 0.275800, -0.279600);
addVertex(0.278000, 0.275800, 0.279600);
addFace(1, 2, 3);
addFace(1, 3, 4);
addFace(5, 6, 7);
addFace(5, 7, 8);
addFace(7, 4, 3);
addFace(7, 6, 4);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(grayMaterial);

// Tall block
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(0.013239, -0.272900, -0.017047);
addVertex(0.013239, 0.057100, -0.017047);
addVertex(-0.144353, -0.272900, 0.031839);
addVertex(-0.144353, 0.057100, 0.031839);
addVertex(-0.035647, -0.272900, -0.174639);
addVertex(-0.035647, 0.057100, -0.174639);
addVertex(-0.193239, -0.272900, -0.125753);
addVertex(-0.193239, 0.057100, -0.125753);
addFace(10, 11, 9);
addFace(12, 15, 11);
addFace(16, 13, 15);
addFace(14, 9, 13);
addFace(15, 9, 11);
addFace(12, 14, 16);
addFace(10, 12, 11);
addFace(12, 16, 15);
addFace(16, 14, 13);
addFace(14, 10, 9);
addFace(15, 13, 9);
addFace(12, 10, 14);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(metalMaterial);

// Short block
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(0.195646, -0.272900, 0.055136);
addVertex(0.195646, -0.107900, 0.055136);
addVertex(0.148464, -0.272900, 0.213246);
addVertex(0.148464, -0.107900, 0.213246);
addVertex(0.037536, -0.272900, 0.007954);
addVertex(0.037536, -0.107900, 0.007954);
addVertex(-0.009646, -0.272900, 0.166064);
addVertex(-0.009646, -0.107900, 0.166064);
addFace(18, 19, 17);
addFace(20, 23, 19);
addFace(24, 21, 23);
addFace(22, 17, 21);
addFace(23, 17, 19);
addFace(20, 22, 24);
addFace(18, 20, 19);
addFace(20, 24, 23);
addFace(24, 22, 21);
addFace(22, 18, 17);
addFace(23, 21, 17);
addFace(20, 18, 22);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(grayMaterial);

// Light
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(-0.065000, 0.275700, 0.052600);
addVertex(0.065000, 0.275700, 0.052600);
addVertex(-0.065000, 0.275700, -0.052400);
addVertex(0.065000, 0.275700, -0.052400);
addFace(27, 26, 25);
addFace(27, 28, 26);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(lightMaterial);

// Left wall
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(-0.274799, -0.273000, 0.279600);
addVertex(-0.271599, -0.273000, -0.279600);
addVertex(-0.277999, 0.275800, 0.279600);
addVertex(-0.277999, 0.275800, -0.279600);
addFace(32, 29, 30);
addFace(32, 31, 29);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(redMaterial);

// Right wall
vi = totalVertices/4;
fi = totalIndices/4;
meshArray[meshArrayIndex++] = totalVertices/4;
meshArray[meshArrayIndex++] = totalIndices/4;
addVertex(0.278000, -0.273000, 0.279600);
addVertex(0.278000, -0.273000, -0.279600);
addVertex(0.278000, 0.275800, -0.279600);
addVertex(0.278000, 0.275800, 0.279600);
addFace(36, 34, 33);
addFace(36, 35, 34);
meshArray[meshArrayIndex++] = totalVertices/4 - vi;
meshArray[meshArrayIndex++] = totalIndices/4 - fi;
pushMaterial(greenMaterial);

let scene = {
  vertexArray: vertexArray,
  indexArray: indexArray,
  meshArray: meshArray,
  materialArray: materialArray,
}

export default scene

--------------------------------------------------------------------------------
/style.css:
--------------------------------------------------------------------------------
body
{
  font-family: Arial, Helvetica, sans-serif;
  max-width: 512px;
  margin: auto;
  padding: 1em;
}

h1, h3, .center
{
  text-align: center;
}

#error
{
  text-align: center;
  padding: 0.5em;
  background-color: rgb(187, 130, 47);
  color: white;
  max-width: 512px;
  box-sizing: border-box;
}

#viewport *
{
  max-width: 100%;
  height: auto;
}

p
{
  margin: auto;
  padding: 1em;
}

--------------------------------------------------------------------------------
/video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ferminLR/webgpu-path-tracing/cc9d189db0598905f598e2c744b44e2a31add68d/video.mp4
--------------------------------------------------------------------------------