├── .gitignore ├── media ├── models │ └── city-set-draco.glb └── textures │ └── skybox │ └── cube-basis-mipmap.ktx2 ├── js └── engine │ ├── core │ ├── camera.js │ ├── stage.js │ ├── instance-color.js │ ├── buffers.js │ ├── materials.js │ ├── bounding-volume.js │ ├── entity-group.js │ ├── skin.js │ ├── skybox.js │ ├── render-world.js │ ├── input.js │ ├── animation.js │ ├── light.js │ ├── mesh.js │ ├── transform.js │ └── geometry-layout.js │ ├── webgpu │ ├── webgpu-system.js │ ├── materials │ │ ├── webgpu-materials.js │ │ ├── webgpu-unlit-material.js │ │ ├── webgpu-skybox-material.js │ │ ├── webgpu-pbr-material.js │ │ └── webgpu-material-factory.js │ ├── webgpu-flags.js │ ├── wgsl │ │ ├── unlit-material.js │ │ ├── skybox.js │ │ ├── default-vertex.js │ │ ├── light-sprite.js │ │ ├── debug-point.js │ │ ├── bloom.js │ │ ├── wgsl-utils.js │ │ ├── common.js │ │ ├── shadow.js │ │ └── clustered-light.js │ ├── webgpu-world.js │ ├── webgpu-light-sprite.js │ ├── webgpu-debug-point.js │ ├── webgpu-mesh.js │ ├── webgpu-bind-group-layouts.js │ ├── webgpu-texture-debug.js │ ├── webgpu-clustered-light.js │ ├── webgpu-render-batch.js │ ├── webgpu-buffer.js │ ├── webgpu-render-targets.js │ ├── webgpu-camera.js │ ├── webgpu-bloom.js │ └── webgpu-renderer.js │ ├── README.md │ ├── loaders │ └── lib │ │ ├── worker_service.js │ │ ├── worker_pool.js │ │ └── draco-worker.js │ ├── debug │ ├── bone-visualizer.js │ └── bounds-visualizer.js │ ├── geometry │ ├── box.js │ └── sphere.js │ ├── controls │ ├── flying-controls.js │ └── orbit-controls.js │ └── util │ ├── texture-atlas-allocator.js │ └── bvh.js ├── package.json ├── .github └── workflows │ └── main.yml └── index.html /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | -------------------------------------------------------------------------------- /media/models/city-set-draco.glb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toji/webgpu-shadow-playground/HEAD/media/models/city-set-draco.glb -------------------------------------------------------------------------------- /js/engine/core/camera.js: -------------------------------------------------------------------------------- 1 | export class Camera { 2 | fieldOfView = Math.PI * 0.5; 3 | zNear = 0.1; 4 | zFar = 512.0; 5 | } 6 | -------------------------------------------------------------------------------- /media/textures/skybox/cube-basis-mipmap.ktx2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/toji/webgpu-shadow-playground/HEAD/media/textures/skybox/cube-basis-mipmap.ktx2 -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-system.js: -------------------------------------------------------------------------------- 1 | import { System } from '../core/ecs.js'; 2 | import { Stage } from '../core/stage.js'; 3 | 4 | export class WebGPUSystem extends System { 5 | stage = Stage.Render; 6 | }; 7 | -------------------------------------------------------------------------------- /js/engine/core/stage.js: -------------------------------------------------------------------------------- 1 | export const Stage = { 2 | First: -999, 3 | PreFrameLogic: -1, 4 | Default: 0, 5 | PostFrameLogic: 1, 6 | PreRender: 2, 7 | ShadowRender: 3, 8 | Render: 4, 9 | PostRender: 5, 10 | Last: 999 11 | } -------------------------------------------------------------------------------- /js/engine/core/instance-color.js: -------------------------------------------------------------------------------- 1 | const InstanceColorMode = { 2 | Add: 0, 3 | Multiply: 1, 4 | }; 5 | 6 | export class InstanceColor { 7 | buffer = new Float32Array(4); 8 | color = new Float32Array(this.buffer.buffer, 0, 3); 9 | 10 | constructor(color, mode = InstanceColorMode.Add) { 11 | if (color) { 12 | this.color.set(color); 13 | } 14 | this.buffer[3] = mode; 15 | } 16 | } -------------------------------------------------------------------------------- /js/engine/webgpu/materials/webgpu-materials.js: -------------------------------------------------------------------------------- 1 | import { WebGPUMaterialFactory, WebGPUMaterialPipeline, WebGPUMaterialBindGroups, RenderOrder } from './webgpu-material-factory.js'; 2 | 3 | // It's necessary to include these material files to register their factories, 4 | // though we don't need to import anything from them explicitly. 5 | import './webgpu-pbr-material.js'; 6 | import './webgpu-unlit-material.js'; 7 | import './webgpu-skybox-material.js'; 8 | 9 | export { WebGPUMaterialFactory, WebGPUMaterialPipeline, WebGPUMaterialBindGroups, RenderOrder }; 10 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webgpu-shadow-playground", 3 | "version": "0.0.1", 4 | "repository": "https://github.com/toji/webgpu-shadow-playground", 5 | "authors": [ 6 | "Brandon Jones " 7 | ], 8 | "description": "Just playing with shadow rendering!", 9 | "license": "MIT", 10 | "dependencies": { 11 | "dat.gui": "^0.7.7", 12 | "gl-matrix": "^3.4.3", 13 | "matter-js": "^0.17.1", 14 | "stats.js": "^0.17.0", 15 | "update": "^0.7.4", 16 | "web-texture-tool": "github:toji/web-texture-tool", 17 | "wgsl-preprocessor": "^1.0.0" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-flags.js: -------------------------------------------------------------------------------- 1 | export const WEBGPU_DEFAULT_FLAGS = { 2 | // Adapter settings 3 | powerPreference: "high-performance", 4 | 5 | // Render target flags 6 | colorFormat: undefined, // Undefined indicates getPrefferedFormat should be used 7 | depthFormat: 'depth24plus', 8 | sampleCount: 4, 9 | resolutionMultiplier: 1, 10 | 11 | // Shadow mapping flags 12 | shadowsEnabled: true, 13 | shadowResolutionMultiplier: 1, 14 | shadowUpdateFrequency: 1, // A setting of 2 will only update the shadow map every other frame 15 | shadowSamples: 16, // May be 16, 4, or 1 16 | shadowFiltering: true, 17 | 18 | // Bloom 19 | bloomEnabled: true, 20 | }; -------------------------------------------------------------------------------- /js/engine/core/buffers.js: -------------------------------------------------------------------------------- 1 | export class StaticBuffer { 2 | #size; 3 | #usage; 4 | 5 | constructor(size, usage) { 6 | this.#size = size; 7 | this.#usage = usage; 8 | } 9 | 10 | get size() { 11 | return this.#size; 12 | } 13 | 14 | get usage() { 15 | return this.#usage; 16 | } 17 | 18 | get arrayBuffer() { 19 | throw new Error('arrayBuffer getter must be overriden in an extended class'); 20 | } 21 | 22 | finish() { 23 | throw new Error('finish() must be overriden in an extended class'); 24 | } 25 | } 26 | 27 | export class DynamicBuffer extends StaticBuffer { 28 | beginUpdate() { 29 | throw new Error('beginUpdate() must be overriden in an extended class'); 30 | } 31 | } -------------------------------------------------------------------------------- /js/engine/core/materials.js: -------------------------------------------------------------------------------- 1 | let nextMaterialId = 1; 2 | 3 | export class UnlitMaterial { 4 | id = nextMaterialId++; 5 | baseColorFactor = new Float32Array([1.0, 1.0, 1.0, 1.0]); 6 | baseColorTexture; 7 | baseColorSampler; 8 | transparent = false; 9 | doubleSided = false; 10 | alphaCutoff = 0.0; 11 | depthWrite = true; 12 | depthCompare = 'less'; 13 | castsShadow = true; 14 | additiveBlend = false; 15 | }; 16 | 17 | export class PBRMaterial extends UnlitMaterial { 18 | normalTexture; 19 | normalSampler; 20 | metallicFactor = 0.0; 21 | roughnessFactor = 1.0; 22 | metallicRoughnessTexture; 23 | metallicRoughnessSampler; 24 | emissiveFactor = new Float32Array([0.0, 0.0, 0.0]); 25 | emissiveTexture; 26 | emissiveSampler; 27 | occlusionTexture; 28 | occlusionSampler; 29 | occlusionStrength = 1.0; 30 | }; 31 | 32 | export class PBRSpecularGlossMaterial extends UnlitMaterial { 33 | 34 | } -------------------------------------------------------------------------------- /js/engine/README.md: -------------------------------------------------------------------------------- 1 | # A no-name WebGPU/ECS engine 2 | 3 | ## Why you shouldn't use this 4 | 5 | This folder is named "Engine" and looks relatively standalone, right? As a result, you may be 6 | tempted to use it in you own project. 7 | 8 | _This would be a mistake._ 9 | 10 | This engine was built to both allow me to experiment with building an ECS (Entity Component System) 11 | app and to give me a semi-reusable base to build WebGPU things on while the standard evolves, but 12 | I have no plans to put any maintenance or development effort into it other than what I personally 13 | find useful. Bugs filed against the engine that don't have a direct effect on the SpookyBall game 14 | will be ignored. 15 | 16 | With that in mind, feel free to reference or steal from this code as much as you'd like! It's my 17 | hope that it proves to be helpful for developers building their own WebGPU-based software. I just 18 | don't have the time to maintain a publically used 3D engine. -------------------------------------------------------------------------------- /js/engine/core/bounding-volume.js: -------------------------------------------------------------------------------- 1 | import { vec3 } from 'gl-matrix'; 2 | 3 | export const BoundingVolumeType = { 4 | Sphere: 0, 5 | AABB: 1, 6 | } 7 | 8 | const ORIGIN = vec3.create(); 9 | 10 | export class BoundingVolume { 11 | radius = 0; 12 | center = vec3.create(); 13 | min; 14 | max; 15 | 16 | constructor(options = {}) { 17 | if (options.radius !== undefined) { 18 | this.type = BoundingVolumeType.Sphere; 19 | this.radius = options.radius; 20 | this.center.set(options.center || ORIGIN); 21 | } else if (options.min && options.max) { 22 | this.type = BoundingVolumeType.AABB; 23 | this.min = vec3.clone(options.min); 24 | this.max = vec3.clone(options.max); 25 | 26 | // Compute the center 27 | vec3.add(this.center, this.min, this.max); 28 | vec3.scale(this.center, this.center, 0.5); 29 | // Compute a bounding radius 30 | this.radius = vec3.dist(this.center, this.max); 31 | } else { 32 | throw new Error('Must provide either a Sphere (radius, center) or AABB (min/max point).'); 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/unlit-material.js: -------------------------------------------------------------------------------- 1 | import { ColorConversions, DefaultVertexOutput } from './common.js'; 2 | 3 | export const MATERIAL_BUFFER_SIZE = 5 * Float32Array.BYTES_PER_ELEMENT; 4 | export function MaterialStruct(group = 1) { return ` 5 | struct Material { 6 | baseColorFactor : vec4, 7 | alphaCutoff : f32, 8 | }; 9 | @group(${group}) @binding(0) var material : Material; 10 | 11 | @group(${group}) @binding(1) var baseColorTexture : texture_2d; 12 | @group(${group}) @binding(2) var baseColorSampler : sampler; 13 | `; 14 | } 15 | 16 | export function UnlitFragmentSource(layout) { return ` 17 | ${ColorConversions} 18 | ${DefaultVertexOutput(layout)} 19 | ${MaterialStruct()} 20 | 21 | @fragment 22 | fn fragmentMain(input : VertexOutput) -> @location(0) vec4 { 23 | let baseColorMap = textureSample(baseColorTexture, baseColorSampler, input.texcoord); 24 | if (baseColorMap.a < material.alphaCutoff) { 25 | discard; 26 | } 27 | let baseColor = input.color * material.baseColorFactor * baseColorMap; 28 | return vec4(linearTosRGB(baseColor.rgb), baseColor.a); 29 | }`; 30 | }; 31 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Checkout dependencies and deploy 2 | 3 | # Controls when the action will run. 4 | on: 5 | # Triggers the workflow on push or pull request events but only for the main branch 6 | push: 7 | branches: [ main ] 8 | pull_request: 9 | branches: [ main ] 10 | 11 | # Allows you to run this workflow manually from the Actions tab 12 | workflow_dispatch: 13 | 14 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel 15 | jobs: 16 | # This workflow contains a single job called "build" 17 | build: 18 | # The type of runner that the job will run on 19 | runs-on: ubuntu-latest 20 | 21 | # Steps represent a sequence of tasks that will be executed as part of the job 22 | steps: 23 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 24 | - name: Checkout 🛎️ 25 | uses: actions/checkout@v2 26 | 27 | # Installs NPM dependencies 28 | - name: Install 🔧 29 | run: npm install 30 | 31 | - name: Deploy 🚀 32 | uses: Cecilapp/GitHub-Pages-deploy@3.2.0 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | with: 36 | email: tojiro@gmail.com 37 | build_dir: . -------------------------------------------------------------------------------- /js/engine/core/entity-group.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | 3 | // An EntityGroup is a component which contains a list of other entities. When the entity containing 4 | // the group is enabled or disabled, all of the entities in the EntityGroup also get enabled or 5 | // disabled. 6 | export class EntityGroup { 7 | entities = []; 8 | refCount = 0; 9 | 10 | addedToEntity(entity) { 11 | this.refCount++; 12 | } 13 | 14 | removedFromEntity(entity) { 15 | this.refCount--; 16 | if (this.refCount == 0) { 17 | for (const entity of this.entities) { 18 | entity.destroy(); 19 | } 20 | } 21 | } 22 | } 23 | 24 | export class EntityGroupSystem extends System { 25 | prevEnabledState = new WeakMap(); 26 | 27 | init() { 28 | this.groupQuery = this.query(EntityGroup).includeDisabled(); 29 | } 30 | 31 | execute() { 32 | this.groupQuery.forEach((entity, group) => { 33 | const wasEnabled = !!this.prevEnabledState.get(entity); 34 | if (entity.enabled != wasEnabled) { 35 | for (const child of group.entities) { 36 | child.enabled = entity.enabled; 37 | } 38 | this.prevEnabledState.set(entity, entity.enabled); 39 | } 40 | }); 41 | } 42 | } -------------------------------------------------------------------------------- /js/engine/loaders/lib/worker_service.js: -------------------------------------------------------------------------------- 1 | //======== 2 | // Server 3 | //======== 4 | 5 | class TransferResult { 6 | constructor(result, transfer) { 7 | this.result = result; 8 | this.transfer = transfer; 9 | } 10 | } 11 | 12 | class WorkerService { 13 | static #workerServices = []; 14 | static register(workerService) { 15 | WorkerService.#workerServices.push(workerService); 16 | workerService.#listen(); 17 | } 18 | 19 | constructor() { 20 | this.initialized = this.init(); 21 | } 22 | 23 | #listen() { 24 | addEventListener('message', async (msg) => { 25 | await this.initialized; 26 | 27 | const id = msg.data.id; 28 | const args = msg.data.args; 29 | 30 | try { 31 | const result = await this.onDispatch(args); 32 | if (result instanceof TransferResult) { 33 | postMessage({ id, result: result.result }, result.transfer); 34 | } else { 35 | postMessage({ id, result }); 36 | } 37 | 38 | } catch(error) { 39 | postMessage({ id, error }); 40 | } 41 | }); 42 | } 43 | 44 | async init() { 45 | // Override with any custom initialization logic that needs to happen asynchronously. 46 | } 47 | 48 | async onDispatch(args) { 49 | // Override to handle dispatched messages 50 | throw new Error('Classes that extend WorkerPoolService must override onDispatch'); 51 | } 52 | 53 | transfer(result, transfer) { 54 | return new TransferResult(result, transfer); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/skybox.js: -------------------------------------------------------------------------------- 1 | import { CameraStruct, ColorConversions } from './common.js'; 2 | 3 | export const SkyboxVertexSource = ` 4 | ${CameraStruct(0, 0)} 5 | 6 | struct VertexInput { 7 | @builtin(instance_index) instanceIndex : u32, 8 | @location(0) position : vec4, 9 | }; 10 | 11 | struct VertexOutput { 12 | @builtin(position) position : vec4, 13 | @location(0) texCoord : vec3, 14 | }; 15 | 16 | @vertex 17 | fn vertexMain(input : VertexInput) -> VertexOutput { 18 | var output : VertexOutput; 19 | output.texCoord = input.position.xyz; 20 | 21 | var modelView = camera.view; 22 | // Drop the translation portion of the modelView matrix 23 | modelView[3] = vec4(0.0, 0.0, 0.0, modelView[3].w); 24 | output.position = camera.projection * modelView * input.position; 25 | // Returning the W component for both Z and W forces the geometry depth to 26 | // the far plane. When combined with a depth func of "less-equal" this makes 27 | // the sky write to any depth fragment that has not been written to yet. 28 | output.position = output.position.xyww; 29 | return output; 30 | } 31 | `; 32 | 33 | export const SkyboxFragmentSource = ` 34 | ${ColorConversions} 35 | @group(0) @binding(3) var defaultSampler : sampler; 36 | 37 | struct FragmentInput { 38 | @location(0) texCoord : vec3 39 | }; 40 | @group(1) @binding(0) var skyboxTexture : texture_cube; 41 | 42 | @fragment 43 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 44 | let color = textureSample(skyboxTexture, defaultSampler, input.texCoord); 45 | return vec4(linearTosRGB(color.rgb), 1.0); 46 | } 47 | `; -------------------------------------------------------------------------------- /js/engine/debug/bone-visualizer.js: -------------------------------------------------------------------------------- 1 | import { System } from '../core/ecs.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { Mesh, Geometry, Attribute } from '../core/mesh.js'; 4 | import { UnlitMaterial } from '../core/materials.js'; 5 | 6 | const BONE_VERTS = new Float32Array([ 7 | 1.0, 1.0, -1.1, 8 | 1.1, 1.0, 1.0, 9 | 1.0, 1.1, 1.0, 10 | -1.1, 1.0, 1.0, 11 | 1.0, -1.1, 1.0, 12 | 1.0, 1.0, 5.1, 13 | ]); 14 | 15 | const BONE_INDICES = new Uint16Array([ 16 | 0, 1, 0, 2, 0, 3, 0, 4, 17 | 1, 2, 2, 3, 3, 4, 4, 1, 18 | 1, 5, 2, 5, 3, 5, 4, 5, 19 | ]); 20 | 21 | export class BoneVisualizerSystem extends System { 22 | stage = Stage.PostFrameLogic; 23 | 24 | init(gpu) { 25 | const vertexBuffer = gpu.createStaticBuffer(BONE_VERTS, 'vertex'); 26 | const indexBuffer = gpu.createStaticBuffer(BONE_INDICES, 'index'); 27 | 28 | const geometry = new Geometry({ 29 | drawCount: BONE_INDICES.length, 30 | attributes: [ new Attribute('position', vertexBuffer) ], 31 | indices: { buffer: indexBuffer, format: 'uint16' }, 32 | topology: 'line-list' 33 | }); 34 | 35 | const material = new UnlitMaterial(); 36 | material.baseColorFactor[0] = 0.0; 37 | material.baseColorFactor[1] = 1.0; 38 | material.baseColorFactor[2] = 1.0; 39 | material.depthCompare = 'always'; 40 | 41 | this.mesh = new Mesh({ geometry, material }); 42 | } 43 | 44 | execute(delta, time, gpu) { 45 | this.query(Mesh).forEach((entity, mesh) => { 46 | if (mesh.skin) { 47 | for (const transform of mesh.skin.joints) { 48 | gpu.addFrameMeshInstance(this.mesh, transform); 49 | } 50 | } 51 | }); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-world.js: -------------------------------------------------------------------------------- 1 | import { RenderWorld } from '../core/render-world.js'; 2 | 3 | import { WEBGPU_DEFAULT_FLAGS } from './webgpu-flags.js' 4 | import { WebGPUSystem } from './webgpu-system.js'; 5 | import { WebGPUCamera } from './webgpu-camera.js'; 6 | import { WebGPUCameraSystem } from './webgpu-camera.js'; 7 | import { WebGPUClusteredLights } from './webgpu-clustered-light.js'; 8 | import { WebGPUMeshSystem } from './webgpu-mesh.js'; 9 | import { WebGPUShadowSystem } from './webgpu-shadow.js'; 10 | import { WebGPURenderer } from './webgpu-renderer.js'; 11 | import { WebGPUBloomSystem } from './webgpu-bloom.js'; 12 | 13 | class WebGPURenderPass extends WebGPUSystem { 14 | async init(gpu) { 15 | this.cameras = this.query(WebGPUCamera); 16 | } 17 | 18 | execute(delta, time, gpu) { 19 | this.cameras.forEach((entity, camera) => { 20 | gpu.render(camera); 21 | return false; // Don't try to process more than one camera. 22 | }); 23 | } 24 | } 25 | 26 | export class WebGPUWorld extends RenderWorld { 27 | async intializeRenderer(flagOverrides) { 28 | // Apply the default flags and overwrite with any provided ones. 29 | const flags = Object.assign({}, WEBGPU_DEFAULT_FLAGS, flagOverrides); 30 | 31 | const renderer = new WebGPURenderer(); 32 | await renderer.init(this.canvas, flags); 33 | 34 | // Unfortunately the order of these systems is kind of delicate. 35 | this.registerRenderSystem(WebGPUCameraSystem); 36 | this.registerRenderSystem(WebGPUClusteredLights); 37 | this.registerRenderSystem(WebGPUMeshSystem); 38 | 39 | if (flags.shadowsEnabled) { 40 | this.registerRenderSystem(WebGPUShadowSystem); 41 | } 42 | 43 | this.registerRenderSystem(WebGPURenderPass); 44 | 45 | if (flags.bloomEnabled) { 46 | this.registerRenderSystem(WebGPUBloomSystem); 47 | } 48 | 49 | return renderer; 50 | } 51 | } -------------------------------------------------------------------------------- /js/engine/core/skin.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { Mesh } from './mesh.js'; 3 | import { Stage } from './stage.js'; 4 | 5 | let nextSkinId = 1; 6 | 7 | export class Skin { 8 | id = nextSkinId++; 9 | joints = []; 10 | jointBuffer; 11 | ibmBuffer; 12 | ibmOffset; 13 | 14 | constructor(options) { 15 | this.joints.push(...options.joints); 16 | this.ibmBuffer = options.inverseBindMatrixBuffer; 17 | this.ibmOffset = options.inverseBindMatrixOffset || 0; 18 | 19 | const ibmLength = Math.floor((this.ibmBuffer.size - this.ibmOffset) / (16 * Float32Array.BYTES_PER_ELEMENT)); 20 | 21 | if (this.joints.length > ibmLength) { 22 | throw new Error('Skin must have at least as many inverse bind matrices as joints'); 23 | } 24 | } 25 | } 26 | 27 | export class SkinSystem extends System { 28 | stage = Stage.PostFrameLogic; 29 | 30 | async init() { 31 | this.meshQuery = this.query(Mesh); 32 | } 33 | 34 | execute(delta, time, gpu) { 35 | // Look through all of the meshes in the world and update any skins we find. 36 | this.meshQuery.forEach((entity, mesh) => { 37 | const skin = mesh.skin; 38 | if (skin) { 39 | if (!skin.jointBuffer) { 40 | skin.jointBuffer = gpu.createDynamicBuffer(skin.joints.length * 16 * Float32Array.BYTES_PER_ELEMENT, 'joint'); 41 | } else { 42 | skin.jointBuffer.beginUpdate(); 43 | } 44 | 45 | // Push all of the current joint poses into the buffer. 46 | // TODO: Have a way to detect when joints are dirty and only push then. 47 | const buffer = new Float32Array(skin.jointBuffer.arrayBuffer); 48 | for (let i = 0; i < skin.joints.length; ++i) { 49 | buffer.set(skin.joints[i].worldMatrix, i * 16); 50 | } 51 | skin.jointBuffer.finish(); 52 | } 53 | }); 54 | } 55 | } -------------------------------------------------------------------------------- /js/engine/loaders/lib/worker_pool.js: -------------------------------------------------------------------------------- 1 | // This utility eases some of the pain points around worker communication and makes setting up pools 2 | // of workers that all do the same thing trivial. 3 | 4 | //======== 5 | // Client 6 | //======== 7 | 8 | export class WorkerPool { 9 | #workerPath; 10 | #maxWorkerPoolSize; 11 | #onMessage; 12 | #pendingWorkItems = new Map(); 13 | #nextWorkItemId = 1; 14 | #workerPool = []; 15 | #nextWorker = 0; 16 | 17 | constructor(workerPath, maxWorkerPoolSize = 4) { 18 | this.#workerPath = workerPath; 19 | this.#maxWorkerPoolSize = maxWorkerPoolSize; 20 | 21 | this.#onMessage = (msg) => { 22 | const id = msg.data.id; 23 | const workItem = this.#pendingWorkItems.get(id); 24 | if (!workItem) { 25 | console.error(`Got a result for unknown work item ${id}`); 26 | return; 27 | } 28 | this.#pendingWorkItems.delete(id); 29 | 30 | if (msg.data.error) { 31 | workItem.reject(msg.data.error); 32 | return; 33 | } 34 | 35 | workItem.resolve(msg.data.result); 36 | }; 37 | } 38 | 39 | #selectWorker(id, resolve, reject) { 40 | this.#pendingWorkItems.set(id, {resolve, reject}); 41 | if (this.#pendingWorkItems.size >= this.#workerPool.length && 42 | this.#workerPool.length < this.#maxWorkerPoolSize) { 43 | // Add a new worker 44 | const worker = new Worker(this.#workerPath); 45 | worker.onmessage = this.#onMessage; 46 | this.#workerPool.push(worker); 47 | return worker; 48 | } 49 | return this.#workerPool[this.#nextWorker++ % this.#workerPool.length]; 50 | } 51 | 52 | dispatch(args, transfer) { 53 | return new Promise((resolve, reject) => { 54 | const id = this.#nextWorkItemId++; 55 | this.#selectWorker(id, resolve, reject).postMessage({ 56 | id, 57 | args 58 | }, transfer); 59 | }); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/default-vertex.js: -------------------------------------------------------------------------------- 1 | import { wgsl } from 'wgsl-preprocessor'; 2 | import { AttributeLocation } from '../../core/mesh.js'; 3 | import { CameraStruct, SkinStructs, GetSkinMatrix, DefaultVertexInput, DefaultVertexOutput, GetInstanceMatrix } from './common.js'; 4 | 5 | export function DefaultVertexSource(layout, skinned = false) { return wgsl` 6 | ${DefaultVertexInput(layout)} 7 | ${DefaultVertexOutput(layout)} 8 | 9 | ${CameraStruct()} 10 | 11 | ${GetInstanceMatrix} 12 | 13 | #if ${skinned} 14 | ${SkinStructs(2)} 15 | ${GetSkinMatrix} 16 | #endif 17 | 18 | @vertex 19 | fn vertexMain(input : VertexInput) -> VertexOutput { 20 | var output : VertexOutput; 21 | 22 | #if ${skinned} 23 | let modelMatrix = getSkinMatrix(input); 24 | #else 25 | let modelMatrix = getInstanceMatrix(input); 26 | #endif 27 | 28 | #if ${layout.locationsUsed.includes(AttributeLocation.normal)} 29 | output.normal = normalize((modelMatrix * vec4(input.normal, 0.0)).xyz); 30 | #else 31 | output.normal = normalize((modelMatrix * vec4(0.0, 0.0, 1.0, 0.0)).xyz); 32 | #endif 33 | 34 | #if ${layout.locationsUsed.includes(AttributeLocation.tangent)} 35 | output.tangent = normalize((modelMatrix * vec4(input.tangent.xyz, 0.0)).xyz); 36 | output.bitangent = cross(output.normal, output.tangent) * input.tangent.w; 37 | #endif 38 | 39 | #if ${layout.locationsUsed.includes(AttributeLocation.color)} 40 | output.color = input.color; 41 | #else 42 | output.color = vec4(1.0); 43 | #endif 44 | 45 | #if ${layout.locationsUsed.includes(AttributeLocation.texcoord)} 46 | output.texcoord = input.texcoord; 47 | #endif 48 | #if ${layout.locationsUsed.includes(AttributeLocation.texcoord2)} 49 | output.texcoord2 = input.texcoord2; 50 | #endif 51 | 52 | output.instanceColor = input.instanceColor; 53 | 54 | let modelPos = modelMatrix * input.position; 55 | output.worldPos = modelPos.xyz; 56 | output.view = camera.position - modelPos.xyz; 57 | output.position = camera.projection * camera.view * modelPos; 58 | return output; 59 | }`; 60 | } 61 | -------------------------------------------------------------------------------- /js/engine/core/skybox.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { Mesh, Geometry, Attribute } from './mesh.js'; 3 | import { Transform } from './transform.js'; 4 | 5 | export class SkyboxMaterial { 6 | constructor(texture = null) { 7 | this.texture = texture; 8 | } 9 | } 10 | 11 | export class Skybox { 12 | mesh; 13 | material; 14 | 15 | constructor(texture = null) { 16 | texture?.then((skyboxTexture) => { 17 | this.material = new SkyboxMaterial(skyboxTexture.texture); 18 | }); 19 | } 20 | } 21 | 22 | const SKYBOX_VERTS = new Float32Array([ 23 | 1.0, 1.0, 1.0, // 0 24 | -1.0, 1.0, 1.0, // 1 25 | 1.0, -1.0, 1.0, // 2 26 | -1.0, -1.0, 1.0, // 3 27 | 1.0, 1.0, -1.0, // 4 28 | -1.0, 1.0, -1.0, // 5 29 | 1.0, -1.0, -1.0, // 6 30 | -1.0, -1.0, -1.0, // 7 31 | ]); 32 | 33 | const SKYBOX_INDICES = new Uint16Array([ 34 | // PosX (Right) 35 | 0, 2, 4, 36 | 6, 4, 2, 37 | 38 | // NegX (Left) 39 | 5, 3, 1, 40 | 3, 5, 7, 41 | 42 | // PosY (Top) 43 | 4, 1, 0, 44 | 1, 4, 5, 45 | 46 | // NegY (Bottom) 47 | 2, 3, 6, 48 | 7, 6, 3, 49 | 50 | // PosZ (Front) 51 | 0, 1, 2, 52 | 3, 2, 1, 53 | 54 | // NegZ (Back) 55 | 6, 5, 4, 56 | 5, 6, 7, 57 | ]); 58 | 59 | export class SkyboxSystem extends System { 60 | init(gpu) { 61 | const vertexBuffer = gpu.createStaticBuffer(SKYBOX_VERTS, 'vertex'); 62 | const indexBuffer = gpu.createStaticBuffer(SKYBOX_INDICES, 'index'); 63 | 64 | this.geometry = new Geometry({ 65 | drawCount: 36, 66 | attributes: [ new Attribute('position', vertexBuffer) ], 67 | indices: { buffer: indexBuffer, format: 'uint16' } 68 | }); 69 | 70 | this.skyboxQuery = this.query(Skybox); 71 | } 72 | 73 | execute(delta, time, gpu) { 74 | this.skyboxQuery.forEach((entity, skybox) => { 75 | if (!skybox.mesh && skybox.material) { 76 | skybox.mesh = new Mesh({ 77 | geometry: this.geometry, 78 | material: skybox.material 79 | }); 80 | } 81 | 82 | if (skybox.mesh) { 83 | gpu.addFrameMeshInstance(skybox.mesh, entity.get(Transform)); 84 | } 85 | }); 86 | } 87 | } -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/light-sprite.js: -------------------------------------------------------------------------------- 1 | import { CameraStruct, LightStruct, ColorConversions } from './common.js'; 2 | 3 | export const LightSpriteVertexSource = ` 4 | var pos : array, 4> = array, 4>( 5 | vec2(-1.0, 1.0), vec2(1.0, 1.0), vec2(-1.0, -1.0), vec2(1.0, -1.0) 6 | ); 7 | 8 | ${CameraStruct(0, 0)} 9 | ${LightStruct(0, 1)} 10 | 11 | struct VertexInput { 12 | @builtin(vertex_index) vertexIndex : u32, 13 | @builtin(instance_index) instanceIndex : u32, 14 | }; 15 | 16 | struct VertexOutput { 17 | @builtin(position) position : vec4, 18 | @location(0) localPos : vec2, 19 | @location(1) color: vec3, 20 | }; 21 | 22 | @vertex 23 | fn vertexMain(input : VertexInput) -> VertexOutput { 24 | var output : VertexOutput; 25 | 26 | let light = &globalLights.lights[input.instanceIndex]; 27 | 28 | output.localPos = pos[input.vertexIndex]; 29 | output.color = (*light).color * (*light).intensity; 30 | let worldPos = vec3(output.localPos, 0.0) * (*light).range * 0.025; 31 | 32 | // Generate a billboarded model view matrix 33 | var bbModelViewMatrix : mat4x4; 34 | bbModelViewMatrix[3] = vec4((*light).position, 1.0); 35 | bbModelViewMatrix = camera.view * bbModelViewMatrix; 36 | bbModelViewMatrix[0][0] = 1.0; 37 | bbModelViewMatrix[0][1] = 0.0; 38 | bbModelViewMatrix[0][2] = 0.0; 39 | 40 | bbModelViewMatrix[1][0] = 0.0; 41 | bbModelViewMatrix[1][1] = 1.0; 42 | bbModelViewMatrix[1][2] = 0.0; 43 | 44 | bbModelViewMatrix[2][0] = 0.0; 45 | bbModelViewMatrix[2][1] = 0.0; 46 | bbModelViewMatrix[2][2] = 1.0; 47 | 48 | output.position = camera.projection * bbModelViewMatrix * vec4(worldPos, 1.0); 49 | return output; 50 | } 51 | `; 52 | 53 | export const LightSpriteFragmentSource = ` 54 | ${ColorConversions} 55 | 56 | struct FragmentInput { 57 | @location(0) localPos : vec2, 58 | @location(1) color: vec3, 59 | }; 60 | 61 | @fragment 62 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 63 | let distToCenter = length(input.localPos); 64 | let fade = (1.0 - distToCenter) * (1.0 / (distToCenter * distToCenter)); 65 | return vec4(linearTosRGB(input.color * fade), fade); 66 | } 67 | `; -------------------------------------------------------------------------------- /js/engine/webgpu/materials/webgpu-unlit-material.js: -------------------------------------------------------------------------------- 1 | import { UnlitMaterial } from '../../core/materials.js'; 2 | import { UnlitFragmentSource, MATERIAL_BUFFER_SIZE } from '../wgsl/unlit-material.js'; 3 | import { WebGPUMaterialFactory } from './webgpu-material-factory.js'; 4 | import { vec4 } from 'gl-matrix'; 5 | 6 | // Can reuse these for every unlit material 7 | const materialArray = new Float32Array(MATERIAL_BUFFER_SIZE / Float32Array.BYTES_PER_ELEMENT); 8 | const baseColorFactor = new Float32Array(materialArray.buffer, 0, 4); 9 | 10 | export class WebGPUUnlitMaterial extends WebGPUMaterialFactory { 11 | init(gpu) { 12 | this.bindGroupLayout = gpu.device.createBindGroupLayout({ 13 | label: 'Unlit Material BindGroupLayout', 14 | entries: [{ 15 | binding: 0, // Uniform Buffer 16 | visibility: GPUShaderStage.FRAGMENT, 17 | buffer: {} 18 | }, 19 | { 20 | binding: 1, // baseColorTexture 21 | visibility: GPUShaderStage.FRAGMENT, 22 | texture: {} 23 | }, 24 | { 25 | binding: 2, // baseColorSampler 26 | visibility: GPUShaderStage.FRAGMENT, 27 | sampler: {} 28 | }] 29 | }); 30 | } 31 | 32 | createBindGroup(gpu, material) { 33 | vec4.copy(baseColorFactor, material.baseColorFactor); 34 | materialArray[4] = material.alphaCutoff; 35 | 36 | const materialBuffer = gpu.device.createBuffer({ 37 | size: MATERIAL_BUFFER_SIZE, 38 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST, 39 | }); 40 | gpu.device.queue.writeBuffer(materialBuffer, 0, materialArray); 41 | 42 | return gpu.device.createBindGroup({ 43 | layout: this.bindGroupLayout, 44 | entries: [{ 45 | binding: 0, 46 | resource: { buffer: materialBuffer }, 47 | }, 48 | { 49 | binding: 1, 50 | resource: material.baseColorTexture || gpu.whiteTextureView, 51 | }, 52 | { 53 | binding: 2, 54 | resource: material.baseColorSampler || gpu.defaultSampler, 55 | }] 56 | }); 57 | } 58 | 59 | createFragmentModule(gpu, geometryLayout, material) { 60 | return { 61 | module: gpu.device.createShaderModule({ code: UnlitFragmentSource(geometryLayout) }), 62 | entryPoint: 'fragmentMain', 63 | }; 64 | } 65 | } 66 | 67 | WebGPUMaterialFactory.register(UnlitMaterial, WebGPUUnlitMaterial); -------------------------------------------------------------------------------- /js/engine/geometry/box.js: -------------------------------------------------------------------------------- 1 | import { Geometry, InterleavedAttributes } from '../core/mesh.js'; 2 | 3 | export class BoxGeometry extends Geometry { 4 | constructor(renderer, options = {}) { 5 | const w = (options.width || 1) * 0.5; 6 | const h = (options.height || 1) * 0.5; 7 | const d = (options.depth || 1) * 0.5; 8 | 9 | const x = options.x || 0; 10 | const y = options.y || 0; 11 | const z = options.z || 0; 12 | 13 | const cubeVerts = new Float32Array([ 14 | //position, normal, uv, 15 | x+w, y-h, z+d, 0, -1, 0, 1, 1, 16 | x-w, y-h, z+d, 0, -1, 0, 0, 1, 17 | x-w, y-h, z-d, 0, -1, 0, 0, 0, 18 | x+w, y-h, z-d, 0, -1, 0, 1, 0, 19 | x+w, y-h, z+d, 0, -1, 0, 1, 1, 20 | x-w, y-h, z-d, 0, -1, 0, 0, 0, 21 | 22 | x+w, y+h, z+d, 1, 0, 0, 1, 1, 23 | x+w, y-h, z+d, 1, 0, 0, 0, 1, 24 | x+w, y-h, z-d, 1, 0, 0, 0, 0, 25 | x+w, y+h, z-d, 1, 0, 0, 1, 0, 26 | x+w, y+h, z+d, 1, 0, 0, 1, 1, 27 | x+w, y-h, z-d, 1, 0, 0, 0, 0, 28 | 29 | x-w, y+h, z+d, 0, 1, 0, 1, 1, 30 | x+w, y+h, z+d, 0, 1, 0, 0, 1, 31 | x+w, y+h, z-d, 0, 1, 0, 0, 0, 32 | x-w, y+h, z-d, 0, 1, 0, 1, 0, 33 | x-w, y+h, z+d, 0, 1, 0, 1, 1, 34 | x+w, y+h, z-d, 0, 1, 0, 0, 0, 35 | 36 | x-w, y-h, z+d, -1, 0, 0, 1, 1, 37 | x-w, y+h, z+d, -1, 0, 0, 0, 1, 38 | x-w, y+h, z-d, -1, 0, 0, 0, 0, 39 | x-w, y-h, z-d, -1, 0, 0, 1, 0, 40 | x-w, y-h, z+d, -1, 0, 0, 1, 1, 41 | x-w, y+h, z-d, -1, 0, 0, 0, 0, 42 | 43 | x+w, y+h, z+d, 0, 0, 1, 1, 1, 44 | x-w, y+h, z+d, 0, 0, 1, 0, 1, 45 | x-w, y-h, z+d, 0, 0, 1, 0, 0, 46 | x-w, y-h, z+d, 0, 0, 1, 0, 0, 47 | x+w, y-h, z+d, 0, 0, 1, 1, 0, 48 | x+w, y+h, z+d, 0, 0, 1, 1, 1, 49 | 50 | x+w, y-h, z-d, 0, 0, -1, 1, 1, 51 | x-w, y-h, z-d, 0, 0, -1, 0, 1, 52 | x-w, y+h, z-d, 0, 0, -1, 0, 0, 53 | x+w, y+h, z-d, 0, 0, -1, 1, 0, 54 | x+w, y-h, z-d, 0, 0, -1, 1, 1, 55 | x-w, y+h, z-d, 0, 0, -1, 0, 0, 56 | ]); 57 | 58 | const vertBuffer = renderer.createStaticBuffer(cubeVerts); 59 | const attributes = new InterleavedAttributes(vertBuffer, 32) 60 | .addAttribute('position', 0) 61 | .addAttribute('normal', 12) 62 | .addAttribute('texcoord', 24); 63 | 64 | super({ 65 | attributes: [attributes], 66 | drawCount: 36 67 | }); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-light-sprite.js: -------------------------------------------------------------------------------- 1 | import { WebGPUSystem } from './webgpu-system.js'; 2 | import { Geometry } from '../core/mesh.js'; 3 | import { WebGPUMaterialPipeline, RenderOrder } from './materials/webgpu-materials.js'; 4 | import { LightBuffer } from '../core/light.js'; 5 | import { LightSpriteVertexSource, LightSpriteFragmentSource } from './wgsl/light-sprite.js'; 6 | 7 | export class WebGPULightSpriteSystem extends WebGPUSystem { 8 | init(gpu) { 9 | const vertexModule = gpu.device.createShaderModule({ 10 | code: LightSpriteVertexSource, 11 | label: 'Light Sprite Vertex' 12 | }); 13 | const fragmentModule = gpu.device.createShaderModule({ 14 | code: LightSpriteFragmentSource, 15 | label: 'Light Sprite Fragment' 16 | }); 17 | 18 | const fragmentTargets = [{ 19 | format: gpu.renderTargets.format, 20 | blend: { 21 | color: { 22 | srcFactor: 'src-alpha', 23 | dstFactor: 'one', 24 | }, 25 | alpha: { 26 | srcFactor: "one", 27 | dstFactor: "one", 28 | }, 29 | }, 30 | }] 31 | 32 | if (gpu.flags.bloomEnabled) { 33 | fragmentTargets.push({ 34 | format: gpu.renderTargets.format, 35 | writeMask: 0, 36 | }); 37 | } 38 | 39 | // Setup a render pipeline for drawing the light sprites 40 | const pipeline = gpu.device.createRenderPipeline({ 41 | label: `Light Sprite Pipeline`, 42 | layout: gpu.device.createPipelineLayout({ 43 | bindGroupLayouts: [ 44 | gpu.bindGroupLayouts.frame, 45 | ] 46 | }), 47 | vertex: { 48 | module: vertexModule, 49 | entryPoint: 'vertexMain' 50 | }, 51 | fragment: { 52 | module: fragmentModule, 53 | entryPoint: 'fragmentMain', 54 | targets: fragmentTargets, 55 | }, 56 | primitive: { 57 | topology: 'triangle-strip', 58 | stripIndexFormat: 'uint32' 59 | }, 60 | depthStencil: { 61 | depthWriteEnabled: false, 62 | depthCompare: 'less', 63 | format: gpu.renderTargets.depthFormat, 64 | }, 65 | multisample: { 66 | count: gpu.renderTargets.sampleCount, 67 | } 68 | }); 69 | 70 | this.lightPipeline = new WebGPUMaterialPipeline({ 71 | pipeline, 72 | renderOrder: RenderOrder.Last 73 | }); 74 | this.lightGeometry = new Geometry({ drawCount: 4 }); 75 | } 76 | 77 | execute(delta, time, gpu) { 78 | const lights = this.singleton.get(LightBuffer); 79 | gpu.renderBatch.addRenderable(this.lightGeometry, this.lightPipeline, undefined, { count: lights.lightCount }); 80 | } 81 | } -------------------------------------------------------------------------------- /js/engine/controls/flying-controls.js: -------------------------------------------------------------------------------- 1 | import { System } from '../core/ecs.js'; 2 | import { KeyboardState, MouseState } from '../core/input.js'; 3 | import { Transform } from '../core/transform.js'; 4 | import { vec3, vec2, quat } from 'gl-matrix'; 5 | 6 | export class FlyingControls { 7 | speed = 3; 8 | angles = vec2.create(); 9 | } 10 | 11 | const TMP_DIR = vec3.create(); 12 | 13 | export class FlyingControlsSystem extends System { 14 | static queries = { 15 | flyingControls: { components: [FlyingControls, Transform] }, 16 | }; 17 | 18 | execute(delta) { 19 | const keyboard = this.singleton.get(KeyboardState); 20 | const mouse = this.singleton.get(MouseState); 21 | 22 | this.query(FlyingControls, Transform).forEach((entity, control, transform) => { 23 | // Handle Mouse state. 24 | if (mouse.buttons[0] && (mouse.delta[0] || mouse.delta[1])) { 25 | control.angles[1] += mouse.delta[0] * 0.025; 26 | // Keep our rotation in the range of [0, 2*PI] 27 | // (Prevents numeric instability if you spin around a LOT.) 28 | while (control.angles[1] < 0) { 29 | control.angles[1] += Math.PI * 2.0; 30 | } 31 | while (control.angles[1] >= Math.PI * 2.0) { 32 | control.angles[1] -= Math.PI * 2.0; 33 | } 34 | 35 | control.angles[0] += mouse.delta[1] * 0.025; 36 | // Clamp the up/down rotation to prevent us from flipping upside-down 37 | control.angles[0] = Math.min(Math.max(control.angles[0], -Math.PI*0.5), Math.PI*0.5); 38 | 39 | // Update the tranform rotation 40 | const q = transform.orientation; 41 | quat.identity(q); 42 | quat.rotateY(q, q, -control.angles[1]); 43 | quat.rotateX(q, q, -control.angles[0]); 44 | } 45 | 46 | // Handle keyboard state. 47 | vec3.set(TMP_DIR, 0, 0, 0); 48 | if (keyboard.keyPressed('KeyW')) { 49 | TMP_DIR[2] -= 1.0; 50 | } 51 | if (keyboard.keyPressed('KeyS')) { 52 | TMP_DIR[2] += 1.0; 53 | } 54 | if (keyboard.keyPressed('KeyA')) { 55 | TMP_DIR[0] -= 1.0; 56 | } 57 | if (keyboard.keyPressed('KeyD')) { 58 | TMP_DIR[0] += 1.0; 59 | } 60 | if (keyboard.keyPressed('Space')) { 61 | TMP_DIR[1] += 1.0; 62 | } 63 | if (keyboard.keyPressed('ShiftLeft')) { 64 | TMP_DIR[1] -= 1.0; 65 | } 66 | 67 | if (TMP_DIR[0] !== 0 || TMP_DIR[1] !== 0 || TMP_DIR[2] !== 0) { 68 | vec3.transformQuat(TMP_DIR, TMP_DIR, transform.orientation); 69 | vec3.normalize(TMP_DIR, TMP_DIR); 70 | vec3.scaleAndAdd(transform.position, transform.position, TMP_DIR, control.speed * delta); 71 | } 72 | }); 73 | } 74 | } -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/debug-point.js: -------------------------------------------------------------------------------- 1 | import { CameraStruct, SkinStructs, GetSkinMatrix, DefaultVertexInput, DefaultVertexOutput, GetInstanceMatrix } from './common.js'; 2 | import { AttributeLocation } from '../../core/mesh.js'; 3 | 4 | const SIZE = 0.05; 5 | 6 | export const DebugPointVertexSource = ` 7 | var pos : array, 4> = array, 4>( 8 | vec2(-${SIZE}, ${SIZE}), vec2(${SIZE}, ${SIZE}), vec2(-${SIZE}, -${SIZE}), vec2(${SIZE}, -${SIZE}) 9 | ); 10 | 11 | ${CameraStruct()} 12 | ${GetInstanceMatrix} 13 | 14 | struct VertexInput { 15 | @builtin(vertex_index) vertexIndex : u32, 16 | @builtin(instance_index) instanceIndex : u32, 17 | 18 | @location(${AttributeLocation.maxAttributeLocation}) instance0 : vec4, 19 | @location(${AttributeLocation.maxAttributeLocation+1}) instance1 : vec4, 20 | @location(${AttributeLocation.maxAttributeLocation+2}) instance2 : vec4, 21 | @location(${AttributeLocation.maxAttributeLocation+3}) instance3 : vec4, 22 | @location(${AttributeLocation.maxAttributeLocation+4}) instanceColor : vec4, 23 | } 24 | 25 | struct VertexOutput { 26 | @builtin(position) position : vec4, 27 | @location(0) localPos : vec2, 28 | @location(1) color: vec4, 29 | }; 30 | 31 | @vertex 32 | fn vertexMain(input : VertexInput) -> VertexOutput { 33 | var output : VertexOutput; 34 | 35 | let modelMatrix = getInstanceMatrix(input); 36 | output.localPos = pos[input.vertexIndex]; 37 | output.color = input.instanceColor; 38 | let worldPos = vec3(output.localPos, 0.0); // * modelMatrix; 39 | 40 | // Generate a billboarded model view matrix 41 | var bbModelViewMatrix : mat4x4; 42 | bbModelViewMatrix = camera.view * modelMatrix; 43 | bbModelViewMatrix[0][0] = 1.0; 44 | bbModelViewMatrix[0][1] = 0.0; 45 | bbModelViewMatrix[0][2] = 0.0; 46 | 47 | bbModelViewMatrix[1][0] = 0.0; 48 | bbModelViewMatrix[1][1] = 1.0; 49 | bbModelViewMatrix[1][2] = 0.0; 50 | 51 | bbModelViewMatrix[2][0] = 0.0; 52 | bbModelViewMatrix[2][1] = 0.0; 53 | bbModelViewMatrix[2][2] = 1.0; 54 | 55 | output.position = camera.projection * bbModelViewMatrix * vec4(worldPos, 1.0); 56 | //output.position = camera.projection * camera.view * modelMatrix * vec4(worldPos, 1.0); 57 | return output; 58 | } 59 | `; 60 | 61 | export const DebugPointFragmentSource = ` 62 | struct FragmentInput { 63 | @location(0) localPos : vec2, 64 | @location(1) color: vec4, 65 | }; 66 | 67 | @fragment 68 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 69 | let distToCenter = length(input.localPos * ${(1/SIZE).toFixed(2)}); 70 | let fade = (1.0 - distToCenter) * (1.0 / (distToCenter * distToCenter * distToCenter)); 71 | return vec4(input.color * fade); 72 | 73 | //return vec4(input.color); 74 | } 75 | `; -------------------------------------------------------------------------------- /js/engine/webgpu/materials/webgpu-skybox-material.js: -------------------------------------------------------------------------------- 1 | import { SkyboxMaterial } from '../../core/skybox.js'; 2 | import { SkyboxVertexSource, SkyboxFragmentSource } from '../wgsl/skybox.js'; 3 | import { WebGPUMaterialFactory, RenderOrder, WebGPUMaterialPipeline } from './webgpu-material-factory.js'; 4 | 5 | export class WebGPUSkyboxMaterial extends WebGPUMaterialFactory { 6 | init(gpu) { 7 | this.bindGroupLayout = gpu.device.createBindGroupLayout({ 8 | label: 'Skybox BindGroupLayout', 9 | entries: [{ 10 | binding: 0, // skyboxTexture 11 | visibility: GPUShaderStage.FRAGMENT, 12 | texture: { viewDimension: 'cube' } 13 | }] 14 | }); 15 | 16 | const vertexModule = gpu.device.createShaderModule({ 17 | code: SkyboxVertexSource, 18 | label: 'Skybox Vertex' 19 | }); 20 | 21 | const fragmentModule = gpu.device.createShaderModule({ 22 | code: SkyboxFragmentSource, 23 | label: 'Skybox Fragment' 24 | }); 25 | 26 | const fragmentTargets = [{ 27 | format: gpu.renderTargets.format, 28 | }] 29 | 30 | if (gpu.flags.bloomEnabled) { 31 | fragmentTargets.push({ 32 | format: gpu.renderTargets.format, 33 | writeMask: 0, 34 | }); 35 | } 36 | 37 | const pipeline = gpu.device.createRenderPipeline({ 38 | label: `Skybox Pipeline`, 39 | layout: gpu.device.createPipelineLayout({ 40 | bindGroupLayouts: [ 41 | gpu.bindGroupLayouts.frame, 42 | this.bindGroupLayout, 43 | ] 44 | }), 45 | vertex: { 46 | module: vertexModule, 47 | entryPoint: 'vertexMain', 48 | buffers: [{ 49 | arrayStride: 3 * Float32Array.BYTES_PER_ELEMENT, 50 | attributes: [{ 51 | shaderLocation: 0, 52 | format: 'float32x3', 53 | offset: 0, 54 | }] 55 | }] 56 | }, 57 | fragment: { 58 | module: fragmentModule, 59 | entryPoint: 'fragmentMain', 60 | targets: fragmentTargets, 61 | }, 62 | primitive: { 63 | topology: 'triangle-list', 64 | }, 65 | depthStencil: { 66 | depthWriteEnabled: false, 67 | depthCompare: 'less-equal', 68 | format: gpu.renderTargets.depthFormat, 69 | }, 70 | multisample: { 71 | count: gpu.renderTargets.sampleCount, 72 | } 73 | }); 74 | 75 | this.gpuPipeline = new WebGPUMaterialPipeline({ 76 | pipeline, 77 | renderOrder: RenderOrder.Skybox 78 | }); 79 | } 80 | 81 | createBindGroup(gpu, material) { 82 | return gpu.device.createBindGroup({ 83 | layout: this.bindGroupLayout, 84 | entries: [{ 85 | binding: 0, 86 | resource: material.texture.createView({ dimension: 'cube' }), 87 | }] 88 | }); 89 | } 90 | 91 | getPipeline(gpu, geometryLayout, material, skinned) { 92 | return this.gpuPipeline; 93 | } 94 | } 95 | 96 | WebGPUMaterialFactory.register(SkyboxMaterial, WebGPUSkyboxMaterial); -------------------------------------------------------------------------------- /js/engine/core/render-world.js: -------------------------------------------------------------------------------- 1 | import { World } from './ecs.js'; 2 | 3 | import { InputSystem } from './input.js'; 4 | import { EntityGroupSystem } from './entity-group.js'; 5 | import { AnimationSystem } from './animation.js'; 6 | import { MeshSystem } from './mesh.js'; 7 | import { SkinSystem } from './skin.js'; 8 | import { SkyboxSystem } from './skybox.js'; 9 | import { LightSystem } from './light.js'; 10 | 11 | export class Renderer { 12 | #renderMeshInstances = new Map(); 13 | 14 | maxLightCount = 512; 15 | maxShadowCasters = 64; 16 | 17 | get textureLoader() { 18 | throw new Error('textureLoader getter must be overriden in an extended class.'); 19 | } 20 | 21 | createStaticBuffer(sizeOrArrayBuffer, usage = 'vertex') { 22 | throw new Error('createStaticBuffer must be overriden in an extended class.'); 23 | } 24 | 25 | createDynamicBuffer(sizeOrArrayBuffer, usage = 'vertex') { 26 | throw new Error('createDynamicBuffer must be overriden in an extended class.'); 27 | } 28 | 29 | clearFrameMeshInstances() { 30 | this.#renderMeshInstances.clear(); 31 | } 32 | 33 | addFrameMeshInstance(mesh, transform, color) { 34 | let meshInstances = this.#renderMeshInstances.get(mesh); 35 | if (!meshInstances) { 36 | meshInstances = new Array(); 37 | this.#renderMeshInstances.set(mesh, meshInstances); 38 | } 39 | meshInstances.push({ transform, color }); 40 | } 41 | 42 | getFrameMeshInstances() { 43 | return this.#renderMeshInstances; 44 | } 45 | } 46 | 47 | export class RenderWorld extends World { 48 | #canvas; 49 | #renderer = null; 50 | #rendererInitialized; 51 | 52 | constructor(canvas, flags = {}) { 53 | super(); 54 | 55 | this.#canvas = canvas || document.createElement('canvas'); 56 | 57 | this.#rendererInitialized = this.intializeRenderer(flags).then((renderer) => { 58 | this.#renderer = renderer; 59 | return renderer; 60 | }); 61 | 62 | this.registerRenderSystem(InputSystem); 63 | this.registerSystem(EntityGroupSystem); 64 | this.registerSystem(AnimationSystem); 65 | this.registerSystem(MeshSystem); 66 | this.registerSystem(SkinSystem); 67 | this.registerRenderSystem(LightSystem); 68 | this.registerRenderSystem(SkyboxSystem); 69 | } 70 | 71 | get canvas() { 72 | return this.#canvas; 73 | } 74 | 75 | execute(delta, time) { 76 | this.#renderer?.clearFrameMeshInstances(); 77 | super.execute(delta, time, this.#renderer); 78 | } 79 | 80 | registerRenderSystem(systemType, ...initArgs) { 81 | this.#rendererInitialized.then((renderer) => { 82 | this.registerSystem(systemType, renderer, ...initArgs); 83 | }); 84 | return this; 85 | } 86 | 87 | async intializeRenderer() { 88 | throw new Error('intializeRenderer must be overriden in an extended class.'); 89 | } 90 | 91 | async renderer() { 92 | return await this.#rendererInitialized; 93 | } 94 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-debug-point.js: -------------------------------------------------------------------------------- 1 | import { Geometry } from '../core/mesh.js'; 2 | import { WebGPUMaterialPipeline, RenderOrder } from './materials/webgpu-materials.js'; 3 | import { DebugPointVertexSource, DebugPointFragmentSource } from './wgsl/debug-point.js'; 4 | import { Transform, StaticTransform } from '../core/transform.js'; 5 | import { INSTANCE_BUFFER_LAYOUT } from './materials/webgpu-material-factory.js'; 6 | 7 | export class WebGPUDebugPoint { 8 | static gpuPipelines = new WeakMap(); 9 | static pointGeometry = new Geometry({ drawCount: 4 }); 10 | 11 | static addPoint(gpu, position, color = [1, 1, 1, 1]) { 12 | const pipeline = WebGPUDebugPoint.getGPUPipeline(gpu); 13 | gpu.renderBatch.addRenderable(WebGPUDebugPoint.pointGeometry, pipeline, undefined, { 14 | transform: new StaticTransform({ 15 | position, 16 | }), 17 | color 18 | }); 19 | } 20 | 21 | static getGPUPipeline(gpu) { 22 | let pointPipeline = WebGPUDebugPoint.gpuPipelines.get(gpu); 23 | if (!pointPipeline) { 24 | const vertexModule = gpu.device.createShaderModule({ 25 | code: DebugPointVertexSource, 26 | label: 'Debug Point Vertex' 27 | }); 28 | const fragmentModule = gpu.device.createShaderModule({ 29 | code: DebugPointFragmentSource, 30 | label: 'Debug Point Fragment' 31 | }); 32 | 33 | const fragmentTargets = [{ 34 | format: gpu.renderTargets.format, 35 | blend: { 36 | color: { 37 | srcFactor: 'src-alpha', 38 | dstFactor: 'one', 39 | }, 40 | alpha: { 41 | srcFactor: "one", 42 | dstFactor: "one", 43 | }, 44 | }, 45 | }] 46 | 47 | if (gpu.flags.bloomEnabled) { 48 | fragmentTargets.push({ 49 | format: gpu.renderTargets.format, 50 | writeMask: 0, 51 | }); 52 | } 53 | 54 | const pipeline = gpu.device.createRenderPipeline({ 55 | label: `Debug Point Pipeline`, 56 | layout: gpu.device.createPipelineLayout({ 57 | bindGroupLayouts: [ 58 | gpu.bindGroupLayouts.frame, 59 | ] 60 | }), 61 | vertex: { 62 | module: vertexModule, 63 | entryPoint: 'vertexMain', 64 | buffers: [ INSTANCE_BUFFER_LAYOUT ] 65 | }, 66 | fragment: { 67 | module: fragmentModule, 68 | entryPoint: 'fragmentMain', 69 | targets: fragmentTargets, 70 | }, 71 | primitive: { 72 | topology: 'triangle-strip', 73 | stripIndexFormat: 'uint32' 74 | }, 75 | depthStencil: { 76 | depthWriteEnabled: false, 77 | depthCompare: 'less', 78 | format: gpu.renderTargets.depthFormat, 79 | }, 80 | multisample: { 81 | count: gpu.renderTargets.sampleCount, 82 | } 83 | }); 84 | 85 | pointPipeline = new WebGPUMaterialPipeline({ 86 | pipeline, 87 | renderOrder: RenderOrder.Last, 88 | instanceSlot: 0, 89 | }); 90 | 91 | WebGPUDebugPoint.gpuPipelines.set(gpu, pointPipeline); 92 | } 93 | 94 | return pointPipeline; 95 | } 96 | } -------------------------------------------------------------------------------- /js/engine/geometry/sphere.js: -------------------------------------------------------------------------------- 1 | import { Geometry, InterleavedAttributes } from '../core/mesh.js'; 2 | import { vec3 } from 'gl-matrix'; 3 | 4 | // Big swaths of this code lifted with love from Three.js 5 | export class SphereGeometry extends Geometry { 6 | constructor(renderer, radius = 1, widthSegments = 32, heightSegments = 16 ) { 7 | const phiStart = 0; 8 | const phiLength = Math.PI * 2; 9 | const thetaStart = 0; 10 | const thetaLength = Math.PI; 11 | 12 | widthSegments = Math.max( 3, Math.floor( widthSegments ) ); 13 | heightSegments = Math.max( 2, Math.floor( heightSegments ) ); 14 | 15 | const thetaEnd = Math.min( thetaStart + thetaLength, Math.PI ); 16 | 17 | let index = 0; 18 | const grid = []; 19 | 20 | const vertex = vec3.create(); 21 | const normal = vec3.create(); 22 | 23 | // buffers 24 | 25 | const vertices = []; 26 | const indices = []; 27 | 28 | // generate vertices, normals and uvs 29 | 30 | for (let iy = 0; iy <= heightSegments; ++iy) { 31 | const verticesRow = []; 32 | const v = iy / heightSegments; 33 | 34 | // special case for the poles 35 | let uOffset = 0; 36 | if (iy == 0 && thetaStart == 0) { 37 | uOffset = 0.5 / widthSegments; 38 | } else if (iy == heightSegments && thetaEnd == Math.PI) { 39 | uOffset = - 0.5 / widthSegments; 40 | } 41 | 42 | for (let ix = 0; ix <= widthSegments; ++ix) { 43 | const u = ix / widthSegments; 44 | 45 | // vertex 46 | vertex[0] = - radius * Math.cos(phiStart + u * phiLength) * Math.sin(thetaStart + v * thetaLength); 47 | vertex[1] = radius * Math.cos(thetaStart + v * thetaLength); 48 | vertex[2] = radius * Math.sin(phiStart + u * phiLength) * Math.sin(thetaStart + v * thetaLength); 49 | 50 | vertices.push(vertex[0], vertex[1], vertex[2]); 51 | 52 | // normal 53 | vec3.normalize(normal, vertex); 54 | vertices.push(normal[0], normal[1], normal[2]); 55 | 56 | // texcoord 57 | vertices.push(u + uOffset, 1 - v); 58 | 59 | verticesRow.push(index++); 60 | } 61 | 62 | grid.push(verticesRow); 63 | } 64 | 65 | // indices 66 | 67 | for (let iy = 0; iy < heightSegments; iy++) { 68 | for (let ix = 0; ix < widthSegments; ix++) { 69 | const a = grid[iy][ix + 1]; 70 | const b = grid[iy][ix]; 71 | const c = grid[iy + 1][ix]; 72 | const d = grid[iy + 1][ix + 1]; 73 | 74 | if (iy !== 0 || thetaStart > 0) indices.push(a, b, d); 75 | if (iy !== heightSegments - 1 || thetaEnd < Math.PI) indices.push(b, c, d); 76 | } 77 | } 78 | 79 | const vertBuffer = renderer.createStaticBuffer(new Float32Array(vertices)); 80 | const attributes = new InterleavedAttributes(vertBuffer, 32) 81 | .addAttribute('position', 0) 82 | .addAttribute('normal', 12) 83 | .addAttribute('texcoord', 24); 84 | 85 | super({ 86 | drawCount: indices.length, 87 | attributes: [attributes], 88 | indices: { 89 | buffer: renderer.createStaticBuffer(new Uint16Array(indices), 'index'), 90 | format: 'uint16', 91 | }, 92 | }); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/bloom.js: -------------------------------------------------------------------------------- 1 | export const BloomBlurCommon = ` 2 | // Values from https://www.rastergrid.com/blog/2010/09/efficient-gaussian-blur-with-linear-sampling/ 3 | var offsets : array = array( 4 | 0.0, 1.3846153846, 3.2307692308); 5 | var weights : array = array( 6 | 0.2270270270, 0.3162162162, 0.0702702703); 7 | 8 | struct BloomUniforms { 9 | radius : f32, 10 | dim : f32, 11 | }; 12 | @group(0) @binding(0) var bloom : BloomUniforms; 13 | @group(0) @binding(1) var bloomTexture : texture_2d; 14 | @group(0) @binding(2) var bloomSampler : sampler; 15 | 16 | struct FragmentInput { 17 | @location(0) texCoord : vec2 18 | }; 19 | 20 | fn getGaussianBlur(texCoord : vec2) -> vec4 { 21 | let texelRadius = vec2(bloom.radius) / vec2(textureDimensions(bloomTexture)); 22 | let step = bloomDir * texelRadius; 23 | 24 | var sum = vec4(0.0); 25 | 26 | sum = sum + textureSample(bloomTexture, bloomSampler, texCoord) * weights[0]; 27 | 28 | sum = sum + textureSample(bloomTexture, bloomSampler, texCoord + step * 1.0) * weights[1]; 29 | sum = sum + textureSample(bloomTexture, bloomSampler, texCoord - step * 1.0) * weights[1]; 30 | 31 | sum = sum + textureSample(bloomTexture, bloomSampler, texCoord + step * 2.0) * weights[2]; 32 | sum = sum + textureSample(bloomTexture, bloomSampler, texCoord - step * 2.0) * weights[2]; 33 | 34 | // This is more compact than the unrolled loop above, but was causing corruption on older Mac Intel GPUs. 35 | //for (var i = 1; i < 3; i = i + 1) { 36 | // sum = sum + textureSample(bloomTexture, bloomSampler, texCoord + step * f32(i)) * weights[i]; 37 | // sum = sum + textureSample(bloomTexture, bloomSampler, texCoord - step * f32(i)) * weights[i]; 38 | //} 39 | 40 | return vec4(sum.rgb, 1.0); 41 | } 42 | `; 43 | 44 | export const BloomBlurHorizontalFragmentSource = ` 45 | const bloomDir = vec2(1.0, 0.0); 46 | ${BloomBlurCommon} 47 | 48 | @fragment 49 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 50 | return getGaussianBlur(input.texCoord); 51 | } 52 | `; 53 | 54 | // Combines the vertical blur step and a dimming of the previous blur results to allow for glowing trails. 55 | export const BloomBlurVerticalFragmentSource = ` 56 | const bloomDir = vec2(0.0, 1.0); 57 | ${BloomBlurCommon} 58 | 59 | @group(0) @binding(3) var prevTexture : texture_2d; 60 | 61 | @fragment 62 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 63 | let blurColor = getGaussianBlur(input.texCoord); 64 | let dimColor = textureSample(prevTexture, bloomSampler, input.texCoord) * bloom.dim; 65 | 66 | return blurColor + dimColor; 67 | } 68 | `; 69 | 70 | export const BloomBlendFragmentSource = ` 71 | @group(0) @binding(0) var bloomTexture : texture_2d; 72 | @group(0) @binding(1) var bloomSampler : sampler; 73 | 74 | struct FragmentInput { 75 | @location(0) texCoord : vec2 76 | }; 77 | 78 | @fragment 79 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 80 | let color = textureSample(bloomTexture, bloomSampler, input.texCoord); 81 | return vec4(color.rgb, 1.0); 82 | } 83 | `; 84 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-mesh.js: -------------------------------------------------------------------------------- 1 | import { WebGPUSystem } from './webgpu-system.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { WebGPUMaterialFactory, WebGPUMaterialBindGroups } from './materials/webgpu-materials.js'; 4 | 5 | class WebGPUMeshPrimitive { 6 | constructor(geometry, pipeline, bindGroups) { 7 | this.geometry = geometry; 8 | this.pipeline = pipeline; 9 | this.bindGroups = bindGroups || new WebGPUMaterialBindGroups(); 10 | } 11 | } 12 | 13 | class WebGPUSkin { 14 | id; 15 | bindGroup; 16 | } 17 | 18 | export class WebGPUMeshSystem extends WebGPUSystem { 19 | stage = Stage.PreRender; 20 | 21 | #factories = new Map(); 22 | #gpuMeshes = new WeakMap(); 23 | #gpuSkins = new WeakMap(); 24 | 25 | init(gpu) { 26 | const materialFactories = WebGPUMaterialFactory.getFactories(); 27 | for (const [material, factoryConstructor] of materialFactories) { 28 | const factory = new factoryConstructor(); 29 | this.#factories.set(material, factory); 30 | factory.init(gpu); 31 | } 32 | } 33 | 34 | getGPUSkin(gpu, skin) { 35 | if (!skin || !skin?.jointBuffer) return null; 36 | 37 | let gpuSkin = this.#gpuSkins.get(skin); 38 | if (!gpuSkin) { 39 | gpuSkin = new WebGPUSkin(); 40 | gpuSkin.id = skin.id; 41 | gpuSkin.bindGroup = gpu.device.createBindGroup({ 42 | label: `Skin[${skin.id}] BindGroup`, 43 | layout: gpu.bindGroupLayouts.skin, 44 | entries: [{ 45 | binding: 0, 46 | resource: { buffer: skin.jointBuffer.gpuBuffer }, 47 | }, { 48 | binding: 1, 49 | resource: { buffer: skin.ibmBuffer.gpuBuffer, offset: skin.ibmOffset }, 50 | }] 51 | }); 52 | 53 | this.#gpuSkins.set(skin, gpuSkin); 54 | } 55 | return gpuSkin; 56 | } 57 | 58 | execute(delta, time, gpu) { 59 | const meshInstances = gpu.getFrameMeshInstances(); 60 | for (const mesh of meshInstances.keys()) { 61 | const skin = this.getGPUSkin(gpu, mesh.skin); 62 | if (mesh.skin && !skin) { 63 | // If we get a skinned mesh without a joint buffer skip it. 64 | console.warn('Got a skinned mesh with no joint buffer'); 65 | continue; 66 | } 67 | let gpuMesh = this.#gpuMeshes.get(mesh); 68 | if (!gpuMesh) { 69 | gpuMesh = []; 70 | for (const primitive of mesh.primitives) { 71 | const layout = primitive.geometry.layout; 72 | const material = primitive.material; 73 | const factory = this.#factories.get(material.constructor); 74 | if (!factory) { 75 | throw new Error(`No WebGPUMaterialFactory registered for ${material.constructor.name}`); 76 | } 77 | 78 | gpuMesh.push(new WebGPUMeshPrimitive( 79 | primitive.geometry, 80 | factory.getPipeline(gpu, layout, material, !!skin), 81 | factory.getBindGroup(gpu, material, skin) 82 | )); 83 | } 84 | this.#gpuMeshes.set(mesh, gpuMesh); 85 | } 86 | 87 | const instances = meshInstances.get(mesh); 88 | for (const primitive of gpuMesh) { 89 | for (const instance of instances) { 90 | gpu.renderBatch.addRenderable(primitive.geometry, primitive.pipeline, primitive.bindGroups, instance); 91 | } 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /js/engine/controls/orbit-controls.js: -------------------------------------------------------------------------------- 1 | import { System } from '../core/ecs.js'; 2 | import { MouseState, GamepadState } from '../core/input.js'; 3 | import { Transform } from '../core/transform.js'; 4 | import { vec3, vec2, quat } from 'gl-matrix'; 5 | 6 | export class OrbitControls { 7 | target = vec3.create(); 8 | angle = vec2.create(); 9 | distance = 5; 10 | maxAngleX = Math.PI * 0.5; 11 | minAngleX = -Math.PI * 0.5; 12 | maxAngleY = Math.PI; 13 | minAngleY = -Math.PI; 14 | constrainXAngle = true; 15 | constrainYAngle = false; 16 | maxDistance = 10; 17 | minDistance = 1; 18 | distanceStep = 0.005; 19 | constrainDistance = true; 20 | } 21 | 22 | export class OrbitControlsSystem extends System { 23 | execute() { 24 | const mouse = this.singleton.get(MouseState); 25 | const gamepad = this.singleton.get(GamepadState); 26 | 27 | this.query(OrbitControls, Transform).forEach((entity, control, transform) => { 28 | let updated = false; 29 | 30 | // Handle Mouse state. 31 | if (mouse.buttons[0] && (mouse.delta[0] || mouse.delta[1])) { 32 | control.angle[1] += mouse.delta[0] * 0.025; 33 | control.angle[0] += mouse.delta[1] * 0.025; 34 | updated = true; 35 | } 36 | 37 | if (mouse.wheelDelta[1]) { 38 | control.distance += (-mouse.wheelDelta[1] * control.distanceStep); 39 | updated = true; 40 | } 41 | 42 | // Handle Gamepad state 43 | for (const pad of gamepad.gamepads) { 44 | if (pad.axes.length > 3) { 45 | const x = Math.abs(pad.axes[2]) > 0.1 ? pad.axes[2] : 0; 46 | const y = Math.abs(pad.axes[3]) > 0.1 ? pad.axes[3] : 0; 47 | if (x || y) { 48 | control.angle[1] += x * 0.025; 49 | control.angle[0] -= y * 0.025; 50 | updated = true; 51 | } 52 | } 53 | } 54 | 55 | // Constrain the motion if necessary 56 | if (updated) { 57 | if(control.constrainYAngle) { 58 | control.angle[1] = Math.min(Math.max(control.angle[1], control.minAngleY), control.maxAngleY); 59 | } else { 60 | while (control.angle[1] < -Math.PI) { 61 | control.angle[1] += Math.PI * 2; 62 | } 63 | while (control.angle[1] >= Math.PI) { 64 | control.angle[1] -= Math.PI * 2; 65 | } 66 | } 67 | 68 | if(control.constrainXAngle) { 69 | control.angle[0] = Math.min(Math.max(control.angle[0], control.minAngleX), control.maxAngleX); 70 | } else { 71 | while (control.angle[0] < -Math.PI) { 72 | control.angle[0] += Math.PI * 2; 73 | } 74 | while (control.angle[0] >= Math.PI) { 75 | control.angle[0] -= Math.PI * 2; 76 | } 77 | } 78 | 79 | if(control.constrainDistance) { 80 | control.distance = Math.min(Math.max(control.distance, control.minDistance), control.maxDistance); 81 | } 82 | } 83 | 84 | // Update the orientation 85 | const q = transform.orientation; 86 | quat.identity(q); 87 | quat.rotateY(q, q, -control.angle[1]); 88 | quat.rotateX(q, q, -control.angle[0]); 89 | 90 | // Update the position 91 | vec3.set(transform.position, 0, 0, control.distance); 92 | vec3.transformQuat(transform.position, transform.position, transform.orientation); 93 | vec3.add(transform.position, transform.position, control.target); 94 | }); 95 | } 96 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-bind-group-layouts.js: -------------------------------------------------------------------------------- 1 | // Since bind group layouts are used all over the place and frequently shared between 2 | // systems, it's easier to initialize all the common ones in one place 3 | export class WebGPUBindGroupLayouts { 4 | constructor(device) { 5 | this.frame = device.createBindGroupLayout({ 6 | label: `Frame BindGroupLayout`, 7 | entries: [{ 8 | binding: 0, // Camera uniforms 9 | visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE, 10 | buffer: {}, 11 | }, { 12 | binding: 1, // Light uniforms 13 | visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE, 14 | buffer: { type: 'read-only-storage' } 15 | }, { 16 | binding: 2, // Cluster Lights storage 17 | visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE, 18 | buffer: { type: 'read-only-storage' } 19 | }, { 20 | binding: 3, // Default Sampler 21 | visibility: GPUShaderStage.FRAGMENT, 22 | sampler: {} 23 | }, { 24 | binding: 4, // Shadow texture 25 | visibility: GPUShaderStage.FRAGMENT, 26 | texture: { sampleType: 'depth' } 27 | }, { 28 | binding: 5, // Shadow sampler 29 | visibility: GPUShaderStage.FRAGMENT, 30 | sampler: { type: 'comparison' } 31 | }, { 32 | binding: 6, // Light/Shadow lookup table 33 | visibility: GPUShaderStage.FRAGMENT, 34 | buffer: { type: 'read-only-storage' } 35 | }, { 36 | binding: 7, // Shadow properites 37 | visibility: GPUShaderStage.FRAGMENT, 38 | buffer: { type: 'read-only-storage' } 39 | },] 40 | }); 41 | 42 | this.instance = this.model = device.createBindGroupLayout({ 43 | label: `Instance BindGroupLayout`, 44 | entries: [{ 45 | binding: 0, // Instance uniforms 46 | visibility: GPUShaderStage.VERTEX, 47 | buffer: { 48 | hasDynamicOffset: true, 49 | minBindingSize: 16 * Float32Array.BYTES_PER_ELEMENT * 4 50 | }, 51 | }] 52 | }); 53 | 54 | // These would be better off in some other location, but order of operations it tricky 55 | this.clusterBounds = device.createBindGroupLayout({ 56 | label: `Cluster Storage BindGroupLayout`, 57 | entries: [{ 58 | binding: 0, 59 | visibility: GPUShaderStage.COMPUTE, 60 | buffer: { type: 'storage' } 61 | }] 62 | }); 63 | 64 | this.clusterLights = device.createBindGroupLayout({ 65 | label: `Cluster Bounds BindGroupLayout`, 66 | entries: [{ 67 | binding: 0, // Camera uniforms 68 | visibility: GPUShaderStage.COMPUTE, 69 | buffer: {}, 70 | }, { 71 | binding: 1, // Cluster Bounds 72 | visibility: GPUShaderStage.COMPUTE, 73 | buffer: { type: 'read-only-storage' } 74 | }, { 75 | binding: 2, // Cluster Lights 76 | visibility: GPUShaderStage.COMPUTE, 77 | buffer: { type: 'storage' } 78 | }, { 79 | binding: 3, // Light uniforms 80 | visibility: GPUShaderStage.COMPUTE, 81 | buffer: { type: 'read-only-storage' } 82 | }] 83 | }); 84 | 85 | this.skin = device.createBindGroupLayout({ 86 | label: 'Skin BindGroupLayout', 87 | entries: [{ 88 | binding: 0, // joint buffer 89 | visibility: GPUShaderStage.VERTEX, 90 | buffer: { type: 'read-only-storage' } 91 | }, { 92 | binding: 1, // inverse bind matrix buffer 93 | visibility: GPUShaderStage.VERTEX, 94 | buffer: { type: 'read-only-storage' } 95 | }] 96 | }); 97 | } 98 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-texture-debug.js: -------------------------------------------------------------------------------- 1 | import { WebGPUSystem } from './webgpu-system.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { FullscreenTexturedQuadVertexSource, ShadowDebugFragmentSource, TextureDebugFragmentSource } from './wgsl/common.js'; 4 | 5 | export class WebGPUDebugTextureView { 6 | bindGroup; 7 | constructor(textureView, isShadow) { 8 | this.textureView = textureView; 9 | this.isShadow = isShadow; 10 | } 11 | } 12 | 13 | export class WebGPUTextureDebugSystem extends WebGPUSystem { 14 | stage = Stage.PostRender; 15 | 16 | init(gpu) { 17 | // Setup a render pipeline for drawing debug views of textured quads 18 | this.pipeline = gpu.device.createRenderPipeline({ 19 | label: `Texture Debug Pipeline`, 20 | layout: 'auto', 21 | vertex: { 22 | module: gpu.device.createShaderModule({ 23 | code: FullscreenTexturedQuadVertexSource, 24 | label: 'Texture Debug Vertex' 25 | }), 26 | entryPoint: 'vertexMain' 27 | }, 28 | fragment: { 29 | module: gpu.device.createShaderModule({ 30 | code: TextureDebugFragmentSource, 31 | label: 'Texture Debug Fragment' 32 | }), 33 | entryPoint: 'fragmentMain', 34 | targets: [{ 35 | format: gpu.renderTargets.format, 36 | }], 37 | } 38 | }); 39 | 40 | this.shadowPipeline = gpu.device.createRenderPipeline({ 41 | label: `Shadow Texture Debug Pipeline`, 42 | layout: 'auto', 43 | vertex: { 44 | module: gpu.device.createShaderModule({ 45 | code: FullscreenTexturedQuadVertexSource, 46 | label: 'Shadow Texture Debug Vertex' 47 | }), 48 | entryPoint: 'vertexMain' 49 | }, 50 | fragment: { 51 | module: gpu.device.createShaderModule({ 52 | code: ShadowDebugFragmentSource, 53 | label: 'Shadow Texture Debug Fragment' 54 | }), 55 | entryPoint: 'fragmentMain', 56 | targets: [{ 57 | format: gpu.renderTargets.format, 58 | }], 59 | } 60 | }); 61 | } 62 | 63 | execute(delta, time, gpu) { 64 | let textureCount = 0; 65 | this.query(WebGPUDebugTextureView).forEach((entity, textureView) => { 66 | if (!textureView.bindGroup) { 67 | textureView.bindGroup = gpu.device.createBindGroup({ 68 | label: 'Texture Debug Bind Group', 69 | layout: textureView.isShadow ? this.shadowPipeline.getBindGroupLayout(0) : this.pipeline.getBindGroupLayout(0), 70 | entries: [{ 71 | binding: 0, 72 | resource: textureView.textureView, 73 | }, { 74 | binding: 1, 75 | resource: gpu.defaultSampler, 76 | }] 77 | }); 78 | } 79 | textureCount++; 80 | }); 81 | 82 | if (!textureCount) { return; } 83 | 84 | const outputTexture = gpu.renderTargets.context.getCurrentTexture(); 85 | const commandEncoder = gpu.device.createCommandEncoder({}); 86 | 87 | const passEncoder = commandEncoder.beginRenderPass({ 88 | colorAttachments: [{ 89 | view: outputTexture.createView(), 90 | clearValue: {r: 0, g: 0, b: 0, a: 1.0}, 91 | loadOp: 'clear', 92 | storeOp: 'store', 93 | }], 94 | }); 95 | 96 | this.query(WebGPUDebugTextureView).forEach((entity, textureView) => { 97 | passEncoder.setPipeline(textureView.isShadow ? this.shadowPipeline : this.pipeline); 98 | passEncoder.setBindGroup(0, textureView.bindGroup); 99 | passEncoder.draw(3); 100 | }) 101 | 102 | passEncoder.end(); 103 | 104 | gpu.device.queue.submit([commandEncoder.finish()]); 105 | } 106 | } -------------------------------------------------------------------------------- /js/engine/loaders/lib/draco-worker.js: -------------------------------------------------------------------------------- 1 | importScripts('./worker_service.js'); 2 | importScripts('https://www.gstatic.com/draco/versioned/decoders/1.5.2/draco_decoder_gltf.js'); 3 | 4 | const DRACO_DECODER = new Promise((resolve) => { 5 | DracoDecoderModule({ 6 | onModuleLoaded: (draco) => { 7 | resolve(draco); 8 | } 9 | }); 10 | }); 11 | 12 | class DracoDecoderService extends WorkerService { 13 | async init() { 14 | this.draco = await DRACO_DECODER; 15 | this.decoder = new this.draco.Decoder(); 16 | } 17 | 18 | async onDispatch(args) { 19 | const dracoBuffer = new Int8Array(args.buffer); 20 | const dracoAttributes = args.attributes; 21 | const indexSize = args.indexSize; 22 | 23 | const geometryType = this.decoder.GetEncodedGeometryType(dracoBuffer); 24 | 25 | let geometry; 26 | let status; 27 | switch (geometryType) { 28 | case this.draco.POINT_CLOUD: { 29 | geometry = new this.draco.PointCloud(); 30 | status = this.decoder.DecodeArrayToPointCloud(dracoBuffer, dracoBuffer.byteLength, geometry); 31 | break; 32 | } 33 | case this.draco.TRIANGULAR_MESH: { 34 | geometry = new this.draco.Mesh(); 35 | status = this.decoder.DecodeArrayToMesh(dracoBuffer, dracoBuffer.byteLength, geometry); 36 | break; 37 | } 38 | default: 39 | throw new Error('Unknown Draco geometry type'); 40 | } 41 | 42 | if (!status.ok()) { 43 | throw new Error('Draco decode failed'); 44 | } 45 | 46 | const bufferViews = {}; 47 | 48 | const vertCount = geometry.num_points(); 49 | 50 | for (const name in dracoAttributes) { 51 | const attributeId = dracoAttributes[name]; 52 | const attribute = this.decoder.GetAttributeByUniqueId(geometry, attributeId); 53 | const stride = attribute.byte_stride(); 54 | const byteLength = vertCount * stride; 55 | 56 | const outPtr = this.draco._malloc(byteLength); 57 | const success = this.decoder.GetAttributeDataArrayForAllPoints( 58 | geometry, attribute, attribute.data_type(), byteLength, outPtr); 59 | if (!success) { 60 | throw new Error('Failed to get decoded attribute data array'); 61 | } 62 | 63 | bufferViews[name] = { 64 | // Copy the decoded attribute data out of the WASM heap. 65 | buffer: new Uint8Array(this.draco.HEAPF32.buffer, outPtr, byteLength).slice().buffer, 66 | stride, 67 | }; 68 | 69 | this.draco._free(outPtr); 70 | } 71 | 72 | if (geometryType == this.draco.TRIANGULAR_MESH && indexSize) { 73 | const indexCount = geometry.num_faces() * 3; 74 | const byteLength = indexCount * indexSize; 75 | 76 | const outPtr = this.draco._malloc(byteLength); 77 | let success; 78 | if (indexSize == 4) { 79 | success = this.decoder.GetTrianglesUInt32Array(geometry, byteLength, outPtr); 80 | } else { 81 | success = this.decoder.GetTrianglesUInt16Array(geometry, byteLength, outPtr); 82 | } 83 | 84 | if (!success) { 85 | throw new Error('Failed to get decoded index data array'); 86 | } 87 | 88 | bufferViews.INDICES = { 89 | // Copy the decoded index data out of the WASM heap. 90 | buffer: new Uint8Array(this.draco.HEAPF32.buffer, outPtr, byteLength).slice().buffer, 91 | stride: indexSize, 92 | }; 93 | 94 | this.draco._free(outPtr); 95 | } 96 | 97 | const transferBuffers = []; 98 | for (const name in bufferViews) { 99 | transferBuffers.push(bufferViews[name].buffer); 100 | } 101 | 102 | return this.transfer(bufferViews, transferBuffers); 103 | } 104 | } 105 | 106 | WorkerService.register(new DracoDecoderService()); -------------------------------------------------------------------------------- /js/engine/util/texture-atlas-allocator.js: -------------------------------------------------------------------------------- 1 | // Very simple BSP allocator. Ported from some ancient Quake 2 rendering code that I did. 2 | class TextureAtlasRect { 3 | #node; 4 | 5 | constructor(node) { 6 | this.#node = node; 7 | } 8 | 9 | get x() { return this.#node?.x; } 10 | get y() { return this.#node?.y; } 11 | get width() { return this.#node?.width; } 12 | get height() { return this.#node?.height; } 13 | 14 | release() { 15 | if (this.#node) { 16 | this.#node.onRelease(); 17 | this.#node = null; 18 | } 19 | } 20 | } 21 | 22 | class TextureAtlasNode { 23 | allocated = false; 24 | children = null; 25 | rect = null; 26 | 27 | constructor(x, y, width, height, parent) { 28 | this.x = x; 29 | this.y = y; 30 | this.width = width; 31 | this.height = height; 32 | this.parent = parent; 33 | } 34 | 35 | get fullyAllocated() { 36 | if (this.children) { 37 | return this.children[0].fullyAllocated && this.children[1].fullyAllocated; 38 | } 39 | return this.rect; 40 | } 41 | 42 | get hasAllocation() { 43 | if (this.children) { 44 | return this.children[0].hasAllocation || this.children[1].hasAllocation; 45 | } 46 | return this.rect; 47 | } 48 | 49 | allocate() { 50 | if (!this.rect) { 51 | if (this.children) { 52 | throw new Error('Split nodes cannot be allocated.'); 53 | } 54 | this.rect = new TextureAtlasRect(this); 55 | } 56 | return this.rect; 57 | } 58 | 59 | split(width, height) { 60 | if (this.children) { 61 | throw new Error('Node is already split.'); 62 | } 63 | if((this.height - height) > (this.width - width)) { 64 | // Vertical split 65 | this.children = [ 66 | new TextureAtlasNode(this.x, this.y, this.width, height, this), 67 | new TextureAtlasNode(this.x, this.y+height, this.width, this.height - height, this) 68 | ]; 69 | } else { 70 | // Horizontal split 71 | this.children = [ 72 | new TextureAtlasNode(this.x, this.y, width, this.height, this), 73 | new TextureAtlasNode(this.x+width, this.y, this.width - width, this.height, this) 74 | ]; 75 | } 76 | } 77 | 78 | onRelease() { 79 | this.rect = null; 80 | this.children = null; 81 | 82 | // When a node is released, check to see if it's parent node can also be released, which 83 | // collapses a split node into a single bigger node. 84 | if (this.parent && !this.parent.hasAllocation) { 85 | this.parent.onRelease(); 86 | } 87 | } 88 | } 89 | 90 | export class TextureAtlasAllocator { 91 | #root; 92 | 93 | constructor(width, height) { 94 | this.#root = new TextureAtlasNode(0, 0, width, height || width); 95 | } 96 | 97 | #findNodeToAllocate(node, width, height) { 98 | // Node is too small for the required size. 99 | if (node.width < width || node.height < height) { 100 | return null; 101 | } 102 | 103 | // Already used 104 | if (node.fullyAllocated) { return null; } 105 | 106 | // Check children nodes 107 | if (node.children) { 108 | var retNode = this.#findNodeToAllocate(node.children[0], width, height); 109 | return retNode || this.#findNodeToAllocate(node.children[1], width, height); 110 | } 111 | 112 | // Perfect fit. Allocate without splitting 113 | if(node.width == width && node.height == height) { 114 | return node; 115 | } 116 | 117 | node.split(width, height); 118 | return this.#findNodeToAllocate(node.children[0], width, height); 119 | } 120 | 121 | allocate(width, height) { 122 | let node = this.#findNodeToAllocate(this.#root, width, height || width); 123 | return node?.allocate(); 124 | } 125 | } -------------------------------------------------------------------------------- /js/engine/core/input.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { Stage } from './stage.js'; 3 | import { vec2 } from 'gl-matrix'; 4 | 5 | export class KeyboardState { 6 | pressed = {}; 7 | 8 | keyPressed(keycode) { 9 | return !!this.pressed[keycode]; 10 | } 11 | } 12 | 13 | export class MouseState { 14 | buttons = []; 15 | position = vec2.create(); 16 | delta = vec2.create(); 17 | wheelDelta = vec2.create(); 18 | } 19 | 20 | export class GamepadState { 21 | gamepads = []; 22 | } 23 | 24 | export class InputSystem extends System { 25 | stage = Stage.First; 26 | eventCanvas = null; 27 | lastMouseX = 0; 28 | lastMouseY = 0; 29 | mouseDeltaX = 0; 30 | mouseDeltaY = 0; 31 | mouseWheelDeltaX = 0; 32 | mouseWheelDeltaY = 0; 33 | 34 | init(gpu) { 35 | const keyboard = new KeyboardState(); 36 | const mouse = new MouseState(); 37 | const gamepad = new GamepadState(); 38 | 39 | this.singleton.add(keyboard, mouse, gamepad); 40 | 41 | window.addEventListener('keydown', (event) => { 42 | // Do nothing if event already handled 43 | if (event.defaultPrevented) { return; } 44 | keyboard.pressed[event.code] = true; 45 | }); 46 | window.addEventListener('keyup', (event) => { 47 | keyboard.pressed[event.code] = false; 48 | }); 49 | window.addEventListener('blur', (event) => { 50 | // Clear the pressed keys on blur so that we don't have inadvertent inputs 51 | // after we've shifted focus to another window. 52 | keyboard.pressed = {}; 53 | mouse.buttons = []; 54 | }); 55 | 56 | this.pointerEnterCallback = (event) => { 57 | this.lastMouseX = event.clientX; 58 | this.lastMouseY = event.clientY; 59 | this.mouseDeltaX = 0; 60 | this.mouseDeltaY = 0; 61 | }; 62 | 63 | this.pointerMoveCallback = (event) => { 64 | this.mouseDeltaX += event.clientX - this.lastMouseX; 65 | this.mouseDeltaY += event.clientY - this.lastMouseY; 66 | this.lastMouseX = mouse.position[0] = event.clientX; 67 | this.lastMouseY = mouse.position[1] = event.clientY; 68 | }; 69 | 70 | this.pointerDownCallback = (event) => { 71 | mouse.buttons[event.button] = true; 72 | }; 73 | 74 | this.pointerUpCallback = (event) => { 75 | mouse.buttons[event.button] = false; 76 | }; 77 | 78 | this.mousewheelCallback = (event) => { 79 | this.mouseWheelDeltaX += event.wheelDeltaX; 80 | this.mouseWheelDeltaY += event.wheelDeltaY; 81 | }; 82 | 83 | // TODO: These listeners should be attached to the canvases in question 84 | gpu.canvas.addEventListener('pointerenter', this.pointerEnterCallback); 85 | gpu.canvas.addEventListener('pointerdown', this.pointerDownCallback); 86 | gpu.canvas.addEventListener('pointermove', this.pointerMoveCallback); 87 | gpu.canvas.addEventListener('pointerup', this.pointerUpCallback); 88 | gpu.canvas.addEventListener('mousewheel', this.mousewheelCallback); 89 | } 90 | 91 | execute() { 92 | // Update the mouse singleton with the latest movement deltas since the last frame. 93 | const mouse = this.singleton.get(MouseState); 94 | mouse.delta[0] = this.mouseDeltaX; 95 | mouse.delta[1] = this.mouseDeltaY; 96 | mouse.wheelDelta[0] = this.mouseWheelDeltaX; 97 | mouse.wheelDelta[1] = this.mouseWheelDeltaY; 98 | this.mouseDeltaX = 0; 99 | this.mouseDeltaY = 0; 100 | this.mouseWheelDeltaX = 0; 101 | this.mouseWheelDeltaY = 0; 102 | 103 | const gamepad = this.singleton.get(GamepadState); 104 | gamepad.gamepads = []; 105 | const pads = navigator.getGamepads(); 106 | if (pads) { 107 | for (const pad of pads) { 108 | if (pad) { 109 | gamepad.gamepads.push(pad); 110 | } 111 | } 112 | } 113 | } 114 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-clustered-light.js: -------------------------------------------------------------------------------- 1 | // Lots of this is ported or otherwise influenced by http://www.aortiz.me/2018/12/21/CG.html and 2 | // https://github.com/Angelo1211/HybridRenderingEngine 3 | 4 | import { WebGPUSystem } from './webgpu-system.js'; 5 | import { WebGPUCamera } from './webgpu-camera.js'; 6 | import { 7 | DISPATCH_SIZE, 8 | ClusterBoundsSource, 9 | ClusterLightsSource 10 | } from './wgsl/clustered-light.js'; 11 | 12 | const emptyArray = new Uint32Array(1); 13 | 14 | export class WebGPUClusteredLights extends WebGPUSystem { 15 | #outputSize = {width: 0, height: 0}; 16 | #zRange = [0, 0]; 17 | 18 | init(gpu) { 19 | const device = gpu.device; 20 | 21 | // Pipeline creation 22 | device.createComputePipelineAsync({ 23 | layout: device.createPipelineLayout({ 24 | bindGroupLayouts: [ 25 | gpu.bindGroupLayouts.frame, 26 | gpu.bindGroupLayouts.clusterBounds, 27 | ] 28 | }), 29 | compute: { 30 | module: device.createShaderModule({ code: ClusterBoundsSource, label: "Cluster Bounds" }), 31 | entryPoint: 'computeMain', 32 | } 33 | }).then((pipeline) => { 34 | this.boundsPipeline = pipeline; 35 | }); 36 | 37 | device.createComputePipelineAsync({ 38 | layout: device.createPipelineLayout({ 39 | bindGroupLayouts: [ 40 | gpu.bindGroupLayouts.clusterLights, 41 | ] 42 | }), 43 | compute: { 44 | module: device.createShaderModule({ code: ClusterLightsSource, label: "Cluster Lights" }), 45 | entryPoint: 'computeMain', 46 | } 47 | }).then((pipeline) => { 48 | this.lightsPipeline = pipeline; 49 | }); 50 | } 51 | 52 | updateClusterBounds(gpu, camera) { 53 | if (!this.boundsPipeline || 54 | (this.#outputSize.width == gpu.renderTargets.size.width && 55 | this.#outputSize.height == gpu.renderTargets.size.height && 56 | this.#zRange[0] == camera.zRange[0] && 57 | this.#zRange[1] == camera.zRange[1])) { 58 | return; 59 | } 60 | 61 | // TODO: This should really be updated for any change in the camera 62 | this.#outputSize.width = gpu.renderTargets.size.width; 63 | this.#outputSize.height = gpu.renderTargets.size.height; 64 | this.#zRange[0] = camera.zRange[0]; 65 | this.#zRange[1] = camera.zRange[1]; 66 | 67 | const commandEncoder = gpu.device.createCommandEncoder({ label: 'Cluster Bounds Command Encoder'}); 68 | 69 | const passEncoder = commandEncoder.beginComputePass({ label: 'Cluster Bounds Compute Pass'}); 70 | passEncoder.setPipeline(this.boundsPipeline); 71 | passEncoder.setBindGroup(0, camera.bindGroup); 72 | passEncoder.setBindGroup(1, camera.clusterBoundsBindGroup); 73 | passEncoder.dispatchWorkgroups(...DISPATCH_SIZE); 74 | passEncoder.end(); 75 | 76 | gpu.device.queue.submit([commandEncoder.finish()]); 77 | } 78 | 79 | updateClusterLights(gpu, camera) { 80 | if (!this.lightsPipeline) { return; } 81 | 82 | // Reset the light offset counter to 0 before populating the light clusters. 83 | gpu.device.queue.writeBuffer(camera.clusterLightsBuffer, 0, emptyArray); 84 | 85 | const commandEncoder = gpu.device.createCommandEncoder(); 86 | 87 | // Update the FrameUniforms buffer with the values that are used by every 88 | // program and don't change for the duration of the frame. 89 | const passEncoder = commandEncoder.beginComputePass(); 90 | passEncoder.setPipeline(this.lightsPipeline); 91 | passEncoder.setBindGroup(0, camera.clusterLightsBindGroup); 92 | passEncoder.dispatchWorkgroups(...DISPATCH_SIZE); 93 | passEncoder.end(); 94 | 95 | gpu.device.queue.submit([commandEncoder.finish()]); 96 | } 97 | 98 | execute(delta, time, gpu) { 99 | this.query(WebGPUCamera).forEach((entity, camera) => { 100 | this.updateClusterBounds(gpu, camera); 101 | this.updateClusterLights(gpu, camera); 102 | }); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /js/engine/core/animation.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { TransformPool } from './transform.js'; 3 | import { quat } from 'gl-matrix'; 4 | 5 | const tmpOut0 = new Float32Array(4); 6 | const tmpOut1 = new Float32Array(4); 7 | 8 | export class StepAnimationSampler { 9 | constructor(times, values, componentCount) { 10 | this.times = times; 11 | this.values = values; 12 | this.componentCount = componentCount; 13 | } 14 | 15 | get duration() { 16 | return this.times[this.times.length - 1]; 17 | } 18 | 19 | getTimeIndex(t) { 20 | // TODO: Optimize the crap out of this! 21 | if (t < this.times[0]) { 22 | return [0, 0, 0.0]; 23 | } 24 | const last = this.times.length - 1; 25 | if (t >= this.times[last]) { 26 | return [last, last, 0.0]; 27 | } 28 | let t0 = this.times[0]; 29 | for (let i = 1; i < this.times.length; ++i) { 30 | const t1 = this.times[i]; 31 | if (t <= t1) { 32 | const a = (t - t0) / (t1 - t0); 33 | return [i-1, i, a]; 34 | } 35 | t0 = t1; 36 | } 37 | } 38 | 39 | getValueAt(out, index) { 40 | const offset = index * this.componentCount; 41 | switch(this.componentCount) { 42 | case 4: 43 | out[3] = this.values[offset+3]; 44 | case 3: 45 | out[2] = this.values[offset+2]; 46 | case 2: 47 | out[1] = this.values[offset+1]; 48 | case 1: 49 | out[0] = this.values[offset]; 50 | } 51 | } 52 | 53 | sampleValue(out, t) { 54 | const ti = this.getTimeIndex(t); 55 | this.getValueAt(out, ti[0]); 56 | } 57 | } 58 | 59 | export class LinearAnimationSampler extends StepAnimationSampler { 60 | sampleValue(out, t) { 61 | const ti = this.getTimeIndex(t); 62 | this.getValueAt(tmpOut0, ti[0]); 63 | this.getValueAt(tmpOut1, ti[1]); 64 | 65 | // Get the weights for the two values 66 | const w1 = ti[2]; 67 | const w0 = 1 - w1; 68 | 69 | switch(this.componentCount) { 70 | case 4: 71 | out[3] = tmpOut0[3] * w0 + tmpOut1[3] * w1; 72 | case 3: 73 | out[2] = tmpOut0[2] * w0 + tmpOut1[2] * w1; 74 | case 2: 75 | out[1] = tmpOut0[1] * w0 + tmpOut1[1] * w1; 76 | case 1: 77 | out[0] = tmpOut0[0] * w0 + tmpOut1[0] * w1; 78 | } 79 | } 80 | } 81 | 82 | export class SphericalLinearAnimationSampler extends StepAnimationSampler { 83 | sampleValue(out, t) { 84 | const ti = this.getTimeIndex(t); 85 | this.getValueAt(tmpOut0, ti[0]); 86 | this.getValueAt(tmpOut1, ti[1]); 87 | quat.slerp(out, tmpOut0, tmpOut1, ti[2]); 88 | } 89 | } 90 | 91 | // TODO: Allow animation mixing 92 | export class AnimationChannel { 93 | constructor(targetIndex, path, sampler) { 94 | this.targetIndex = targetIndex; 95 | this.path = path; 96 | this.sampler = sampler; 97 | } 98 | 99 | applyAtTime(t, transformPool) { 100 | const transform = transformPool.getTransform(this.targetIndex); 101 | this.sampler.sampleValue(transform[this.path], t); 102 | } 103 | } 104 | 105 | export class Animation { 106 | constructor(name, channels) { 107 | this.name = name; 108 | this.channels = channels; 109 | this.duration = 0; 110 | for (const channel of this.channels) { 111 | this.duration = Math.max(this.duration, channel.sampler.duration); 112 | } 113 | } 114 | 115 | applyAtTime(t, transformPool) { 116 | // TODO: Better control over edge behavior 117 | t = t % this.duration; 118 | 119 | for (const channel of this.channels) { 120 | channel.applyAtTime(t, transformPool); 121 | } 122 | } 123 | } 124 | 125 | export class AnimationTiming { 126 | constructor(options = {}) { 127 | this.startTime = options.startTime || 0; 128 | this.speedFactor = options.speedFactor || 1.0; 129 | } 130 | } 131 | 132 | export class AnimationSystem extends System { 133 | init() { 134 | this.animationQuery = this.query(Animation, TransformPool); 135 | } 136 | 137 | execute(delta, time) { 138 | this.animationQuery.forEach((entity, animation, transformPool) => { 139 | const timing = entity.get(AnimationTiming); 140 | let progress = time; 141 | if (timing) { 142 | progress -= timing.startTime; 143 | progress *= timing.speedFactor; 144 | } 145 | animation.applyAtTime(progress, transformPool); 146 | }); 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-render-batch.js: -------------------------------------------------------------------------------- 1 | import { mat4 } from 'gl-matrix'; 2 | import { WebGPUMaterialBindGroups } from './materials/webgpu-material-factory.js'; 3 | import { INSTANCE_SIZE_BYTES, INSTANCE_SIZE_F32 } from './wgsl/common.js'; 4 | 5 | const IDENTITY_MATRIX = mat4.create(); 6 | const EMPTY_BIND_GROUP = new WebGPUMaterialBindGroups(); 7 | const DEFAULT_INSTANCE_COLOR = new Float32Array(4); 8 | 9 | const INITIAL_INSTANCE_COUNT = 128; 10 | 11 | export class WebGPURenderBatch { 12 | device; 13 | pipelineGeometries = new Map(); 14 | #instanceCapacity; 15 | #instanceBuffer; 16 | #instanceArray; 17 | #instanceBufferDirty = true; 18 | #totalInstanceCount = 0; 19 | 20 | constructor(device) { 21 | this.device = device; 22 | this.resizeInstanceBuffer(INITIAL_INSTANCE_COUNT); 23 | } 24 | 25 | resizeInstanceBuffer(capacity) { 26 | if (this.#instanceBuffer) { 27 | this.#instanceBuffer.destroy(); 28 | } 29 | 30 | this.#instanceBufferDirty = true; 31 | this.#instanceCapacity = capacity; 32 | this.#instanceArray = new Float32Array(INSTANCE_SIZE_F32 * capacity); 33 | this.#instanceBuffer = this.device.createBuffer({ 34 | size: INSTANCE_SIZE_BYTES * capacity, 35 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST 36 | }); 37 | } 38 | 39 | clear() { 40 | this.pipelineGeometries = new Map(); 41 | this.#totalInstanceCount = 0; 42 | this.#instanceBufferDirty = true; 43 | } 44 | 45 | addRenderable(geometry, pipeline, bindGroups = EMPTY_BIND_GROUP, instance = {}) { 46 | this.#instanceBufferDirty = true; 47 | let geometryMaterials = this.pipelineGeometries.get(pipeline); 48 | if (!geometryMaterials) { 49 | geometryMaterials = new Map(); 50 | this.pipelineGeometries.set(pipeline, geometryMaterials); 51 | } 52 | let materialInstances = geometryMaterials.get(geometry); 53 | if (!materialInstances) { 54 | materialInstances = new Map(); 55 | geometryMaterials.set(geometry, materialInstances); 56 | } 57 | let instances = materialInstances.get(bindGroups); 58 | if (!instances) { 59 | instances = {instanceCount: 0, transforms: [], colors: [], bufferOffset: 0}; 60 | materialInstances.set(bindGroups, instances); 61 | } 62 | 63 | instances.instanceCount += instance.count || 1; 64 | instances.transforms.push(instance.transform?.worldMatrix || IDENTITY_MATRIX); 65 | instances.colors.push(instance.color?.buffer || instance.color || DEFAULT_INSTANCE_COLOR); 66 | this.#totalInstanceCount += 1; 67 | } 68 | 69 | get instanceBuffer() { 70 | if (this.#instanceBufferDirty) { 71 | // Instance buffer needs to be resized to compensate for the total number of instances. 72 | while (this.#instanceCapacity < this.#totalInstanceCount) { 73 | this.resizeInstanceBuffer(this.#instanceCapacity * 2); 74 | } 75 | // TODO: Heuristic for resizing the instance buffer to be smaller? 76 | 77 | // Loop through all of the instances we're going to render and place their transforms in the 78 | // instances buffer. 79 | let instanceCount = 0; 80 | for (const geometryMaterials of this.pipelineGeometries.values()) { 81 | for (const materialInstances of geometryMaterials.values()) { 82 | for (const instances of materialInstances.values()) { 83 | instances.bufferOffset = instanceCount * INSTANCE_SIZE_BYTES; 84 | for (let i = 0; i < instances.transforms.length; ++i) { 85 | // TODO: Could just copy over the 4x3 portion of the matrix needed to represent a full 86 | // TRS transform. Copies would be slower, though. 87 | const arrayOffset = instanceCount * INSTANCE_SIZE_F32; 88 | this.#instanceArray.set(instances.transforms[i], arrayOffset); 89 | this.#instanceArray.set(instances.colors[i], arrayOffset + 16); 90 | instanceCount++; 91 | } 92 | } 93 | } 94 | } 95 | 96 | // Write the instance data out to the buffer. 97 | this.device.queue.writeBuffer(this.#instanceBuffer, 0, this.#instanceArray, 0, instanceCount * INSTANCE_SIZE_F32); 98 | this.#instanceBufferDirty = false; 99 | } 100 | return this.#instanceBuffer; 101 | } 102 | 103 | get sortedPipelines() { 104 | // Sort the pipelines by render order (e.g. so transparent objects are rendered last). 105 | const pipelines = Array.from(this.pipelineGeometries.keys()) 106 | pipelines.sort((a, b) => a.renderOrder - b.renderOrder); 107 | return pipelines; 108 | } 109 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-buffer.js: -------------------------------------------------------------------------------- 1 | import { StaticBuffer, DynamicBuffer } from '../core/buffers.js'; 2 | 3 | export class WebGPUStaticBuffer extends StaticBuffer { 4 | #arrayBuffer; 5 | 6 | constructor(device, gpuBuffer, size, usage, mapped = false) { 7 | super(size, usage); 8 | 9 | this.gpuBuffer = gpuBuffer; 10 | 11 | if (mapped) { 12 | // Static buffers are expected to be created with mappedAtCreation. 13 | this.#arrayBuffer = gpuBuffer.getMappedRange(); 14 | } 15 | } 16 | 17 | get arrayBuffer() { 18 | return this.#arrayBuffer; 19 | } 20 | 21 | // For static buffers, once you call finish() the data cannot be updated again. 22 | finish() { 23 | this.gpuBuffer.unmap(); 24 | this.#arrayBuffer = null; 25 | } 26 | } 27 | 28 | export class WebGPUDynamicBuffer extends DynamicBuffer { 29 | #device; 30 | #arrayBuffer; 31 | #size; 32 | #activeStagingBuffer; 33 | #stagingBufferQueue = []; 34 | 35 | constructor(device, gpuBuffer, size, usage, mapped = false) { 36 | super(size, usage); 37 | 38 | this.#device = device; 39 | this.#size = size; 40 | this.gpuBuffer = gpuBuffer; 41 | this.#activeStagingBuffer = gpuBuffer; 42 | 43 | if (mapped) { 44 | // Static buffers are expected to be created with mappedAtCreation. 45 | this.#arrayBuffer = gpuBuffer.getMappedRange(); 46 | } 47 | } 48 | 49 | #getOrCreateStagingBuffer() { 50 | if (this.#stagingBufferQueue.length) { 51 | return this.#stagingBufferQueue.pop(); 52 | } 53 | 54 | return this.#device.createBuffer({ 55 | size: this.#size, 56 | usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE, 57 | mappedAtCreation: true, 58 | }); 59 | } 60 | 61 | get arrayBuffer() { 62 | return this.#arrayBuffer; 63 | } 64 | 65 | beginUpdate() { 66 | this.#activeStagingBuffer = this.#getOrCreateStagingBuffer(); 67 | this.#arrayBuffer = this.#activeStagingBuffer.getMappedRange(); 68 | } 69 | 70 | // For static buffers, once you call finish() the data cannot be updated again. 71 | finish() { 72 | this.#activeStagingBuffer.unmap(); 73 | this.#arrayBuffer = null; 74 | 75 | if (this.#activeStagingBuffer !== this.gpuBuffer) { 76 | const stagingBuffer = this.#activeStagingBuffer; 77 | const commandEncoder = this.#device.createCommandEncoder({}); 78 | commandEncoder.copyBufferToBuffer(stagingBuffer, 0, this.gpuBuffer, 0, this.#size); 79 | this.#device.queue.submit([commandEncoder.finish()]); 80 | 81 | stagingBuffer.mapAsync(GPUMapMode.WRITE).then(() => { 82 | this.#stagingBufferQueue.push(stagingBuffer); 83 | }); 84 | } 85 | this.#activeStagingBuffer = null; 86 | } 87 | } 88 | 89 | function toGPUBufferUsage(usage) { 90 | switch (usage) { 91 | case 'vertex': 92 | return GPUBufferUsage.VERTEX; 93 | case 'index': 94 | return GPUBufferUsage.INDEX; 95 | case 'joint': 96 | case 'light': 97 | return GPUBufferUsage.STORAGE; 98 | default: 99 | throw new Error(`Unknown Buffer usage '${usage}'`); 100 | } 101 | } 102 | 103 | export class WebGPUBufferManager { 104 | constructor(device) { 105 | this.device = device; 106 | } 107 | 108 | createBufferInternal(constructor, sizeOrArrayBuffer, usage) { 109 | let size; 110 | let arrayBufferView = null; 111 | if (typeof sizeOrArrayBuffer === 'number') { 112 | size = sizeOrArrayBuffer; 113 | } else { 114 | size = sizeOrArrayBuffer.byteLength; 115 | arrayBufferView = sizeOrArrayBuffer; 116 | if (!ArrayBuffer.isView(arrayBufferView)) { 117 | arrayBufferView = new Uint8Array(arrayBufferView); 118 | } 119 | } 120 | 121 | // Align the size to the next multiple of 4 122 | size = Math.ceil(size / 4) * 4; 123 | 124 | const gpuBuffer = this.device.createBuffer({ 125 | size, 126 | usage, 127 | mappedAtCreation: true, 128 | }); 129 | const buffer = new constructor(this.device, gpuBuffer, size, usage, true); 130 | 131 | // If an ArrayBuffer or TypedArray was passed in, initialize the GPUBuffer 132 | // with it's data. Otherwise we'll leave it mapped for the used to populate. 133 | if (arrayBufferView) { 134 | const typedArray = new arrayBufferView.constructor(buffer.arrayBuffer); 135 | typedArray.set(arrayBufferView); 136 | buffer.finish(); 137 | } 138 | 139 | return buffer; 140 | } 141 | 142 | createStaticBuffer(sizeOrArrayBuffer, usage) { 143 | return this.createBufferInternal(WebGPUStaticBuffer, sizeOrArrayBuffer, toGPUBufferUsage(usage)); 144 | } 145 | 146 | createDynamicBuffer(sizeOrArrayBuffer, usage) { 147 | return this.createBufferInternal(WebGPUDynamicBuffer, sizeOrArrayBuffer, toGPUBufferUsage(usage) | GPUBufferUsage.COPY_DST); 148 | } 149 | } -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-render-targets.js: -------------------------------------------------------------------------------- 1 | // Holds render targets which need to be shared between render passes. 2 | export class WebGPURenderTargets extends EventTarget { 3 | context; 4 | 5 | msaaColorTexture; 6 | msaaEmissiveTexture; 7 | emissiveTexture; 8 | depthTexture; 9 | 10 | format = 'bgra8unorm'; 11 | depthFormat = 'depth24plus'; 12 | size = {width: 0, height: 0}; 13 | 14 | constructor(adapter, device, canvas, flags) { 15 | super(); 16 | 17 | this.format = flags.colorFormat; 18 | this.depthFormat = flags.depthFormat; 19 | this.sampleCount = flags.sampleCount; 20 | this.resolutionMultiplier = flags.resolutionMultiplier; 21 | 22 | this.useEmissive = flags.bloomEnabled; 23 | 24 | this.context = canvas.getContext('webgpu'); 25 | 26 | // This function isn't available in Firefox, though it is in the spec. 27 | if (!this.format) { 28 | if (navigator.gpu.getPreferredCanvasFormat) { 29 | this.format = navigator.gpu.getPreferredCanvasFormat(adapter); 30 | } else { 31 | this.format = 'bgra8unorm'; 32 | } 33 | flags.colorFormat = this.format; 34 | } 35 | 36 | this.context.configure({ 37 | device: device, 38 | format: this.format, 39 | alphaMode: 'opaque', 40 | }); 41 | 42 | this.resizeObserver = new ResizeObserver(entries => { 43 | for (let entry of entries) { 44 | if (entry.target != canvas) { continue; } 45 | 46 | if (entry.devicePixelContentBoxSize) { 47 | // Should give exact pixel dimensions, but only works on Chrome. 48 | const devicePixelSize = entry.devicePixelContentBoxSize[0]; 49 | this.onCanvasResized(device, devicePixelSize.inlineSize, devicePixelSize.blockSize); 50 | } else if (entry.contentBoxSize) { 51 | // Firefox implements `contentBoxSize` as a single content rect, rather than an array 52 | const contentBoxSize = Array.isArray(entry.contentBoxSize) ? entry.contentBoxSize[0] : entry.contentBoxSize; 53 | this.onCanvasResized(device, contentBoxSize.inlineSize, contentBoxSize.blockSize); 54 | } else { 55 | this.onCanvasResized(device, entry.contentRect.width, entry.contentRect.height); 56 | } 57 | } 58 | }); 59 | this.resizeObserver.observe(canvas); 60 | this.onCanvasResized(device, canvas.width, canvas.height); 61 | } 62 | 63 | onCanvasResized(device, pixelWidth, pixelHeight) { 64 | this.size.width = pixelWidth * this.resolutionMultiplier; 65 | this.size.height = pixelHeight * this.resolutionMultiplier; 66 | 67 | this.context.canvas.width = this.size.width; 68 | this.context.canvas.height = this.size.height; 69 | 70 | if (this.sampleCount > 1) { 71 | this.msaaColorTexture = device.createTexture({ 72 | size: this.size, 73 | sampleCount: this.sampleCount, 74 | format: this.format, 75 | usage: GPUTextureUsage.RENDER_ATTACHMENT, 76 | }); 77 | } 78 | 79 | if (this.depthFormat) { 80 | this.depthTexture = device.createTexture({ 81 | size: this.size, 82 | sampleCount: this.sampleCount, 83 | format: this.depthFormat, 84 | usage: GPUTextureUsage.RENDER_ATTACHMENT 85 | }); 86 | } 87 | 88 | if (this.useEmissive) { 89 | if (this.sampleCount > 1) { 90 | this.msaaEmissiveTexture = device.createTexture({ 91 | size: this.size, 92 | sampleCount: this.sampleCount, 93 | format: this.format, 94 | usage: GPUTextureUsage.RENDER_ATTACHMENT, 95 | }); 96 | } 97 | this.emissiveTexture = device.createTexture({ 98 | size: this.size, 99 | format: this.format, 100 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, 101 | }); 102 | 103 | const bloomSize = { 104 | width: Math.floor(this.size.width * 0.5), 105 | height: Math.floor(this.size.height * 0.5) 106 | }; 107 | this.bloomTextures = [ 108 | device.createTexture({ 109 | size: bloomSize, 110 | format: this.format, 111 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, 112 | }), 113 | // Two last-stage textures for ping-ponging to allow glowy trails. 114 | device.createTexture({ 115 | size: bloomSize, 116 | format: this.format, 117 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, 118 | }), 119 | device.createTexture({ 120 | size: bloomSize, 121 | format: this.format, 122 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, 123 | }) 124 | ]; 125 | } 126 | 127 | this.dispatchEvent(new Event('reconfigured')); 128 | } 129 | } -------------------------------------------------------------------------------- /js/engine/util/bvh.js: -------------------------------------------------------------------------------- 1 | // Bounding volume hierarchy (BHV) 2 | 3 | // Lots of this based on Erin Catto's great doc on dynamic BVHs: 4 | // https://box2d.org/files/ErinCatto_DynamicBVH_Full.pdf 5 | 6 | import { vec3 } from 'gl-matrix'; 7 | 8 | const tmpVec3 = vec3.create(); 9 | 10 | const tmpBounds = [ 11 | vec3.create(), vec3.create(), vec3.create(), vec3.create(), 12 | vec3.create(), vec3.create(), vec3.create(), vec3.create() 13 | ]; 14 | 15 | export class AABB { 16 | min = vec3.create(); 17 | max = vec3.create(); 18 | 19 | constructor(aabb) { 20 | if (aabb) { this.updateBounds(aabb); } 21 | } 22 | 23 | union(aabb0, aabb1) { 24 | vec3.min(this.min, aabb0.min, aabb1.min); 25 | vec3.max(this.max, aabb0.max, aabb1.max); 26 | return this; 27 | } 28 | 29 | updateBounds(aabb) { 30 | vec3.copy(this.min, aabb.min); 31 | vec3.copy(this.max, aabb.max); 32 | return this; 33 | } 34 | 35 | get surfaceArea() { 36 | vec3.sub(tmpVec3, this.max, this.min); 37 | return (tmpVec3[0] * tmpVec3[1] + 38 | tmpVec3[1] * tmpVec3[2] + 39 | tmpVec3[2] * tmpVec3[0]) * 2; 40 | } 41 | 42 | // Transform this AABB by a matrix, getting the new AABB. (The new AABB will almost certainly be 43 | // larger than necessary to fit the transformed contents.) 44 | transform(mat) { 45 | if (!mat) { return; } 46 | 47 | vec3.transformMat4(tmpBounds[0], this.min, mat); 48 | vec3.transformMat4(tmpBounds[1], [this.min[0], this.min[1], this.max[2]], mat); 49 | vec3.transformMat4(tmpBounds[2], [this.min[0], this.max[1], this.min[2]], mat); 50 | vec3.transformMat4(tmpBounds[3], [this.min[0], this.max[1], this.max[2]], mat); 51 | vec3.transformMat4(tmpBounds[4], [this.max[0], this.min[1], this.min[2]], mat); 52 | vec3.transformMat4(tmpBounds[5], [this.max[0], this.min[1], this.max[2]], mat); 53 | vec3.transformMat4(tmpBounds[6], [this.max[0], this.max[1], this.min[2]], mat); 54 | vec3.transformMat4(tmpBounds[7], this.max, mat); 55 | 56 | vec3.copy(this.min, tmpBounds[0]); 57 | vec3.copy(this.max, tmpBounds[0]); 58 | for (let i = 1; i < 8; ++i) { 59 | vec3.min(this.min, this.min, tmpBounds[i]); 60 | vec3.max(this.max, this.max, tmpBounds[i]); 61 | } 62 | } 63 | } 64 | 65 | class BVHNode extends AABB { 66 | parent; 67 | child0; 68 | child1; 69 | value; 70 | 71 | constructor(aabb, value) { 72 | super(aabb); 73 | this.value = value; 74 | } 75 | 76 | setChildren(node0, node1) { 77 | this.union(node0, node1); 78 | this.child0 = node0; 79 | this.child1 = node1; 80 | node0.parent = this; 81 | node1.parent = this; 82 | } 83 | } 84 | 85 | const tmpAabb = new AABB(); 86 | 87 | export class BVH { 88 | #rootNode; 89 | visLevel = 0; 90 | 91 | constructor() { 92 | 93 | } 94 | 95 | reset() { 96 | this.#rootNode = null; 97 | } 98 | 99 | get rootNode() { 100 | return this.#rootNode; 101 | } 102 | 103 | #findBestSibling(node, testNode, parentIndirectCost = 0, bestSibling = null) { 104 | if (!testNode.child0) { 105 | // It's a leaf node 106 | const cost = tmpAabb.union(node, testNode).surfaceArea + parentIndirectCost; 107 | 108 | if (bestSibling && bestSibling.cost < cost) { 109 | // The current best sibling is already lower cost. 110 | return bestSibling; 111 | } 112 | 113 | return { node: testNode, cost: tmpAabb.union(node, testNode).surfaceArea + parentIndirectCost }; 114 | } else { 115 | const nodeIndirectCost = (tmpAabb.union(node, testNode).surfaceArea - testNode.surfaceArea) + parentIndirectCost; 116 | 117 | if (bestSibling && nodeIndirectCost > bestSibling.cost) { 118 | // Early-out of searching this branch, since we know that it can't be a better pick than 119 | // a previously identified sibling. 120 | return bestSibling; 121 | } 122 | 123 | bestSibling = this.#findBestSibling(node, testNode.child0, nodeIndirectCost, bestSibling); 124 | return this.#findBestSibling(node, testNode.child1, nodeIndirectCost, bestSibling); 125 | } 126 | 127 | } 128 | 129 | insert(aabb, value) { 130 | const node = new BVHNode(aabb, value); 131 | 132 | if (!this.#rootNode) { 133 | this.#rootNode = node; 134 | return; 135 | } 136 | 137 | // Find best sibling for leaf 138 | let sibling = this.#findBestSibling(node, this.#rootNode).node; 139 | 140 | // Create new parent 141 | const prevParent = sibling.parent; 142 | const newParent = new BVHNode(); 143 | newParent.parent = prevParent; 144 | newParent.setChildren(node, sibling); 145 | 146 | if (!prevParent) { 147 | // Sibling was the root node 148 | this.#rootNode = newParent; 149 | } else { 150 | if (prevParent.child0 == sibling) { 151 | prevParent.child0 = newParent; 152 | } else { 153 | prevParent.child1 = newParent; 154 | } 155 | } 156 | 157 | // Refit ancestors AABBs 158 | let parentNode = prevParent; 159 | while (parentNode) { 160 | parentNode.union(parentNode.child0, parentNode.child1); 161 | parentNode = parentNode.parent; 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /js/engine/webgpu/materials/webgpu-pbr-material.js: -------------------------------------------------------------------------------- 1 | import { PBRMaterial } from '../../core/materials.js'; 2 | import { PBRFragmentSource, MATERIAL_BUFFER_SIZE } from '../wgsl/pbr-material.js'; 3 | import { WebGPUMaterialFactory } from './webgpu-material-factory.js'; 4 | import { vec4, vec3 } from 'gl-matrix'; 5 | 6 | // Can reuse these for every PBR material 7 | const materialArray = new Float32Array(MATERIAL_BUFFER_SIZE / Float32Array.BYTES_PER_ELEMENT); 8 | const baseColorFactor = new Float32Array(materialArray.buffer, 0, 4); 9 | const emissiveFactor = new Float32Array(materialArray.buffer, 4 * 4, 3); 10 | const metallicRoughnessFactor = new Float32Array(materialArray.buffer, 8 * 4, 2); 11 | 12 | function isFullyRough(material) { 13 | return material.roughnessFactor == 1.0 && !material.metallicRoughnessTexture; 14 | } 15 | 16 | class WebGPUPBRMaterial extends WebGPUMaterialFactory { 17 | writesEmissive = true; 18 | 19 | init(gpu) { 20 | this.bindGroupLayout = gpu.device.createBindGroupLayout({ 21 | label: 'PBR Material BindGroupLayout', 22 | entries: [{ 23 | binding: 0, // Uniform Buffer 24 | visibility: GPUShaderStage.FRAGMENT, 25 | buffer: {} 26 | }, 27 | { 28 | binding: 1, // baseColorTexture 29 | visibility: GPUShaderStage.FRAGMENT, 30 | texture: {} 31 | }, 32 | { 33 | binding: 2, // baseColorSampler 34 | visibility: GPUShaderStage.FRAGMENT, 35 | sampler: {} 36 | }, 37 | { 38 | binding: 3, // normalTexture 39 | visibility: GPUShaderStage.FRAGMENT, 40 | texture: {} 41 | }, 42 | { 43 | binding: 4, // normalSampler 44 | visibility: GPUShaderStage.FRAGMENT, 45 | sampler: {} 46 | }, 47 | { 48 | binding: 5, // metallicRoughnessTexture 49 | visibility: GPUShaderStage.FRAGMENT, 50 | texture: {} 51 | }, 52 | { 53 | binding: 6, // metallicRoughnessSampler 54 | visibility: GPUShaderStage.FRAGMENT, 55 | sampler: {} 56 | }, 57 | { 58 | binding: 7, // occlusionTexture 59 | visibility: GPUShaderStage.FRAGMENT, 60 | texture: {} 61 | }, 62 | { 63 | binding: 8, // occlusionSampler 64 | visibility: GPUShaderStage.FRAGMENT, 65 | sampler: {} 66 | }, 67 | { 68 | binding: 9, // emissiveTexture 69 | visibility: GPUShaderStage.FRAGMENT, 70 | texture: {} 71 | }, 72 | { 73 | binding: 10, // emissiveSampler 74 | visibility: GPUShaderStage.FRAGMENT, 75 | sampler: {} 76 | }] 77 | }); 78 | } 79 | 80 | createBindGroup(gpu, material) { 81 | vec4.copy(baseColorFactor, material.baseColorFactor); 82 | vec3.copy(emissiveFactor, material.emissiveFactor); 83 | metallicRoughnessFactor[0] = material.metallicFactor; 84 | metallicRoughnessFactor[1] = material.roughnessFactor; 85 | materialArray[7] = material.occlusionStrength; 86 | materialArray[8] = material.alphaCutoff; 87 | 88 | const materialBuffer = gpu.device.createBuffer({ 89 | size: MATERIAL_BUFFER_SIZE, 90 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST, 91 | }); 92 | gpu.device.queue.writeBuffer(materialBuffer, 0, materialArray); 93 | 94 | return gpu.device.createBindGroup({ 95 | layout: this.bindGroupLayout, 96 | entries: [{ 97 | binding: 0, 98 | resource: { buffer: materialBuffer }, 99 | }, 100 | { 101 | binding: 1, 102 | resource: material.baseColorTexture || gpu.whiteTextureView, 103 | }, 104 | { 105 | binding: 2, 106 | resource: material.baseColorSampler || gpu.defaultSampler, 107 | }, 108 | { 109 | binding: 3, 110 | resource: material.normalTexture || gpu.defaultNormalTextureView, 111 | }, 112 | { 113 | binding: 4, 114 | resource: material.normalSampler || gpu.defaultSampler, 115 | }, 116 | { 117 | binding: 5, 118 | resource: material.metallicRoughnessTexture || gpu.whiteTextureView, 119 | }, 120 | { 121 | binding: 6, 122 | resource: material.metallicRoughnessSampler || gpu.defaultSampler, 123 | }, 124 | { 125 | binding: 7, 126 | resource: material.occlusionTexture || gpu.whiteTextureView, 127 | }, 128 | { 129 | binding: 8, 130 | resource: material.occlusionSampler || gpu.defaultSampler, 131 | }, 132 | { 133 | binding: 9, 134 | resource: material.emissiveTexture || gpu.whiteTextureView, 135 | }, 136 | { 137 | binding: 10, 138 | resource: material.emissiveSampler || gpu.defaultSampler, 139 | },] 140 | }); 141 | } 142 | 143 | pipelineKey(geometryLayout, material, skinned) { 144 | return super.pipelineKey(geometryLayout, material, skinned) + `:${isFullyRough(material)}`; 145 | } 146 | 147 | createFragmentModule(gpu, geometryLayout, material) { 148 | return { 149 | module: gpu.device.createShaderModule({ code: PBRFragmentSource(geometryLayout, isFullyRough(material), gpu.flags) }), 150 | entryPoint: 'fragmentMain', 151 | }; 152 | } 153 | } 154 | 155 | WebGPUMaterialFactory.register(PBRMaterial, WebGPUPBRMaterial); 156 | -------------------------------------------------------------------------------- /js/engine/core/light.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { vec3 } from 'gl-matrix'; 3 | import { Transform } from './transform.js'; 4 | 5 | export class PointLight { 6 | lightIndex = -1; 7 | color = new Float32Array(3); 8 | 9 | constructor(options) { 10 | this.color.set(options?.color || [1, 1, 1]); 11 | this.intensity = options?.intensity || 1; 12 | this.range = options?.range || -1; 13 | } 14 | 15 | get computedRange() { 16 | const lightRadius = 0.05; 17 | const illuminationThreshold = 0.001; 18 | return lightRadius * (Math.sqrt(this.intensity/illuminationThreshold) - 1); 19 | } 20 | } 21 | 22 | // TODO: Not currently functional 23 | export class SpotLight { 24 | lightIndex = -1; 25 | color = new Float32Array(3); 26 | 27 | constructor(options) { 28 | this.color.set(options?.color || [1, 1, 1]); 29 | this.intensity = options?.intensity || 1; 30 | this.range = options?.range || -1; 31 | this.angle = options?.angle || Math.PI / 3; 32 | } 33 | } 34 | 35 | export class DirectionalLight { 36 | color = new Float32Array(3); 37 | direction = new Float32Array(3); 38 | 39 | constructor(options) { 40 | this.color.set(options?.color || [1, 1, 1]); 41 | this.intensity = options?.intensity || 1; 42 | this.direction.set(options?.direction || [0, 1, 0]); 43 | } 44 | } 45 | 46 | export class ShadowCastingLight { 47 | up = new Float32Array(3); 48 | 49 | constructor(options) { 50 | this.textureSize = options.textureSize || 512; // For a point light this would be per-side 51 | this.zNear = options.zNear || 1.0; 52 | this.zFar = options.zFar || 128.0; 53 | 54 | // Only applies to directional light 55 | this.width = options.width || 10; 56 | this.height = options.height || 10; 57 | this.up.set(options.up || [0, 1, 0]); 58 | this.cascades = options.cascades || 0; 59 | } 60 | } 61 | 62 | export class AmbientLight { 63 | color = new Float32Array(3); 64 | 65 | constructor(r = 0.1, g = 0.1, b = 0.1) { 66 | this.color[0] = r; 67 | this.color[1] = g; 68 | this.color[2] = b; 69 | } 70 | } 71 | 72 | const AMBIENT_LIGHT_BYTE_SIZE = 4 * Float32Array.BYTES_PER_ELEMENT; 73 | const DIRECTIONAL_LIGHT_BYTE_SIZE = 8 * Float32Array.BYTES_PER_ELEMENT; 74 | const POINT_LIGHT_BYTE_SIZE = 8 * Float32Array.BYTES_PER_ELEMENT; 75 | 76 | export class LightBuffer { 77 | constructor(gpu) { 78 | const LIGHT_BUFFER_SIZE = AMBIENT_LIGHT_BYTE_SIZE + DIRECTIONAL_LIGHT_BYTE_SIZE + (POINT_LIGHT_BYTE_SIZE * gpu.maxLightCount); 79 | this.buffer = gpu.createDynamicBuffer(LIGHT_BUFFER_SIZE, 'light'); 80 | this.buffer.finish(); // TODO: That could be more elegant. 81 | this.lightCount = 0; 82 | } 83 | } 84 | 85 | export class LightSystem extends System { 86 | init(gpu) { 87 | this.singleton.add(new LightBuffer(gpu)); 88 | 89 | this.ambientLightQuery = this.query(AmbientLight); 90 | this.directionalLightQuery = this.query(DirectionalLight); 91 | this.pointLightQuery = this.query(PointLight); 92 | this.spotLightQuery = this.query(SpotLight); 93 | } 94 | 95 | execute(delta, time, gpu) { 96 | const lightBuffer = this.singleton.get(LightBuffer); 97 | lightBuffer.buffer.beginUpdate(); 98 | const arrayBuffer = lightBuffer.buffer.arrayBuffer; 99 | 100 | // Accumulate all of the ambient lights. 101 | const ambientColor = new Float32Array(arrayBuffer, 0, 3); 102 | vec3.set(ambientColor, 0, 0, 0); 103 | this.ambientLightQuery.forEach((entity, light) => { 104 | vec3.add(ambientColor, ambientColor, light.color); 105 | }); 106 | 107 | // Get any directional lights. 108 | const dirColorIntensity = new Float32Array(arrayBuffer, 4 * Float32Array.BYTES_PER_ELEMENT, 4); 109 | dirColorIntensity[3] = 0; 110 | this.directionalLightQuery.forEach((entity, light) => { 111 | vec3.copy(dirColorIntensity, light.color); 112 | dirColorIntensity[3] = light.intensity; // Intensity 113 | 114 | const dirDirection = new Float32Array(arrayBuffer, 8 * Float32Array.BYTES_PER_ELEMENT, 3); 115 | vec3.copy(dirDirection, light.direction); 116 | 117 | return false; // Only process the first one. 118 | }); 119 | 120 | let pointLightByteOffset = AMBIENT_LIGHT_BYTE_SIZE + DIRECTIONAL_LIGHT_BYTE_SIZE; 121 | lightBuffer.lightCount = 0; 122 | this.pointLightQuery.forEach((entity, light) => { 123 | if (light.intensity > 0) { 124 | const positionRange = new Float32Array(arrayBuffer, pointLightByteOffset, 4); 125 | const colorIntensity = new Float32Array(arrayBuffer, pointLightByteOffset + 4 * Float32Array.BYTES_PER_ELEMENT, 4); 126 | 127 | const transform = entity.get(Transform); 128 | if (transform) { 129 | transform.getWorldPosition(positionRange); 130 | } else { 131 | // If the light doesn't have a transform position it at the origin. 132 | vec3.set(positionRange, 0, 0, 0); 133 | } 134 | positionRange[3] = light.range >= 0 ? light.range : light.computedRange; 135 | 136 | vec3.copy(colorIntensity, light.color); 137 | colorIntensity[3] = light.intensity; 138 | 139 | light.lightIndex = lightBuffer.lightCount++; 140 | 141 | pointLightByteOffset += 8 * Float32Array.BYTES_PER_ELEMENT; 142 | 143 | // Stop processing lights if we overflow our max 144 | if (lightBuffer.lightCount == gpu.maxLightCount) { return false; } 145 | } else { 146 | light.lightIndex = -1; 147 | } 148 | }); 149 | 150 | this.spotLightQuery.forEach((entity, light) => { 151 | // TODO 152 | light.lightIndex = -1; 153 | }); 154 | 155 | const pointlightCountArray = new Uint32Array(arrayBuffer, 11 * Float32Array.BYTES_PER_ELEMENT, 1); 156 | pointlightCountArray[0] = lightBuffer.lightCount; 157 | 158 | lightBuffer.buffer.finish(); 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /js/engine/debug/bounds-visualizer.js: -------------------------------------------------------------------------------- 1 | import { System } from '../core/ecs.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { Mesh, Geometry, Attribute } from '../core/mesh.js'; 4 | import { BoundingVolume, BoundingVolumeType } from '../core/bounding-volume.js'; 5 | import { BVH } from '../util/bvh.js'; 6 | import { UnlitMaterial } from '../core/materials.js'; 7 | import { Transform, StaticTransform } from '../core/transform.js'; 8 | import { vec3 } from 'gl-matrix'; 9 | 10 | function createAABBGeometry(gpu) { 11 | const boundsVerts = new Float32Array([ 12 | 1.0, 1.0, 1.0, // 0 13 | 0.0, 1.0, 1.0, // 1 14 | 1.0, 0.0, 1.0, // 2 15 | 0.0, 0.0, 1.0, // 3 16 | 1.0, 1.0, 0.0, // 4 17 | 0.0, 1.0, 0.0, // 5 18 | 1.0, 0.0, 0.0, // 6 19 | 0.0, 0.0, 0.0, // 7 20 | ]); 21 | 22 | const boundsIndices = new Uint16Array([ 23 | 0, 1, 2, 3, 0, 2, 1, 3, // Front 24 | 4, 5, 6, 7, 4, 6, 5, 7, // Back 25 | 0, 4, 1, 5, 2, 6, 3, 7, // Corners 26 | ]); 27 | 28 | const vertexBuffer = gpu.createStaticBuffer(boundsVerts, 'vertex'); 29 | const indexBuffer = gpu.createStaticBuffer(boundsIndices, 'index'); 30 | 31 | const geometry = new Geometry({ 32 | drawCount: boundsIndices.length, 33 | attributes: [ new Attribute('position', vertexBuffer) ], 34 | indices: { buffer: indexBuffer, format: 'uint16' }, 35 | topology: 'line-list' 36 | }); 37 | 38 | return geometry; 39 | } 40 | 41 | function createAABBMesh(gpu) { 42 | const geometry = createAABBGeometry(gpu); 43 | 44 | const material = new UnlitMaterial(); 45 | material.baseColorFactor[0] = 1.0; 46 | material.baseColorFactor[1] = 1.0; 47 | material.baseColorFactor[2] = 0.0; 48 | material.depthCompare = 'always'; 49 | 50 | const mesh = new Mesh({ geometry, material }); 51 | mesh.name = 'Bounding Volume AABB Visualization Mesh'; 52 | 53 | return mesh; 54 | } 55 | 56 | function createSphereMesh(gpu) { 57 | const ringSegments = 16; 58 | const colliderVerts = []; 59 | const colliderIndices = []; 60 | 61 | let idx = 0; 62 | for (let i = 0; i < ringSegments+1; ++i) { 63 | const u = (i / ringSegments) * Math.PI * 2; 64 | colliderVerts.push(Math.cos(u), 0, Math.sin(u)); 65 | if (i > 0) { colliderIndices.push(idx, ++idx); } 66 | } 67 | 68 | idx++ 69 | for (let i = 0; i < ringSegments+1; ++i) { 70 | const u = (i / ringSegments) * Math.PI * 2; 71 | colliderVerts.push(Math.cos(u), Math.sin(u), 0); 72 | if (i > 0) { colliderIndices.push(idx, ++idx); } 73 | } 74 | 75 | idx++ 76 | for (let i = 0; i < ringSegments+1; ++i) { 77 | const u = (i / ringSegments) * Math.PI * 2; 78 | colliderVerts.push(0, Math.cos(u), Math.sin(u)); 79 | if (i > 0) { colliderIndices.push(idx, ++idx); } 80 | } 81 | 82 | const vertexBuffer = gpu.createStaticBuffer(new Float32Array(colliderVerts), 'vertex'); 83 | const indexBuffer = gpu.createStaticBuffer(new Uint16Array(colliderIndices), 'index'); 84 | 85 | const geometry = new Geometry({ 86 | drawCount: colliderIndices.length, 87 | attributes: [ new Attribute('position', vertexBuffer) ], 88 | indices: { buffer: indexBuffer, format: 'uint16' }, 89 | topology: 'line-list' 90 | }); 91 | 92 | const material = new UnlitMaterial(); 93 | material.baseColorFactor[0] = 0.0; 94 | material.baseColorFactor[1] = 1.0; 95 | material.baseColorFactor[2] = 0.0; 96 | material.depthCompare = 'always'; 97 | material.castsShadow = false; 98 | 99 | const mesh = new Mesh({ geometry, material }); 100 | mesh.name = 'Bounding Volume Sphere Visualization Mesh'; 101 | 102 | return mesh; 103 | } 104 | 105 | export class BoundsVisualizerSystem extends System { 106 | stage = Stage.PostFrameLogic; 107 | 108 | init(gpu) { 109 | this.aabbMesh = createAABBMesh(gpu); 110 | 111 | const material = new UnlitMaterial(); 112 | material.baseColorFactor[0] = 0.0; 113 | material.baseColorFactor[1] = 0.3; 114 | material.baseColorFactor[2] = 1.0; 115 | material.depthCompare = 'always'; 116 | material.castsShadow = false; 117 | 118 | this.aabbLeafMesh = new Mesh({ 119 | geometry: createAABBGeometry(gpu), 120 | material }); 121 | this.aabbLeafMesh.name = 'Bounding Volume AABB Leaf Visualization Mesh'; 122 | 123 | this.sphereMesh = createSphereMesh(gpu); 124 | } 125 | 126 | execute(delta, time, gpu) { 127 | const scale = vec3.create(); 128 | 129 | /*this.query(BoundingVolume).forEach((entity, bounds) => { 130 | const transform = entity.get(Transform); 131 | 132 | switch(bounds.type) { 133 | case BoundingVolumeType.AABB: 134 | vec3.subtract(scale, bounds.max, bounds.min); 135 | 136 | gpu.addFrameMeshInstance(this.aabbMesh, new StaticTransform({ 137 | position: bounds.min, 138 | scale 139 | }, transform?.worldMatrix)); 140 | break; 141 | 142 | case BoundingVolumeType.Sphere: 143 | gpu.addFrameMeshInstance(this.sphereMesh, new StaticTransform({ 144 | position: bounds.center, 145 | scale: [bounds.radius, bounds.radius, bounds.radius] 146 | }, transform?.worldMatrix)); 147 | break; 148 | } 149 | });*/ 150 | 151 | this.query(BVH).forEach((entity, bvh) => { 152 | const transform = entity.get(Transform); 153 | 154 | const addNodeMesh = (node, level) => { 155 | if (!node) { return; } 156 | 157 | if (bvh.visLevel == 0 || bvh.visLevel >= level || Math.abs(bvh.visLevel) == level) { 158 | vec3.subtract(scale, node.max, node.min); 159 | const leaf = !node.child0; 160 | gpu.addFrameMeshInstance(leaf ? this.aabbLeafMesh : this.aabbMesh, 161 | new StaticTransform({ 162 | position: node.min, 163 | scale 164 | }, transform?.worldMatrix)); 165 | } 166 | addNodeMesh(node.child0, level+1); 167 | addNodeMesh(node.child1, level+1); 168 | }; 169 | 170 | addNodeMesh(bvh.rootNode, 0); 171 | }); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/wgsl-utils.js: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Brandon Jones 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | 21 | const LOG_ALL_SHADERS = false; 22 | const LOG_FULL_SHADER_TEXT = true; 23 | 24 | const MESSAGE_STYLE = { 25 | 'info': { 26 | icon: 'ℹ️', 27 | logFn: console.info, 28 | }, 29 | 'warning': { 30 | icon: '⚠️', 31 | logFn: console.warn, 32 | }, 33 | 'error': { 34 | icon: '⛔', 35 | logFn: console.error, 36 | } 37 | } 38 | 39 | /** 40 | * A method that captures errors returned by compiling a WebGPU shader module 41 | * and annotates them with additional information before echoing to the console 42 | * to aid with debugging. 43 | */ 44 | if ('GPUDevice' in window) { 45 | const origCreateShaderModule = GPUDevice.prototype.createShaderModule; 46 | GPUDevice.prototype.createShaderModule = function(descriptor) { 47 | if (!this.pushErrorScope) { 48 | return origCreateShaderModule.call(this, descriptor); 49 | } 50 | 51 | this.pushErrorScope('validation'); 52 | 53 | const shaderModule = origCreateShaderModule.call(this, descriptor); 54 | 55 | const validationPromise = this.popErrorScope().then((error) => { 56 | // If compilationInfo is not available in this browser just echo any error 57 | // messages we get. 58 | if (!shaderModule.compilationInfo && error) { 59 | console.error(error.message); 60 | } else { 61 | return error; 62 | } 63 | }); 64 | 65 | if (shaderModule.compilationInfo) { 66 | shaderModule.compilationInfo().then(async (info) => { 67 | const validationError = await validationPromise; 68 | 69 | if (!info.messages.length && !validationError && !LOG_ALL_SHADERS) { 70 | return; 71 | } 72 | 73 | const messageCount = { 74 | error: 0, 75 | warning: 0, 76 | info: 0, 77 | }; 78 | 79 | for (const message of info.messages) { 80 | messageCount[message.type] += 1; 81 | } 82 | 83 | if (messageCount.error == 0 && validationError) { 84 | messageCount.error = 1; 85 | } 86 | 87 | const label = shaderModule.label; 88 | let groupLabel = (label ? `"${label}"` : 'Shader') + 89 | ' returned compilation messages:'; 90 | for (const type in messageCount) { 91 | if (messageCount[type] > 0) { 92 | groupLabel += ` ${messageCount[type]}${MESSAGE_STYLE[type].icon}`; 93 | } 94 | } 95 | 96 | if (messageCount.error == 0) { 97 | console.groupCollapsed(groupLabel); 98 | } else { 99 | console.group(groupLabel); 100 | } 101 | 102 | const code = descriptor.code; 103 | for (const message of info.messages) { 104 | const type = message.type; 105 | 106 | // If the message doesn't have an associated position in the code just 107 | // repeat the message verbatim 108 | if (message.lineNum == 0 && message.linePos == 0) { 109 | MESSAGE_STYLE[type].logFn(message.message); 110 | continue; 111 | } 112 | 113 | const length = Math.max(message.length, 1); 114 | const lineStartIndex = code.lastIndexOf('\n', message.offset); 115 | const lineStart = code.substring(lineStartIndex+1, message.offset); 116 | const highlightText = code.substr(message.offset, length); 117 | const lineEndIndex = code.indexOf('\n', message.offset+length); 118 | const lineEnd = code.substring(message.offset+length, lineEndIndex == -1 ? undefined : lineEndIndex); 119 | 120 | MESSAGE_STYLE[type].logFn( 121 | `%c${message.lineNum}:${message.linePos} - %c${message.message}\n%c${lineStart}%c${highlightText}%c${lineEnd}`, 122 | 'font-weight: bold;', 123 | 'font-weight: default;', 124 | 'color: green;', 125 | 'color: lightgrey; background-color: darkred; font-weight: bold;', 126 | 'color: green;'); 127 | } 128 | 129 | if (validationError) { 130 | console.groupCollapsed("Validation Error Message"); 131 | console.error(validationError.message); 132 | console.groupEnd(); 133 | } 134 | 135 | if (LOG_FULL_SHADER_TEXT) { 136 | // Output the full shader text with numbered lines for easier reference. 137 | let numberedCodeLines = ''; 138 | const codeLines = code.split('\n'); 139 | const padLength = codeLines.length.toString().length; 140 | for (let i = 0; i < codeLines.length; ++i) { 141 | const lineNum = (i+1).toString().padStart(padLength, ' '); 142 | numberedCodeLines += `${lineNum}: ${codeLines[i]}\n`; 143 | } 144 | 145 | console.groupCollapsed("Full shader text"); 146 | console.log(numberedCodeLines); 147 | console.groupEnd(); 148 | } 149 | 150 | console.groupCollapsed("Stack Trace"); 151 | console.trace(); 152 | console.groupEnd(); 153 | 154 | console.groupEnd(); 155 | }); 156 | } 157 | 158 | return shaderModule; 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /js/engine/core/mesh.js: -------------------------------------------------------------------------------- 1 | import { System } from './ecs.js'; 2 | import { Stage } from './stage.js'; 3 | import { Transform } from './transform.js'; 4 | import { InstanceColor } from './instance-color.js'; 5 | import { GeometryLayoutCache } from './geometry-layout.js'; 6 | 7 | export const AttributeLocation = { 8 | position: 0, 9 | normal: 1, 10 | tangent: 2, 11 | texcoord: 3, 12 | texcoord2: 4, 13 | color: 5, 14 | joints: 6, 15 | weights: 7, 16 | maxAttributeLocation: 8, 17 | }; 18 | 19 | const DefaultAttributeFormat = { 20 | position: 'float32x3', 21 | normal: 'float32x3', 22 | tangent: 'float32x3', 23 | texcoord: 'float32x2', 24 | texcoord2: 'float32x2', 25 | color: 'float32x4', 26 | joints: 'uint16x4', 27 | weights: 'float32x4', 28 | }; 29 | 30 | const DefaultStride = { 31 | uint8x2: 2, 32 | uint8x4: 4, 33 | sint8x2: 2, 34 | sint8x4: 4, 35 | unorm8x2: 2, 36 | unorm8x4: 4, 37 | snorm8x2: 2, 38 | snorm8x4: 4, 39 | uint16x2: 4, 40 | uint16x4: 8, 41 | sint16x2: 4, 42 | sint16x4: 8, 43 | unorm16x2: 4, 44 | unorm16x4: 8, 45 | snorm16x2: 4, 46 | snorm16x4: 8, 47 | float16x2: 4, 48 | float16x4: 8, 49 | float32: 4, 50 | float32x2: 8, 51 | float32x3: 12, 52 | float32x4: 16, 53 | uint32: 4, 54 | uint32x2: 8, 55 | uint32x3: 12, 56 | uint32x4: 16, 57 | sint32: 4, 58 | sint32x2: 8, 59 | sint32x3: 12, 60 | sint32x4: 16, 61 | }; 62 | 63 | const LAYOUT_CACHE = new GeometryLayoutCache(); 64 | 65 | export class InterleavedAttributes { 66 | constructor(buffer, stride) { 67 | this.buffer = buffer; 68 | this.arrayStride = stride; 69 | this.attributes = []; 70 | this.minOffset = Number.MAX_SAFE_INTEGER; 71 | this.minShaderLocation = Number.MAX_SAFE_INTEGER; 72 | } 73 | 74 | addAttribute(attribute, offset = 0, format) { 75 | const shaderLocation = AttributeLocation[attribute]; 76 | if (shaderLocation === undefined) { 77 | throw new Error(`Unable to determine shader location for ${attribute}.`); 78 | } 79 | if (format === undefined) { 80 | format = DefaultAttributeFormat[attribute]; 81 | if (!format) { 82 | throw new Error(`Unable to determine attribute format for ${attribute}.`); 83 | } 84 | } 85 | this.minOffset = Math.min(this.minOffset, offset); 86 | this.minShaderLocation = Math.min(this.minShaderLocation, shaderLocation); 87 | this.attributes.push({attribute, shaderLocation, offset, format}); 88 | return this; 89 | } 90 | }; 91 | 92 | export class Attribute extends InterleavedAttributes { 93 | constructor(attribute, buffer, format, stride) { 94 | if (format === undefined) { 95 | format = DefaultAttributeFormat[attribute]; 96 | if (!format) { 97 | throw new Error(`Unable to determine attribute format for ${attribute}.`); 98 | } 99 | } 100 | if (!stride) { 101 | stride = DefaultStride[format]; 102 | } 103 | super(buffer, stride); 104 | super.addAttribute(attribute, 0, format); 105 | } 106 | 107 | addAttribute() { 108 | throw new Error('Cannot add attributes to a AttributeBuffer. Use InterleavedBuffer instead.'); 109 | } 110 | }; 111 | 112 | let nextGeometryId = 1; 113 | 114 | export class Geometry { 115 | id = nextGeometryId++; 116 | vertexBuffers = []; 117 | indexBuffer = null; 118 | drawCount = 0; 119 | layoutId; 120 | 121 | constructor(options) { 122 | // Sort the buffers/attributes by shaderLocation to aid in pipeline deduplication. 123 | const attribBuffers = []; 124 | if (options.attributes) { 125 | for (const attribBuffer of options.attributes) { 126 | if (!attribBuffer.attributes.length) { continue; } 127 | attribBuffers.push(attribBuffer); 128 | } 129 | attribBuffers.sort((a, b) => a.minShaderLocation - b.minShaderLocation); 130 | let i = 0; 131 | for (const buffer of attribBuffers) { 132 | this.vertexBuffers.push({ 133 | slot: i++, 134 | buffer: buffer.buffer, 135 | offset: buffer.minOffset 136 | }); 137 | buffer.attributes.sort((a, b) => a.shaderLocation - b.shaderLocation); 138 | } 139 | } 140 | 141 | if (options.indices?.buffer) { 142 | this.indexBuffer = { 143 | buffer: options.indices.buffer, 144 | offset: options.indices.offset || 0, 145 | format: options.indices.format || 'uint32' 146 | }; 147 | } 148 | 149 | const topology = options.topology || 'triangle-list'; 150 | const layout = LAYOUT_CACHE.createLayout(attribBuffers, topology, this.indexBuffer?.format); 151 | this.layoutId = layout.id; 152 | 153 | this.drawCount = options.drawCount || 0; 154 | // TODO: If an explicit drawCount wasn't given, guess one from the given buffers. 155 | } 156 | 157 | get layout() { 158 | return LAYOUT_CACHE.getLayout(this.layoutId); 159 | } 160 | } 161 | 162 | // A mesh is a collection of geometry/material pairs. Only meshes should be used as components for 163 | // an entity. (This allows entities to have multiple geometries, whereas otherwise they would only 164 | // be able to have a single geometry and material, which would be limiting cases in which 165 | // an object consisting of multiple parts need to function as a single entity.) 166 | export class Mesh { 167 | name = ''; // Primarily for debugging. 168 | primitives = []; // Borrowing the term from glTF, but it's clunky. 169 | skin = null; 170 | boundingVolume = null; // Not required, but may be available in some cases. 171 | 172 | constructor(...primitives) { 173 | for (const primitive of primitives) { 174 | if (!primitive.geometry || !primitive.material) { 175 | throw new Error('Primitive specified for mesh that lacks geometry or material'); 176 | } 177 | primitive.meshPipelineId = `${primitive.geometry.layoutId},${primitive.material.materialId}`; 178 | } 179 | this.primitives.push(...primitives); 180 | } 181 | } 182 | 183 | export class MeshSystem extends System { 184 | stage = Stage.PostFrameLogic; 185 | 186 | async init() { 187 | this.meshQuery = this.query(Mesh); 188 | } 189 | 190 | execute(delta, time, gpu) { 191 | // TODO: This would be a perfect place for some frustum culling, etc. 192 | this.meshQuery.forEach((entity, mesh) => { 193 | gpu.addFrameMeshInstance(mesh, entity.get(Transform), entity.get(InstanceColor)); 194 | }); 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-camera.js: -------------------------------------------------------------------------------- 1 | import { WebGPUSystem } from './webgpu-system.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { mat4, vec3 } from 'gl-matrix'; 4 | 5 | import { Transform } from '../core/transform.js'; 6 | import { Camera } from '../core/camera.js'; 7 | import { LightBuffer } from '../core/light.js'; 8 | 9 | import { CAMERA_BUFFER_SIZE } from './wgsl/common.js'; 10 | import { CLUSTER_BOUNDS_SIZE, CLUSTER_LIGHTS_SIZE } from './wgsl/clustered-light.js'; 11 | 12 | // Also used by things like shadow-casting lights. 13 | export class WebGPUCameraBase { 14 | constructor(gpu) { 15 | const device = gpu.device; 16 | 17 | this.arrayBuffer = new ArrayBuffer(CAMERA_BUFFER_SIZE); 18 | this.projection = new Float32Array(this.arrayBuffer, 0, 16); 19 | this.inverseProjection = new Float32Array(this.arrayBuffer, 16 * Float32Array.BYTES_PER_ELEMENT, 16); 20 | this.view = new Float32Array(this.arrayBuffer, 32 * Float32Array.BYTES_PER_ELEMENT, 16); 21 | this.position = new Float32Array(this.arrayBuffer, 48 * Float32Array.BYTES_PER_ELEMENT, 3); 22 | this.time = new Float32Array(this.arrayBuffer, 51 * Float32Array.BYTES_PER_ELEMENT, 1); 23 | this.outputSize = new Float32Array(this.arrayBuffer, 52 * Float32Array.BYTES_PER_ELEMENT, 2); 24 | this.zRange = new Float32Array(this.arrayBuffer, 54 * Float32Array.BYTES_PER_ELEMENT, 2); 25 | 26 | this.cameraBuffer = device.createBuffer({ 27 | size: CAMERA_BUFFER_SIZE, 28 | usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM, 29 | }); 30 | } 31 | } 32 | 33 | export class WebGPUCamera extends WebGPUCameraBase { 34 | constructor(gpu, lightBuffer) { 35 | super(gpu); 36 | 37 | const device = gpu.device; 38 | 39 | this.clusterBoundsBuffer = device.createBuffer({ 40 | size: CLUSTER_BOUNDS_SIZE, 41 | usage: GPUBufferUsage.STORAGE 42 | }); 43 | 44 | this.clusterLightsBuffer = device.createBuffer({ 45 | size: CLUSTER_LIGHTS_SIZE, 46 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST 47 | }); 48 | 49 | this.clusterBoundsBindGroup = device.createBindGroup({ 50 | layout: gpu.bindGroupLayouts.clusterBounds, 51 | entries: [{ 52 | binding: 0, 53 | resource: { 54 | buffer: this.clusterBoundsBuffer, 55 | }, 56 | }], 57 | }); 58 | 59 | this.clusterLightsBindGroup = device.createBindGroup({ 60 | layout: gpu.bindGroupLayouts.clusterLights, 61 | entries: [{ 62 | binding: 0, 63 | resource: { buffer: this.cameraBuffer, }, 64 | }, { 65 | binding: 1, 66 | resource: { buffer: this.clusterBoundsBuffer }, 67 | }, { 68 | binding: 2, 69 | resource: { buffer: this.clusterLightsBuffer }, 70 | }, { 71 | binding: 3, 72 | resource: { buffer: lightBuffer.gpuBuffer, }, 73 | }], 74 | }); 75 | 76 | this.bindGroup = gpu.device.createBindGroup({ 77 | layout: gpu.bindGroupLayouts.frame, 78 | entries: [{ 79 | binding: 0, 80 | resource: { buffer: this.cameraBuffer, }, 81 | }, { 82 | binding: 1, 83 | resource: { buffer: lightBuffer.gpuBuffer, }, 84 | }, { 85 | binding: 2, 86 | resource: { buffer: this.clusterLightsBuffer, }, 87 | }, { 88 | binding: 3, 89 | resource: gpu.defaultSampler, 90 | }, { 91 | binding: 4, 92 | resource: gpu.shadowDepthTextureView, 93 | }, { 94 | binding: 5, 95 | resource: gpu.shadowDepthSampler, 96 | }, { 97 | binding: 6, 98 | resource: { buffer: gpu.lightShadowTableBuffer, }, 99 | }, { 100 | binding: 7, 101 | resource: { buffer: gpu.shadowPropertiesBuffer, }, 102 | }], 103 | }); 104 | 105 | this.bindGroupUnfilteredShadow = gpu.device.createBindGroup({ 106 | layout: gpu.bindGroupLayouts.frame, 107 | entries: [{ 108 | binding: 0, 109 | resource: { buffer: this.cameraBuffer, }, 110 | }, { 111 | binding: 1, 112 | resource: { buffer: lightBuffer.gpuBuffer, }, 113 | }, { 114 | binding: 2, 115 | resource: { buffer: this.clusterLightsBuffer, }, 116 | }, { 117 | binding: 3, 118 | resource: gpu.defaultSampler, 119 | }, { 120 | binding: 4, 121 | resource: gpu.shadowDepthTextureView, 122 | }, { 123 | binding: 5, 124 | resource: gpu.shadowUnfilteredDepthSampler, 125 | }, { 126 | binding: 6, 127 | resource: { buffer: gpu.lightShadowTableBuffer, }, 128 | }, { 129 | binding: 7, 130 | resource: { buffer: gpu.shadowPropertiesBuffer, }, 131 | }], 132 | }); 133 | } 134 | } 135 | 136 | export class WebGPUCameraSystem extends WebGPUSystem { 137 | stage = Stage.PreRender; 138 | execute(delta, time, gpu) { 139 | // If a Camera does not have an associated WebGPUCamera add one. 140 | this.query(Camera).not(WebGPUCamera).forEach((entity) => { 141 | const lights = this.singleton.get(LightBuffer); 142 | entity.add(new WebGPUCamera(gpu, lights.buffer)); 143 | }); 144 | 145 | // If a WebGPUCamera has had it's Camera removed, also remove the WebGPUCamera. 146 | this.query(WebGPUCamera).not(Camera).forEach((entity) => { 147 | entity.remove(WebGPUCamera); 148 | }); 149 | 150 | // Update the values for the WebGPUCamera every frame and write the values to the buffer. 151 | this.query(Camera, WebGPUCamera).forEach((entity, camera, gpuCamera) => { 152 | // Update the values for the WebGPU camera every frame and write the values to the buffer. 153 | const transform = entity.get(Transform); 154 | if (transform) { 155 | mat4.invert(gpuCamera.view, transform.worldMatrix); 156 | transform.getWorldPosition(gpuCamera.position); 157 | } else { 158 | // If the camera doesn't have a transform position it at the origin. 159 | mat4.identity(gpuCamera.view); 160 | vec3.set(gpuCamera.position, 0, 0, 0); 161 | } 162 | 163 | const aspect = gpu.renderTargets.size.width / gpu.renderTargets.size.height; 164 | mat4.perspectiveZO(gpuCamera.projection, camera.fieldOfView, aspect, 165 | camera.zNear, camera.zFar); 166 | mat4.invert(gpuCamera.inverseProjection, gpuCamera.projection); 167 | 168 | gpuCamera.time[0] = time; 169 | gpuCamera.outputSize[0] = gpu.renderTargets.size.width; 170 | gpuCamera.outputSize[1] = gpu.renderTargets.size.height; 171 | gpuCamera.zRange[0] = camera.zNear; 172 | gpuCamera.zRange[1] = camera.zFar; 173 | 174 | gpu.device.queue.writeBuffer(gpuCamera.cameraBuffer, 0, gpuCamera.arrayBuffer); 175 | }); 176 | } 177 | } -------------------------------------------------------------------------------- /js/engine/core/transform.js: -------------------------------------------------------------------------------- 1 | import { mat4, vec3, quat } from 'gl-matrix'; 2 | 3 | const DEFAULT_POSITION = vec3.create(); 4 | const DEFAULT_ORIENTATION = quat.create(); 5 | const DEFAULT_SCALE = vec3.fromValues(1, 1, 1); 6 | 7 | export class Transform { 8 | #storage; 9 | #position; 10 | #orientation; 11 | #scale; 12 | #localMatrix; 13 | #worldMatrix; 14 | 15 | #localMatrixDirty = true; 16 | #worldMatrixDirty = true; 17 | #parent = null; 18 | #children; 19 | 20 | constructor(options = {}) { 21 | let buffer; 22 | let offset = 0; 23 | // Allocate storage for all the transform elements 24 | if (options.externalStorage) { 25 | buffer = options.externalStorage.buffer; 26 | offset = options.externalStorage.offset; 27 | } else { 28 | buffer = new Float32Array(42).buffer; 29 | } 30 | 31 | this.#position = new Float32Array(buffer, offset, 3); 32 | this.#orientation = new Float32Array(buffer, offset + 3 * Float32Array.BYTES_PER_ELEMENT, 4); 33 | this.#scale = new Float32Array(buffer, offset + 7 * Float32Array.BYTES_PER_ELEMENT, 3); 34 | this.#localMatrix = new Float32Array(buffer, offset + 10 * Float32Array.BYTES_PER_ELEMENT, 16); 35 | this.#worldMatrix = new Float32Array(buffer, offset + 26 * Float32Array.BYTES_PER_ELEMENT, 16); 36 | 37 | if (options.transform) { 38 | const storage = new Float32Array(this.#position.buffer, this.#position.byteOffset, 42); 39 | storage.set(new Float32Array(options.transform.#position.buffer, options.transform.#position.byteOffset, 42)); 40 | this.#localMatrixDirty = options.transform.#localMatrixDirty; 41 | } else if (options.matrix) { 42 | this.setLocalMatrix(options.matrix); 43 | } else { 44 | if (options.position) { 45 | this.#position.set(options.position); 46 | } 47 | this.#orientation.set(options.orientation ? options.orientation : DEFAULT_ORIENTATION); 48 | this.#scale.set(options.scale ? options.scale : DEFAULT_SCALE); 49 | } 50 | 51 | if (options.parent) { 52 | options.parent.addChild(this); 53 | } 54 | } 55 | 56 | get position() { 57 | this.#makeDirty(); 58 | return this.#position; 59 | } 60 | set position(value) { 61 | this.#makeDirty(); 62 | this.#position.set(value); 63 | } 64 | 65 | getWorldPosition(out, position) { 66 | if (position) { 67 | if (position != out) { 68 | vec3.copy(out, position); 69 | } 70 | } else { 71 | vec3.set(out, 0, 0, 0); 72 | } 73 | vec3.transformMat4(out, out, this.worldMatrix); 74 | } 75 | 76 | get orientation() { 77 | this.#makeDirty(); 78 | return this.#orientation; 79 | } 80 | set orientation(value) { 81 | this.#makeDirty(); 82 | this.#orientation.set(value); 83 | } 84 | 85 | get scale() { 86 | this.#makeDirty(); 87 | return this.#scale; 88 | } 89 | set scale(value) { 90 | this.#makeDirty(); 91 | this.#scale.set(value); 92 | } 93 | 94 | getLocalMatrix(out) { 95 | return mat4.copy(out, this.#resolveLocalMatrix()); 96 | } 97 | 98 | setLocalMatrix(value) { 99 | mat4.copy(this.#localMatrix, value); 100 | mat4.getRotation(this.#orientation, this.#localMatrix); 101 | mat4.getTranslation(this.#position, this.#localMatrix); 102 | mat4.getScaling(this.#scale, this.#localMatrix); 103 | this.#makeDirty(false); 104 | } 105 | 106 | get worldMatrix() { 107 | return this.#resolveWorldMatrix(); 108 | } 109 | 110 | addChild(transform) { 111 | if (transform.parent && transform.parent != this) { 112 | transform.parent.removeChild(transform); 113 | } 114 | 115 | if (!this.#children) { this.#children = new Set(); } 116 | this.#children.add(transform); 117 | transform.#parent = this; 118 | transform.#makeDirty(false); 119 | } 120 | 121 | removeChild(transform) { 122 | const removed = this.#children?.delete(transform); 123 | if (removed) { 124 | transform.#parent = null; 125 | transform.#makeDirty(false); 126 | } 127 | } 128 | 129 | get children() { 130 | return this.#children?.values() || []; 131 | } 132 | 133 | get parent() { 134 | return this.#parent; 135 | } 136 | 137 | #makeDirty(markLocalDirty = true) { 138 | if (markLocalDirty) { this.#localMatrixDirty = true; } 139 | if (this.#worldMatrixDirty) { return; } 140 | this.#worldMatrixDirty = true; 141 | 142 | if (this.#children) { 143 | for (const child of this.#children) { 144 | child.#makeDirty(false); 145 | } 146 | } 147 | } 148 | 149 | #resolveLocalMatrix() { 150 | const wasDirty = this.#localMatrixDirty; 151 | if (this.#localMatrixDirty) { 152 | mat4.fromRotationTranslationScale(this.#localMatrix, 153 | this.#orientation, 154 | this.#position, 155 | this.#scale); 156 | this.#localMatrixDirty = false; 157 | } 158 | return this.#localMatrix; 159 | } 160 | 161 | #resolveWorldMatrix() { 162 | if (this.#worldMatrixDirty) { 163 | if (!this.parent) { 164 | this.#worldMatrix.set(this.#resolveLocalMatrix()); 165 | } else { 166 | mat4.mul(this.#worldMatrix, this.parent.worldMatrix, this.#resolveLocalMatrix()); 167 | } 168 | this.#worldMatrixDirty = false; 169 | } 170 | 171 | return this.#worldMatrix; 172 | } 173 | } 174 | 175 | export class TransformPool { 176 | #buffer; 177 | #transforms = []; 178 | 179 | constructor(size) { 180 | this.#buffer = new Float32Array(42 * size).buffer; 181 | 182 | for (let i = 0; i < size; ++i) { 183 | this.#transforms[i] = new Transform({ 184 | externalStorage: { 185 | buffer: this.#buffer, 186 | offset: (i * 42 * Float32Array.BYTES_PER_ELEMENT), 187 | } 188 | }); 189 | } 190 | } 191 | 192 | get size() { 193 | return this.#transforms.length; 194 | } 195 | 196 | getTransform(index) { 197 | return this.#transforms[index]; 198 | } 199 | 200 | clone() { 201 | const out = new TransformPool(this.size); 202 | // Copy the entire buffer from this pool to the new one. 203 | new Float32Array(out.#buffer).set(new Float32Array(this.#buffer)); 204 | return out; 205 | } 206 | } 207 | 208 | // Creates a lightweight transform that always reports the same world matrix 209 | // Mostly used for debug utilities that need to apply a static transform to 210 | // a mesh. 211 | export class StaticTransform { 212 | worldMatrix = new Float32Array(16); 213 | 214 | constructor(transform = null, matrix = null) { 215 | if (transform instanceof Float32Array) { 216 | matrix = transform; 217 | transform = null; 218 | } 219 | 220 | if (transform) { 221 | mat4.fromRotationTranslationScale(this.worldMatrix, 222 | transform.orientation || DEFAULT_ORIENTATION, 223 | transform.position || DEFAULT_POSITION, 224 | transform.scale || DEFAULT_SCALE); 225 | if (matrix) { 226 | mat4.mul(this.worldMatrix, matrix, this.worldMatrix); 227 | } 228 | } else if (matrix) { 229 | mat4.copy(this.worldMatrix, matrix); 230 | } else { 231 | mat4.identity(this.worldMatrix); 232 | } 233 | } 234 | } 235 | -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/common.js: -------------------------------------------------------------------------------- 1 | import { wgsl } from 'wgsl-preprocessor'; 2 | import { AttributeLocation } from '../../core/mesh.js'; 3 | 4 | export const CAMERA_BUFFER_SIZE = 56 * Float32Array.BYTES_PER_ELEMENT; 5 | export function CameraStruct(group = 0, binding = 0) { return ` 6 | struct Camera { 7 | projection : mat4x4, 8 | inverseProjection : mat4x4, 9 | view : mat4x4, 10 | position : vec3, 11 | time : f32, 12 | outputSize : vec2, 13 | zNear : f32, 14 | zFar : f32, 15 | }; 16 | @group(${group}) @binding(${binding}) var camera : Camera; 17 | `; 18 | } 19 | 20 | export const LIGHT_BUFFER_SIZE = 8 * Float32Array.BYTES_PER_ELEMENT; 21 | export function LightStruct(group = 0, binding = 1) { return ` 22 | struct Light { 23 | position : vec3, 24 | range : f32, 25 | color : vec3, 26 | intensity : f32, 27 | }; 28 | 29 | struct GlobalLights { 30 | ambient : vec3, 31 | dirColor : vec3, 32 | dirIntensity : f32, 33 | dirDirection : vec3, 34 | lightCount : u32, 35 | lights : array, 36 | }; 37 | @group(${group}) @binding(${binding}) var globalLights : GlobalLights; 38 | `; 39 | } 40 | 41 | export function SkinStructs(group = 1) { return ` 42 | struct Joints { 43 | matrices : array> 44 | }; 45 | @group(${group}) @binding(0) var joint : Joints; 46 | @group(${group}) @binding(1) var inverseBind : Joints; 47 | `}; 48 | 49 | export const GetSkinMatrix = ` 50 | fn getSkinMatrix(input : VertexInput) -> mat4x4 { 51 | let joint0 = joint.matrices[input.joints.x] * inverseBind.matrices[input.joints.x]; 52 | let joint1 = joint.matrices[input.joints.y] * inverseBind.matrices[input.joints.y]; 53 | let joint2 = joint.matrices[input.joints.z] * inverseBind.matrices[input.joints.z]; 54 | let joint3 = joint.matrices[input.joints.w] * inverseBind.matrices[input.joints.w]; 55 | 56 | let skinMatrix = joint0 * input.weights.x + 57 | joint1 * input.weights.y + 58 | joint2 * input.weights.z + 59 | joint3 * input.weights.w; 60 | return skinMatrix; 61 | } 62 | `; 63 | 64 | export const INSTANCE_SIZE_F32 = 20; 65 | export const INSTANCE_SIZE_BYTES = INSTANCE_SIZE_F32 * Float32Array.BYTES_PER_ELEMENT; 66 | 67 | export function DefaultVertexInput(layout) { 68 | let inputs = layout.locationsUsed.map((location) => { 69 | switch(location) { 70 | case AttributeLocation.position: return `@location(${AttributeLocation.position}) position : vec4,`; 71 | case AttributeLocation.normal: return `@location(${AttributeLocation.normal}) normal : vec3,`; 72 | case AttributeLocation.tangent: return `@location(${AttributeLocation.tangent}) tangent : vec4,`; 73 | case AttributeLocation.texcoord: return `@location(${AttributeLocation.texcoord}) texcoord : vec2,`; 74 | case AttributeLocation.texcoord2: return `@location(${AttributeLocation.texcoord2}) texcoord2 : vec2,`; 75 | case AttributeLocation.color: return `@location(${AttributeLocation.color}) color : vec4,`; 76 | case AttributeLocation.joints: return `@location(${AttributeLocation.joints}) joints : vec4,`; 77 | case AttributeLocation.weights: return `@location(${AttributeLocation.weights}) weights : vec4,`; 78 | } 79 | }); 80 | 81 | inputs.push(`@location(${AttributeLocation.maxAttributeLocation}) instanceMat0 : vec3,`); 82 | inputs.push(`@location(${AttributeLocation.maxAttributeLocation+1}) instanceMat1 : vec3,`); 83 | inputs.push(`@location(${AttributeLocation.maxAttributeLocation+2}) instanceMat2 : vec3,`); 84 | inputs.push(`@location(${AttributeLocation.maxAttributeLocation+3}) instanceMat3 : vec3,`); 85 | inputs.push(`@location(${AttributeLocation.maxAttributeLocation+4}) instanceColor : vec4,`); 86 | 87 | return `struct VertexInput { 88 | ${inputs.join('\n')} 89 | };`; 90 | }; 91 | 92 | export function DefaultVertexOutput(layout) { return wgsl` 93 | struct VertexOutput { 94 | @builtin(position) position : vec4, 95 | @location(0) worldPos : vec3, 96 | @location(1) view : vec3, // Vector from vertex to camera. 97 | @location(2) texcoord : vec2, 98 | @location(3) texcoord2 : vec2, 99 | @location(4) color : vec4, 100 | @location(5) instanceColor : vec4, 101 | @location(6) normal : vec3, 102 | 103 | #if ${layout.locationsUsed.includes(AttributeLocation.tangent)} 104 | @location(7) tangent : vec3, 105 | @location(8) bitangent : vec3, 106 | #endif 107 | }; 108 | `; 109 | } 110 | 111 | export const GetInstanceMatrix = ` 112 | fn getInstanceMatrix(input : VertexInput) -> mat4x4 { 113 | return mat4x4( 114 | vec4(input.instanceMat0, 0.0), 115 | vec4(input.instanceMat1, 0.0), 116 | vec4(input.instanceMat2, 0.0), 117 | vec4(input.instanceMat3, 1.0) 118 | ); 119 | } 120 | `; 121 | 122 | const USE_APPROXIMATE_SRGB = true; 123 | const GAMMA = 2.2; 124 | 125 | export const ColorConversions = wgsl` 126 | fn linearTosRGB(linear : vec3) -> vec3 { 127 | #if ${USE_APPROXIMATE_SRGB} 128 | let INV_GAMMA = 1.0 / ${GAMMA}; 129 | return pow(linear, vec3(INV_GAMMA)); 130 | #else 131 | if (all(linear <= vec3(0.0031308))) { 132 | return linear * 12.92; 133 | } 134 | return (pow(abs(linear), vec3(1.0/2.4)) * 1.055) - vec3(0.055); 135 | #endif 136 | } 137 | `; 138 | 139 | export const FullscreenTexturedQuadVertexSource = ` 140 | var pos : array, 3> = array, 3>( 141 | vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0)); 142 | 143 | struct VertexInput { 144 | @builtin(vertex_index) vertexIndex : u32 145 | }; 146 | 147 | struct VertexOutput { 148 | @builtin(position) position : vec4, 149 | @location(0) texCoord : vec2, 150 | }; 151 | 152 | @vertex 153 | fn vertexMain(input : VertexInput) -> VertexOutput { 154 | var output : VertexOutput; 155 | 156 | output.position = vec4(pos[input.vertexIndex], 1.0, 1.0); 157 | output.texCoord = pos[input.vertexIndex] * 0.5 + 0.5; 158 | output.texCoord.y = output.texCoord.y * -1.0; 159 | 160 | return output; 161 | } 162 | `; 163 | 164 | export const TextureDebugFragmentSource = ` 165 | struct FragmentInput { 166 | @location(0) texCoord : vec2 167 | }; 168 | 169 | @group(0) @binding(0) var debugTexture: texture_2d; 170 | @group(0) @binding(1) var debugSampler: sampler; 171 | 172 | @fragment 173 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 174 | let color = textureSample(debugTexture, debugSampler, input.texCoord); 175 | return color; 176 | } 177 | `; 178 | 179 | export const ShadowDebugFragmentSource = ` 180 | struct FragmentInput { 181 | @location(0) texCoord : vec2 182 | }; 183 | 184 | @group(0) @binding(0) var shadowTexture: texture_depth_2d; 185 | @group(0) @binding(1) var shadowSampler: sampler; 186 | 187 | @fragment 188 | fn fragmentMain(input : FragmentInput) -> @location(0) vec4 { 189 | let shadowDepth = textureSample(shadowTexture, shadowSampler, input.texCoord); 190 | return vec4(shadowDepth, shadowDepth, shadowDepth, 1.0); 191 | } 192 | `; -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/shadow.js: -------------------------------------------------------------------------------- 1 | import { wgsl } from 'wgsl-preprocessor'; 2 | import { DefaultVertexOutput } from './common.js'; 3 | 4 | export function ShadowFunctions(group = 0, flags) { return wgsl` 5 | @group(0) @binding(3) var defaultSampler: sampler; 6 | @group(${group}) @binding(4) var shadowTexture : texture_depth_2d; 7 | @group(${group}) @binding(5) var shadowSampler : sampler_comparison; 8 | 9 | struct LightShadowTable { 10 | light : array>, 11 | }; 12 | @group(${group}) @binding(6) var lightShadowTable : LightShadowTable; 13 | 14 | #if ${flags.shadowSamples == 16} 15 | const shadowSampleWidth = 3.0; 16 | var shadowSampleOffsets : array, 16> = array, 16>( 17 | vec2(-1.5, -1.5), vec2(-1.5, -0.5), vec2(-1.5, 0.5), vec2(-1.5, 1.5), 18 | vec2(-0.5, -1.5), vec2(-0.5, -0.5), vec2(-0.5, 0.5), vec2(-0.5, 1.5), 19 | vec2(0.5, -1.5), vec2(0.5, -0.5), vec2(0.5, 0.5), vec2(0.5, 1.5), 20 | vec2(1.5, -1.5), vec2(1.5, -0.5), vec2(1.5, 0.5), vec2(1.5, 1.5) 21 | ); 22 | #elif ${flags.shadowSamples == 9} 23 | const shadowSampleWidth = 3.0; 24 | var shadowSampleOffsets : array, 9> = array, 9>( 25 | vec2(-1.0, -1.0), vec2(0.0, -1.0), vec2(1.0, -1.0), 26 | vec2(-1.0, 0.0), vec2(0.0, 0.0), vec2(1.0, 0.0), 27 | vec2(-1.0, 1.0), vec2(0.0, 1.0), vec2(1.0, 1.0), 28 | ); 29 | #elif ${flags.shadowSamples == 4} 30 | const shadowSampleWidth = 2.0; 31 | var shadowSampleOffsets : array, 4> = array, 4>( 32 | vec2(-0.5, -0.5), vec2(-0.5, 0.5), vec2(0.5, -0.5), vec2(0.5, 0.5), 33 | ); 34 | #elif ${flags.shadowSamples == 2} 35 | const shadowSampleWidth = 1.0; 36 | var shadowSampleOffsets : array, 2> = array, 2>( 37 | vec2(-0.5, -0.5), vec2(0.5, 0.5) 38 | ); 39 | #elif ${flags.shadowSamples == 1} 40 | const shadowSampleWidth = 0.0; 41 | var shadowSampleOffsets : array, 1> = array, 1>( 42 | vec2(0.0, 0.0) 43 | ); 44 | #else 45 | ERROR: Bad flag. shadowSampleCount must be 16, 4, 2, or 1 46 | #endif 47 | 48 | const shadowSampleCount = ${flags.shadowSamples}u; 49 | 50 | struct ShadowProperties { 51 | viewport: vec4, 52 | viewProj: mat4x4, 53 | }; 54 | struct LightShadows { 55 | properties : array 56 | }; 57 | @group(${group}) @binding(7) var shadow : LightShadows; 58 | 59 | struct CascadeInfo { 60 | index: i32, 61 | viewport: vec4, 62 | shadowPos: vec3, 63 | }; 64 | 65 | fn selectCascade(lightIndex : u32, worldPos : vec3) -> CascadeInfo { 66 | var cascade : CascadeInfo; 67 | cascade.index = -1; 68 | 69 | let shadowLookup = lightShadowTable.light[0u]; 70 | let shadowIndex = shadowLookup.x; 71 | if (shadowIndex == -1) { 72 | return cascade; // Not a shadow casting light 73 | } 74 | 75 | let texelSize = 1.0 / vec2(textureDimensions(shadowTexture, 0)); 76 | 77 | let cascadeCount = max(1, shadowLookup.y); 78 | 79 | for (var i = 0; i < cascadeCount; i = i + 1) { 80 | cascade.viewport = shadow.properties[shadowIndex+i].viewport; 81 | let lightPos = shadow.properties[shadowIndex+i].viewProj * vec4(worldPos, 1.0); 82 | 83 | // Put into texture coordinates 84 | cascade.shadowPos = vec3( 85 | ((lightPos.xy / lightPos.w)) * vec2(0.5, -0.5) + vec2(0.5, 0.5), 86 | lightPos.z / lightPos.w); 87 | 88 | // If the shadow falls outside the range covered by this cascade, skip it and try the next one up. 89 | if (all(cascade.shadowPos > vec3(texelSize*shadowSampleWidth,0.0)) && 90 | all(cascade.shadowPos < vec3(vec2(1.0)-(texelSize*shadowSampleWidth),1.0))) { 91 | cascade.index = i; 92 | return cascade; 93 | } 94 | } 95 | 96 | // None of the cascades fit. 97 | return cascade; 98 | } 99 | 100 | fn dirLightVisibility(worldPos : vec3) -> f32 { 101 | let cascade = selectCascade(0u, worldPos); 102 | 103 | let viewportPos = vec2(cascade.viewport.xy + cascade.shadowPos.xy * cascade.viewport.zw); 104 | 105 | let texelSize = 1.0 / vec2(textureDimensions(shadowTexture, 0)); 106 | let clampRect = vec4(cascade.viewport.xy - texelSize, (cascade.viewport.xy+cascade.viewport.zw) + texelSize); 107 | 108 | // Percentage Closer Filtering 109 | var visibility = 0.0; 110 | for (var i = 0u; i < shadowSampleCount; i = i + 1u) { 111 | visibility = visibility + textureSampleCompareLevel( 112 | shadowTexture, shadowSampler, 113 | clamp(viewportPos + shadowSampleOffsets[i] * texelSize, clampRect.xy, clampRect.zw), 114 | cascade.shadowPos.z); 115 | } 116 | 117 | return visibility / f32(shadowSampleCount); 118 | } 119 | 120 | // First two components of the return value are the texCoord, the third component is the face index. 121 | fn getCubeFace(v : vec3) -> i32{ 122 | let vAbs = abs(v); 123 | 124 | if (vAbs.z >= vAbs.x && vAbs.z >= vAbs.y) { 125 | if (v.z < 0.0) { 126 | return 5; 127 | } 128 | return 4; 129 | } 130 | 131 | if (vAbs.y >= vAbs.x) { 132 | if (v.y < 0.0) { 133 | return 3; 134 | } 135 | return 2; 136 | } 137 | 138 | if (v.x < 0.0) { 139 | return 1; 140 | } 141 | return 0; 142 | } 143 | 144 | fn pointLightVisibility(lightIndex : u32, worldPos : vec3, pointToLight : vec3) -> f32 { 145 | var shadowIndex = lightShadowTable.light[lightIndex+1u].x; 146 | if (shadowIndex == -1) { 147 | return 1.0; // Not a shadow casting light 148 | } 149 | 150 | // Determine which face of the cubemap we're sampling from 151 | // TODO: Allow for PBR sampling across seams 152 | shadowIndex = shadowIndex + getCubeFace(pointToLight * -1.0); 153 | 154 | let viewport = shadow.properties[shadowIndex].viewport; 155 | let lightPos = shadow.properties[shadowIndex].viewProj * vec4(worldPos, 1.0); 156 | 157 | // Put into texture coordinates 158 | let shadowPos = vec3( 159 | ((lightPos.xy / lightPos.w)) * vec2(0.5, -0.5) + vec2(0.5, 0.5), 160 | lightPos.z / lightPos.w); 161 | 162 | let viewportPos = vec2(viewport.xy + shadowPos.xy * viewport.zw); 163 | 164 | let texelSize = 1.0 / vec2(textureDimensions(shadowTexture, 0)); 165 | let clampRect = vec4(viewport.xy, (viewport.xy+viewport.zw)); 166 | 167 | var uv = viewportPos + 0.5; 168 | let st = fract(uv); 169 | let base_uv = floor(uv) - 0.5; 170 | 171 | // Percentage Closer Filtering 172 | var visibility = 0.0; 173 | for (var i = 0u; i < shadowSampleCount; i = i + 1u) { 174 | visibility = visibility + textureSampleCompareLevel( 175 | shadowTexture, shadowSampler, 176 | clamp(base_uv + shadowSampleOffsets[i] * texelSize, clampRect.xy, clampRect.zw), 177 | shadowPos.z); 178 | } 179 | return visibility / f32(shadowSampleCount); 180 | } 181 | `; 182 | } 183 | 184 | export function ShadowFragmentSource(layout) { return ` 185 | ${DefaultVertexOutput(layout)} 186 | 187 | @fragment 188 | fn fragmentMain(input : VertexOutput) { 189 | } 190 | `; 191 | } 192 | -------------------------------------------------------------------------------- /js/engine/webgpu/materials/webgpu-material-factory.js: -------------------------------------------------------------------------------- 1 | import { AttributeLocation } from '../../core/mesh.js'; 2 | import { DefaultVertexSource } from '../wgsl/default-vertex.js'; 3 | 4 | export const RenderOrder = { 5 | First: 0, 6 | Default: 1, 7 | Skybox: 2, 8 | Transparent: 3, 9 | Last: 4 10 | }; 11 | 12 | let nextPipelineId = 1; 13 | 14 | export class WebGPUMaterialPipeline { 15 | constructor(options) { 16 | this.pipelineId = nextPipelineId++; 17 | this.pipeline = options?.pipeline ?? null; 18 | this.pipelineLayout = options?.pipelineLayout ?? null; 19 | this.vertex = options?.vertex ?? null; 20 | this.renderOrder = options?.renderOrder ?? RenderOrder.Default 21 | this.instanceSlot = options?.instanceSlot ?? -1; 22 | this.layout = options?.layout ?? null; 23 | this.skinned = options?.skinned ?? false; 24 | } 25 | } 26 | 27 | export class WebGPUMaterialBindGroups { 28 | firstBindGroupIndex = 1; 29 | castsShadow = true; 30 | bindGroups = []; 31 | constructor(...bg) { 32 | this.bindGroups.push(...bg); 33 | } 34 | } 35 | 36 | export const INSTANCE_BUFFER_LAYOUT = { 37 | arrayStride: 80, 38 | stepMode: 'instance', 39 | attributes: [ 40 | // Transform matrix (4x4) 41 | { 42 | format: 'float32x4', 43 | offset: 0, 44 | shaderLocation: AttributeLocation.maxAttributeLocation, 45 | }, { 46 | format: 'float32x4', 47 | offset: 16, 48 | shaderLocation: AttributeLocation.maxAttributeLocation+1, 49 | }, { 50 | format: 'float32x4', 51 | offset: 32, 52 | shaderLocation: AttributeLocation.maxAttributeLocation+2, 53 | }, { 54 | format: 'float32x4', 55 | offset: 48, 56 | shaderLocation: AttributeLocation.maxAttributeLocation+3, 57 | }, 58 | 59 | // Instance Color 60 | { 61 | format: 'float32x4', 62 | offset: 64, 63 | shaderLocation: AttributeLocation.maxAttributeLocation+4, 64 | }] 65 | }; 66 | 67 | const materialFactories = new Map(); 68 | 69 | export class WebGPUMaterialFactory { 70 | renderOrder = RenderOrder.Default; 71 | writesEmissive = false; 72 | 73 | #pipelineCache = new Map(); 74 | #materialCache = new Map(); 75 | 76 | static register(material, factory) { 77 | materialFactories.set(material, factory); 78 | } 79 | 80 | static getFactories() { 81 | return materialFactories; 82 | } 83 | 84 | init(gpu) {} 85 | 86 | getPipeline(gpu, geometryLayout, material, skinned) { 87 | const key = this.pipelineKey(geometryLayout, material, skinned); 88 | let gpuPipeline = this.#pipelineCache.get(key); 89 | if (!gpuPipeline) { 90 | const vertex = this.createVertexModule(gpu, geometryLayout, material, skinned); 91 | const fragment = this.createFragmentModule(gpu, geometryLayout, material); 92 | 93 | vertex.buffers = new Array(...geometryLayout.buffers); 94 | 95 | // Add a vertexSlot for the instance array 96 | vertex.buffers.push(INSTANCE_BUFFER_LAYOUT); 97 | 98 | const pipelineLayout = this.createPipelineLayout(gpu, skinned); 99 | const pipeline = this.createPipeline(gpu, geometryLayout, vertex, fragment, material, pipelineLayout); 100 | if (!pipeline) { return; } 101 | 102 | gpuPipeline = new WebGPUMaterialPipeline({ 103 | pipeline, 104 | pipelineLayout, 105 | vertex, 106 | renderOrder: material.transparent ? RenderOrder.Transparent : this.renderOrder, 107 | instanceSlot: geometryLayout.buffers.length, 108 | layout: geometryLayout, 109 | skinned 110 | }); 111 | this.#pipelineCache.set(key, gpuPipeline); 112 | } 113 | return gpuPipeline; 114 | } 115 | 116 | getBindGroup(gpu, material, skin) { 117 | const key = `${material.id};${skin?.id || -1}`; 118 | let bindGroup = this.#materialCache.get(key); 119 | if (!bindGroup) { 120 | const bindGroupList = [this.createBindGroup(gpu, material)]; 121 | if (skin) { 122 | bindGroupList.push(skin.bindGroup); 123 | } 124 | bindGroup = new WebGPUMaterialBindGroups(...bindGroupList); 125 | bindGroup.castsShadow = material.castsShadow; 126 | this.#materialCache.set(key, bindGroup); 127 | } 128 | return bindGroup; 129 | } 130 | 131 | pipelineKey(geometryLayout, material, skinned) { 132 | return `${geometryLayout.id};${material.transparent};${material.doubleSided};${material.depthWrite};${material.depthCompare};${skinned}`; 133 | } 134 | 135 | createPipelineLayout(gpu, skinned = false) { 136 | const bindGroupLayouts = [ 137 | gpu.bindGroupLayouts.frame, 138 | ]; 139 | 140 | if (this.bindGroupLayout) { 141 | bindGroupLayouts.push(this.bindGroupLayout); 142 | } 143 | 144 | if (skinned) { 145 | bindGroupLayouts.push(gpu.bindGroupLayouts.skin); 146 | } 147 | 148 | return gpu.device.createPipelineLayout({ bindGroupLayouts }); 149 | } 150 | 151 | // Creates a pipeline with defaults settings and the overridden shaders. 152 | // Can be customize if needed. 153 | createPipeline(gpu, layout, vertex, fragment, material, pipelineLayout) { 154 | let blend; 155 | let depthBias = 0; 156 | let alphaToCoverageEnabled = false; 157 | if (material?.transparent) { 158 | const alpha = { 159 | srcFactor: 'one', 160 | dstFactor: 'one', 161 | }; 162 | 163 | if (material?.additiveBlend) { 164 | blend = { 165 | color: { 166 | srcFactor: 'src-alpha', 167 | dstFactor: 'one', 168 | }, 169 | alpha 170 | }; 171 | } else { 172 | // Just to help transparent decals on top of opaque surfaces avoid z-fighting. 173 | depthBias = -2; 174 | 175 | if (gpu.renderTargets.sampleCount) { 176 | alphaToCoverageEnabled = true; 177 | } 178 | 179 | blend = { 180 | color: { 181 | srcFactor: 'src-alpha', 182 | dstFactor: 'one-minus-src-alpha', 183 | }, 184 | alpha 185 | }; 186 | } 187 | } 188 | 189 | fragment.targets = [{ 190 | format: gpu.renderTargets.format, 191 | blend, 192 | }]; 193 | 194 | if (gpu.flags.bloomEnabled) { 195 | fragment.targets.push({ 196 | format: gpu.renderTargets.format, 197 | blend, 198 | writeMask: this.writesEmissive ? GPUColorWrite.ALL : 0, 199 | }); 200 | } 201 | 202 | return gpu.device.createRenderPipeline({ 203 | label: `${material.constructor.name} Pipeline (LayoutID: ${layout.id})`, 204 | layout: pipelineLayout, 205 | vertex, 206 | fragment, 207 | primitive: { 208 | topology: layout.primitive.topology, 209 | stripIndexFormat: layout.primitive.stripIndexFormat, 210 | cullMode: material.doubleSided ? 'none' : 'back', 211 | }, 212 | depthStencil: { 213 | format: gpu.renderTargets.depthFormat, 214 | depthWriteEnabled: material.depthWrite, 215 | depthCompare: material.depthCompare, 216 | depthBias, 217 | }, 218 | multisample: { 219 | count: gpu.renderTargets.sampleCount, 220 | alphaToCoverageEnabled, 221 | }, 222 | }); 223 | } 224 | 225 | createVertexModule(gpu, geometryLayout, material, skinned) { 226 | return { 227 | module: gpu.device.createShaderModule({ code: DefaultVertexSource(geometryLayout, skinned) }), 228 | entryPoint: 'vertexMain', 229 | }; 230 | } 231 | 232 | createFragmentModule(gpu, geometryLayout, material) { 233 | throw new Error('Must override createFragmentModule() for each class that extends WebGPUMaterialShader.'); 234 | } 235 | 236 | createBindGroup(gpu, material) { 237 | return null; // Some materials may not require a given bind group. 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /js/engine/core/geometry-layout.js: -------------------------------------------------------------------------------- 1 | function MakeLookup(table) { 2 | const lookup = { MASK: 0 }; 3 | for (const key in table) { 4 | const value = table[key]; 5 | lookup[value] = key; 6 | lookup.MASK |= value; 7 | } 8 | return lookup; 9 | } 10 | 11 | const TopologyId = { 12 | 'point-list': 0x00, 13 | 'line-list': 0x01, 14 | 'line-strip': 0x02, 15 | 'triangle-strip': 0x03, 16 | 'triangle-list': 0x04, 17 | }; 18 | const TopologyLookup = MakeLookup(TopologyId); 19 | 20 | const StripIndexFormatId = { 21 | uint16: 0x00, 22 | uint32: 0x08, 23 | }; 24 | const StripIndexFormatLookup = MakeLookup(StripIndexFormatId); 25 | 26 | const FormatId = { 27 | uint8x2: 0x00, 28 | uint8x4: 0x01, 29 | sint8x2: 0x02, 30 | sint8x4: 0x03, 31 | unorm8x2: 0x04, 32 | unorm8x4: 0x05, 33 | snorm8x2: 0x06, 34 | snorm8x4: 0x07, 35 | uint16x2: 0x08, 36 | uint16x4: 0x09, 37 | sint16x2: 0x0A, 38 | sint16x4: 0x0B, 39 | unorm16x2: 0x0C, 40 | unorm16x4: 0x0D, 41 | snorm16x2: 0x0E, 42 | snorm16x4: 0x0F, 43 | float16x2: 0x10, 44 | float16x4: 0x12, 45 | float32: 0x13, 46 | float32x2: 0x14, 47 | float32x3: 0x15, 48 | float32x4: 0x16, 49 | uint32: 0x17, 50 | uint32x2: 0x18, 51 | uint32x3: 0x19, 52 | uint32x4: 0x1A, 53 | sint32: 0x1B, 54 | sint32x2: 0x1C, 55 | sint32x3: 0x1D, 56 | sint32x4: 0x1E, 57 | }; 58 | const FormatLookup = MakeLookup(FormatId); 59 | 60 | const StepModeId = { 61 | vertex: 0x0000, 62 | instance: 0x8000, 63 | }; 64 | const StepModeLookup = MakeLookup(StepModeId); 65 | 66 | const Uint8ToHex = new Array(256); 67 | for (let i = 0; i <= 0xFF; ++i) { 68 | Uint8ToHex[i] = i.toString(16).padStart(2, '0'); 69 | } 70 | 71 | const HexToUint8 = new Array(256); 72 | for (let i = 0; i <= 0xFF; ++i) { 73 | HexToUint8[i.toString(16).padStart(2, '0')] = i; 74 | } 75 | 76 | class GeometryLayout { 77 | id = 0; 78 | #serializedBuffer; 79 | #serializedString; 80 | #locationsUsed; 81 | 82 | constructor(buffers, primitive) { 83 | this.buffers = buffers; 84 | this.primitive = primitive; 85 | } 86 | 87 | get locationsUsed() { 88 | if (!this.#locationsUsed) { 89 | this.#locationsUsed = []; 90 | for (const buffer of this.buffers) { 91 | for (const attrib of buffer.attributes) { 92 | this.#locationsUsed.push(attrib.shaderLocation); 93 | } 94 | } 95 | } 96 | 97 | return this.#locationsUsed; 98 | } 99 | 100 | serializeToBuffer() { 101 | if (this.#serializedBuffer) { 102 | return this.#serializedBuffer; 103 | } 104 | 105 | let attribCount = 0; 106 | for (const buffer of this.buffers) { 107 | attribCount += buffer.attributes.length; 108 | } 109 | 110 | // Each buffer takes 2 bytes to encode and each attribute takes 3 bytes. 111 | // The primitive topology takes 1 byte. 112 | const byteLength = 1 + (this.buffers.length * 2) + attribCount * 3; 113 | const outBuffer = new ArrayBuffer(byteLength); 114 | const dataView = new DataView(outBuffer); 115 | 116 | let topologyData8 = TopologyId[this.primitive.topology]; 117 | topologyData8 += StripIndexFormatId[this.primitive.stripIndexFormat || 'uint16']; 118 | dataView.setUint8(0, topologyData8); 119 | 120 | let offset = 1; 121 | for (const buffer of this.buffers) { 122 | let bufferData16 = buffer.attributes.length; // Lowest 4 bits 123 | bufferData16 += buffer.arrayStride << 4; // Middle 11 bits 124 | bufferData16 += StepModeId[buffer.stepMode || 'vertex']; // Highest bit 125 | dataView.setUint16(offset, bufferData16, true); 126 | offset += 2; 127 | 128 | for (const attrib of buffer.attributes) { 129 | let attribData16 = attrib.offset || 0; // Lowest 12 bits 130 | attribData16 += attrib.shaderLocation << 12; // Highest 4 bits 131 | dataView.setUint16(offset, attribData16, true); 132 | dataView.setUint8(offset+2, FormatId[attrib.format]); 133 | 134 | offset += 3; 135 | } 136 | } 137 | 138 | this.#serializedBuffer = outBuffer; 139 | return outBuffer; 140 | } 141 | 142 | serializeToString() { 143 | if (this.#serializedString) { return this.#serializedString; } 144 | 145 | const array = new Uint8Array(this.serializeToBuffer()); 146 | let outStr = ''; 147 | for (let i = 0; i < array.length; ++i) { 148 | outStr += Uint8ToHex[array[i]]; 149 | } 150 | 151 | this.#serializedString = outStr; 152 | return outStr; 153 | } 154 | 155 | static deserializeFromBuffer(inBuffer, bufferOffest, bufferLength) { 156 | const dataView = new DataView(inBuffer, bufferOffest, bufferLength); 157 | 158 | const topologyData8 = dataView.getUint8(0); 159 | const primitive = { 160 | topology: TopologyLookup[topologyData8 & TopologyLookup.MASK], 161 | }; 162 | 163 | switch(primitive.topology) { 164 | case 'triangle-strip': 165 | case 'line-strip': 166 | primitive.stripIndexFormat = StripIndexFormatLookup[topologyData8 & StripIndexFormatLookup.MASK]; 167 | } 168 | 169 | const buffers = []; 170 | let offset = 1; 171 | while (offset < dataView.byteLength) { 172 | const bufferData16 = dataView.getUint16(offset, true); 173 | const attribCount = bufferData16 & 0x0F; 174 | let buffer = { 175 | attributes: new Array(attribCount), 176 | arrayStride: (bufferData16 >> 4) & 0x08FF, 177 | stepMode: StepModeLookup[bufferData16 & StepModeLookup.MASK], 178 | }; 179 | buffers.push(buffer); 180 | offset += 2; 181 | 182 | for (let i = 0; i < attribCount; ++i) { 183 | const attribData16 = dataView.getUint16(offset, true); 184 | buffer.attributes[i] = { 185 | offset: attribData16 & 0x0FFF, 186 | shaderLocation: (attribData16 >> 12) & 0x0F, 187 | format: FormatLookup[dataView.getUint8(offset+2)] 188 | }; 189 | offset += 3; 190 | } 191 | } 192 | 193 | return new GeometryLayout(buffers, primitive); 194 | } 195 | 196 | static deserializeFromString(value) { 197 | const array = new Uint8Array(value.length / 2); 198 | for (let i = 0; i < array.length; ++i) { 199 | const strOffset = i*2; 200 | array[i] = HexToUint8[value.substring(strOffset, strOffset+2)]; 201 | } 202 | const layout = GeometryLayout.deserializeFromBuffer(array.buffer); 203 | layout.#serializedBuffer = array.buffer; 204 | layout.#serializedString = value; 205 | return layout; 206 | } 207 | }; 208 | 209 | export class GeometryLayoutCache { 210 | #nextId = 1; 211 | #keyMap = new Map(); // Map of the given key to an ID 212 | #cache = new Map(); // Map of ID to cached resource 213 | 214 | getLayout(id) { 215 | return this.#cache.get(id); 216 | } 217 | 218 | createLayout(attribBuffers, topology, indexFormat = 'uint32') { 219 | const buffers = []; 220 | for (const buffer of attribBuffers) { 221 | const attributes = []; 222 | for (const attrib of buffer.attributes) { 223 | // Exact offset will be handled when setting the buffer. 224 | const offset = attrib.offset - buffer.minOffset 225 | attributes.push({ 226 | shaderLocation: attrib.shaderLocation, 227 | format: attrib.format, 228 | offset, 229 | }); 230 | } 231 | 232 | buffers.push({ 233 | arrayStride: buffer.arrayStride, 234 | attributes 235 | }); 236 | } 237 | 238 | const primitive = { topology }; 239 | switch(topology) { 240 | case 'triangle-strip': 241 | case 'line-strip': 242 | primitive.stripIndexFormat = indexFormat; 243 | } 244 | 245 | const layout = new GeometryLayout(buffers, primitive); 246 | 247 | const key = layout.serializeToString(); 248 | const id = this.#keyMap.get(key); 249 | 250 | if (id !== undefined) { 251 | return this.#cache.get(id); 252 | } 253 | 254 | layout.id = this.#nextId++; 255 | this.#keyMap.set(key, layout.id); 256 | this.#cache.set(layout.id, layout); 257 | Object.freeze(layout); 258 | 259 | return layout; 260 | } 261 | } -------------------------------------------------------------------------------- /js/engine/webgpu/wgsl/clustered-light.js: -------------------------------------------------------------------------------- 1 | import { CameraStruct, LightStruct } from './common.js'; 2 | 3 | export const TILE_COUNT = [32, 18, 48]; 4 | export const TOTAL_TILES = TILE_COUNT[0] * TILE_COUNT[1] * TILE_COUNT[2]; 5 | 6 | const WORKGROUP_SIZE = [4, 2, 4]; 7 | export const DISPATCH_SIZE = [ 8 | TILE_COUNT[0] / WORKGROUP_SIZE[0], 9 | TILE_COUNT[1] / WORKGROUP_SIZE[1], 10 | TILE_COUNT[2] / WORKGROUP_SIZE[2]] 11 | 12 | // Cluster x, y, z size * 32 bytes per cluster. 13 | export const CLUSTER_BOUNDS_SIZE = TOTAL_TILES * 32; 14 | 15 | // Each cluster tracks up to MAX_LIGHTS_PER_CLUSTER light indices (ints) and one light count. 16 | // This limitation should be able to go away when we have atomic methods in WGSL. 17 | export const MAX_LIGHTS_PER_CLUSTER = 256; 18 | export const MAX_CLUSTERED_LIGHTS = TOTAL_TILES * 64; 19 | export const CLUSTER_LIGHTS_SIZE = 4 + (8 * TOTAL_TILES) + (4 * MAX_CLUSTERED_LIGHTS); 20 | 21 | export function ClusterStruct(group, binding, access = 'read') { return ` 22 | struct ClusterBounds { 23 | minAABB : vec3, 24 | maxAABB : vec3, 25 | }; 26 | struct Clusters { 27 | bounds : array 28 | }; 29 | @group(${group}) @binding(${binding}) var clusters : Clusters; 30 | `; 31 | } 32 | 33 | export function ClusterLightsStruct(group=0, binding=2, access='read') { return ` 34 | struct ClusterLights { 35 | offset : u32, 36 | count : u32, 37 | }; 38 | struct ClusterLightGroup { 39 | offset : ${access == 'read' ? 'u32' : 'atomic'}, 40 | lights : array, 41 | indices : array, 42 | }; 43 | @group(${group}) @binding(${binding}) var clusterLights : ClusterLightGroup; 44 | `; 45 | } 46 | 47 | export const TileFunctions = ` 48 | const tileCount = vec3(${TILE_COUNT[0]}u, ${TILE_COUNT[1]}u, ${TILE_COUNT[2]}u); 49 | 50 | fn linearDepth(depthSample : f32) -> f32 { 51 | return camera.zFar * camera.zNear / fma(depthSample, camera.zNear-camera.zFar, camera.zFar); 52 | } 53 | 54 | fn getTile(fragCoord : vec4) -> vec3 { 55 | // TODO: scale and bias calculation can be moved outside the shader to save cycles. 56 | let sliceScale = f32(tileCount.z) / log2(camera.zFar / camera.zNear); 57 | let sliceBias = -(f32(tileCount.z) * log2(camera.zNear) / log2(camera.zFar / camera.zNear)); 58 | let zTile = u32(max(log2(linearDepth(fragCoord.z)) * sliceScale + sliceBias, 0.0)); 59 | 60 | return vec3(u32(fragCoord.x / (camera.outputSize.x / f32(tileCount.x))), 61 | u32(fragCoord.y / (camera.outputSize.y / f32(tileCount.y))), 62 | zTile); 63 | } 64 | 65 | fn getClusterIndex(fragCoord : vec4) -> u32 { 66 | let tile = getTile(fragCoord); 67 | return tile.x + 68 | tile.y * tileCount.x + 69 | tile.z * tileCount.x * tileCount.y; 70 | } 71 | `; 72 | 73 | export const ClusterBoundsSource = ` 74 | ${CameraStruct(0, 0)} 75 | ${ClusterStruct(1, 0, 'read_write')} 76 | 77 | fn lineIntersectionToZPlane(a : vec3, b : vec3, zDistance : f32) -> vec3 { 78 | let normal = vec3(0.0, 0.0, 1.0); 79 | let ab = b - a; 80 | let t = (zDistance - dot(normal, a)) / dot(normal, ab); 81 | return a + t * ab; 82 | } 83 | 84 | fn clipToView(clip : vec4) -> vec4 { 85 | let view = camera.inverseProjection * clip; 86 | return view / vec4(view.w, view.w, view.w, view.w); 87 | } 88 | 89 | fn screen2View(screen : vec4) -> vec4 { 90 | let texCoord = screen.xy / camera.outputSize.xy; 91 | let clip = vec4(vec2(texCoord.x, 1.0 - texCoord.y) * 2.0 - vec2(1.0, 1.0), screen.z, screen.w); 92 | return clipToView(clip); 93 | } 94 | 95 | const tileCount = vec3(${TILE_COUNT[0]}u, ${TILE_COUNT[1]}u, ${TILE_COUNT[2]}u); 96 | const eyePos = vec3(0.0); 97 | 98 | @compute @workgroup_size(${WORKGROUP_SIZE[0]}, ${WORKGROUP_SIZE[1]}, ${WORKGROUP_SIZE[2]}) 99 | fn computeMain(@builtin(global_invocation_id) global_id : vec3) { 100 | let tileIndex : u32 = global_id.x + 101 | global_id.y * tileCount.x + 102 | global_id.z * tileCount.x * tileCount.y; 103 | 104 | let tileSize = vec2(camera.outputSize.x / f32(tileCount.x), 105 | camera.outputSize.y / f32(tileCount.y)); 106 | 107 | let maxPoint_sS = vec4(vec2(f32(global_id.x+1u), f32(global_id.y+1u)) * tileSize, 0.0, 1.0); 108 | let minPoint_sS = vec4(vec2(f32(global_id.x), f32(global_id.y)) * tileSize, 0.0, 1.0); 109 | 110 | let maxPoint_vS = screen2View(maxPoint_sS).xyz; 111 | let minPoint_vS = screen2View(minPoint_sS).xyz; 112 | 113 | let tileNear : f32 = -camera.zNear * pow(camera.zFar/ camera.zNear, f32(global_id.z)/f32(tileCount.z)); 114 | let tileFar : f32 = -camera.zNear * pow(camera.zFar/ camera.zNear, f32(global_id.z+1u)/f32(tileCount.z)); 115 | 116 | let minPointNear = lineIntersectionToZPlane(eyePos, minPoint_vS, tileNear); 117 | let minPointFar = lineIntersectionToZPlane(eyePos, minPoint_vS, tileFar); 118 | let maxPointNear = lineIntersectionToZPlane(eyePos, maxPoint_vS, tileNear); 119 | let maxPointFar = lineIntersectionToZPlane(eyePos, maxPoint_vS, tileFar); 120 | 121 | clusters.bounds[tileIndex].minAABB = min(min(minPointNear, minPointFar),min(maxPointNear, maxPointFar)); 122 | clusters.bounds[tileIndex].maxAABB = max(max(minPointNear, minPointFar),max(maxPointNear, maxPointFar)); 123 | } 124 | `; 125 | 126 | export const ClusterLightsSource = ` 127 | ${CameraStruct(0, 0)} 128 | ${ClusterStruct(0, 1, 'read')} 129 | ${ClusterLightsStruct(0, 2, 'read_write')} 130 | ${LightStruct(0, 3)} 131 | 132 | ${TileFunctions} 133 | 134 | fn sqDistPointAABB(point : vec3, minAABB : vec3, maxAABB : vec3) -> f32 { 135 | var sqDist = 0.0; 136 | // const minAABB = clusters.bounds[tileIndex].minAABB; 137 | // const maxAABB = clusters.bounds[tileIndex].maxAABB; 138 | 139 | // Wait, does this actually work? Just porting code, but it seems suspect? 140 | for(var i : i32 = 0; i < 3; i = i + 1) { 141 | let v = point[i]; 142 | if(v < minAABB[i]){ 143 | sqDist = sqDist + (minAABB[i] - v) * (minAABB[i] - v); 144 | } 145 | if(v > maxAABB[i]){ 146 | sqDist = sqDist + (v - maxAABB[i]) * (v - maxAABB[i]); 147 | } 148 | } 149 | 150 | return sqDist; 151 | } 152 | 153 | @compute @workgroup_size(${WORKGROUP_SIZE[0]}, ${WORKGROUP_SIZE[1]}, ${WORKGROUP_SIZE[2]}) 154 | fn computeMain(@builtin(global_invocation_id) global_id : vec3) { 155 | let tileIndex = global_id.x + 156 | global_id.y * tileCount.x + 157 | global_id.z * tileCount.x * tileCount.y; 158 | 159 | // TODO: Look into improving threading using local invocation groups? 160 | var clusterLightCount = 0u; 161 | var cluserLightIndices : array; 162 | for (var i = 0u; i < globalLights.lightCount; i = i + 1u) { 163 | let range = globalLights.lights[i].range; 164 | // Lights without an explicit range affect every cluster, but this is a poor way to handle that. 165 | var lightInCluster : bool = range <= 0.0; 166 | 167 | if (!lightInCluster) { 168 | let lightViewPos = camera.view * vec4(globalLights.lights[i].position, 1.0); 169 | let sqDist = sqDistPointAABB(lightViewPos.xyz, clusters.bounds[tileIndex].minAABB, clusters.bounds[tileIndex].maxAABB); 170 | lightInCluster = sqDist <= (range * range); 171 | } 172 | 173 | if (lightInCluster) { 174 | // Light affects this cluster. Add it to the list. 175 | cluserLightIndices[clusterLightCount] = i; 176 | clusterLightCount = clusterLightCount + 1u; 177 | } 178 | 179 | if (clusterLightCount == ${MAX_LIGHTS_PER_CLUSTER}u) { 180 | break; 181 | } 182 | } 183 | 184 | // TODO: Stick a barrier here and track cluster lights with an offset into a global light list 185 | let lightCount = clusterLightCount; 186 | var offset = atomicAdd(&clusterLights.offset, lightCount); 187 | 188 | if (offset >= ${MAX_CLUSTERED_LIGHTS}u) { 189 | return; 190 | } 191 | 192 | for(var i = 0u; i < clusterLightCount; i = i + 1u) { 193 | clusterLights.indices[offset + i] = cluserLightIndices[i]; 194 | } 195 | clusterLights.lights[tileIndex].offset = offset; 196 | clusterLights.lights[tileIndex].count = clusterLightCount; 197 | } 198 | `; -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-bloom.js: -------------------------------------------------------------------------------- 1 | import { WebGPUSystem } from './webgpu-system.js'; 2 | import { Stage } from '../core/stage.js'; 3 | import { FullscreenTexturedQuadVertexSource } from './wgsl/common.js'; 4 | import { BloomBlurHorizontalFragmentSource, BloomBlurVerticalFragmentSource, BloomBlendFragmentSource } from './wgsl/bloom.js'; 5 | 6 | export class WebGPUBloomSystem extends WebGPUSystem { 7 | stage = Stage.PostRender; 8 | frameIndex = 0; 9 | init(gpu) { 10 | // Setup a render pipeline for drawing debug views of textured quads 11 | this.blurHorizonalPipeline = gpu.device.createRenderPipeline({ 12 | label: `Bloom Blur Horizontal Pipeline`, 13 | layout: 'auto', 14 | vertex: { 15 | module: gpu.device.createShaderModule({ 16 | code: FullscreenTexturedQuadVertexSource, 17 | label: 'Bloom Blur Horizontal Vertex' 18 | }), 19 | entryPoint: 'vertexMain' 20 | }, 21 | fragment: { 22 | module: gpu.device.createShaderModule({ 23 | code: BloomBlurHorizontalFragmentSource, 24 | label: 'Bloom Blur Horizontal Fragment' 25 | }), 26 | entryPoint: 'fragmentMain', 27 | targets: [{ 28 | format: gpu.renderTargets.format, 29 | }], 30 | } 31 | }); 32 | 33 | this.blurVerticalPipeline = gpu.device.createRenderPipeline({ 34 | label: `Bloom Blur Vertical Pipeline`, 35 | layout: 'auto', 36 | vertex: { 37 | module: gpu.device.createShaderModule({ 38 | code: FullscreenTexturedQuadVertexSource, 39 | label: 'Bloom Blur Vertical Vertex' 40 | }), 41 | entryPoint: 'vertexMain' 42 | }, 43 | fragment: { 44 | module: gpu.device.createShaderModule({ 45 | code: BloomBlurVerticalFragmentSource, 46 | label: 'Bloom Blur Vertical Fragment' 47 | }), 48 | entryPoint: 'fragmentMain', 49 | targets: [{ 50 | format: gpu.renderTargets.format, 51 | }], 52 | } 53 | }); 54 | 55 | this.blendPipeline = gpu.device.createRenderPipeline({ 56 | label: `Bloom Blend Pipeline`, 57 | layout: 'auto', 58 | vertex: { 59 | module: gpu.device.createShaderModule({ 60 | code: FullscreenTexturedQuadVertexSource, 61 | label: 'Bloom Blend Vertex' 62 | }), 63 | entryPoint: 'vertexMain' 64 | }, 65 | fragment: { 66 | module: gpu.device.createShaderModule({ 67 | code: BloomBlendFragmentSource, 68 | label: 'Bloom Blend Fragment' 69 | }), 70 | entryPoint: 'fragmentMain', 71 | targets: [{ 72 | format: gpu.renderTargets.format, 73 | // Additive blending 74 | blend: { 75 | color: { 76 | srcFactor: 'one', 77 | dstFactor: 'one', 78 | }, 79 | alpha: { 80 | srcFactor: "one", 81 | dstFactor: "one", 82 | } 83 | } 84 | }], 85 | } 86 | }); 87 | 88 | this.blurUniformBuffer = gpu.device.createBuffer({ 89 | size: 4 * Float32Array.BYTES_PER_ELEMENT, 90 | usage: GPUBufferUsage.UNIFORM, 91 | mappedAtCreation: true, 92 | }); 93 | 94 | const blurArray = new Float32Array(this.blurUniformBuffer.getMappedRange()); 95 | blurArray[0] = 1; // Bloom radius 96 | blurArray[1] = 0.5; // Glow historical dimming amount 97 | this.blurUniformBuffer.unmap(); 98 | 99 | gpu.renderTargets.addEventListener('reconfigured', () => { 100 | this.onRenderTargetsReconfigured(gpu); 101 | }); 102 | this.onRenderTargetsReconfigured(gpu); 103 | } 104 | 105 | onRenderTargetsReconfigured(gpu) { 106 | this.pass0BindGroup = gpu.device.createBindGroup({ 107 | label: 'Bloom Blur Pass 0 Bind Group', 108 | layout: this.blurHorizonalPipeline.getBindGroupLayout(0), 109 | entries: [{ 110 | binding: 0, 111 | resource: { buffer: this.blurUniformBuffer }, 112 | }, { 113 | binding: 1, 114 | resource: gpu.renderTargets.emissiveTexture.createView(), 115 | }, { 116 | binding: 2, 117 | resource: gpu.defaultSampler, 118 | }] 119 | }); 120 | 121 | this.pass1BindGroups = [ 122 | gpu.device.createBindGroup({ 123 | label: 'Bloom Blur Pass 1 Bind Group A', 124 | layout: this.blurVerticalPipeline.getBindGroupLayout(0), 125 | entries: [{ 126 | binding: 0, 127 | resource: { buffer: this.blurUniformBuffer }, 128 | }, { 129 | binding: 1, 130 | resource: gpu.renderTargets.bloomTextures[0].createView(), 131 | }, { 132 | binding: 2, 133 | resource: gpu.defaultSampler, 134 | }, { 135 | binding: 3, 136 | resource: gpu.renderTargets.bloomTextures[2].createView(), 137 | }] 138 | }), 139 | gpu.device.createBindGroup({ 140 | label: 'Bloom Blur Pass 1 Bind Group B', 141 | layout: this.blurVerticalPipeline.getBindGroupLayout(0), 142 | entries: [{ 143 | binding: 0, 144 | resource: { buffer: this.blurUniformBuffer }, 145 | }, { 146 | binding: 1, 147 | resource: gpu.renderTargets.bloomTextures[0].createView(), 148 | }, { 149 | binding: 2, 150 | resource: gpu.defaultSampler, 151 | }, { 152 | binding: 3, 153 | resource: gpu.renderTargets.bloomTextures[1].createView(), 154 | }] 155 | }), 156 | ]; 157 | 158 | this.blendPassBindGroups = [ 159 | gpu.device.createBindGroup({ 160 | label: 'Bloom blend pass Bind Group A', 161 | layout: this.blendPipeline.getBindGroupLayout(0), 162 | entries: [{ 163 | binding: 0, 164 | resource: gpu.renderTargets.bloomTextures[1].createView(), 165 | }, { 166 | binding: 1, 167 | resource: gpu.defaultSampler, 168 | }] 169 | }), 170 | gpu.device.createBindGroup({ 171 | label: 'Bloom blend pass Bind Group B', 172 | layout: this.blendPipeline.getBindGroupLayout(0), 173 | entries: [{ 174 | binding: 0, 175 | resource: gpu.renderTargets.bloomTextures[2].createView(), 176 | }, { 177 | binding: 1, 178 | resource: gpu.defaultSampler, 179 | }] 180 | }), 181 | ]; 182 | } 183 | 184 | execute(delta, time, gpu) { 185 | const bloomTextures = gpu.renderTargets.bloomTextures; 186 | const commandEncoder = gpu.device.createCommandEncoder({}); 187 | 188 | const pingPongIndex = this.frameIndex % 2; 189 | 190 | // 1st pass (Horizontal blur) 191 | let passEncoder = commandEncoder.beginRenderPass({ 192 | colorAttachments: [{ 193 | view: bloomTextures[0].createView(), 194 | clearValue: {r: 0, g: 0, b: 0, a: 1.0}, 195 | loadOp: 'clear', 196 | storeOp: 'store', 197 | }], 198 | }); 199 | 200 | passEncoder.setPipeline(this.blurHorizonalPipeline); 201 | passEncoder.setBindGroup(0, this.pass0BindGroup); 202 | passEncoder.draw(3); 203 | passEncoder.end(); 204 | 205 | // 2nd pass (Vertical blur) 206 | passEncoder = commandEncoder.beginRenderPass({ 207 | colorAttachments: [{ 208 | view: bloomTextures[1 + pingPongIndex].createView(), 209 | clearValue: {r: 0, g: 0, b: 0, a: 1.0}, 210 | loadOp: 'clear', 211 | storeOp: 'store', 212 | }], 213 | }); 214 | 215 | passEncoder.setPipeline(this.blurVerticalPipeline); 216 | passEncoder.setBindGroup(0, this.pass1BindGroups[pingPongIndex]); 217 | passEncoder.draw(3); 218 | passEncoder.end(); 219 | 220 | // Blend pass 221 | passEncoder = commandEncoder.beginRenderPass({ 222 | colorAttachments: [{ 223 | view: gpu.renderTargets.context.getCurrentTexture().createView(), 224 | loadOp: 'load', 225 | storeOp: 'store', 226 | }], 227 | }); 228 | 229 | passEncoder.setPipeline(this.blendPipeline); 230 | passEncoder.setBindGroup(0, this.blendPassBindGroups[pingPongIndex]); 231 | passEncoder.draw(3); 232 | passEncoder.end(); 233 | 234 | gpu.device.queue.submit([commandEncoder.finish()]); 235 | 236 | this.frameIndex++; 237 | } 238 | } -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | WebGPU Shadow Playground 14 | 15 | 42 | 43 | 44 | 45 | 46 | 57 | 58 | 242 | 243 | -------------------------------------------------------------------------------- /js/engine/webgpu/webgpu-renderer.js: -------------------------------------------------------------------------------- 1 | import { Renderer } from '../core/render-world.js'; 2 | import { WebGPURenderTargets } from './webgpu-render-targets.js'; 3 | import { WebGPURenderBatch } from './webgpu-render-batch.js'; 4 | import { WebGPUBufferManager } from './webgpu-buffer.js'; 5 | import { WebGPUBindGroupLayouts } from './webgpu-bind-group-layouts.js' 6 | import { WebGPUTextureLoader } from 'webgpu-texture-loader'; 7 | 8 | const desiredFeatures = [ 9 | 'texture-compression-bc' 10 | ]; 11 | 12 | export class WebGPURenderer extends Renderer { 13 | adapter = null; 14 | device = null; 15 | 16 | renderTargets = null; 17 | 18 | bindGroupLayouts = {}; 19 | bufferManager = null; 20 | #textureLoader = null; 21 | 22 | shadowAtlasSize = 8192; 23 | shadowFormat = 'depth16unorm'; //'depth32float'; 24 | 25 | async init(canvas, flags) { 26 | this.flags = flags; 27 | 28 | this.shadowAtlasSize *= flags.shadowResolutionMultiplier; 29 | 30 | this.adapter = await navigator.gpu.requestAdapter({ 31 | powerPreference: flags.powerPreference, 32 | }); 33 | 34 | // Determine which of the desired features can be enabled for this device. 35 | const requiredFeatures = desiredFeatures.filter(feature => this.adapter.features.has(feature)); 36 | this.device = await this.adapter.requestDevice({requiredFeatures}); 37 | 38 | this.renderTargets = new WebGPURenderTargets(this.adapter, this.device, canvas, flags); 39 | 40 | this.renderBatch = new WebGPURenderBatch(this.device); 41 | 42 | this.bindGroupLayouts = new WebGPUBindGroupLayouts(this.device); 43 | this.bufferManager = new WebGPUBufferManager(this.device); 44 | this.#textureLoader = new WebGPUTextureLoader(this.device); 45 | 46 | this.blackTextureView = this.#textureLoader.fromColor(0, 0, 0, 0).texture.createView(); 47 | this.whiteTextureView = this.#textureLoader.fromColor(1.0, 1.0, 1.0, 1.0).texture.createView(); 48 | this.defaultNormalTextureView = this.#textureLoader.fromColor(0.5, 0.5, 1.0, 0).texture.createView(); 49 | this.defaultSampler = this.device.createSampler({ 50 | minFilter: 'linear', 51 | magFilter: 'linear', 52 | mipmapFilter: 'linear', 53 | addressModeU: 'repeat', 54 | addressModeV: 'repeat', 55 | }); 56 | 57 | // Shadow mapping (TODO: Allocate dynamically only if shadows are needed) 58 | this.shadowDepthTexture = this.device.createTexture({ 59 | size: [this.shadowAtlasSize, this.shadowAtlasSize], 60 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING, 61 | format: this.shadowFormat, 62 | }); 63 | this.shadowDepthTextureView = this.shadowDepthTexture.createView(); 64 | 65 | this.lightShadowTableBuffer = this.device.createBuffer({ 66 | size: this.maxLightCount * Int32Array.BYTES_PER_ELEMENT * 2, 67 | usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE, 68 | }); 69 | 70 | this.shadowPropertiesBuffer = this.device.createBuffer({ 71 | size: this.maxShadowCasters * 20 * Float32Array.BYTES_PER_ELEMENT, 72 | usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE, 73 | }); 74 | 75 | this.shadowDepthSampler = this.device.createSampler({ 76 | minFilter: 'linear', 77 | magFilter: 'linear', 78 | mipmapFilter: 'linear', 79 | compare: 'less', 80 | }); 81 | this.shadowUnfilteredDepthSampler = this.device.createSampler({ 82 | compare: 'less', 83 | }); 84 | 85 | // Render pass descriptor 86 | this.colorAttachment = { 87 | // view is acquired and set in onResize. 88 | view: undefined, 89 | // view is acquired and set in onFrame. 90 | resolveTarget: undefined, 91 | clearValue: {r: 0, g: 0, b: 0, a: 1.0}, 92 | loadOp: 'clear', 93 | storeOp: this.renderTargets.sampleCount > 1 ? 'discard' : 'store', 94 | }; 95 | 96 | const colorAttachments = [this.colorAttachment]; 97 | 98 | if (this.flags.bloomEnabled) { 99 | this.emissiveAttachment = { 100 | // view is acquired and set in onResize. 101 | view: undefined, 102 | // view is acquired and set in onFrame. 103 | resolveTarget: undefined, 104 | clearValue: {r: 0, g: 0, b: 0, a: 1.0}, 105 | loadOp: 'clear', 106 | storeOp: this.renderTargets.sampleCount > 1 ? 'discard' : 'store', 107 | }; 108 | 109 | colorAttachments.push(this.emissiveAttachment); 110 | } 111 | 112 | this.depthAttachment = { 113 | // view is acquired and set in onResize. 114 | view: undefined, 115 | depthClearValue: 1.0, 116 | depthLoadOp: 'clear', 117 | depthStoreOp: 'discard', 118 | }; 119 | 120 | this.renderPassDescriptor = { 121 | colorAttachments, 122 | depthStencilAttachment: this.depthAttachment 123 | }; 124 | 125 | this.renderTargets.addEventListener('reconfigured', () => { 126 | this.onRenderTargetsReconfigured(); 127 | }); 128 | this.onRenderTargetsReconfigured(); 129 | } 130 | 131 | get canvas() { 132 | return this.renderTargets?.context.canvas; 133 | } 134 | 135 | // RenderWorld overloads 136 | get textureLoader() { 137 | return this.#textureLoader; 138 | } 139 | 140 | createStaticBuffer(sizeOrArrayBuffer, usage = 'vertex') { 141 | return this.bufferManager.createStaticBuffer(sizeOrArrayBuffer, usage); 142 | } 143 | 144 | createDynamicBuffer(sizeOrArrayBuffer, usage = 'vertex') { 145 | return this.bufferManager.createDynamicBuffer(sizeOrArrayBuffer, usage); 146 | } 147 | 148 | onRenderTargetsReconfigured() { 149 | // Override to configure with the appropriate render targets for this pass. 150 | // The defautls are simply to set the depth and MSAA texture; 151 | if (this.renderTargets.sampleCount > 1) { 152 | this.colorAttachment.view = this.renderTargets.msaaColorTexture.createView(); 153 | } 154 | 155 | if (this.flags.bloomEnabled) { 156 | if (this.renderTargets.sampleCount > 1) { 157 | this.emissiveAttachment.view = this.renderTargets.msaaEmissiveTexture.createView(); 158 | this.emissiveAttachment.resolveTarget = this.renderTargets.emissiveTexture.createView(); 159 | } else { 160 | this.emissiveAttachment.view = this.renderTargets.emissiveTexture.createView(); 161 | } 162 | } 163 | 164 | if (this.renderTargets.depthFormat) { 165 | this.depthAttachment.view = this.renderTargets.depthTexture.createView(); 166 | } 167 | } 168 | 169 | render(camera) { 170 | const instanceBuffer = this.renderBatch.instanceBuffer; 171 | 172 | const outputTexture = this.renderTargets.context.getCurrentTexture(); 173 | const commandEncoder = this.device.createCommandEncoder({}); 174 | 175 | if (this.renderTargets.sampleCount > 1) { 176 | this.colorAttachment.resolveTarget = outputTexture.createView(); 177 | } else { 178 | this.colorAttachment.view = outputTexture.createView(); 179 | } 180 | 181 | const passEncoder = commandEncoder.beginRenderPass(this.renderPassDescriptor); 182 | 183 | passEncoder.setBindGroup(0, this.flags.shadowFiltering ? camera.bindGroup : camera.bindGroupUnfilteredShadow); 184 | 185 | // Loop through all the renderable entities and store them by pipeline. 186 | for (const pipeline of this.renderBatch.sortedPipelines) { 187 | passEncoder.setPipeline(pipeline.pipeline); 188 | 189 | const geometryList = this.renderBatch.pipelineGeometries.get(pipeline); 190 | for (const [geometry, materialList] of geometryList) { 191 | 192 | for (const vb of geometry.vertexBuffers) { 193 | passEncoder.setVertexBuffer(vb.slot, vb.buffer.gpuBuffer, vb.offset); 194 | } 195 | const ib = geometry.indexBuffer; 196 | if (ib) { 197 | passEncoder.setIndexBuffer(ib.buffer.gpuBuffer, ib.format, ib.offset); 198 | } 199 | 200 | for (const [material, instances] of materialList) { 201 | if (material) { 202 | let i = material.firstBindGroupIndex; 203 | for (const bindGroup of material.bindGroups) { 204 | passEncoder.setBindGroup(i++, bindGroup); 205 | } 206 | } 207 | 208 | if (pipeline.instanceSlot >= 0) { 209 | passEncoder.setVertexBuffer(pipeline.instanceSlot, instanceBuffer, instances.bufferOffset); 210 | } 211 | 212 | if (ib) { 213 | passEncoder.drawIndexed(geometry.drawCount, instances.instanceCount); 214 | } else { 215 | passEncoder.draw(geometry.drawCount, instances.instanceCount); 216 | } 217 | 218 | // Restore the camera binding if needed 219 | if (material?.firstBindGroupIndex == 0) { 220 | passEncoder.setBindGroup(0, this.flags.shadowFiltering ? camera.bindGroup : camera.bindGroupUnfilteredShadow); 221 | } 222 | } 223 | } 224 | } 225 | 226 | passEncoder.end(); 227 | 228 | this.device.queue.submit([commandEncoder.finish()]); 229 | 230 | // Clear the render batch. It'll be built up again next frame. 231 | this.renderBatch.clear(); 232 | } 233 | } --------------------------------------------------------------------------------