├── .gitignore
├── .vscode
└── extensions.json
├── LICENSE
├── deploy.sh
├── index.html
├── logo_new.png
├── package.json
├── public
├── sprites.webp
├── texture.png
├── texture.webp
└── video.mp4
├── readme.md
├── samples
├── basicLights.html
├── basicTriangle.html
├── basicTriangleMSAA.html
├── canvasTexture.html
├── colorTriangle.html
├── cubes.html
├── cubesDynamicOffsets.html
├── cubesInstance.html
├── cubesOffsets.html
├── cubesRenderBundle.html
├── gpuCompute.html
├── gpuParticles.html
├── helloWebgpu.html
├── imageTexture.html
├── rotatingCube.html
├── shadowMapping.html
├── spriteTexture.html
└── videoTexture.html
├── src
├── basicLights.ts
├── basicTriangle.ts
├── basicTriangleMSAA.ts
├── canvasTexture.ts
├── colorTriangle.ts
├── cubes.ts
├── cubesDynamicOffsets.ts
├── cubesInstance.ts
├── cubesOffsets.ts
├── cubesRenderBundle.ts
├── gpuCompute.ts
├── gpuParticles.ts
├── helloWebgpu.ts
├── imageTexture.ts
├── rotatingCube.ts
├── shaders
│ ├── basic.instanced.vert.wgsl
│ ├── basic.vert.wgsl
│ ├── color.frag.wgsl
│ ├── compute.position.wgsl
│ ├── compute.transform.wgsl
│ ├── imageTexture.frag.wgsl
│ ├── lambert.frag.wgsl
│ ├── normal.vert.wgsl
│ ├── position.frag.wgsl
│ ├── position.vert.wgsl
│ ├── red.frag.wgsl
│ ├── shadow.frag.wgsl
│ ├── shadow.vertex.wgsl
│ ├── shadowDepth.wgsl
│ ├── spriteTexture.frag.wgsl
│ ├── triangle.vert.wgsl
│ └── videoTexture.frag.wgsl
├── shadowMapping.ts
├── spriteTexture.ts
├── util
│ ├── box.ts
│ ├── cube.ts
│ ├── math.ts
│ ├── sphere.ts
│ └── triangle.ts
└── videoTexture.ts
├── tsconfig.json
├── vite.config.github.js
└── vite.config.js
/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 | yarn-error.log
15 | yarn.lock
16 | package-lock.json
17 |
18 | # Editor directories and files
19 | .vscode/*
20 | !.vscode/extensions.json
21 | .idea
22 | .DS_Store
23 | *.suo
24 | *.ntvs*
25 | *.njsproj
26 | *.sln
27 | *.sw?
28 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "PolyMeilex.wgsl"
4 | ]
5 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | # Released under MIT License
2 |
3 | Copyright (c) 2022 Orillusion
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | set -e
3 | rm -rf dist
4 | npm run build
5 |
6 | cd dist
7 |
8 | git init
9 | git checkout -b main
10 | git add -A
11 | git commit -m 'deploy'
12 |
13 | git push -f git@github.com:Orillusion/orillusion-webgpu-samples.git main:gh-pages
14 |
15 | cd -
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion Webgpu Samples
7 |
61 |
62 |
63 |
86 |
87 | < >
88 |
106 |
107 |
--------------------------------------------------------------------------------
/logo_new.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Orillusion/orillusion-webgpu-samples/5554001cb5a28293437e64eddcabc259ca4cdc0f/logo_new.png
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "orillusion-webgpu-samples",
3 | "version": "0.1.0",
4 | "author": "Orillusion",
5 | "description": "Orillusion webgpu vite template & samples",
6 | "repository": {
7 | "type": "git",
8 | "url": "git+https://github.com/Orillusion/orillusion-webgpu-samples.git"
9 | },
10 | "scripts": {
11 | "dev": "vite",
12 | "build": "tsc && vite build --config vite.config.github.js",
13 | "preview": "vite preview"
14 | },
15 | "devDependencies": {
16 | "@types/dom-webcodecs": "^0.1.7",
17 | "@webgpu/types": "^0.1.32",
18 | "gl-matrix": "^3.4.3",
19 | "typescript": "^5.1.0-dev.20230508",
20 | "vite": "^4.3.5"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/public/sprites.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Orillusion/orillusion-webgpu-samples/5554001cb5a28293437e64eddcabc259ca4cdc0f/public/sprites.webp
--------------------------------------------------------------------------------
/public/texture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Orillusion/orillusion-webgpu-samples/5554001cb5a28293437e64eddcabc259ca4cdc0f/public/texture.png
--------------------------------------------------------------------------------
/public/texture.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Orillusion/orillusion-webgpu-samples/5554001cb5a28293437e64eddcabc259ca4cdc0f/public/texture.webp
--------------------------------------------------------------------------------
/public/video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Orillusion/orillusion-webgpu-samples/5554001cb5a28293437e64eddcabc259ca4cdc0f/public/video.mp4
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # Orillusion-webgpu-samples
4 |
5 | ## Install and Run
6 |
7 | Type the following in any terminal:
8 |
9 | ```bash
10 | # Clone the repo
11 | git clone https://github.com/Orillusion/orillusion-webgpu-samples.git
12 |
13 | # Go inside the folder
14 | cd orillusion-webgpu-samples
15 |
16 | # Start installing dependencies
17 | npm install #or yarn
18 |
19 | # Run project at localhost:3000
20 | npm run dev #or yarn run dev
21 | ```
22 |
23 | ## Project Layout
24 |
25 | ```bash
26 | ├─ 📂 node_modules/ # Dependencies
27 | │ ├─ 📁 @webgpu # WebGPU types for TS
28 | │ └─ 📁 ... # Other dependencies (TypeScript, Vite, etc.)
29 | ├─ 📂 src/ # Source files
30 | │ ├─ 📁 shaders # Folder for shader files
31 | │ └─ 📄 *.ts # TS files for each demo
32 | ├─ 📂 samples/ # Sample html
33 | │ └─ 📄 *.html # HTML entry for each demo
34 | ├─ 📄 .gitignore # Ignore certain files in git repo
35 | ├─ 📄 index.html # Entry page
36 | ├─ 📄 LICENSE # MIT
37 | ├─ 📄 logo.png # Orillusion logo image
38 | ├─ 📄 package.json # Node package file
39 | ├─ 📄 tsconfig.json # TS configuration file
40 | ├─ 📄 vite.config.js # vite configuration file
41 | └─ 📄 readme.md # Read Me!
42 | ```
43 |
44 | ## Platform
45 | **Windows/Mac/Linux:**
46 | - Chrome 113+
47 | - Edge: 113+
48 | - Safari: 17.5+ (with `WebGPU` feature flag)
49 |
50 | **Android (Behind the `enable-unsafe-webgpu` flag):**
51 | - Chrome Canary 113+
52 | - Edge Canary 113+
53 |
54 | **IOS:**
55 | - Safari: 17.5+ (with `WebGPU` feature flag)
56 |
--------------------------------------------------------------------------------
/samples/basicLights.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Basic Lights
7 |
45 |
46 |
47 |
48 |
49 | Ambient Intensity
50 |
51 | Point Light Intensity
52 |
53 | Point Light Radius
54 |
55 | Dir Light Intensity
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/samples/basicTriangle.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Basic Triangle
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/basicTriangleMSAA.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Basic Triangle
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/canvasTexture.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Canvas Textrue
7 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/samples/colorTriangle.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Basic Triangle
7 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/samples/cubes.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Cubes
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/cubesDynamicOffsets.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Dynamic Offsets
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/cubesInstance.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Instanced Cube
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/cubesOffsets.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Cube with Group Offset
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/cubesRenderBundle.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | RenderBundle
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/gpuCompute.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | GPU Compute
7 |
27 |
28 |
29 | Matrix Multiply Benchmark
30 |
37 | JS CPU: - ms (Average of 10x)
38 | WebGPU: - ms (Average of 300x)
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/samples/gpuParticles.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Particles
7 |
42 |
43 |
44 |
45 | 150000
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/samples/helloWebgpu.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Hello WebGPU
7 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/samples/imageTexture.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Image Texture
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/rotatingCube.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Rotating Cube
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/shadowMapping.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Shadow Mapping
7 |
27 |
28 |
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/samples/spriteTexture.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Sprite Texture
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/samples/videoTexture.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | Orillusion | Video Texutre
7 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/src/basicLights.ts:
--------------------------------------------------------------------------------
1 | import normal from './shaders/normal.vert.wgsl?raw'
2 | import lambert from './shaders/lambert.frag.wgsl?raw'
3 | import * as sphere from './util/sphere'
4 | import * as box from './util/box'
5 | import { getModelViewMatrix, getProjectionMatrix } from './util/math'
6 |
7 | // initialize webgpu device & config canvas context
8 | async function initWebGPU(canvas: HTMLCanvasElement) {
9 | if(!navigator.gpu)
10 | throw new Error('Not Support WebGPU')
11 | const adapter = await navigator.gpu.requestAdapter()
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | device, format,
23 | // prevent chrome warning after v102
24 | alphaMode: 'opaque'
25 | })
26 | return {device, context, format, size}
27 | }
28 |
29 | // create pipiline & buffers
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
31 | const pipeline = await device.createRenderPipelineAsync({
32 | label: 'Basic Pipline',
33 | layout: 'auto',
34 | vertex: {
35 | module: device.createShaderModule({
36 | code: normal,
37 | }),
38 | entryPoint: 'main',
39 | buffers: [{
40 | arrayStride: 8 * 4, // 3 position 2 uv,
41 | attributes: [
42 | {
43 | // position
44 | shaderLocation: 0,
45 | offset: 0,
46 | format: 'float32x3',
47 | },
48 | {
49 | // normal
50 | shaderLocation: 1,
51 | offset: 3 * 4,
52 | format: 'float32x3',
53 | },
54 | {
55 | // uv
56 | shaderLocation: 2,
57 | offset: 6 * 4,
58 | format: 'float32x2',
59 | },
60 | ]
61 | }]
62 | },
63 | fragment: {
64 | module: device.createShaderModule({
65 | code: lambert,
66 | }),
67 | entryPoint: 'main',
68 | targets: [
69 | {
70 | format: format
71 | }
72 | ]
73 | },
74 | primitive: {
75 | topology: 'triangle-list',
76 | // Culling backfaces pointing away from the camera
77 | cullMode: 'back'
78 | },
79 | // Enable depth testing since we have z-level positions
80 | // Fragment closest to the camera is rendered in front
81 | depthStencil: {
82 | depthWriteEnabled: true,
83 | depthCompare: 'less',
84 | format: 'depth24plus',
85 | }
86 | } as GPURenderPipelineDescriptor)
87 | // create depthTexture for renderPass
88 | const depthTexture = device.createTexture({
89 | size, format: 'depth24plus',
90 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
91 | })
92 | const depthView = depthTexture.createView()
93 | // create vertex & index buffer
94 | const boxBuffer = {
95 | vertex: device.createBuffer({
96 | label: 'GPUBuffer store vertex',
97 | size: box.vertex.byteLength,
98 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
99 | }),
100 | index: device.createBuffer({
101 | label: 'GPUBuffer store vertex index',
102 | size: box.index.byteLength,
103 | usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
104 | })
105 | }
106 | const sphereBuffer = {
107 | vertex: device.createBuffer({
108 | label: 'GPUBuffer store vertex',
109 | size: sphere.vertex.byteLength,
110 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
111 | }),
112 | index: device.createBuffer({
113 | label: 'GPUBuffer store vertex index',
114 | size: sphere.index.byteLength,
115 | usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
116 | })
117 | }
118 | device.queue.writeBuffer(boxBuffer.vertex, 0, box.vertex)
119 | device.queue.writeBuffer(boxBuffer.index, 0, box.index)
120 | device.queue.writeBuffer(sphereBuffer.vertex, 0, sphere.vertex)
121 | device.queue.writeBuffer(sphereBuffer.index, 0, sphere.index)
122 |
123 | // create a 4x4xNUM STORAGE buffer to store matrix
124 | const modelViewBuffer = device.createBuffer({
125 | label: 'GPUBuffer store n*4x4 matrix',
126 | size: 4 * 4 * 4 * NUM, // 4 x 4 x float32 x NUM
127 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
128 | })
129 | // create a 4x4 uniform buffer to store projection
130 | const projectionBuffer = device.createBuffer({
131 | label: 'GPUBuffer store 4x4 matrix',
132 | size: 4 * 4 * 4, // 4 x 4 x float32
133 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
134 | })
135 | // create a 4x4xNUM STORAGE buffer to store color
136 | const colorBuffer = device.createBuffer({
137 | label: 'GPUBuffer store n*4 color',
138 | size: 4 * 4 * NUM, // 4 x float32 x NUM
139 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
140 | })
141 | // create a uniform group for Matrix
142 | const vsGroup = device.createBindGroup({
143 | label: 'Uniform Group with matrix',
144 | layout: pipeline.getBindGroupLayout(0),
145 | entries: [
146 | {
147 | binding: 0,
148 | resource: {
149 | buffer: modelViewBuffer
150 | }
151 | },
152 | {
153 | binding: 1,
154 | resource: {
155 | buffer: projectionBuffer
156 | }
157 | },
158 | {
159 | binding: 2,
160 | resource: {
161 | buffer: colorBuffer
162 | }
163 | },
164 | ]
165 | })
166 | // create a uniform buffer to store pointLight
167 | const ambientBuffer = device.createBuffer({
168 | label: 'GPUBuffer store 4x4 matrix',
169 | size: 1 * 4, // 1 x float32: intensity f32
170 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
171 | })
172 | // create a uniform buffer to store pointLight
173 | const pointBuffer = device.createBuffer({
174 | label: 'GPUBuffer store 4x4 matrix',
175 | size: 8 * 4, // 8 x float32: position vec4 + 4 configs
176 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
177 | })
178 | // create a uniform buffer to store dirLight
179 | const directionalBuffer = device.createBuffer({
180 | label: 'GPUBuffer store 4x4 matrix',
181 | size: 8 * 4, // 8 x float32: position vec4 + 4 configs
182 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
183 | })
184 | // create a uniform group for light Matrix
185 | const lightGroup = device.createBindGroup({
186 | label: 'Uniform Group with matrix',
187 | layout: pipeline.getBindGroupLayout(1),
188 | entries: [
189 | {
190 | binding: 0,
191 | resource: {
192 | buffer: ambientBuffer
193 | }
194 | },
195 | {
196 | binding: 1,
197 | resource: {
198 | buffer: pointBuffer
199 | }
200 | },
201 | {
202 | binding: 2,
203 | resource: {
204 | buffer: directionalBuffer
205 | }
206 | }
207 | ]
208 | })
209 | // return all vars
210 | return {
211 | pipeline, boxBuffer, sphereBuffer,
212 | modelViewBuffer, projectionBuffer, colorBuffer, vsGroup,
213 | ambientBuffer, pointBuffer, directionalBuffer, lightGroup,
214 | depthTexture, depthView
215 | }
216 | }
217 |
218 | // create & submit device commands
219 | function draw(
220 | device: GPUDevice,
221 | context: GPUCanvasContext,
222 | pipelineObj: {
223 | pipeline: GPURenderPipeline,
224 | boxBuffer: {vertex: GPUBuffer, index: GPUBuffer},
225 | sphereBuffer: {vertex: GPUBuffer, index: GPUBuffer},
226 | vsGroup: GPUBindGroup,
227 | lightGroup: GPUBindGroup
228 | depthView: GPUTextureView
229 | },
230 | ) {
231 | const commandEncoder = device.createCommandEncoder()
232 | const renderPassDescriptor: GPURenderPassDescriptor = {
233 | colorAttachments: [
234 | {
235 | view: context.getCurrentTexture().createView(),
236 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
237 | loadOp: 'clear',
238 | storeOp: 'store'
239 | }
240 | ],
241 | depthStencilAttachment: {
242 | view: pipelineObj.depthView,
243 | depthClearValue: 1.0,
244 | depthLoadOp: 'clear',
245 | depthStoreOp: 'store',
246 | }
247 | }
248 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
249 | passEncoder.setPipeline(pipelineObj.pipeline)
250 | passEncoder.setBindGroup(0, pipelineObj.vsGroup)
251 | passEncoder.setBindGroup(1, pipelineObj.lightGroup)
252 | // set box vertex
253 | passEncoder.setVertexBuffer(0, pipelineObj.boxBuffer.vertex)
254 | passEncoder.setIndexBuffer(pipelineObj.boxBuffer.index, 'uint16')
255 | passEncoder.drawIndexed(box.indexCount, NUM / 2, 0, 0, 0)
256 | // set sphere vertex
257 | passEncoder.setVertexBuffer(0, pipelineObj.sphereBuffer.vertex)
258 | passEncoder.setIndexBuffer(pipelineObj.sphereBuffer.index, 'uint16')
259 | passEncoder.drawIndexed(sphere.indexCount, NUM / 2, 0, 0, NUM / 2)
260 | passEncoder.end()
261 | // webgpu run in a separate process, all the commands will be executed after submit
262 | device.queue.submit([commandEncoder.finish()])
263 | }
264 |
265 | // total objects
266 | const NUM = 500
267 | async function run(){
268 | const canvas = document.querySelector('canvas')
269 | if (!canvas)
270 | throw new Error('No Canvas')
271 |
272 | const {device, context, format, size} = await initWebGPU(canvas)
273 | const pipelineObj = await initPipeline(device, format, size)
274 |
275 | // create objects
276 | const scene:any[] = []
277 | const modelViewMatrix = new Float32Array(NUM * 4 * 4)
278 | const colorBuffer = new Float32Array(NUM * 4)
279 | for(let i = 0; i < NUM; i++){
280 | // craete simple object
281 | const position = {x: Math.random() * 40 - 20, y: Math.random() * 40 - 20, z: - 50 - Math.random() * 50}
282 | const rotation = {x: Math.random(), y: Math.random(), z: Math.random()}
283 | const scale = {x:1, y:1, z:1}
284 | const modelView = getModelViewMatrix(position, rotation, scale)
285 | modelViewMatrix.set(modelView, i * 4 * 4)
286 | // random color for each object
287 | colorBuffer.set([Math.random(), Math.random(), Math.random(), 1], i * 4)
288 | scene.push({position, rotation, scale})
289 | }
290 | // write matrix & colors
291 | device.queue.writeBuffer(pipelineObj.colorBuffer, 0, colorBuffer)
292 | device.queue.writeBuffer(pipelineObj.modelViewBuffer, 0, modelViewMatrix)
293 |
294 | // ambient light, just 1 float32
295 | const ambient = new Float32Array([0.1])
296 | // point light, 2 x vec4: 4 position + 4 configs
297 | const pointLight = new Float32Array(8)
298 | pointLight[2] = -50 // z
299 | pointLight[4] = 1 // intensity
300 | pointLight[5] = 20 // radius
301 | // dir light, 2 x vec4: 4 position + 4 configs
302 | const directionalLight = new Float32Array(8)
303 | directionalLight[4] = 0.5 // intensity
304 |
305 | // start loop
306 | function frame(){
307 | // update lights position
308 | const now = performance.now()
309 | pointLight[0] = 10 * Math.sin(now / 1000)
310 | pointLight[1] = 10 * Math.cos(now / 1000)
311 | pointLight[2] = -60 + 10 * Math.cos(now / 1000)
312 | directionalLight[0] = Math.sin(now / 1500)
313 | directionalLight[2] = Math.cos(now / 1500)
314 | // update lights position & config to GPU
315 | device.queue.writeBuffer(pipelineObj.ambientBuffer, 0, ambient)
316 | device.queue.writeBuffer(pipelineObj.pointBuffer, 0, pointLight)
317 | device.queue.writeBuffer(pipelineObj.directionalBuffer, 0, directionalLight)
318 | draw(device, context, pipelineObj)
319 | requestAnimationFrame(frame)
320 | }
321 | frame()
322 |
323 | // UI
324 | document.querySelector('#ambient')?.addEventListener('input', (e:Event) => {
325 | ambient[0] = +(e.target as HTMLInputElement).value
326 | })
327 | document.querySelector('#point')?.addEventListener('input', (e:Event) => {
328 | pointLight[4] = +(e.target as HTMLInputElement).value
329 | })
330 | document.querySelector('#radius')?.addEventListener('input', (e:Event) => {
331 | pointLight[5] = +(e.target as HTMLInputElement).value
332 | })
333 | document.querySelector('#dir')?.addEventListener('input', (e:Event) => {
334 | directionalLight[4] = +(e.target as HTMLInputElement).value
335 | })
336 |
337 | function updateCamera(){
338 | const aspect = size.width / size.height
339 | const projectionMatrix = getProjectionMatrix(aspect)
340 | device.queue.writeBuffer(pipelineObj.projectionBuffer, 0, projectionMatrix)
341 | }
342 | updateCamera()
343 | // re-configure context on resize
344 | window.addEventListener('resize', ()=>{
345 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
346 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
347 | // don't need to recall context.configure() after v104
348 | // re-create depth texture
349 | pipelineObj.depthTexture.destroy()
350 | pipelineObj.depthTexture = device.createTexture({
351 | size, format: 'depth24plus',
352 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
353 | })
354 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
355 | // update aspect
356 | updateCamera()
357 | })
358 | }
359 | run()
--------------------------------------------------------------------------------
/src/basicTriangle.ts:
--------------------------------------------------------------------------------
1 | import triangleVert from './shaders/triangle.vert.wgsl?raw'
2 | import redFrag from './shaders/red.frag.wgsl?raw'
3 |
4 | // initialize webgpu device & config canvas context
5 | async function initWebGPU(canvas: HTMLCanvasElement) {
6 | if(!navigator.gpu)
7 | throw new Error('Not Support WebGPU')
8 | const adapter = await navigator.gpu.requestAdapter({
9 | powerPreference: 'high-performance'
10 | // powerPreference: 'low-power'
11 | })
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | // json specific format when key and value are the same
23 | device, format,
24 | // prevent chrome warning
25 | alphaMode: 'opaque'
26 | })
27 | return {device, context, format, size}
28 | }
29 | // create a simple pipiline
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat): Promise {
31 | const descriptor: GPURenderPipelineDescriptor = {
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: triangleVert
36 | }),
37 | entryPoint: 'main'
38 | },
39 | primitive: {
40 | topology: 'triangle-list' // try point-list, line-list, line-strip, triangle-strip?
41 | },
42 | fragment: {
43 | module: device.createShaderModule({
44 | code: redFrag
45 | }),
46 | entryPoint: 'main',
47 | targets: [
48 | {
49 | format: format
50 | }
51 | ]
52 | }
53 | }
54 | return await device.createRenderPipelineAsync(descriptor)
55 | }
56 | // create & submit device commands
57 | function draw(device: GPUDevice, context: GPUCanvasContext, pipeline: GPURenderPipeline) {
58 | const commandEncoder = device.createCommandEncoder()
59 | const view = context.getCurrentTexture().createView()
60 | const renderPassDescriptor: GPURenderPassDescriptor = {
61 | colorAttachments: [
62 | {
63 | view: view,
64 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
65 | loadOp: 'clear', // clear/load
66 | storeOp: 'store' // store/discard
67 | }
68 | ]
69 | }
70 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
71 | passEncoder.setPipeline(pipeline)
72 | // 3 vertex form a triangle
73 | passEncoder.draw(3)
74 | passEncoder.end()
75 | // webgpu run in a separate process, all the commands will be executed after submit
76 | device.queue.submit([commandEncoder.finish()])
77 | }
78 |
79 | async function run(){
80 | const canvas = document.querySelector('canvas')
81 | if (!canvas)
82 | throw new Error('No Canvas')
83 | const {device, context, format} = await initWebGPU(canvas)
84 | const pipeline = await initPipeline(device, format)
85 | // start draw
86 | draw(device, context, pipeline)
87 |
88 | // re-configure context on resize
89 | window.addEventListener('resize', ()=>{
90 | canvas.width = canvas.clientWidth * devicePixelRatio
91 | canvas.height = canvas.clientHeight * devicePixelRatio
92 | // don't need to recall context.configure() after v104
93 | draw(device, context, pipeline)
94 | })
95 | }
96 | run()
--------------------------------------------------------------------------------
/src/basicTriangleMSAA.ts:
--------------------------------------------------------------------------------
1 | import triangleVert from './shaders/triangle.vert.wgsl?raw'
2 | import redFrag from './shaders/red.frag.wgsl?raw'
3 |
4 | // initialize webgpu device & config canvas context
5 | async function initWebGPU(canvas: HTMLCanvasElement) {
6 | if(!navigator.gpu)
7 | throw new Error('Not Support WebGPU')
8 | const adapter = await navigator.gpu.requestAdapter({
9 | powerPreference: 'high-performance'
10 | // powerPreference: 'low-power'
11 | })
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | // json specific format when key and value are the same
23 | device, format,
24 | // prevent chrome warning
25 | alphaMode: 'opaque'
26 | })
27 | return {device, context, format, size}
28 | }
29 | // create a simple pipiline with multiSample
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat): Promise {
31 | const descriptor: GPURenderPipelineDescriptor = {
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: triangleVert
36 | }),
37 | entryPoint: 'main'
38 | },
39 | primitive: {
40 | topology: 'triangle-list' // try point-list, line-list, line-strip, triangle-strip?
41 | },
42 | fragment: {
43 | module: device.createShaderModule({
44 | code: redFrag
45 | }),
46 | entryPoint: 'main',
47 | targets: [
48 | {
49 | format: format
50 | }
51 | ]
52 | },
53 | multisample: {
54 | count: 4,
55 | }
56 | }
57 | return await device.createRenderPipelineAsync(descriptor)
58 | }
59 | // create & submit device commands
60 | function draw(device: GPUDevice, context: GPUCanvasContext, pipeline: GPURenderPipeline, MSAAView: GPUTextureView) {
61 | const commandEncoder = device.createCommandEncoder()
62 | const renderPassDescriptor: GPURenderPassDescriptor = {
63 | colorAttachments: [
64 | {
65 | view: MSAAView,
66 | resolveTarget: context.getCurrentTexture().createView(),
67 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
68 | loadOp: 'clear', // clear/load
69 | storeOp: 'store' // store/discard
70 | }
71 | ]
72 | }
73 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
74 | passEncoder.setPipeline(pipeline)
75 | // 3 vertex form a triangle
76 | passEncoder.draw(3)
77 | passEncoder.end()
78 | // webgpu run in a separate process, all the commands will be executed after submit
79 | device.queue.submit([commandEncoder.finish()])
80 | }
81 |
82 | async function run(){
83 | const canvas = document.querySelector('canvas')
84 | if (!canvas)
85 | throw new Error('No Canvas')
86 | const {device, context, format, size} = await initWebGPU(canvas)
87 | const pipeline = await initPipeline(device, format)
88 | // create 4x samplecount texture
89 | let MSAATexture = device.createTexture({
90 | size, format,
91 | sampleCount: 4,
92 | usage: GPUTextureUsage.RENDER_ATTACHMENT
93 | });
94 | let MSAAView = MSAATexture.createView();
95 | // start draw
96 | draw(device, context, pipeline, MSAAView)
97 |
98 | // re-configure context on resize
99 | window.addEventListener('resize', ()=>{
100 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
101 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
102 | // don't need to recall context.configure() after v104
103 | MSAATexture.destroy()
104 | MSAATexture = device.createTexture({
105 | size, format,
106 | sampleCount: 4,
107 | usage: GPUTextureUsage.RENDER_ATTACHMENT
108 | });
109 | MSAAView = MSAATexture.createView();
110 | draw(device, context, pipeline, MSAAView)
111 | })
112 | }
113 | run()
--------------------------------------------------------------------------------
/src/canvasTexture.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import imageTexture from './shaders/imageTexture.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if (!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return { device, context, format, size }
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: { width: number, height: number }) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicVert,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3'
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2'
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: imageTexture,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back',
71 | frontFace: 'ccw'
72 | },
73 | // Enable depth testing since we have z-level positions
74 | // Fragment closest to the camera is rendered in front
75 | depthStencil: {
76 | depthWriteEnabled: true,
77 | depthCompare: 'less',
78 | format: 'depth24plus'
79 | }
80 | } as GPURenderPipelineDescriptor)
81 | // create depthTexture for renderPass
82 | const depthTexture = device.createTexture({
83 | size, format: 'depth24plus',
84 | usage: GPUTextureUsage.RENDER_ATTACHMENT
85 | })
86 | const depthView = depthTexture.createView()
87 | // create vertex buffer
88 | const vertexBuffer = device.createBuffer({
89 | label: 'GPUBuffer store vertex',
90 | size: cube.vertex.byteLength,
91 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
92 | })
93 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
94 | // create a mvp matrix buffer
95 | const mvpBuffer = device.createBuffer({
96 | label: 'GPUBuffer store 4x4 matrix',
97 | size: 4 * 4 * 4, // 4 x 4 x float32
98 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
99 | })
100 | // create a uniform group contains matrix
101 | const uniformGroup = device.createBindGroup({
102 | label: 'Uniform Group with Matrix',
103 | layout: pipeline.getBindGroupLayout(0),
104 | entries: [
105 | {
106 | binding: 0,
107 | resource: {
108 | buffer: mvpBuffer
109 | }
110 | }
111 | ]
112 | })
113 | // return all vars
114 | return { pipeline, vertexBuffer, mvpBuffer, uniformGroup, depthTexture, depthView }
115 | }
116 |
117 | // create & submit device commands
118 | function draw(
119 | device: GPUDevice,
120 | context: GPUCanvasContext,
121 | pipelineObj: {
122 | pipeline: GPURenderPipeline
123 | vertexBuffer: GPUBuffer
124 | mvpBuffer: GPUBuffer
125 | uniformGroup: GPUBindGroup
126 | depthView: GPUTextureView
127 | },
128 | textureGroup: GPUBindGroup
129 | ) {
130 | // start encoder
131 | const commandEncoder = device.createCommandEncoder()
132 | const renderPassDescriptor: GPURenderPassDescriptor = {
133 | colorAttachments: [
134 | {
135 | view: context.getCurrentTexture().createView(),
136 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
137 | loadOp: 'clear',
138 | storeOp: 'store'
139 | }
140 | ],
141 | depthStencilAttachment: {
142 | view: pipelineObj.depthView,
143 | depthClearValue: 1.0,
144 | depthLoadOp: 'clear',
145 | depthStoreOp: 'store'
146 | }
147 | }
148 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
149 | passEncoder.setPipeline(pipelineObj.pipeline)
150 | // set uniformGroup
151 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
152 | passEncoder.setBindGroup(1, textureGroup)
153 | // set vertex
154 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
155 | // draw vertex count of cube
156 | passEncoder.draw(cube.vertexCount)
157 | passEncoder.end()
158 | // webgpu run in a separate process, all the commands will be executed after submit
159 | device.queue.submit([commandEncoder.finish()])
160 | }
161 |
162 | async function run() {
163 | const canvas = document.querySelector('canvas#webgpu') as HTMLCanvasElement
164 | const canvas2 = document.querySelector('canvas#canvas') as HTMLCanvasElement
165 | if (!canvas || !canvas2)
166 | throw new Error('No Canvas')
167 | const { device, context, format, size } = await initWebGPU(canvas)
168 | const pipelineObj = await initPipeline(device, format, size)
169 |
170 | // create empty texture
171 | const textureSize = [canvas2.width, canvas2.height]
172 | const texture = device.createTexture({
173 | size: textureSize,
174 | format: 'rgba8unorm',
175 | usage:
176 | GPUTextureUsage.TEXTURE_BINDING |
177 | GPUTextureUsage.COPY_DST |
178 | GPUTextureUsage.RENDER_ATTACHMENT
179 | })
180 | // Create a sampler with linear filtering for smooth interpolation.
181 | const sampler = device.createSampler({
182 | // addressModeU: 'repeat',
183 | // addressModeV: 'repeat',
184 | magFilter: 'linear',
185 | minFilter: 'linear'
186 | })
187 | const textureGroup = device.createBindGroup({
188 | label: 'Texture Group with Texture/Sampler',
189 | layout: pipelineObj.pipeline.getBindGroupLayout(1),
190 | entries: [
191 | {
192 | binding: 0,
193 | resource: sampler
194 | },
195 | {
196 | binding: 1,
197 | resource: texture.createView()
198 | }
199 | ]
200 | })
201 |
202 | // default state
203 | let aspect = size.width / size.height
204 | const position = { x: 0, y: 0, z: -5 }
205 | const scale = { x: 1, y: 1, z: 1 }
206 | const rotation = { x: 0, y: 0, z: 0 }
207 | // start loop
208 | function frame() {
209 | // rotate by time, and update transform matrix
210 | const now = Date.now() / 1000
211 | rotation.x = Math.sin(now)
212 | rotation.y = Math.cos(now)
213 | const mvpMatrix = getMvpMatrix(aspect, position, rotation, scale)
214 | device.queue.writeBuffer(
215 | pipelineObj.mvpBuffer,
216 | 0,
217 | mvpMatrix.buffer
218 | )
219 | // update texture from canvas every frame
220 | device.queue.copyExternalImageToTexture(
221 | { source: canvas2 },
222 | { texture: texture },
223 | textureSize
224 | )
225 | // then draw
226 | draw(device, context, pipelineObj, textureGroup)
227 | requestAnimationFrame(frame)
228 | }
229 | requestAnimationFrame(frame)
230 |
231 | // re-configure context on resize
232 | window.addEventListener('resize', () => {
233 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
234 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
235 | // don't need to recall context.configure() after v104
236 | // re-create depth texture
237 | pipelineObj.depthTexture.destroy()
238 | pipelineObj.depthTexture = device.createTexture({
239 | size, format: 'depth24plus',
240 | usage: GPUTextureUsage.RENDER_ATTACHMENT
241 | })
242 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
243 | // update aspect
244 | aspect = size.width / size.height
245 | })
246 |
247 | // a simple 2d canvas whiteboard
248 | {
249 | const ctx = canvas2.getContext('2d')
250 | if(!ctx)
251 | throw new Error('No support 2d')
252 | ctx.fillStyle = '#fff'
253 | ctx.lineWidth = 5
254 | ctx.lineCap = 'round'
255 | ctx.lineJoin = 'round'
256 | ctx.fillRect(0,0, canvas2.width, canvas2.height)
257 |
258 | let drawing = false
259 | let lastX = 0, lastY = 0
260 | let hue = 0
261 | canvas2.addEventListener('pointerdown', (e:PointerEvent) => {
262 | drawing = true
263 | lastX = e.offsetX
264 | lastY = e.offsetY
265 | })
266 | canvas2.addEventListener('pointermove', (e:PointerEvent) => {
267 | if(!drawing)
268 | return
269 | const x = e.offsetX
270 | const y = e.offsetY
271 | hue = hue > 360 ? 0 : hue +1
272 | ctx.strokeStyle = `hsl(${ hue }, 90%, 50%)`
273 | ctx.beginPath()
274 | ctx.moveTo(lastX, lastY)
275 | ctx.lineTo(x, y)
276 | ctx.stroke()
277 |
278 | lastX = x
279 | lastY = y
280 | })
281 | canvas2.addEventListener('pointerup', ()=> drawing = false)
282 | canvas2.addEventListener('pointerout', ()=> drawing = false)
283 | }
284 | }
285 | run()
--------------------------------------------------------------------------------
/src/colorTriangle.ts:
--------------------------------------------------------------------------------
1 | import positionVert from './shaders/position.vert.wgsl?raw'
2 | import colorFrag from './shaders/color.frag.wgsl?raw'
3 | import * as triangle from './util/triangle'
4 |
5 | // initialize webgpu device & config canvas context
6 | async function initWebGPU(canvas: HTMLCanvasElement) {
7 | if(!navigator.gpu)
8 | throw new Error('Not Support WebGPU')
9 | const adapter = await navigator.gpu.requestAdapter()
10 | if (!adapter)
11 | throw new Error('No Adapter Found')
12 | const device = await adapter.requestDevice()
13 | const context = canvas.getContext('webgpu') as GPUCanvasContext
14 | const format = navigator.gpu.getPreferredCanvasFormat()
15 | const devicePixelRatio = window.devicePixelRatio || 1
16 | canvas.width = canvas.clientWidth * devicePixelRatio
17 | canvas.height = canvas.clientHeight * devicePixelRatio
18 | const size = {width: canvas.width, height: canvas.height}
19 | context.configure({
20 | device, format,
21 | // prevent chrome warning after v102
22 | alphaMode: 'opaque'
23 | })
24 | return {device, context, format, size}
25 | }
26 |
27 | // create a simple pipiline & buffers
28 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat) {
29 | const pipeline = await device.createRenderPipelineAsync({
30 | label: 'Basic Pipline',
31 | layout: 'auto',
32 | vertex: {
33 | module: device.createShaderModule({
34 | code: positionVert,
35 | }),
36 | entryPoint: 'main',
37 | buffers: [{
38 | arrayStride: 3 * 4, // 3 float32,
39 | attributes: [
40 | {
41 | // position xyz
42 | shaderLocation: 0,
43 | offset: 0,
44 | format: 'float32x3',
45 | }
46 | ]
47 | }]
48 | },
49 | fragment: {
50 | module: device.createShaderModule({
51 | code: colorFrag,
52 | }),
53 | entryPoint: 'main',
54 | targets: [
55 | {
56 | format: format
57 | }
58 | ]
59 | },
60 | primitive: {
61 | topology: 'triangle-list' // try point-list, line-list, line-strip, triangle-strip?
62 | }
63 | } as GPURenderPipelineDescriptor)
64 | // create vertex buffer
65 | const vertexBuffer = device.createBuffer({
66 | label: 'GPUBuffer store vertex',
67 | size: triangle.vertex.byteLength,
68 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
69 | //mappedAtCreation: true
70 | })
71 | device.queue.writeBuffer(vertexBuffer, 0, triangle.vertex)
72 | // create color buffer
73 | const colorBuffer = device.createBuffer({
74 | label: 'GPUBuffer store rgba color',
75 | size: 4 * 4, // 4 * float32
76 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
77 | })
78 | device.queue.writeBuffer(colorBuffer, 0, new Float32Array([1,1,0,1]))
79 |
80 | // create a uniform group for color
81 | const uniformGroup = device.createBindGroup({
82 | label: 'Uniform Group with colorBuffer',
83 | layout: pipeline.getBindGroupLayout(0),
84 | entries: [
85 | {
86 | binding: 0,
87 | resource: {
88 | buffer: colorBuffer
89 | }
90 | }
91 | ]
92 | })
93 | // return all vars
94 | return {pipeline, vertexBuffer, colorBuffer, uniformGroup}
95 | }
96 |
97 | // create & submit device commands
98 | function draw(device: GPUDevice, context: GPUCanvasContext, pipelineObj: {
99 | pipeline: GPURenderPipeline,
100 | vertexBuffer: GPUBuffer,
101 | colorBuffer: GPUBuffer,
102 | uniformGroup: GPUBindGroup
103 | }) {
104 | const commandEncoder = device.createCommandEncoder()
105 | const view = context.getCurrentTexture().createView()
106 | const renderPassDescriptor: GPURenderPassDescriptor = {
107 | colorAttachments: [
108 | {
109 | view: view,
110 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
111 | loadOp: 'clear',
112 | storeOp: 'store'
113 | }
114 | ]
115 | }
116 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
117 | passEncoder.setPipeline(pipelineObj.pipeline)
118 | // set uniformGroup
119 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
120 | // set vertex
121 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
122 | // 3 vertex form a triangle
123 | passEncoder.draw(triangle.vertexCount)
124 | passEncoder.end()
125 | // webgpu run in a separate process, all the commands will be executed after submit
126 | device.queue.submit([commandEncoder.finish()])
127 | }
128 |
129 | async function run(){
130 | const canvas = document.querySelector('canvas')
131 | if (!canvas)
132 | throw new Error('No Canvas')
133 | const {device, context, format} = await initWebGPU(canvas)
134 | const pipelineObj = await initPipeline(device, format)
135 |
136 | // first draw
137 | draw(device, context, pipelineObj)
138 |
139 | // update colorBuffer if color changed
140 | document.querySelector('input[type="color"]')?.addEventListener('input', (e:Event) => {
141 | // get hex color string
142 | const color = (e.target as HTMLInputElement).value
143 | console.log(color)
144 | // parse hex color into rgb
145 | const r = +('0x' + color.slice(1, 3)) / 255
146 | const g = +('0x' + color.slice(3, 5)) / 255
147 | const b = +('0x' + color.slice(5, 7)) / 255
148 | // write colorBuffer with new color
149 | device.queue.writeBuffer(pipelineObj.colorBuffer, 0, new Float32Array([r, g, b, 1]))
150 | draw(device, context, pipelineObj)
151 | })
152 | // update vertexBuffer
153 | document.querySelector('input[type="range"]')?.addEventListener('input', (e:Event) => {
154 | // get input value
155 | const value = +(e.target as HTMLInputElement).value
156 | console.log(value)
157 | // chagne vertex 0/3/6
158 | triangle.vertex[0] = 0 + value
159 | triangle.vertex[3] = -0.5 + value
160 | triangle.vertex[6] = 0.5 + value
161 | // write vertexBuffer with new vertex
162 | device.queue.writeBuffer(pipelineObj.vertexBuffer, 0, triangle.vertex)
163 | draw(device, context, pipelineObj)
164 | })
165 | // re-configure context on resize
166 | window.addEventListener('resize', ()=>{
167 | canvas.width = canvas.clientWidth * devicePixelRatio
168 | canvas.height = canvas.clientHeight * devicePixelRatio
169 | // don't need to recall context.configure() after v104
170 | draw(device, context, pipelineObj)
171 | })
172 | }
173 | run()
--------------------------------------------------------------------------------
/src/cubes.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicVert,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3',
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2',
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: positionFrag,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back'
71 | },
72 | // Enable depth testing since we have z-level positions
73 | // Fragment closest to the camera is rendered in front
74 | depthStencil: {
75 | depthWriteEnabled: true,
76 | depthCompare: 'less',
77 | format: 'depth24plus',
78 | }
79 | } as GPURenderPipelineDescriptor)
80 | // create depthTexture for renderPass
81 | const depthTexture = device.createTexture({
82 | size, format: 'depth24plus',
83 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
84 | })
85 | const depthView = depthTexture.createView()
86 | // create vertex buffer
87 | const vertexBuffer = device.createBuffer({
88 | label: 'GPUBuffer store vertex',
89 | size: cube.vertex.byteLength,
90 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
91 | })
92 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
93 | // create a 4x4 mvp matrix1
94 | const mvpBuffer1 = device.createBuffer({
95 | label: 'GPUBuffer store 4x4 matrix1',
96 | size: 4 * 4 * 4, // 4 x 4 x float32
97 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
98 | })
99 | // create a uniform group for Matrix2
100 | const group1 = device.createBindGroup({
101 | label: 'Uniform Group with matrix1',
102 | layout: pipeline.getBindGroupLayout(0),
103 | entries: [
104 | {
105 | binding: 0,
106 | resource: {
107 | buffer: mvpBuffer1
108 | }
109 | }
110 | ]
111 | })
112 | // create a 4x4 mvp matrix2
113 | const mvpBuffer2 = device.createBuffer({
114 | label: 'GPUBuffer store 4x4 matrix2',
115 | size: 4 * 4 * 4, // 4 x 4 x float32
116 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
117 | })
118 | // create a uniform group for Matrix2
119 | const group2 = device.createBindGroup({
120 | label: 'Uniform Group with matrix2',
121 | layout: pipeline.getBindGroupLayout(0),
122 | entries: [
123 | {
124 | binding: 0,
125 | resource: {
126 | buffer: mvpBuffer2
127 | }
128 | }
129 | ]
130 | })
131 | // return all vars
132 | return {pipeline, depthTexture, depthView, vertexBuffer, mvpBuffer1, mvpBuffer2, group1, group2}
133 | }
134 |
135 | // create & submit device commands
136 | function draw(
137 | device: GPUDevice,
138 | context: GPUCanvasContext,
139 | pipelineObj: {
140 | pipeline: GPURenderPipeline,
141 | vertexBuffer: GPUBuffer,
142 | mvpBuffer1: GPUBuffer,
143 | mvpBuffer2: GPUBuffer,
144 | group1: GPUBindGroup,
145 | group2: GPUBindGroup,
146 | depthView: GPUTextureView
147 | }
148 | ) {
149 | // start encoder
150 | const commandEncoder = device.createCommandEncoder()
151 | const renderPassDescriptor: GPURenderPassDescriptor = {
152 | colorAttachments: [
153 | {
154 | view: context.getCurrentTexture().createView(),
155 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
156 | loadOp: 'clear',
157 | storeOp: 'store'
158 | }
159 | ],
160 | depthStencilAttachment: {
161 | view: pipelineObj.depthView,
162 | depthClearValue: 1.0,
163 | depthLoadOp: 'clear',
164 | depthStoreOp: 'store',
165 | }
166 | }
167 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
168 | passEncoder.setPipeline(pipelineObj.pipeline)
169 | // set vertex
170 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
171 | {
172 | // draw first cube
173 | passEncoder.setBindGroup(0, pipelineObj.group1)
174 | passEncoder.draw(cube.vertexCount)
175 | // draw second cube
176 | passEncoder.setBindGroup(0, pipelineObj.group2)
177 | passEncoder.draw(cube.vertexCount)
178 | }
179 | passEncoder.end()
180 | // webgpu run in a separate process, all the commands will be executed after submit
181 | device.queue.submit([commandEncoder.finish()])
182 | }
183 |
184 | async function run(){
185 | const canvas = document.querySelector('canvas')
186 | if (!canvas)
187 | throw new Error('No Canvas')
188 | const {device, context, format, size} = await initWebGPU(canvas)
189 | const pipelineObj = await initPipeline(device, format, size)
190 | // defaut state
191 | let aspect = size.width/ size.height
192 | const position1 = {x:2, y:0, z: -8}
193 | const rotation1 = {x: 0, y: 0, z:0}
194 | const scale1 = {x:1, y:1, z: 1}
195 | const position2 = {x:-2, y:0, z: -8}
196 | const rotation2 = {x: 0, y: 0, z:0}
197 | const scale2 = {x:1, y:1, z: 1}
198 | // start loop
199 | function frame(){
200 | // first, update two transform matrixs
201 | const now = Date.now() / 1000
202 | {
203 | // first cube
204 | rotation1.x = Math.sin(now)
205 | rotation1.y = Math.cos(now)
206 | const mvpMatrix1 = getMvpMatrix(aspect, position1, rotation1, scale1)
207 | device.queue.writeBuffer(
208 | pipelineObj.mvpBuffer1,
209 | 0,
210 | mvpMatrix1
211 | )
212 | }
213 | {
214 | // second cube
215 | rotation2.x = Math.cos(now)
216 | rotation2.y = Math.sin(now)
217 | const mvpMatrix2 = getMvpMatrix(aspect, position2, rotation2, scale2)
218 | device.queue.writeBuffer(
219 | pipelineObj.mvpBuffer2,
220 | 0,
221 | mvpMatrix2
222 | )
223 | }
224 | // then draw
225 | draw(device, context, pipelineObj)
226 | requestAnimationFrame(frame)
227 | }
228 | frame()
229 |
230 | // re-configure context on resize
231 | window.addEventListener('resize', ()=>{
232 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
233 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
234 | // don't need to recall context.configure() after v104
235 | // re-create depth texture
236 | pipelineObj.depthTexture.destroy()
237 | pipelineObj.depthTexture = device.createTexture({
238 | size, format: 'depth24plus',
239 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
240 | })
241 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
242 | // update aspect
243 | aspect = size.width/ size.height
244 | })
245 | }
246 | run()
--------------------------------------------------------------------------------
/src/cubesDynamicOffsets.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
30 | // create group layout for dynamicOffset
31 | const dynamicBindGroupLayout = device.createBindGroupLayout({
32 | entries: [
33 | {
34 | binding: 0,
35 | visibility: GPUShaderStage.VERTEX,
36 | buffer: {
37 | type: 'uniform',
38 | hasDynamicOffset: true,
39 | minBindingSize: 0
40 | }
41 | }
42 | ]
43 | })
44 | // create pipline layout for dynamicOffset
45 | const dynamicPipelineLayout = device.createPipelineLayout({
46 | bindGroupLayouts: [dynamicBindGroupLayout]
47 | });
48 | const pipeline = await device.createRenderPipelineAsync({
49 | label: 'Basic Pipline',
50 | layout: dynamicPipelineLayout,
51 | vertex: {
52 | module: device.createShaderModule({
53 | code: basicVert,
54 | }),
55 | entryPoint: 'main',
56 | buffers: [{
57 | arrayStride: 5 * 4, // 3 position 2 uv,
58 | attributes: [
59 | {
60 | // position
61 | shaderLocation: 0,
62 | offset: 0,
63 | format: 'float32x3',
64 | },
65 | {
66 | // uv
67 | shaderLocation: 1,
68 | offset: 3 * 4,
69 | format: 'float32x2',
70 | }
71 | ]
72 | }]
73 | },
74 | fragment: {
75 | module: device.createShaderModule({
76 | code: positionFrag,
77 | }),
78 | entryPoint: 'main',
79 | targets: [
80 | {
81 | format: format
82 | }
83 | ]
84 | },
85 | primitive: {
86 | topology: 'triangle-list',
87 | // Culling backfaces pointing away from the camera
88 | cullMode: 'back'
89 | },
90 | // Enable depth testing since we have z-level positions
91 | // Fragment closest to the camera is rendered in front
92 | depthStencil: {
93 | depthWriteEnabled: true,
94 | depthCompare: 'less',
95 | format: 'depth24plus',
96 | }
97 | } as GPURenderPipelineDescriptor)
98 | // create depthTexture for renderPass
99 | const depthTexture = device.createTexture({
100 | size, format: 'depth24plus',
101 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
102 | })
103 | const depthView = depthTexture.createView()
104 |
105 | // create vertex buffer
106 | const vertexBuffer = device.createBuffer({
107 | label: 'GPUBuffer store vertex',
108 | size: cube.vertex.byteLength,
109 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
110 | })
111 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
112 | // create a buffer with 2 mvp matrix
113 | const mvpBuffer = device.createBuffer({
114 | label: 'GPUBuffer store 2 4*4 matrix',
115 | size: 256 * 2, // 2 matrix with 256-byte aligned, or 256 + 64
116 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
117 | })
118 | // create a uniform group with dynamicOffsets
119 | const group = device.createBindGroup({
120 | layout: dynamicBindGroupLayout,
121 | entries: [
122 | {
123 | binding: 0,
124 | resource: {
125 | buffer: mvpBuffer,
126 | size: 4 * 16
127 | }
128 | }
129 | ]
130 | })
131 | // return all vars
132 | return {pipeline, vertexBuffer, mvpBuffer, group, depthTexture, depthView}
133 | }
134 |
135 | // create & submit device commands
136 | function draw(
137 | device: GPUDevice,
138 | context: GPUCanvasContext,
139 | pipelineObj: {
140 | pipeline: GPURenderPipeline,
141 | vertexBuffer: GPUBuffer,
142 | mvpBuffer: GPUBuffer,
143 | group: GPUBindGroup,
144 | depthView: GPUTextureView
145 | }
146 | ) {
147 | // start encoder
148 | const commandEncoder = device.createCommandEncoder()
149 | const renderPassDescriptor: GPURenderPassDescriptor = {
150 | colorAttachments: [
151 | {
152 | view: context.getCurrentTexture().createView(),
153 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
154 | loadOp: 'clear',
155 | storeOp: 'store'
156 | }
157 | ],
158 | depthStencilAttachment: {
159 | view: pipelineObj.depthView,
160 | depthClearValue: 1.0,
161 | depthLoadOp: 'clear',
162 | depthStoreOp: 'store',
163 | }
164 | }
165 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
166 | passEncoder.setPipeline(pipelineObj.pipeline)
167 | // set vertex
168 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
169 | const offset = new Uint32Array([0, 256])
170 | {
171 | // draw first cube with dynamicOffset 0
172 | passEncoder.setBindGroup(0, pipelineObj.group, offset, 0, 1)
173 | passEncoder.draw(cube.vertexCount)
174 | // draw second cube with dynamicOffset 256
175 | passEncoder.setBindGroup(0, pipelineObj.group, offset, 1, 1)
176 | passEncoder.draw(cube.vertexCount)
177 | }
178 | passEncoder.end()
179 | // webgpu run in a separate process, all the commands will be executed after submit
180 | device.queue.submit([commandEncoder.finish()])
181 | }
182 |
183 | async function run(){
184 | const canvas = document.querySelector('canvas')
185 | if (!canvas)
186 | throw new Error('No Canvas')
187 | const {device, context, format, size} = await initWebGPU(canvas)
188 | const pipelineObj = await initPipeline(device, format, size)
189 | // defaut state
190 | let aspect = size.width/ size.height
191 | const position1 = {x:2, y:0, z: -8}
192 | const rotation1 = {x: 0, y: 0, z:0}
193 | const scale1 = {x:1, y:1, z: 1}
194 | const position2 = {x:-2, y:0, z: -8}
195 | const rotation2 = {x: 0, y: 0, z:0}
196 | const scale2 = {x:1, y:1, z: 1}
197 | // start loop
198 | function frame(){
199 | // first, update two transform matrixs
200 | const now = Date.now() / 1000
201 | {
202 | // first cube
203 | rotation1.x = Math.sin(now)
204 | rotation1.y = Math.cos(now)
205 | const mvpMatrix1 = getMvpMatrix(aspect, position1, rotation1, scale1)
206 | device.queue.writeBuffer(
207 | pipelineObj.mvpBuffer,
208 | 0,
209 | mvpMatrix1
210 | )
211 | }
212 | {
213 | // second cube
214 | rotation2.x = Math.cos(now)
215 | rotation2.y = Math.sin(now)
216 | const mvpMatrix2 = getMvpMatrix(aspect, position2, rotation2, scale2)
217 | device.queue.writeBuffer(
218 | pipelineObj.mvpBuffer,
219 | 256,
220 | mvpMatrix2
221 | )
222 | }
223 | // then draw
224 | draw(device, context, pipelineObj)
225 | requestAnimationFrame(frame)
226 | }
227 | frame()
228 |
229 | // re-configure context on resize
230 | window.addEventListener('resize', ()=>{
231 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
232 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
233 | // don't need to recall context.configure() after v104
234 | // re-create depth texture
235 | pipelineObj.depthTexture.destroy()
236 | pipelineObj.depthTexture = device.createTexture({
237 | size, format: 'depth24plus',
238 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
239 | })
240 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
241 | // update aspect
242 | aspect = size.width/ size.height
243 | })
244 | }
245 | run()
--------------------------------------------------------------------------------
/src/cubesInstance.ts:
--------------------------------------------------------------------------------
1 | import basicInstanced from './shaders/basic.instanced.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicInstanced,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3',
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2',
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: positionFrag,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back'
71 | },
72 | // Enable depth testing since we have z-level positions
73 | // Fragment closest to the camera is rendered in front
74 | depthStencil: {
75 | depthWriteEnabled: true,
76 | depthCompare: 'less',
77 | format: 'depth24plus',
78 | }
79 | } as GPURenderPipelineDescriptor)
80 | // create depthTexture for renderPass
81 | const depthTexture = device.createTexture({
82 | size, format: 'depth24plus',
83 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
84 | })
85 | const depthView = depthTexture.createView()
86 |
87 | // create vertex buffer
88 | const vertexBuffer = device.createBuffer({
89 | label: 'GPUBuffer store vertex',
90 | size: cube.vertex.byteLength,
91 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
92 | })
93 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
94 | // create a 4x4xNUM STORAGE buffer to store matrix
95 | const mvpBuffer = device.createBuffer({
96 | label: 'GPUBuffer store n*4x4 matrix',
97 | size: 4 * 4 * 4 * NUM, // 4 x 4 x float32 x NUM
98 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
99 | })
100 | // create a uniform group for Matrix
101 | const group = device.createBindGroup({
102 | label: 'Uniform Group with matrix',
103 | layout: pipeline.getBindGroupLayout(0),
104 | entries: [
105 | {
106 | binding: 0,
107 | resource: {
108 | buffer: mvpBuffer
109 | }
110 | }
111 | ]
112 | })
113 | // return all vars
114 | return {pipeline, vertexBuffer, mvpBuffer, group, depthTexture, depthView}
115 | }
116 |
117 | // create & submit device commands
118 | function draw(
119 | device: GPUDevice,
120 | context: GPUCanvasContext,
121 | pipelineObj: {
122 | pipeline: GPURenderPipeline,
123 | vertexBuffer: GPUBuffer,
124 | mvpBuffer: GPUBuffer,
125 | group: GPUBindGroup,
126 | depthView: GPUTextureView
127 | }
128 | ) {
129 | const commandEncoder = device.createCommandEncoder()
130 | const renderPassDescriptor: GPURenderPassDescriptor = {
131 | colorAttachments: [
132 | {
133 | view: context.getCurrentTexture().createView(),
134 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
135 | loadOp: 'clear',
136 | storeOp: 'store'
137 | }
138 | ],
139 | depthStencilAttachment: {
140 | view: pipelineObj.depthView,
141 | depthClearValue: 1.0,
142 | depthLoadOp: 'clear',
143 | depthStoreOp: 'store',
144 | }
145 | }
146 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
147 | passEncoder.setPipeline(pipelineObj.pipeline)
148 | // set vertex
149 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
150 | {
151 | // draw NUM cubes in one draw()
152 | passEncoder.setBindGroup(0, pipelineObj.group)
153 | passEncoder.draw(cube.vertexCount, NUM)
154 | }
155 | passEncoder.end()
156 | // webgpu run in a separate process, all the commands will be executed after submit
157 | device.queue.submit([commandEncoder.finish()])
158 | }
159 |
160 | // total objects
161 | const NUM = 10000
162 | async function run(){
163 | const canvas = document.querySelector('canvas')
164 | if (!canvas)
165 | throw new Error('No Canvas')
166 |
167 | const {device, context, format, size} = await initWebGPU(canvas)
168 | const pipelineObj = await initPipeline(device, format, size)
169 | // create objects
170 | let aspect = size.width / size.height
171 | const scene:any[] = []
172 | const mvpBuffer = new Float32Array(NUM * 4 * 4)
173 | for(let i = 0; i < NUM; i++){
174 | // craete simple object
175 | const position = {x: Math.random() * 40 - 20, y: Math.random() * 40 - 20, z: - 50 - Math.random() * 50}
176 | const rotation = {x: 0, y: 0, z: 0}
177 | const scale = {x:1, y:1, z:1}
178 | scene.push({position, rotation, scale})
179 | }
180 | // start loop
181 | function frame(){
182 | // update rotation for each object
183 | for(let i = 0; i < scene.length - 1; i++){
184 | const obj = scene[i]
185 | const now = Date.now() / 1000
186 | obj.rotation.x = Math.sin(now + i)
187 | obj.rotation.y = Math.cos(now + i)
188 | const mvpMatrix = getMvpMatrix(aspect, obj.position, obj.rotation, obj.scale)
189 | // update buffer based on offset
190 | // device.queue.writeBuffer(
191 | // pipelineObj.mvpBuffer,
192 | // i * 4 * 4 * 4, // offset for each object, no need to 256-byte aligned
193 | // mvpMatrix
194 | // )
195 | // or save to mvpBuffer first
196 | mvpBuffer.set(mvpMatrix, i * 4 * 4)
197 | }
198 | // the better way is update buffer in one write after loop
199 | device.queue.writeBuffer(pipelineObj.mvpBuffer, 0, mvpBuffer)
200 | draw(device, context, pipelineObj)
201 | requestAnimationFrame(frame)
202 | }
203 | frame()
204 |
205 | // re-configure context on resize
206 | window.addEventListener('resize', ()=>{
207 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
208 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
209 | // don't need to recall context.configure() after v104
210 | // re-create depth texture
211 | pipelineObj.depthTexture.destroy()
212 | pipelineObj.depthTexture = device.createTexture({
213 | size, format: 'depth24plus',
214 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
215 | })
216 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
217 | // update aspect
218 | aspect = size.width/ size.height
219 | })
220 | }
221 | run()
--------------------------------------------------------------------------------
/src/cubesOffsets.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicVert,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3',
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2',
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: positionFrag,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back'
71 | },
72 | // Enable depth testing since we have z-level positions
73 | // Fragment closest to the camera is rendered in front
74 | depthStencil: {
75 | depthWriteEnabled: true,
76 | depthCompare: 'less',
77 | format: 'depth24plus',
78 | }
79 | } as GPURenderPipelineDescriptor)
80 | // create depthTexture for renderPass
81 | const depthTexture = device.createTexture({
82 | size, format: 'depth24plus',
83 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
84 | })
85 | const depthView = depthTexture.createView()
86 |
87 | // create vertex buffer
88 | const vertexBuffer = device.createBuffer({
89 | label: 'GPUBuffer store vertex',
90 | size: cube.vertex.byteLength,
91 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
92 | })
93 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
94 |
95 | // create a buffer for 2 mvp matrix
96 | const mvpBuffer = device.createBuffer({
97 | label: 'GPUBuffer store 2 4*4 matrix',
98 | size: 256 * 2, // 2 matrix with 256-byte aligned, or 256 + 64
99 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
100 | })
101 | //create two groups with different offset for matrix3
102 | const group1 = device.createBindGroup({
103 | layout: pipeline.getBindGroupLayout(0),
104 | entries: [
105 | {
106 | binding: 0,
107 | resource: {
108 | buffer: mvpBuffer,
109 | offset: 0,
110 | size: 4 * 16
111 | }
112 | }
113 | ]
114 | })
115 | // group with 256-byte offset
116 | const group2 = device.createBindGroup({
117 | layout: pipeline.getBindGroupLayout(0),
118 | entries: [
119 | {
120 | binding: 0,
121 | resource: {
122 | buffer: mvpBuffer,
123 | offset: 256, // must be 256-byte aligned
124 | size: 4 * 16
125 | }
126 | }
127 | ]
128 | })
129 | // return all vars
130 | return {pipeline, vertexBuffer, mvpBuffer, group1, group2, depthTexture, depthView}
131 | }
132 |
133 | // create & submit device commands
134 | function draw(
135 | device: GPUDevice,
136 | context: GPUCanvasContext,
137 | pipelineObj: {
138 | pipeline: GPURenderPipeline,
139 | vertexBuffer: GPUBuffer,
140 | mvpBuffer: GPUBuffer,
141 | group1: GPUBindGroup,
142 | group2: GPUBindGroup,
143 | depthView: GPUTextureView
144 | }
145 | ) {
146 | // start encoder
147 | const commandEncoder = device.createCommandEncoder()
148 | const renderPassDescriptor: GPURenderPassDescriptor = {
149 | colorAttachments: [
150 | {
151 | view: context.getCurrentTexture().createView(),
152 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
153 | loadOp: 'clear',
154 | storeOp: 'store'
155 | }
156 | ],
157 | depthStencilAttachment: {
158 | view: pipelineObj.depthView,
159 | depthClearValue: 1.0,
160 | depthLoadOp: 'clear',
161 | depthStoreOp: 'store',
162 | }
163 | }
164 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
165 | passEncoder.setPipeline(pipelineObj.pipeline)
166 | // set vertex
167 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
168 | {
169 | // draw first cube
170 | passEncoder.setBindGroup(0, pipelineObj.group1)
171 | passEncoder.draw(cube.vertexCount)
172 | // draw second cube
173 | passEncoder.setBindGroup(0, pipelineObj.group2)
174 | passEncoder.draw(cube.vertexCount)
175 | }
176 | passEncoder.end()
177 | // webgpu run in a separate process, all the commands will be executed after submit
178 | device.queue.submit([commandEncoder.finish()])
179 | }
180 |
181 | async function run(){
182 | const canvas = document.querySelector('canvas')
183 | if (!canvas)
184 | throw new Error('No Canvas')
185 | const {device, context, format, size} = await initWebGPU(canvas)
186 | const pipelineObj = await initPipeline(device, format, size)
187 | // defaut state
188 | let aspect = size.width/ size.height
189 | const position1 = {x:2, y:0, z: -8}
190 | const rotation1 = {x: 0, y: 0, z:0}
191 | const scale1 = {x:1, y:1, z: 1}
192 | const position2 = {x:-2, y:0, z: -8}
193 | const rotation2 = {x: 0, y: 0, z:0}
194 | const scale2 = {x:1, y:1, z: 1}
195 | // start loop
196 | function frame(){
197 | // first, update two transform matrixs
198 | const now = Date.now() / 1000
199 | {
200 | // first cube
201 | rotation1.x = Math.sin(now)
202 | rotation1.y = Math.cos(now)
203 | const mvpMatrix1 = getMvpMatrix(aspect, position1, rotation1, scale1)
204 | device.queue.writeBuffer(
205 | pipelineObj.mvpBuffer,
206 | 0,
207 | mvpMatrix1
208 | )
209 | }
210 | {
211 | // second cube with 256-byte offset
212 | rotation2.x = Math.cos(now)
213 | rotation2.y = Math.sin(now)
214 | const mvpMatrix2 = getMvpMatrix(aspect, position2, rotation2, scale2)
215 | device.queue.writeBuffer(
216 | pipelineObj.mvpBuffer,
217 | 256, // aligned at 256-byte
218 | mvpMatrix2
219 | )
220 | }
221 | draw(device, context, pipelineObj)
222 | requestAnimationFrame(frame)
223 | }
224 | frame()
225 |
226 | // re-configure context on resize
227 | window.addEventListener('resize', ()=>{
228 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
229 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
230 | // don't need to recall context.configure() after v104
231 | // re-create depth texture
232 | pipelineObj.depthTexture.destroy()
233 | pipelineObj.depthTexture = device.createTexture({
234 | size, format: 'depth24plus',
235 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
236 | })
237 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
238 | // update aspect
239 | aspect = size.width/ size.height
240 | })
241 | }
242 | run()
--------------------------------------------------------------------------------
/src/cubesRenderBundle.ts:
--------------------------------------------------------------------------------
1 | import basicInstanced from './shaders/basic.instanced.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicInstanced,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3',
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2',
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: positionFrag,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back'
71 | },
72 | // Enable depth testing since we have z-level positions
73 | // Fragment closest to the camera is rendered in front
74 | depthStencil: {
75 | depthWriteEnabled: true,
76 | depthCompare: 'less',
77 | format: 'depth24plus',
78 | }
79 | } as GPURenderPipelineDescriptor)
80 | // create depthTexture for renderPass
81 | const depthTexture = device.createTexture({
82 | size, format: 'depth24plus',
83 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
84 | })
85 | const depthView = depthTexture.createView()
86 |
87 | // create vertex buffer
88 | const vertexBuffer = device.createBuffer({
89 | label: 'GPUBuffer store vertex',
90 | size: cube.vertex.byteLength,
91 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
92 | })
93 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
94 | // create a 4x4xNUM STORAGE buffer to store matrix
95 | const mvpBuffer = device.createBuffer({
96 | label: 'GPUBuffer store n*4x4 matrix',
97 | size: 4 * 4 * 4 * NUM, // 4 x 4 x float32 x NUM
98 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
99 | })
100 | // create a uniform group for Matrix
101 | const group = device.createBindGroup({
102 | label: 'Uniform Group with matrix',
103 | layout: pipeline.getBindGroupLayout(0),
104 | entries: [
105 | {
106 | binding: 0,
107 | resource: {
108 | buffer: mvpBuffer
109 | }
110 | }
111 | ]
112 | })
113 | // return all vars
114 | return {pipeline, vertexBuffer, mvpBuffer, group, depthTexture, depthView}
115 | }
116 |
117 | // create & submit device commands
118 | function draw(
119 | device: GPUDevice,
120 | renderPassDescriptor: GPURenderPassDescriptor,
121 | renderBundle: Iterable
122 | ) {
123 | const commandEncoder = device.createCommandEncoder()
124 | // console.time('executeBundles')
125 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
126 | // execute bundles, could save over 10X CPU time
127 | // but won't help with GPU time
128 | passEncoder.executeBundles(renderBundle)
129 | passEncoder.end()
130 | // console.timeEnd('executeBundles')
131 | // webgpu run in a separate process, all the commands will be executed after submit
132 | device.queue.submit([commandEncoder.finish()])
133 | }
134 |
135 | // total objects
136 | const NUM = 10000
137 | async function run(){
138 | const canvas = document.querySelector('canvas')
139 | if (!canvas)
140 | throw new Error('No Canvas')
141 |
142 | const {device, context, format, size} = await initWebGPU(canvas)
143 | const pipelineObj = await initPipeline(device, format, size)
144 | // create objects
145 | let aspect = size.width / size.height
146 | const scene:any[] = []
147 | const mvpBuffer = new Float32Array(NUM * 4 * 4)
148 | for(let i = 0; i < NUM; i++){
149 | // craete simple object
150 | const position = {x: Math.random() * 40 - 20, y: Math.random() * 40 - 20, z: - 50 - Math.random() * 50}
151 | const rotation = {x: 0, y: 0, z: 0}
152 | const scale = {x:1, y:1, z:1}
153 | scene.push({position, rotation, scale})
154 | }
155 | // record renderBundle to save CPU encoder time
156 | let renderBundle: Iterable
157 | {
158 | const passEncoder = device.createRenderBundleEncoder({
159 | colorFormats: [format],
160 | depthStencilFormat: 'depth24plus'
161 | })
162 | passEncoder.setPipeline(pipelineObj.pipeline)
163 | // asume we have different objects
164 | // need to change vertex and group on every draw
165 | // that requires a lot of cpu time for a large NUM
166 | console.time('recordBundles')
167 | for(let i = 0; i< NUM; i++){
168 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
169 | passEncoder.setBindGroup(0, pipelineObj.group)
170 | passEncoder.draw(cube.vertexCount, 1, 0, i)
171 | }
172 | console.timeEnd('recordBundles')
173 | renderBundle = [passEncoder.finish()]
174 | }
175 | // start loop
176 | function frame(){
177 | // update rotation for each object
178 | for(let i = 0; i < scene.length - 1; i++){
179 | const obj = scene[i]
180 | const now = Date.now() / 1000
181 | obj.rotation.x = Math.sin(now + i)
182 | obj.rotation.y = Math.cos(now + i)
183 | const mvpMatrix = getMvpMatrix(aspect, obj.position, obj.rotation, obj.scale)
184 | mvpBuffer.set(mvpMatrix, i * 4 * 4)
185 | }
186 | // the better way is update buffer in one write after loop
187 | device.queue.writeBuffer(pipelineObj.mvpBuffer, 0, mvpBuffer)
188 | const renderPassDescriptor: GPURenderPassDescriptor = {
189 | colorAttachments: [
190 | {
191 | view: context.getCurrentTexture().createView(),
192 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
193 | loadOp: 'clear',
194 | storeOp: 'store'
195 | }
196 | ],
197 | depthStencilAttachment: {
198 | view: pipelineObj.depthView,
199 | depthClearValue: 1.0,
200 | depthLoadOp: 'clear',
201 | depthStoreOp: 'store',
202 | }
203 | }
204 | draw(device, renderPassDescriptor, renderBundle)
205 | requestAnimationFrame(frame)
206 | }
207 | frame()
208 |
209 | // re-configure context on resize
210 | window.addEventListener('resize', ()=>{
211 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
212 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
213 | // don't need to recall context.configure() after v104
214 | // re-create depth texture
215 | pipelineObj.depthTexture.destroy()
216 | pipelineObj.depthTexture = device.createTexture({
217 | size, format: 'depth24plus',
218 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
219 | })
220 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
221 | // update aspect
222 | aspect = size.width/ size.height
223 | })
224 | }
225 | run()
--------------------------------------------------------------------------------
/src/gpuCompute.ts:
--------------------------------------------------------------------------------
1 | import {mat4} from 'gl-matrix'
2 | import computeTransform from './shaders/compute.transform.wgsl?raw'
3 |
4 | async function initWebGPU(){
5 | if(!navigator.gpu)
6 | throw new Error('Not Support WebGPU')
7 | const adapter = await navigator.gpu.requestAdapter({
8 | powerPreference: 'high-performance'
9 | })
10 | if (!adapter)
11 | throw new Error('No Adapter Found')
12 | const device = await adapter.requestDevice({
13 | requiredLimits: {
14 | maxStorageBufferBindingSize: adapter.limits.maxStorageBufferBindingSize
15 | }
16 | })
17 | return device
18 | }
19 | async function initPipeline(device: GPUDevice, modelMatrix:Float32Array, projection:Float32Array){
20 | const descriptor: GPUComputePipelineDescriptor = {
21 | layout: 'auto',
22 | compute: {
23 | module: device.createShaderModule({
24 | code: computeTransform
25 | }),
26 | entryPoint: 'main'
27 | }
28 | }
29 | const pipeline = await device.createComputePipelineAsync(descriptor)
30 | // papare gpu buffers
31 | // hold nx4x4 modelView matrix buffer
32 | const modelBuffer = device.createBuffer({
33 | size: modelMatrix.byteLength,
34 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
35 | })
36 | console.time('writeBuffer')
37 | device.queue.writeBuffer(modelBuffer, 0, modelMatrix)
38 | console.timeEnd('writeBuffer')
39 | // hold a 4x4 projection buffer
40 | const projectionBuffer = device.createBuffer({
41 | size: projection.byteLength,
42 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
43 | })
44 | device.queue.writeBuffer(projectionBuffer, 0, projection)
45 | // create a n*4x4 matrix buffer to hold result
46 | const mvpBuffer = device.createBuffer({
47 | size: modelMatrix.byteLength,
48 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
49 | })
50 | // indicate the size of total matrix
51 | const countBuffer = device.createBuffer({
52 | size: 4, // just one uint32 number
53 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
54 | })
55 | device.queue.writeBuffer(countBuffer, 0, new Uint32Array([NUM]))
56 |
57 | // create a bindGroup to hold 4 buffers
58 | const bindGroup = device.createBindGroup({
59 | layout: pipeline.getBindGroupLayout(0),
60 | entries: [{
61 | binding: 0,
62 | resource: {
63 | buffer: modelBuffer
64 | }
65 | },{
66 | binding: 1,
67 | resource: {
68 | buffer: projectionBuffer
69 | }
70 | },{
71 | binding: 2,
72 | resource: {
73 | buffer: mvpBuffer
74 | }
75 | },{
76 | binding: 3,
77 | resource: {
78 | buffer: countBuffer
79 | }
80 | }]
81 | })
82 | return {pipeline, bindGroup, mvpBuffer}
83 | }
84 | async function run(){
85 | cpu.innerHTML = gpu.innerHTML = '-'
86 | button.innerHTML = 'Testing ...'
87 | button.disabled = true
88 | // small delay for rendering UI
89 | await new Promise(res=>setTimeout(res))
90 | // papare data
91 | const fakeMatrix = mat4.create()
92 | const modelMatrix = new Float32Array(NUM * 4 * 4) // hold gpu matrix
93 | const matrixArray = [] // hold cpu matrix
94 | const projection = fakeMatrix as Float32Array// fake projection matrix
95 | for(let i = 0; i < NUM; i++){
96 | matrixArray.push(fakeMatrix)
97 | modelMatrix.set(fakeMatrix, i * 4 * 4)
98 | }
99 |
100 | // start test cpu time
101 | console.time('cpu multiply x10')
102 | let start = performance.now()
103 | for(let i = 0; i < 10; i++)
104 | for(let i = 0; i < NUM; i++){
105 | let m = matrixArray[i]
106 | mat4.multiply(m, projection, m)
107 | }
108 | cpu.innerHTML = ((performance.now() - start) / 10).toFixed(2)
109 | console.timeEnd('cpu multiply x10')
110 |
111 | // papare gpu
112 | const device = await initWebGPU()
113 | const {pipeline, bindGroup, mvpBuffer} = await initPipeline(device, modelMatrix, projection)
114 | // papare a read buffer to map mvp back to js
115 | const readBuffer = device.createBuffer({
116 | size: modelMatrix.byteLength,
117 | usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST
118 | })
119 | // run test x300
120 | const commandEncoder = device.createCommandEncoder()
121 | for(let i = 0; i < 300; i++){
122 | const computePass = commandEncoder.beginComputePass()
123 | computePass.setPipeline(pipeline)
124 | computePass.setBindGroup(0, bindGroup)
125 | computePass.dispatchWorkgroups(Math.ceil(NUM / 128))
126 | computePass.end()
127 | }
128 | // copy mvpBuffer will be done after all computePasses
129 | commandEncoder.copyBufferToBuffer(mvpBuffer, 0, readBuffer, 0, modelMatrix.byteLength)
130 | device.queue.submit([commandEncoder.finish()])
131 | // compute time by mapAsync
132 | console.time('gpu multiply x300')
133 | start = performance.now()
134 | // map readBuffer from GPU to CPU/JS
135 | await readBuffer.mapAsync(GPUMapMode.READ)
136 | gpu.innerHTML = ((performance.now() - start) / 300).toFixed(2)
137 | console.timeEnd('gpu multiply x300')
138 | // transfor buffer to JS object
139 | const copyArrayBuffer = readBuffer.getMappedRange()
140 | const result = new Float32Array(copyArrayBuffer)
141 | console.log(result)
142 | // unmap GPU buffer and release CPU/JS buffer
143 | readBuffer.unmap()
144 | // reset UI
145 | button.disabled = false
146 | button.innerHTML = 'Run'
147 | }
148 |
149 | // total count
150 | let NUM = 1000000
151 | let select = document.querySelector('#select') as HTMLSelectElement
152 | let button = document.querySelector('button') as HTMLButtonElement
153 | let cpu = document.querySelector('#cpu') as HTMLSpanElement
154 | let gpu = document.querySelector('#gpu') as HTMLSpanElement
155 | select.addEventListener('change', (e:any)=>{
156 | console.log(e.target.value)
157 | NUM = +e.target.value
158 | run()
159 | })
160 | button.addEventListener('click', run)
--------------------------------------------------------------------------------
/src/gpuParticles.ts:
--------------------------------------------------------------------------------
1 | import basicInstanced from './shaders/basic.instanced.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import positionCompute from './shaders/compute.position.wgsl?raw'
4 | import * as box from './util/box'
5 | import { getModelViewMatrix, getProjectionMatrix } from './util/math'
6 |
7 | // initialize webgpu device & config canvas context
8 | async function initWebGPU(canvas: HTMLCanvasElement) {
9 | if(!navigator.gpu)
10 | throw new Error('Not Support WebGPU')
11 | const adapter = await navigator.gpu.requestAdapter({
12 | powerPreference: 'high-performance'
13 | })
14 | if (!adapter)
15 | throw new Error('No Adapter Found')
16 | const device = await adapter.requestDevice({
17 | requiredLimits: {
18 | maxStorageBufferBindingSize: adapter.limits.maxStorageBufferBindingSize
19 | }
20 | })
21 | const context = canvas.getContext('webgpu') as GPUCanvasContext
22 | const format = navigator.gpu.getPreferredCanvasFormat()
23 | const devicePixelRatio = window.devicePixelRatio || 1
24 | canvas.width = canvas.clientWidth * devicePixelRatio
25 | canvas.height = canvas.clientHeight * devicePixelRatio
26 | const size = {width: canvas.width, height: canvas.height}
27 | context.configure({
28 | device, format,
29 | // prevent chrome warning after v102
30 | alphaMode: 'opaque'
31 | })
32 | return {device, context, format, size}
33 | }
34 |
35 | // create pipiline & buffers
36 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size:{width:number, height:number}) {
37 | const renderPipeline = await device.createRenderPipelineAsync({
38 | label: 'Basic Pipline',
39 | layout: 'auto',
40 | vertex: {
41 | module: device.createShaderModule({
42 | code: basicInstanced,
43 | }),
44 | entryPoint: 'main',
45 | buffers: [{
46 | arrayStride: 8 * 4, // 3 position 2 uv,
47 | attributes: [
48 | {
49 | // position
50 | shaderLocation: 0,
51 | offset: 0,
52 | format: 'float32x3',
53 | },
54 | {
55 | // normal
56 | shaderLocation: 1,
57 | offset: 3 * 4,
58 | format: 'float32x3',
59 | },
60 | {
61 | // uv
62 | shaderLocation: 2,
63 | offset: 6 * 4,
64 | format: 'float32x2',
65 | }
66 | ]
67 | }]
68 | },
69 | fragment: {
70 | module: device.createShaderModule({
71 | code: positionFrag,
72 | }),
73 | entryPoint: 'main',
74 | targets: [
75 | {
76 | format: format
77 | }
78 | ]
79 | },
80 | primitive: {
81 | topology: 'triangle-list',
82 | // Culling backfaces pointing away from the camera
83 | cullMode: 'back'
84 | },
85 | // Enable depth testing since we have z-level positions
86 | // Fragment closest to the camera is rendered in front
87 | depthStencil: {
88 | depthWriteEnabled: true,
89 | depthCompare: 'less',
90 | format: 'depth24plus',
91 | }
92 | } as GPURenderPipelineDescriptor)
93 | // create depthTexture for renderPass
94 | const depthTexture = device.createTexture({
95 | size, format: 'depth24plus',
96 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
97 | })
98 | const depthView = depthTexture.createView()
99 | // create a compute pipeline
100 | const computePipeline = await device.createComputePipelineAsync({
101 | layout: 'auto',
102 | compute: {
103 | module: device.createShaderModule({
104 | code: positionCompute
105 | }),
106 | entryPoint: 'main'
107 | }
108 | })
109 |
110 | // create vertex buffer
111 | const vertexBuffer = device.createBuffer({
112 | label: 'GPUBuffer store vertex',
113 | size: box.vertex.byteLength,
114 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
115 | })
116 | device.queue.writeBuffer(vertexBuffer, 0, box.vertex)
117 | const indexBuffer = device.createBuffer({
118 | label: 'GPUBuffer store index',
119 | size: box.index.byteLength,
120 | usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
121 | })
122 | device.queue.writeBuffer(indexBuffer, 0, box.index)
123 |
124 | const modelBuffer = device.createBuffer({
125 | label: 'GPUBuffer store MAX model matrix',
126 | size: 4 * 4 * 4 * MAX, // mat4x4 x float32 x MAX
127 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
128 | })
129 | const projectionBuffer = device.createBuffer({
130 | label: 'GPUBuffer store camera projection',
131 | size: 4 * 4 * 4, // mat4x4 x float32
132 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
133 | })
134 | const mvpBuffer = device.createBuffer({
135 | label: 'GPUBuffer store MAX MVP',
136 | size: 4 * 4 * 4 * MAX, // mat4x4 x float32 x MAX
137 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
138 | })
139 | const velocityBuffer = device.createBuffer({
140 | label: 'GPUBuffer store MAX velocity',
141 | size: 4 * 4 * MAX, // 4 position x float32 x MAX
142 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
143 | })
144 | const inputBuffer = device.createBuffer({
145 | label: 'GPUBuffer store input vars',
146 | size: 7 * 4, // float32 * 7
147 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
148 | })
149 |
150 |
151 | // create a bindGroup for renderPass
152 | const renderGroup = device.createBindGroup({
153 | label: 'Group for renderPass',
154 | layout: renderPipeline.getBindGroupLayout(0),
155 | entries: [
156 | {
157 | binding: 0,
158 | resource: {
159 | buffer: mvpBuffer
160 | }
161 | }
162 | ]
163 | })
164 | // create bindGroup for computePass
165 | const computeGroup = device.createBindGroup({
166 | label: 'Group for computePass',
167 | layout: computePipeline.getBindGroupLayout(0),
168 | entries: [
169 | {
170 | binding: 0,
171 | resource: {
172 | buffer: inputBuffer
173 | }
174 | },
175 | {
176 | binding: 1,
177 | resource: {
178 | buffer: velocityBuffer
179 | }
180 | },
181 | {
182 | binding: 2,
183 | resource: {
184 | buffer: modelBuffer
185 | }
186 | },
187 | {
188 | binding: 3,
189 | resource: {
190 | buffer: projectionBuffer
191 | }
192 | },
193 | {
194 | binding: 4,
195 | resource: {
196 | buffer: mvpBuffer
197 | }
198 | }
199 | ]
200 | })
201 | // return all vars
202 | return {
203 | renderPipeline, computePipeline,
204 | vertexBuffer, indexBuffer,
205 | modelBuffer, projectionBuffer, inputBuffer, velocityBuffer,
206 | renderGroup, computeGroup,
207 | depthTexture, depthView
208 | }
209 | }
210 |
211 | // create & submit device commands
212 | function draw(
213 | device: GPUDevice,
214 | context: GPUCanvasContext,
215 | pipelineObj: {
216 | renderPipeline: GPURenderPipeline,
217 | computePipeline: GPUComputePipeline,
218 | vertexBuffer: GPUBuffer,
219 | indexBuffer: GPUBuffer,
220 | renderGroup: GPUBindGroup,
221 | computeGroup: GPUBindGroup,
222 | depthView: GPUTextureView
223 | }
224 | ) {
225 | const commandEncoder = device.createCommandEncoder()
226 | const computePass = commandEncoder.beginComputePass()
227 | computePass.setPipeline(pipelineObj.computePipeline)
228 | computePass.setBindGroup(0, pipelineObj.computeGroup)
229 | computePass.dispatchWorkgroups(Math.ceil(NUM / 128))
230 | computePass.end()
231 |
232 | const passEncoder = commandEncoder.beginRenderPass({
233 | colorAttachments: [
234 | {
235 | view: context.getCurrentTexture().createView(),
236 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
237 | loadOp: 'clear',
238 | storeOp: 'store'
239 | }
240 | ],
241 | depthStencilAttachment: {
242 | view: pipelineObj.depthView,
243 | depthClearValue: 1.0,
244 | depthLoadOp: 'clear',
245 | depthStoreOp: 'store',
246 | }
247 | })
248 | passEncoder.setPipeline(pipelineObj.renderPipeline)
249 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
250 | passEncoder.setIndexBuffer(pipelineObj.indexBuffer, 'uint16')
251 | passEncoder.setBindGroup(0, pipelineObj.renderGroup)
252 | passEncoder.drawIndexed(box.indexCount, NUM)
253 | passEncoder.end()
254 | device.queue.submit([commandEncoder.finish()])
255 | }
256 |
257 | // total objects
258 | let NUM = 150000, MAX = 300000
259 | async function run(){
260 | const canvas = document.querySelector('canvas')
261 | if (!canvas)
262 | throw new Error('No Canvas')
263 |
264 | const {device, context, format, size} = await initWebGPU(canvas)
265 | const pipelineObj = await initPipeline(device, format, size)
266 | // create data
267 | const inputArray = new Float32Array([NUM, -500, 500, -250, 250, -500, 500]) // count, xmin/max, ymin/max, zmin/max
268 | const modelArray = new Float32Array(MAX * 4 * 4)
269 | const velocityArray = new Float32Array(MAX * 4)
270 | for(let i = 0; i < MAX; i++){
271 | const x = Math.random() * 1000 - 500
272 | const y = Math.random() * 500 - 250
273 | const z = Math.random() * 1000 - 500
274 | const modelMatrix = getModelViewMatrix({x,y,z},{x:0,y:0,z:0},{x:2,y:2,z:2})
275 | modelArray.set(modelMatrix, i * 4 * 4)
276 |
277 | velocityArray[i * 4 + 0] = Math.random() - 0.5 // x
278 | velocityArray[i * 4 + 1] = Math.random() - 0.5 // y
279 | velocityArray[i * 4 + 2] = Math.random() - 0.5 // z
280 | velocityArray[i * 4 + 3] = 1 // w
281 | }
282 | device.queue.writeBuffer(pipelineObj.velocityBuffer, 0, velocityArray)
283 | device.queue.writeBuffer(pipelineObj.modelBuffer, 0, modelArray)
284 | device.queue.writeBuffer(pipelineObj.inputBuffer, 0, inputArray)
285 |
286 | // auto rotated camera
287 | const camera = {x:0, y: 50, z: 1000}
288 | let aspect = size.width / size.height
289 | // start loop
290 | function frame(){
291 | const time = performance.now() / 5000
292 | camera.x = 1000 * Math.sin(time)
293 | camera.z = 1000 * Math.cos(time)
294 | const projectionMatrix = getProjectionMatrix(aspect, 60 / 180 * Math.PI, 0.1, 10000, camera)
295 | device.queue.writeBuffer(pipelineObj.projectionBuffer, 0, projectionMatrix)
296 | draw(device, context, pipelineObj)
297 | requestAnimationFrame(frame)
298 | }
299 | frame()
300 |
301 | // re-configure context on resize
302 | window.addEventListener('resize', ()=>{
303 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
304 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
305 | // don't need to recall context.configure() after v104
306 | // re-create depth texture
307 | pipelineObj.depthTexture.destroy()
308 | pipelineObj.depthTexture = device.createTexture({
309 | size, format: 'depth24plus',
310 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
311 | })
312 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
313 | // update aspect
314 | aspect = size.width/ size.height
315 | })
316 |
317 | const range = document.querySelector('input') as HTMLInputElement
318 | range.max = MAX.toString()
319 | range.value = NUM.toString()
320 | range.addEventListener('input', (e:Event)=>{
321 | NUM = +(e.target as HTMLInputElement).value
322 | const span = document.querySelector('#num') as HTMLSpanElement
323 | span.innerHTML = NUM.toString()
324 | inputArray[0] = NUM
325 | device.queue.writeBuffer(pipelineObj.inputBuffer, 0, inputArray)
326 | })
327 | }
328 | run()
--------------------------------------------------------------------------------
/src/helloWebgpu.ts:
--------------------------------------------------------------------------------
1 | // check webgpu support
2 | async function initWebGPU() {
3 | try{
4 | if(!navigator.gpu)
5 | throw new Error('Not support WebGPU')
6 | const adapter = await navigator.gpu.requestAdapter()
7 | if(!adapter)
8 | throw new Error('No adapter found')
9 | console.log(adapter)
10 | adapter.features.forEach(value=>{
11 | console.log(value)
12 | })
13 | document.body.innerHTML = 'Hello WebGPU
'
14 | let i:keyof GPUSupportedLimits
15 | for(i in adapter.limits)
16 | document.body.innerHTML += `${i}:${adapter.limits[i]}
`
17 | }catch(error:any){
18 | document.body.innerHTML = `${error.message}
`
19 | }
20 | }
21 | initWebGPU()
--------------------------------------------------------------------------------
/src/imageTexture.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import imageTexture from './shaders/imageTexture.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 | import textureUrl from '/texture.webp?url'
6 |
7 | // initialize webgpu device & config canvas context
8 | async function initWebGPU(canvas: HTMLCanvasElement) {
9 | if (!navigator.gpu)
10 | throw new Error('Not Support WebGPU')
11 | const adapter = await navigator.gpu.requestAdapter()
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | device, format,
23 | // prevent chrome warning after v102
24 | alphaMode: 'opaque'
25 | })
26 | return { device, context, format, size }
27 | }
28 |
29 | // create pipiline & buffers
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: { width: number, height: number }) {
31 | const pipeline = await device.createRenderPipelineAsync({
32 | label: 'Basic Pipline',
33 | layout: 'auto',
34 | vertex: {
35 | module: device.createShaderModule({
36 | code: basicVert,
37 | }),
38 | entryPoint: 'main',
39 | buffers: [{
40 | arrayStride: 5 * 4, // 3 position 2 uv,
41 | attributes: [
42 | {
43 | // position
44 | shaderLocation: 0,
45 | offset: 0,
46 | format: 'float32x3'
47 | },
48 | {
49 | // uv
50 | shaderLocation: 1,
51 | offset: 3 * 4,
52 | format: 'float32x2'
53 | }
54 | ]
55 | }]
56 | },
57 | fragment: {
58 | module: device.createShaderModule({
59 | code: imageTexture,
60 | }),
61 | entryPoint: 'main',
62 | targets: [
63 | {
64 | format: format
65 | }
66 | ]
67 | },
68 | primitive: {
69 | topology: 'triangle-list',
70 | // Culling backfaces pointing away from the camera
71 | cullMode: 'back',
72 | frontFace: 'ccw'
73 | },
74 | // Enable depth testing since we have z-level positions
75 | // Fragment closest to the camera is rendered in front
76 | depthStencil: {
77 | depthWriteEnabled: true,
78 | depthCompare: 'less',
79 | format: 'depth24plus'
80 | }
81 | } as GPURenderPipelineDescriptor)
82 | // create depthTexture for renderPass
83 | const depthTexture = device.createTexture({
84 | size, format: 'depth24plus',
85 | usage: GPUTextureUsage.RENDER_ATTACHMENT
86 | })
87 | const depthView = depthTexture.createView()
88 | // create vertex buffer
89 | const vertexBuffer = device.createBuffer({
90 | label: 'GPUBuffer store vertex',
91 | size: cube.vertex.byteLength,
92 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
93 | })
94 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
95 | // create a mvp matrix buffer
96 | const mvpBuffer = device.createBuffer({
97 | label: 'GPUBuffer store 4x4 matrix',
98 | size: 4 * 4 * 4, // 4 x 4 x float32
99 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
100 | })
101 | // create a uniform group contains matrix
102 | const uniformGroup = device.createBindGroup({
103 | label: 'Uniform Group with Matrix',
104 | layout: pipeline.getBindGroupLayout(0),
105 | entries: [
106 | {
107 | binding: 0,
108 | resource: {
109 | buffer: mvpBuffer
110 | }
111 | }
112 | ]
113 | })
114 | // return all vars
115 | return { pipeline, vertexBuffer, mvpBuffer, uniformGroup, depthTexture, depthView }
116 | }
117 |
118 | // create & submit device commands
119 | function draw(
120 | device: GPUDevice,
121 | context: GPUCanvasContext,
122 | pipelineObj: {
123 | pipeline: GPURenderPipeline
124 | vertexBuffer: GPUBuffer
125 | mvpBuffer: GPUBuffer
126 | uniformGroup: GPUBindGroup
127 | depthView: GPUTextureView
128 | },
129 | textureGroup: GPUBindGroup
130 | ) {
131 | // start encoder
132 | const commandEncoder = device.createCommandEncoder()
133 | const renderPassDescriptor: GPURenderPassDescriptor = {
134 | colorAttachments: [
135 | {
136 | view: context.getCurrentTexture().createView(),
137 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
138 | loadOp: 'clear',
139 | storeOp: 'store'
140 | }
141 | ],
142 | depthStencilAttachment: {
143 | view: pipelineObj.depthView,
144 | depthClearValue: 1.0,
145 | depthLoadOp: 'clear',
146 | depthStoreOp: 'store',
147 | }
148 | }
149 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
150 | passEncoder.setPipeline(pipelineObj.pipeline)
151 | // set uniformGroup
152 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
153 | // set textureGroup
154 | passEncoder.setBindGroup(1, textureGroup)
155 | // set vertex
156 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
157 | // draw vertex count of cube
158 | passEncoder.draw(cube.vertexCount)
159 | passEncoder.end()
160 | // webgpu run in a separate process, all the commands will be executed after submit
161 | device.queue.submit([commandEncoder.finish()])
162 | }
163 |
164 | async function run() {
165 | const canvas = document.querySelector('canvas')
166 | if (!canvas)
167 | throw new Error('No Canvas')
168 | const { device, context, format, size } = await initWebGPU(canvas)
169 | const pipelineObj = await initPipeline(device, format, size)
170 |
171 | // fetch an image and upload to GPUTexture
172 | const res = await fetch(textureUrl)
173 | const img = await res.blob()
174 | // const img = document.createElement('img')
175 | // img.src = textureUrl
176 | // await img.decode()
177 | const bitmap = await createImageBitmap(img)
178 | const textureSize = [bitmap.width, bitmap.height]
179 | // create empty texture
180 | const texture = device.createTexture({
181 | size: textureSize,
182 | format: 'rgba8unorm',
183 | usage:
184 | GPUTextureUsage.TEXTURE_BINDING |
185 | GPUTextureUsage.COPY_DST |
186 | GPUTextureUsage.RENDER_ATTACHMENT
187 | })
188 | // update image to GPUTexture
189 | device.queue.copyExternalImageToTexture(
190 | { source: bitmap },
191 | { texture: texture },
192 | textureSize
193 | )
194 | // Create a sampler with linear filtering for smooth interpolation.
195 | const sampler = device.createSampler({
196 | // addressModeU: 'repeat',
197 | // addressModeV: 'repeat',
198 | magFilter: 'linear',
199 | minFilter: 'linear'
200 | })
201 | const textureGroup = device.createBindGroup({
202 | label: 'Texture Group with Texture/Sampler',
203 | layout: pipelineObj.pipeline.getBindGroupLayout(1),
204 | entries: [
205 | {
206 | binding: 0,
207 | resource: sampler
208 | },
209 | {
210 | binding: 1,
211 | resource: texture.createView()
212 | }
213 | ]
214 | })
215 |
216 | // default state
217 | let aspect = size.width / size.height
218 | const position = { x: 0, y: 0, z: -5 }
219 | const scale = { x: 1, y: 1, z: 1 }
220 | const rotation = { x: 0, y: 0, z: 0 }
221 | // start loop
222 | function frame() {
223 | // rotate by time, and update transform matrix
224 | const now = Date.now() / 1000
225 | rotation.x = Math.sin(now)
226 | rotation.y = Math.cos(now)
227 | const mvpMatrix = getMvpMatrix(aspect, position, rotation, scale)
228 | device.queue.writeBuffer(
229 | pipelineObj.mvpBuffer,
230 | 0,
231 | mvpMatrix.buffer
232 | )
233 | // then draw
234 | draw(device, context, pipelineObj, textureGroup)
235 | requestAnimationFrame(frame)
236 | }
237 | frame()
238 |
239 | // re-configure context on resize
240 | window.addEventListener('resize', () => {
241 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
242 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
243 | // don't need to recall context.configure() after v104
244 | // re-create depth texture
245 | pipelineObj.depthTexture.destroy()
246 | pipelineObj.depthTexture = device.createTexture({
247 | size, format: 'depth24plus',
248 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
249 | })
250 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
251 | // update aspect
252 | aspect = size.width / size.height
253 | })
254 | }
255 | run()
--------------------------------------------------------------------------------
/src/rotatingCube.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import positionFrag from './shaders/position.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 |
6 | // initialize webgpu device & config canvas context
7 | async function initWebGPU(canvas: HTMLCanvasElement) {
8 | if(!navigator.gpu)
9 | throw new Error('Not Support WebGPU')
10 | const adapter = await navigator.gpu.requestAdapter()
11 | if (!adapter)
12 | throw new Error('No Adapter Found')
13 | const device = await adapter.requestDevice()
14 | const context = canvas.getContext('webgpu') as GPUCanvasContext
15 | const format = navigator.gpu.getPreferredCanvasFormat()
16 | const devicePixelRatio = window.devicePixelRatio || 1
17 | canvas.width = canvas.clientWidth * devicePixelRatio
18 | canvas.height = canvas.clientHeight * devicePixelRatio
19 | const size = {width: canvas.width, height: canvas.height}
20 | context.configure({
21 | device, format,
22 | // prevent chrome warning after v102
23 | alphaMode: 'opaque'
24 | })
25 | return {device, context, format, size}
26 | }
27 |
28 | // create pipiline & buffers
29 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: {width:number, height:number}) {
30 | const pipeline = await device.createRenderPipelineAsync({
31 | label: 'Basic Pipline',
32 | layout: 'auto',
33 | vertex: {
34 | module: device.createShaderModule({
35 | code: basicVert,
36 | }),
37 | entryPoint: 'main',
38 | buffers: [{
39 | arrayStride: 5 * 4, // 3 position 2 uv,
40 | attributes: [
41 | {
42 | // position
43 | shaderLocation: 0,
44 | offset: 0,
45 | format: 'float32x3',
46 | },
47 | {
48 | // uv
49 | shaderLocation: 1,
50 | offset: 3 * 4,
51 | format: 'float32x2',
52 | }
53 | ]
54 | }]
55 | },
56 | fragment: {
57 | module: device.createShaderModule({
58 | code: positionFrag,
59 | }),
60 | entryPoint: 'main',
61 | targets: [
62 | {
63 | format: format
64 | }
65 | ]
66 | },
67 | primitive: {
68 | topology: 'triangle-list',
69 | // Culling backfaces pointing away from the camera
70 | cullMode: 'back',
71 | frontFace: 'ccw'
72 | },
73 | // Enable depth testing since we have z-level positions
74 | // Fragment closest to the camera is rendered in front
75 | depthStencil: {
76 | depthWriteEnabled: true,
77 | depthCompare: 'less',
78 | format: 'depth24plus',
79 | }
80 | } as GPURenderPipelineDescriptor)
81 | // create depthTexture for renderPass
82 | const depthTexture = device.createTexture({
83 | size, format: 'depth24plus',
84 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
85 | })
86 | const depthView = depthTexture.createView()
87 | // create vertex buffer
88 | const vertexBuffer = device.createBuffer({
89 | label: 'GPUBuffer store vertex',
90 | size: cube.vertex.byteLength,
91 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
92 | })
93 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
94 | // create a mvp matrix buffer
95 | const mvpBuffer = device.createBuffer({
96 | label: 'GPUBuffer store 4x4 matrix',
97 | size: 4 * 4 * 4, // 4 x 4 x float32
98 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
99 | })
100 | // create a uniform group for Matrix
101 | const uniformGroup = device.createBindGroup({
102 | label: 'Uniform Group with Matrix',
103 | layout: pipeline.getBindGroupLayout(0),
104 | entries: [
105 | {
106 | binding: 0,
107 | resource: {
108 | buffer: mvpBuffer
109 | }
110 | }
111 | ]
112 | })
113 | // return all vars
114 | return { pipeline, vertexBuffer, mvpBuffer, uniformGroup, depthTexture, depthView }
115 | }
116 |
117 | // create & submit device commands
118 | function draw(
119 | device: GPUDevice,
120 | context: GPUCanvasContext,
121 | pipelineObj: {
122 | pipeline: GPURenderPipeline
123 | vertexBuffer: GPUBuffer
124 | mvpBuffer: GPUBuffer
125 | uniformGroup: GPUBindGroup
126 | depthView: GPUTextureView
127 | }
128 | ) {
129 | // start encoder
130 | const commandEncoder = device.createCommandEncoder()
131 | const renderPassDescriptor: GPURenderPassDescriptor = {
132 | colorAttachments: [
133 | {
134 | view: context.getCurrentTexture().createView(),
135 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
136 | loadOp: 'clear',
137 | storeOp: 'store'
138 | }
139 | ],
140 | depthStencilAttachment: {
141 | view: pipelineObj.depthView,
142 | depthClearValue: 1.0,
143 | depthLoadOp: 'clear',
144 | depthStoreOp: 'store',
145 | }
146 | }
147 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
148 | passEncoder.setPipeline(pipelineObj.pipeline)
149 | // set vertex
150 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
151 | // set uniformGroup
152 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
153 | // draw vertex count of cube
154 | passEncoder.draw(cube.vertexCount)
155 | passEncoder.end()
156 | // webgpu run in a separate process, all the commands will be executed after submit
157 | device.queue.submit([commandEncoder.finish()])
158 | }
159 |
160 | async function run(){
161 | const canvas = document.querySelector('canvas')
162 | if (!canvas)
163 | throw new Error('No Canvas')
164 | const {device, context, format, size} = await initWebGPU(canvas)
165 | const pipelineObj = await initPipeline(device, format, size)
166 | // default state
167 | let aspect = size.width/ size.height
168 | const position = {x:0, y:0, z: -5}
169 | const scale = {x:1, y:1, z:1}
170 | const rotation = {x: 0, y: 0, z:0}
171 | // start loop
172 | function frame(){
173 | // rotate by time, and update transform matrix
174 | const now = Date.now() / 1000
175 | rotation.x = Math.sin(now)
176 | rotation.y = Math.cos(now)
177 | const mvpMatrix = getMvpMatrix(aspect, position, rotation, scale)
178 | device.queue.writeBuffer(
179 | pipelineObj.mvpBuffer,
180 | 0,
181 | mvpMatrix.buffer
182 | )
183 | // then draw
184 | draw(device, context, pipelineObj)
185 | requestAnimationFrame(frame)
186 | }
187 | frame()
188 |
189 | // re-configure context on resize
190 | window.addEventListener('resize', ()=>{
191 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
192 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
193 | // don't need to recall context.configure() after v104
194 | // re-create depth texture
195 | pipelineObj.depthTexture.destroy()
196 | pipelineObj.depthTexture = device.createTexture({
197 | size, format: 'depth24plus',
198 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
199 | })
200 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
201 | // update aspect
202 | aspect = size.width/ size.height
203 | })
204 | }
205 | run()
--------------------------------------------------------------------------------
/src/shaders/basic.instanced.vert.wgsl:
--------------------------------------------------------------------------------
1 | @binding(0) @group(0) var mvpMatrix : array>;
2 |
3 | struct VertexOutput {
4 | @builtin(position) Position : vec4,
5 | @location(0) fragUV : vec2,
6 | @location(1) fragPosition: vec4
7 |
8 | };
9 |
10 | @vertex
11 | fn main(
12 | @builtin(instance_index) index : u32,
13 | @location(0) position : vec4,
14 | @location(1) uv : vec2
15 | ) -> VertexOutput {
16 | var output : VertexOutput;
17 | output.Position = mvpMatrix[index] * position;
18 | output.fragUV = uv;
19 | output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));
20 | return output;
21 | }
22 |
--------------------------------------------------------------------------------
/src/shaders/basic.vert.wgsl:
--------------------------------------------------------------------------------
1 | @binding(0) @group(0) var mvpMatrix : mat4x4;
2 |
3 | struct VertexOutput {
4 | @builtin(position) Position : vec4,
5 | @location(0) fragUV : vec2,
6 | @location(1) fragPosition: vec4
7 | };
8 |
9 | @vertex
10 | fn main(
11 | @location(0) position : vec4,
12 | @location(1) uv : vec2
13 | ) -> VertexOutput {
14 | var output : VertexOutput;
15 | output.Position = mvpMatrix * position;
16 | output.fragUV = uv;
17 | output.fragPosition = 0.5 * (position + vec4(1.0, 1.0, 1.0, 1.0));
18 | return output;
19 | }
20 |
--------------------------------------------------------------------------------
/src/shaders/color.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var color : vec4;
2 |
3 | @fragment
4 | fn main() -> @location(0) vec4 {
5 | return color;
6 | }
--------------------------------------------------------------------------------
/src/shaders/compute.position.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var input: array;
2 | @group(0) @binding(1) var velocity: array>;
3 | @group(0) @binding(2) var modelView: array>;
4 | @group(0) @binding(3) var projection : mat4x4;
5 | @group(0) @binding(4) var mvp : array>;
6 |
7 | const size = u32(128);
8 | @compute @workgroup_size(size)
9 | fn main(
10 | @builtin(global_invocation_id) GlobalInvocationID : vec3
11 | ) {
12 | var index = GlobalInvocationID.x;
13 | if(index >= u32(input[0])){
14 | return;
15 | }
16 | var xMin = input[1];
17 | var xMax = input[2];
18 | var yMin = input[3];
19 | var yMax = input[4];
20 | var zMin = input[5];
21 | var zMax = input[6];
22 | var pos = modelView[index][3];
23 | var vel = velocity[index];
24 | // change x
25 | pos.x += vel.x;
26 | if(pos.x < xMin){
27 | pos.x = xMin;
28 | vel.x = -vel.x;
29 | }else if(pos.x > xMax){
30 | pos.x = xMax;
31 | vel.x = -vel.x;
32 | }
33 | // change y
34 | pos.y += vel.y;
35 | if(pos.y < yMin){
36 | pos.y = yMin;
37 | vel.y = -vel.y;
38 | }else if(pos.y > yMax){
39 | pos.y = yMax;
40 | vel.y = -vel.y;
41 | }
42 | // change z
43 | pos.z += vel.z;
44 | if(pos.z < zMin){
45 | pos.z = zMin;
46 | vel.z = -vel.z;
47 | }else if(pos.z > zMax){
48 | pos.z = zMax;
49 | vel.z = -vel.z;
50 | }
51 | // update velocity
52 | velocity[index] = vel;
53 | // update position in modelView matrix
54 | modelView[index][3] = pos;
55 | // update mvp
56 | mvp[index] = projection * modelView[index];
57 | }
--------------------------------------------------------------------------------
/src/shaders/compute.transform.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var modelView: array>;
2 | @group(0) @binding(1) var projection : mat4x4;
3 | @group(0) @binding(2) var mvp : array>;
4 | @group(0) @binding(3) var count : u32;
5 |
6 | @compute @workgroup_size(128)
7 | fn main(@builtin(global_invocation_id) global_id : vec3) {
8 | // Guard against out-of-bounds work group sizes
9 | let index = global_id.x;
10 | if (index >= count) {
11 | return;
12 | }
13 |
14 | mvp[index] = projection * modelView[index];
15 | }
--------------------------------------------------------------------------------
/src/shaders/imageTexture.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(1) @binding(0) var Sampler: sampler;
2 | @group(1) @binding(1) var Texture: texture_2d;
3 |
4 | @fragment
5 | fn main(@location(0) fragUV: vec2,
6 | @location(1) fragPosition: vec4) -> @location(0) vec4 {
7 | return textureSample(Texture, Sampler, fragUV) * fragPosition;
8 | }
9 |
--------------------------------------------------------------------------------
/src/shaders/lambert.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(1) @binding(0) var ambientIntensity : f32;
2 | @group(1) @binding(1) var pointLight : array, 2>;
3 | @group(1) @binding(2) var directionLight : array, 2>;
4 |
5 | @fragment
6 | fn main(
7 | @location(0) fragPosition : vec3,
8 | @location(1) fragNormal: vec3,
9 | @location(2) fragUV: vec2,
10 | @location(3) fragColor: vec4
11 | ) -> @location(0) vec4 {
12 | let objectColor = fragColor.rgb;
13 | let ambintLightColor = vec3(1.0,1.0,1.0);
14 | let pointLightColor = vec3(1.0,1.0,1.0);
15 | let dirLightColor = vec3(1.0,1.0,1.0);
16 |
17 | var lightResult = vec3(0.0, 0.0, 0.0);
18 | // ambient
19 | lightResult += ambintLightColor * ambientIntensity;
20 | // Directional Light
21 | var directionPosition = directionLight[0].xyz;
22 | var directionIntensity: f32 = directionLight[1][0];
23 | var diffuse: f32 = max(dot(normalize(directionPosition), fragNormal), 0.0);
24 | lightResult += dirLightColor * directionIntensity * diffuse;
25 | // Point Light
26 | var pointPosition = pointLight[0].xyz;
27 | var pointIntensity: f32 = pointLight[1][0];
28 | var pointRadius: f32 = pointLight[1][1];
29 | var L = pointPosition - fragPosition;
30 | var distance = length(L);
31 | if(distance < pointRadius){
32 | var diffuse: f32 = max(dot(normalize(L), fragNormal), 0.0);
33 | var distanceFactor: f32 = pow(1.0 - distance / pointRadius, 2.0);
34 | lightResult += pointLightColor * pointIntensity * diffuse * distanceFactor;
35 | }
36 |
37 | return vec4(objectColor * lightResult, 1.0);
38 | }
--------------------------------------------------------------------------------
/src/shaders/normal.vert.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var modelViews : array>;
2 | @group(0) @binding(1) var projection : mat4x4;
3 | @group(0) @binding(2) var colors : array>;
4 |
5 | struct VertexOutput {
6 | @builtin(position) Position : vec4,
7 | @location(0) fragPosition : vec3,
8 | @location(1) fragNormal : vec3,
9 | @location(2) fragUV: vec2,
10 | @location(3) fragColor: vec4
11 | };
12 |
13 | @vertex
14 | fn main(
15 | @builtin(instance_index) index : u32,
16 | @location(0) position : vec3,
17 | @location(1) normal : vec3,
18 | @location(2) uv : vec2,
19 | ) -> VertexOutput {
20 | let modelview = modelViews[index];
21 | let mvp = projection * modelview;
22 | let pos = vec4(position, 1.0);
23 |
24 | var output : VertexOutput;
25 | output.Position = mvp * pos;
26 | output.fragPosition = (modelview * pos).xyz;
27 | // it should use transpose(inverse(modelview)) if consider non-uniform scale
28 | // hint: inverse() is not available in wgsl, better do in JS or CS
29 | output.fragNormal = (modelview * vec4(normal, 0.0)).xyz;
30 | output.fragUV = uv;
31 | output.fragColor = colors[index];
32 | return output;
33 | }
34 |
--------------------------------------------------------------------------------
/src/shaders/position.frag.wgsl:
--------------------------------------------------------------------------------
1 | @fragment
2 | fn main(
3 | @location(0) fragUV: vec2,
4 | @location(1) fragPosition: vec4
5 | ) -> @location(0) vec4 {
6 | return fragPosition;
7 | }
--------------------------------------------------------------------------------
/src/shaders/position.vert.wgsl:
--------------------------------------------------------------------------------
1 | @vertex
2 | fn main(@location(0) position : vec3) -> @builtin(position) vec4 {
3 | return vec4(position, 1.0);
4 | }
--------------------------------------------------------------------------------
/src/shaders/red.frag.wgsl:
--------------------------------------------------------------------------------
1 | @fragment
2 | fn main() -> @location(0) vec4 {
3 | return vec4(1.0, 0.0, 0.0, 1.0);
4 | }
--------------------------------------------------------------------------------
/src/shaders/shadow.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(1) @binding(0) var lightPosition : vec4;
2 | @group(1) @binding(1) var shadowMap: texture_depth_2d;
3 | @group(1) @binding(2) var shadowSampler: sampler_comparison;
4 |
5 | @fragment
6 | fn main(
7 | @location(0) fragPosition : vec3,
8 | @location(1) fragNormal: vec3,
9 | @location(2) fragUV: vec2,
10 | @location(3) shadowPos: vec3,
11 | @location(4) fragColor: vec4
12 | ) -> @location(0) vec4 {
13 | let objectColor = fragColor.rgb;
14 | // Directional Light
15 | let diffuse: f32 = max(dot(normalize(lightPosition.xyz), fragNormal), 0.0);
16 | // add shadow factor
17 | var shadow : f32 = 0.0;
18 | // apply Percentage-closer filtering (PCF)
19 | // sample nearest 9 texels to smooth result
20 | let size = f32(textureDimensions(shadowMap).x);
21 | for (var y : i32 = -1 ; y <= 1 ; y = y + 1) {
22 | for (var x : i32 = -1 ; x <= 1 ; x = x + 1) {
23 | let offset = vec2(f32(x) / size, f32(y) / size);
24 | shadow = shadow + textureSampleCompare(
25 | shadowMap,
26 | shadowSampler,
27 | shadowPos.xy + offset,
28 | shadowPos.z - 0.005 // apply a small bias to avoid acne
29 | );
30 | }
31 | }
32 | shadow = shadow / 9.0;
33 | // ambient + diffuse * shadow
34 | let lightFactor = min(0.3 + shadow * diffuse, 1.0);
35 | return vec4(objectColor * lightFactor, 1.0);
36 | }
--------------------------------------------------------------------------------
/src/shaders/shadow.vertex.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var modelViews : array>;
2 | @group(0) @binding(1) var cameraProjection : mat4x4;
3 | @group(0) @binding(2) var lightProjection : mat4x4;
4 | @group(0) @binding(3) var colors : array>;
5 |
6 | struct VertexOutput {
7 | @builtin(position) Position: vec4,
8 | @location(0) fragPosition: vec3,
9 | @location(1) fragNormal: vec3,
10 | @location(2) fragUV: vec2,
11 | @location(3) shadowPos: vec3,
12 | @location(4) fragColor: vec4
13 | };
14 |
15 | @vertex
16 | fn main(
17 | @builtin(instance_index) index : u32,
18 | @location(0) position : vec3,
19 | @location(1) normal : vec3,
20 | @location(2) uv : vec2
21 | ) -> VertexOutput {
22 | let modelview = modelViews[index];
23 | let pos = vec4(position, 1.0);
24 | let posFromCamera: vec4 = cameraProjection * modelview * pos;
25 |
26 | var output : VertexOutput;
27 | output.Position = posFromCamera;
28 | output.fragPosition = (modelview * pos).xyz;
29 | output.fragNormal = (modelview * vec4(normal, 0.0)).xyz;
30 | output.fragUV = uv;
31 | output.fragColor = colors[index];
32 |
33 | let posFromLight: vec4 = lightProjection * modelview * pos;
34 | // Convert shadowPos XY to (0, 1) to fit texture UV
35 | output.shadowPos = vec3(posFromLight.xy * vec2(0.5, -0.5) + vec2(0.5, 0.5), posFromLight.z);
36 | return output;
37 | }
38 |
--------------------------------------------------------------------------------
/src/shaders/shadowDepth.wgsl:
--------------------------------------------------------------------------------
1 | @group(0) @binding(0) var modelViews : array>;
2 | @group(0) @binding(1) var lightProjection : mat4x4;
3 |
4 | @vertex
5 | fn main(
6 | @builtin(instance_index) index : u32,
7 | @location(0) position : vec3,
8 | @location(1) normal : vec3,
9 | @location(2) uv : vec2,
10 | ) -> @builtin(position) vec4 {
11 | let modelview = modelViews[index];
12 | let pos = vec4(position, 1.0);
13 | return lightProjection * modelview * pos;
14 | }
15 |
--------------------------------------------------------------------------------
/src/shaders/spriteTexture.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(1) @binding(0) var Sampler: sampler;
2 | @group(1) @binding(1) var Texture: texture_2d;
3 | @group(1) @binding(2) var uvOffset : vec4;
4 |
5 | @fragment
6 | fn main(@location(0) fragUV: vec2,
7 | @location(1) fragPosition: vec4) -> @location(0) vec4 {
8 | // only show specific uv area of the big texture
9 | var uv = fragUV * vec2(uvOffset[2], uvOffset[3]) + vec2(uvOffset[0], uvOffset[1]);
10 | return textureSample(Texture, Sampler, uv) * fragPosition;
11 | }
12 |
--------------------------------------------------------------------------------
/src/shaders/triangle.vert.wgsl:
--------------------------------------------------------------------------------
1 | @vertex
2 | fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4 {
3 | var pos = array, 3>(
4 | vec2(0.0, 0.5),
5 | vec2(-0.5, -0.5),
6 | vec2(0.5, -0.5)
7 | );
8 | return vec4(pos[VertexIndex], 0.0, 1.0);
9 | }
--------------------------------------------------------------------------------
/src/shaders/videoTexture.frag.wgsl:
--------------------------------------------------------------------------------
1 | @group(1) @binding(0) var Sampler: sampler;
2 | @group(1) @binding(1) var Texture: texture_external;
3 |
4 | @fragment
5 | fn main(@location(0) fragUV: vec2,
6 | @location(1) fragPosition: vec4) -> @location(0) vec4 {
7 | return textureSampleBaseClampToEdge(Texture, Sampler, fragUV) * fragPosition;
8 | }
9 |
--------------------------------------------------------------------------------
/src/shadowMapping.ts:
--------------------------------------------------------------------------------
1 | import shadowVertex from './shaders/shadow.vertex.wgsl?raw'
2 | import shadowFrag from './shaders/shadow.frag.wgsl?raw'
3 | import shadowDepth from './shaders/shadowDepth.wgsl?raw'
4 | import * as sphere from './util/sphere'
5 | import * as box from './util/box'
6 | import { getModelViewMatrix, getProjectionMatrix } from './util/math'
7 | import { mat4, vec3 } from 'gl-matrix'
8 |
9 |
10 | // initialize webgpu device & config canvas context
11 | async function initWebGPU(canvas: HTMLCanvasElement) {
12 | if (!navigator.gpu)
13 | throw new Error('Not Support WebGPU')
14 | const adapter = await navigator.gpu.requestAdapter()
15 | if (!adapter)
16 | throw new Error('No Adapter Found')
17 | const device = await adapter.requestDevice()
18 | const context = canvas.getContext('webgpu') as GPUCanvasContext
19 | const format = navigator.gpu.getPreferredCanvasFormat()
20 | const devicePixelRatio = window.devicePixelRatio || 1
21 | canvas.width = canvas.clientWidth * devicePixelRatio
22 | canvas.height = canvas.clientHeight * devicePixelRatio
23 | const size = { width: canvas.width, height: canvas.height }
24 | context.configure({
25 | device, format,
26 | // prevent chrome warning after v102
27 | alphaMode: 'opaque'
28 | })
29 | return { device, context, format, size }
30 | }
31 |
32 | // create pipiline & buffers
33 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: { width: number, height: number }) {
34 | const vertexBuffers: Iterable = [{
35 | arrayStride: 8 * 4, // 3 position 2 uv,
36 | attributes: [
37 | {
38 | // position
39 | shaderLocation: 0,
40 | offset: 0,
41 | format: 'float32x3',
42 | },
43 | {
44 | // normal
45 | shaderLocation: 1,
46 | offset: 3 * 4,
47 | format: 'float32x3',
48 | },
49 | {
50 | // uv
51 | shaderLocation: 2,
52 | offset: 6 * 4,
53 | format: 'float32x2',
54 | },
55 | ]
56 | }]
57 | const primitive = {
58 | topology: 'triangle-list',
59 | cullMode: 'back'
60 | }
61 | const depthStencil = {
62 | depthWriteEnabled: true,
63 | depthCompare: 'less',
64 | format: 'depth32float',
65 | }
66 | const shadowPipeline = await device.createRenderPipelineAsync({
67 | label: 'Shadow Pipline',
68 | layout: 'auto',
69 | vertex: {
70 | module: device.createShaderModule({
71 | code: shadowDepth,
72 | }),
73 | entryPoint: 'main',
74 | buffers: vertexBuffers
75 | },
76 | primitive, depthStencil
77 | } as GPURenderPipelineDescriptor)
78 | // create a depthTexture for shadow
79 | const shadowDepthTexture = device.createTexture({
80 | size: [2048, 2048],
81 | usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
82 | format: 'depth32float'
83 | });
84 | const renderPipeline = await device.createRenderPipelineAsync({
85 | label: 'Render Pipline',
86 | layout: 'auto',
87 | vertex: {
88 | module: device.createShaderModule({
89 | code: shadowVertex,
90 | }),
91 | entryPoint: 'main',
92 | buffers: vertexBuffers
93 | },
94 | fragment: {
95 | module: device.createShaderModule({
96 | code: shadowFrag,
97 | }),
98 | entryPoint: 'main',
99 | targets: [
100 | {
101 | format: format
102 | }
103 | ]
104 | },
105 | primitive, depthStencil
106 | } as GPURenderPipelineDescriptor)
107 | // create depthTexture for renderPass
108 | const renderDepthTexture = device.createTexture({
109 | size, format: 'depth32float',
110 | usage: GPUTextureUsage.RENDER_ATTACHMENT
111 | })
112 | // create depthTextureView
113 | const shadowDepthView = shadowDepthTexture.createView()
114 | const renderDepthView = renderDepthTexture.createView()
115 | // create vertex & index buffer
116 | const boxBuffer = {
117 | vertex: device.createBuffer({
118 | label: 'GPUBuffer store vertex',
119 | size: box.vertex.byteLength,
120 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
121 | }),
122 | index: device.createBuffer({
123 | label: 'GPUBuffer store vertex index',
124 | size: box.index.byteLength,
125 | usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
126 | })
127 | }
128 | const sphereBuffer = {
129 | vertex: device.createBuffer({
130 | label: 'GPUBuffer store vertex',
131 | size: sphere.vertex.byteLength,
132 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
133 | }),
134 | index: device.createBuffer({
135 | label: 'GPUBuffer store vertex index',
136 | size: sphere.index.byteLength,
137 | usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST
138 | })
139 | }
140 | device.queue.writeBuffer(boxBuffer.vertex, 0, box.vertex)
141 | device.queue.writeBuffer(boxBuffer.index, 0, box.index)
142 | device.queue.writeBuffer(sphereBuffer.vertex, 0, sphere.vertex)
143 | device.queue.writeBuffer(sphereBuffer.index, 0, sphere.index)
144 |
145 | // create a 4x4xNUM STORAGE buffer to store matrix
146 | const modelViewBuffer = device.createBuffer({
147 | label: 'GPUBuffer store n*4x4 matrix',
148 | size: 4 * 4 * 4 * NUM, // 4 x 4 x float32 x NUM
149 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
150 | })
151 | // create a 4x4 uniform buffer to store projection
152 | const cameraProjectionBuffer = device.createBuffer({
153 | label: 'GPUBuffer for camera projection',
154 | size: 4 * 4 * 4, // 4 x 4 x float32
155 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
156 | })
157 | // create a 4x4 uniform buffer to store projection
158 | const lightProjectionBuffer = device.createBuffer({
159 | label: 'GPUBuffer for light projection',
160 | size: 4 * 4 * 4, // 4 x 4 x float32
161 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
162 | })
163 | // create a 4x4xNUM STORAGE buffer to store color
164 | const colorBuffer = device.createBuffer({
165 | label: 'GPUBuffer store n*4 color',
166 | size: 4 * 4 * NUM, // 4 x float32 x NUM
167 | usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
168 | })
169 | // create a uniform buffer to store dirLight
170 | const lightBuffer = device.createBuffer({
171 | label: 'GPUBuffer store 4x4 matrix',
172 | size: 4 * 4, // 4 x float32: position vec4
173 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
174 | })
175 | // create a uniform group for Matrix
176 | const vsGroup = device.createBindGroup({
177 | label: 'Group for renderPass',
178 | layout: renderPipeline.getBindGroupLayout(0),
179 | entries: [
180 | {
181 | binding: 0,
182 | resource: {
183 | buffer: modelViewBuffer
184 | }
185 | },
186 | {
187 | binding: 1,
188 | resource: {
189 | buffer: cameraProjectionBuffer
190 | }
191 | },
192 | {
193 | binding: 2,
194 | resource: {
195 | buffer: lightProjectionBuffer
196 | }
197 | },
198 | {
199 | binding: 3,
200 | resource: {
201 | buffer: colorBuffer
202 | }
203 | }
204 | ]
205 | })
206 | const fsGroup = device.createBindGroup({
207 | label: 'Group for fragment',
208 | layout: renderPipeline.getBindGroupLayout(1),
209 | entries: [
210 | {
211 | binding: 0,
212 | resource: {
213 | buffer: lightBuffer
214 | }
215 | },
216 | {
217 | binding: 1,
218 | resource: shadowDepthView
219 | },
220 | {
221 | binding: 2,
222 | resource: device.createSampler({
223 | compare: 'less',
224 | })
225 | }
226 | ]
227 | })
228 | const shadowGroup = device.createBindGroup({
229 | label: 'Group for shadowPass',
230 | layout: shadowPipeline.getBindGroupLayout(0),
231 | entries: [{
232 | binding: 0,
233 | resource: {
234 | buffer: modelViewBuffer
235 | }
236 | }, {
237 | binding: 1,
238 | resource: {
239 | buffer: lightProjectionBuffer
240 | }
241 | }]
242 | })
243 | // return all vars
244 | return {
245 | renderPipeline, shadowPipeline, boxBuffer, sphereBuffer,
246 | modelViewBuffer, cameraProjectionBuffer, lightProjectionBuffer, colorBuffer, lightBuffer,
247 | vsGroup, fsGroup, shadowGroup,
248 | renderDepthTexture, renderDepthView, shadowDepthTexture, shadowDepthView
249 | }
250 | }
251 |
252 | // create & submit device commands
253 | function draw(
254 | device: GPUDevice,
255 | context: GPUCanvasContext,
256 | pipelineObj: {
257 | renderPipeline: GPURenderPipeline,
258 | shadowPipeline: GPURenderPipeline,
259 | boxBuffer: { vertex: GPUBuffer, index: GPUBuffer },
260 | sphereBuffer: { vertex: GPUBuffer, index: GPUBuffer },
261 | vsGroup: GPUBindGroup,
262 | shadowGroup: GPUBindGroup,
263 | fsGroup: GPUBindGroup,
264 | renderDepthView: GPUTextureView,
265 | shadowDepthView: GPUTextureView
266 | },
267 | ) {
268 | const commandEncoder = device.createCommandEncoder()
269 | // start shadowPass
270 | {
271 | const shadowPassDescriptor: GPURenderPassDescriptor = {
272 | colorAttachments: [],
273 | depthStencilAttachment: {
274 | view: pipelineObj.shadowDepthView,
275 | depthClearValue: 1.0,
276 | depthLoadOp: 'clear',
277 | depthStoreOp: 'store',
278 | }
279 | }
280 | const shadowPass = commandEncoder.beginRenderPass(shadowPassDescriptor)
281 | shadowPass.setPipeline(pipelineObj.shadowPipeline)
282 | shadowPass.setBindGroup(0, pipelineObj.shadowGroup)
283 | // set box vertex
284 | shadowPass.setVertexBuffer(0, pipelineObj.boxBuffer.vertex)
285 | shadowPass.setIndexBuffer(pipelineObj.boxBuffer.index, 'uint16')
286 | shadowPass.drawIndexed(box.indexCount, 2, 0, 0, 0)
287 | // set sphere vertex
288 | shadowPass.setVertexBuffer(0, pipelineObj.sphereBuffer.vertex)
289 | shadowPass.setIndexBuffer(pipelineObj.sphereBuffer.index, 'uint16')
290 | shadowPass.drawIndexed(sphere.indexCount, NUM - 2, 0, 0, NUM / 2)
291 | shadowPass.end()
292 | }
293 | // start renderPass
294 | {
295 | const renderPassDescriptor: GPURenderPassDescriptor = {
296 | colorAttachments: [
297 | {
298 | view: context.getCurrentTexture().createView(),
299 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
300 | loadOp: 'clear',
301 | storeOp: 'store'
302 | }
303 | ],
304 | depthStencilAttachment: {
305 | view: pipelineObj.renderDepthView,
306 | depthClearValue: 1.0,
307 | depthLoadOp: 'clear',
308 | depthStoreOp: 'store',
309 | }
310 | }
311 | const renderPass = commandEncoder.beginRenderPass(renderPassDescriptor)
312 | renderPass.setPipeline(pipelineObj.renderPipeline)
313 | renderPass.setBindGroup(0, pipelineObj.vsGroup)
314 | renderPass.setBindGroup(1, pipelineObj.fsGroup)
315 | // set box vertex
316 | renderPass.setVertexBuffer(0, pipelineObj.boxBuffer.vertex)
317 | renderPass.setIndexBuffer(pipelineObj.boxBuffer.index, 'uint16')
318 | renderPass.drawIndexed(box.indexCount, 2, 0, 0, 0)
319 | // set sphere vertex
320 | renderPass.setVertexBuffer(0, pipelineObj.sphereBuffer.vertex)
321 | renderPass.setIndexBuffer(pipelineObj.sphereBuffer.index, 'uint16')
322 | renderPass.drawIndexed(sphere.indexCount, NUM - 2, 0, 0, NUM / 2)
323 | renderPass.end()
324 | }
325 | // webgpu run in a separate process, all the commands will be executed after submit
326 | device.queue.submit([commandEncoder.finish()])
327 | }
328 |
329 | // total objects
330 | const NUM = 30
331 | async function run() {
332 | const canvas = document.querySelector('canvas')
333 | if (!canvas)
334 | throw new Error('No Canvas')
335 |
336 | const { device, context, format, size } = await initWebGPU(canvas)
337 | const pipelineObj = await initPipeline(device, format, size)
338 | // create objects
339 | const scene: any[] = []
340 | const modelViewMatrix = new Float32Array(NUM * 4 * 4)
341 | const colorBuffer = new Float32Array(NUM * 4)
342 | // add a center box
343 | {
344 | const position = { x: 0, y: 0, z: -20 }
345 | const rotation = { x: 0, y: Math.PI / 4, z: 0 }
346 | const scale = { x: 2, y: 20, z: 2 }
347 | const modelView = getModelViewMatrix(position, rotation, scale)
348 | modelViewMatrix.set(modelView, 0 * 4 * 4)
349 | // random color for each object
350 | colorBuffer.set([0.5, 0.5, 0.5, 1], 0 * 4)
351 | scene.push({ position, rotation, scale })
352 | }
353 | // add a floor
354 | {
355 | const position = { x: 0, y: -10, z: -20 }
356 | const rotation = { x: 0, y: 0, z: 0 }
357 | const scale = { x: 50, y: 0.5, z: 40 }
358 | const modelView = getModelViewMatrix(position, rotation, scale)
359 | modelViewMatrix.set(modelView, 1 * 4 * 4)
360 | // random color for each object
361 | colorBuffer.set([1, 1, 1, 1], 1 * 4)
362 | scene.push({ position, rotation, scale })
363 | }
364 | // add spheres
365 | for (let i = 2; i < NUM; i++) {
366 | // craete simple object
367 | const or = Math.random() > 0.5 ? 1 : -1
368 | const position = { x: (1 + Math.random() * 12) * or, y: - 8 + Math.random() * 15, z: -20 + (1 + Math.random() * 12) * or }
369 | const rotation = { x: Math.random(), y: Math.random(), z: Math.random() }
370 | const s = Math.max(0.5, Math.random())
371 | const scale = { x: s, y: s, z: s }
372 | const modelView = getModelViewMatrix(position, rotation, scale)
373 | modelViewMatrix.set(modelView, i * 4 * 4)
374 | // random color for each object
375 | colorBuffer.set([Math.random(), Math.random(), Math.random(), 1], i * 4)
376 | scene.push({ position, rotation, scale, y: position.y, v: Math.max(0.09, Math.random() / 10) * or })
377 | }
378 | // write matrix & colors
379 | device.queue.writeBuffer(pipelineObj.colorBuffer, 0, colorBuffer)
380 |
381 | // dir light, 4 position
382 | const lightViewMatrix = mat4.create()
383 | const lightProjectionMatrix = mat4.create()
384 | const lightPosition = vec3.fromValues(0, 100, 0)
385 | const up = vec3.fromValues(0, 1, 0)
386 | const origin = vec3.fromValues(0, 0, 0)
387 | // start loop
388 | function frame() {
389 | // update lights position
390 | const now = performance.now()
391 | lightPosition[0] = Math.sin(now / 1500) * 50
392 | lightPosition[2] = Math.cos(now / 1500) * 50
393 | // update lvp matrix
394 | mat4.lookAt(
395 | lightViewMatrix,
396 | lightPosition,
397 | origin, up
398 | )
399 | mat4.ortho(lightProjectionMatrix, -40, 40, -40, 40, -50, 200)
400 | mat4.multiply(lightProjectionMatrix, lightProjectionMatrix, lightViewMatrix)
401 | device.queue.writeBuffer(pipelineObj.lightProjectionBuffer, 0, lightProjectionMatrix as Float32Array)
402 | device.queue.writeBuffer(pipelineObj.lightBuffer, 0, lightPosition as Float32Array)
403 | // update obj position
404 | for (let i = 2; i < NUM; i++) {
405 | const obj = scene[i]
406 | obj.position.y += obj.v
407 | if (obj.position.y < -9 || obj.position.y > 9)
408 | obj.v *= -1
409 | const modelView = getModelViewMatrix(obj.position, obj.rotation, obj.scale)
410 | modelViewMatrix.set(modelView, i * 4 * 4)
411 | }
412 | device.queue.writeBuffer(pipelineObj.modelViewBuffer, 0, modelViewMatrix)
413 |
414 | // start draw
415 | draw(device, context, pipelineObj)
416 | requestAnimationFrame(frame)
417 | }
418 | frame()
419 |
420 | function updateCamera() {
421 | const aspect = size.width / size.height
422 | const projectionMatrix = getProjectionMatrix(aspect, 60 / 180 * Math.PI, 0.1, 1000, { x: 0, y: 10, z: 20 })
423 | device.queue.writeBuffer(pipelineObj.cameraProjectionBuffer, 0, projectionMatrix)
424 | }
425 | updateCamera()
426 | // re-configure context on resize
427 | window.addEventListener('resize', () => {
428 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
429 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
430 | // don't need to recall context.configure() after v104
431 | // re-create depth texture
432 | pipelineObj.renderDepthTexture.destroy()
433 | pipelineObj.renderDepthTexture = device.createTexture({
434 | size, format: 'depth32float',
435 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
436 | })
437 | pipelineObj.renderDepthView = pipelineObj.renderDepthTexture.createView()
438 | // update aspect
439 | updateCamera()
440 | })
441 | }
442 | run()
--------------------------------------------------------------------------------
/src/spriteTexture.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import spriteTexture from './shaders/spriteTexture.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 | import textureUrl from '/sprites.webp?url'
6 |
7 | // initialize webgpu device & config canvas context
8 | async function initWebGPU(canvas: HTMLCanvasElement) {
9 | if (!navigator.gpu)
10 | throw new Error('Not Support WebGPU')
11 | const adapter = await navigator.gpu.requestAdapter()
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | device, format,
23 | // prevent chrome warning after v102
24 | alphaMode: 'opaque'
25 | })
26 | return { device, context, format, size }
27 | }
28 |
29 | // create pipiline & buffers
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: { width: number, height: number }) {
31 | const pipeline = await device.createRenderPipelineAsync({
32 | label: 'Basic Pipline',
33 | layout: 'auto',
34 | vertex: {
35 | module: device.createShaderModule({
36 | code: basicVert,
37 | }),
38 | entryPoint: 'main',
39 | buffers: [{
40 | arrayStride: 5 * 4, // 3 position 2 uv,
41 | attributes: [
42 | {
43 | // position
44 | shaderLocation: 0,
45 | offset: 0,
46 | format: 'float32x3'
47 | },
48 | {
49 | // uv
50 | shaderLocation: 1,
51 | offset: 3 * 4,
52 | format: 'float32x2'
53 | }
54 | ]
55 | }]
56 | },
57 | fragment: {
58 | module: device.createShaderModule({
59 | code: spriteTexture,
60 | }),
61 | entryPoint: 'main',
62 | targets: [
63 | {
64 | format: format
65 | }
66 | ]
67 | },
68 | primitive: {
69 | topology: 'triangle-list',
70 | // Culling backfaces pointing away from the camera
71 | cullMode: 'back',
72 | frontFace: 'ccw'
73 | },
74 | // Enable depth testing since we have z-level positions
75 | // Fragment closest to the camera is rendered in front
76 | depthStencil: {
77 | depthWriteEnabled: true,
78 | depthCompare: 'less',
79 | format: 'depth24plus'
80 | }
81 | } as GPURenderPipelineDescriptor)
82 | // create depthTexture for renderPass
83 | const depthTexture = device.createTexture({
84 | size, format: 'depth24plus',
85 | usage: GPUTextureUsage.RENDER_ATTACHMENT
86 | })
87 | const depthView = depthTexture.createView()
88 | // create vertex buffer
89 | const vertexBuffer = device.createBuffer({
90 | label: 'GPUBuffer store vertex',
91 | size: cube.vertex.byteLength,
92 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
93 | })
94 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
95 | // create a mvp matrix buffer
96 | const mvpBuffer = device.createBuffer({
97 | label: 'GPUBuffer store 4x4 matrix',
98 | size: 4 * 4 * 4, // 4 x 4 x float32
99 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
100 | })
101 | // create a uniform group contains matrix
102 | const uniformGroup = device.createBindGroup({
103 | label: 'Uniform Group with Matrix',
104 | layout: pipeline.getBindGroupLayout(0),
105 | entries: [
106 | {
107 | binding: 0,
108 | resource: {
109 | buffer: mvpBuffer
110 | }
111 | }
112 | ]
113 | })
114 | // return all vars
115 | return { pipeline, vertexBuffer, mvpBuffer, uniformGroup, depthTexture, depthView }
116 | }
117 |
118 | // create & submit device commands
119 | function draw(
120 | device: GPUDevice,
121 | context: GPUCanvasContext,
122 | pipelineObj: {
123 | pipeline: GPURenderPipeline
124 | vertexBuffer: GPUBuffer
125 | mvpBuffer: GPUBuffer
126 | uniformGroup: GPUBindGroup
127 | depthView: GPUTextureView
128 | },
129 | textureGroup: GPUBindGroup
130 | ) {
131 | // start encoder
132 | const commandEncoder = device.createCommandEncoder()
133 | const renderPassDescriptor: GPURenderPassDescriptor = {
134 | colorAttachments: [
135 | {
136 | view: context.getCurrentTexture().createView(),
137 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
138 | loadOp: 'clear',
139 | storeOp: 'store'
140 | }
141 | ],
142 | depthStencilAttachment: {
143 | view: pipelineObj.depthView,
144 | depthClearValue: 1.0,
145 | depthLoadOp: 'clear',
146 | depthStoreOp: 'store',
147 | }
148 | }
149 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
150 | passEncoder.setPipeline(pipelineObj.pipeline)
151 | // set uniformGroup
152 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
153 | // set textureGroup
154 | passEncoder.setBindGroup(1, textureGroup)
155 | // set vertex
156 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
157 | // draw vertex count of cube
158 | passEncoder.draw(cube.vertexCount)
159 | passEncoder.end()
160 | // webgpu run in a separate process, all the commands will be executed after submit
161 | device.queue.submit([commandEncoder.finish()])
162 | }
163 |
164 | async function run() {
165 | const canvas = document.querySelector('canvas')
166 | if (!canvas)
167 | throw new Error('No Canvas')
168 | const { device, context, format, size } = await initWebGPU(canvas)
169 | const pipelineObj = await initPipeline(device, format, size)
170 |
171 | // fetch an image and upload to GPUTexture
172 | const res = await fetch(textureUrl)
173 | const img = await res.blob()
174 | const bitmap = await createImageBitmap(img)
175 | const textureSize = [bitmap.width, bitmap.height]
176 | // create empty texture
177 | const texture = device.createTexture({
178 | size: textureSize,
179 | format: 'rgba8unorm',
180 | usage:
181 | GPUTextureUsage.TEXTURE_BINDING |
182 | GPUTextureUsage.COPY_DST |
183 | GPUTextureUsage.RENDER_ATTACHMENT
184 | })
185 | // update image to GPUTexture
186 | device.queue.copyExternalImageToTexture(
187 | { source: bitmap },
188 | { texture: texture },
189 | textureSize
190 | )
191 | // Create a sampler with linear filtering for smooth interpolation.
192 | const sampler = device.createSampler({
193 | // addressModeU: 'repeat',
194 | // addressModeV: 'repeat',
195 | magFilter: 'linear',
196 | minFilter: 'linear'
197 | })
198 | // create a custom uvoffset buffer to show specific area of texture
199 | const uvOffset = new Float32Array([0, 0, 1/3, 1/2])
200 | const uvBuffer = device.createBuffer({
201 | label: 'GPUBuffer store UV offset',
202 | size: 4 * 4, // 4 x uint32
203 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
204 | })
205 | device.queue.writeBuffer(uvBuffer, 0, uvOffset)
206 | const textureGroup = device.createBindGroup({
207 | label: 'Texture Group with Texture/Sampler',
208 | layout: pipelineObj.pipeline.getBindGroupLayout(1),
209 | entries: [
210 | {
211 | binding: 0,
212 | resource: sampler
213 | },
214 | {
215 | binding: 1,
216 | resource: texture.createView()
217 | },{
218 | binding: 2,
219 | resource: {
220 | buffer: uvBuffer
221 | }
222 | }
223 | ]
224 | })
225 |
226 | // default state
227 | let aspect = size.width / size.height
228 | const position = { x: 0, y: 0, z: -5 }
229 | const scale = { x: 1, y: 1, z: 1 }
230 | const rotation = { x: 0, y: 0, z: 0 }
231 | let count = 0
232 | // start loop
233 | function frame() {
234 | count ++
235 | // update uvoffset by frame, to simulate animation
236 | if(count % 30 === 0){
237 | uvOffset[0] = uvOffset[0] >= 2/3 ? 0 : uvOffset[0] + 1/3
238 | if(count % 90 === 0)
239 | uvOffset[1] = uvOffset[1] >= 1/2 ? 0 : uvOffset[1] + 1/2
240 | device.queue.writeBuffer(uvBuffer, 0, uvOffset)
241 | }
242 | // rotate by time, and update transform matrix
243 | const now = Date.now() / 1000
244 | rotation.x = Math.sin(now)
245 | rotation.y = Math.cos(now)
246 | const mvpMatrix = getMvpMatrix(aspect, position, rotation, scale)
247 | device.queue.writeBuffer(
248 | pipelineObj.mvpBuffer,
249 | 0,
250 | mvpMatrix.buffer
251 | )
252 | // then draw
253 | draw(device, context, pipelineObj, textureGroup)
254 | requestAnimationFrame(frame)
255 | }
256 | frame()
257 |
258 | // re-configure context on resize
259 | window.addEventListener('resize', () => {
260 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
261 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
262 | // don't need to recall context.configure() after v104
263 | // re-create depth texture
264 | pipelineObj.depthTexture.destroy()
265 | pipelineObj.depthTexture = device.createTexture({
266 | size, format: 'depth24plus',
267 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
268 | })
269 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
270 | // update aspect
271 | aspect = size.width / size.height
272 | })
273 | }
274 | run()
--------------------------------------------------------------------------------
/src/util/box.ts:
--------------------------------------------------------------------------------
1 | const vertex = new Float32Array([
2 | // float3 position, float3 normal, float2 uv
3 | 0.5,0.5,0.5, 1,0,0, 0,1,
4 | 0.5,0.5,-0.5, 1,0,0, 1,1,
5 | 0.5,-0.5,0.5, 1,0,0, 0,0,
6 | 0.5,-0.5,-0.5, 1,0,0, 1,0,
7 | -0.5,0.5,-0.5, -1,0,0, 0,1,
8 | -0.5,0.5,0.5, -1,0,0, 1,1,
9 | -0.5,-0.5,-0.5, -1,0,0, 0,0,
10 | -0.5,-0.5,0.5, -1,0,0, 1,0,
11 | -0.5,0.5,-0.5, 0,1,0, 0,1,
12 | 0.5,0.5,-0.5, 0,1,0, 1,1,
13 | -0.5,0.5,0.5, 0,1,0, 0,0,
14 | 0.5,0.5,0.5, 0,1,0, 1,0,
15 | -0.5,-0.5,0.5, 0,-1,0, 0,1,
16 | 0.5,-0.5,0.5, 0,-1,0, 1,1,
17 | -0.5,-0.5,-0.5, 0,-1,0, 0,0,
18 | 0.5,-0.5,-0.5, 0,-1,0, 1,0,
19 | -0.5,0.5,0.5, 0,0,1, 0,1,
20 | 0.5,0.5,0.5, 0,0,1, 1,1,
21 | -0.5,-0.5,0.5, 0,0,1, 0,0,
22 | 0.5,-0.5,0.5, 0,0,1, 1,0,
23 | 0.5,0.5,-0.5, 0,0,-1, 0,1,
24 | -0.5,0.5,-0.5, 0,0,-1, 1,1,
25 | 0.5,-0.5,-0.5, 0,0,-1, 0,0,
26 | -0.5,-0.5,-0.5, 0,0,-1, 1,0
27 | ])
28 |
29 | const index = new Uint16Array([
30 | 0,2,1,
31 | 2,3,1,
32 | 4,6,5,
33 | 6,7,5,
34 | 8,10,9,
35 | 10,11,9,
36 | 12,14,13,
37 | 14,15,13,
38 | 16,18,17,
39 | 18,19,17,
40 | 20,22,21,
41 | 22,23,21
42 | ])
43 | const vertexCount = 24
44 | const indexCount = 36
45 |
46 | export {vertex, index, vertexCount, indexCount}
--------------------------------------------------------------------------------
/src/util/cube.ts:
--------------------------------------------------------------------------------
1 | const vertex = new Float32Array([
2 | // float3 position, float2 uv
3 | // face1
4 | +1, -1, +1, 1, 1,
5 | -1, -1, +1, 0, 1,
6 | -1, -1, -1, 0, 0,
7 | +1, -1, -1, 1, 0,
8 | +1, -1, +1, 1, 1,
9 | -1, -1, -1, 0, 0,
10 | // face2
11 | +1, +1, +1, 1, 1,
12 | +1, -1, +1, 0, 1,
13 | +1, -1, -1, 0, 0,
14 | +1, +1, -1, 1, 0,
15 | +1, +1, +1, 1, 1,
16 | +1, -1, -1, 0, 0,
17 | // face3
18 | -1, +1, +1, 1, 1,
19 | +1, +1, +1, 0, 1,
20 | +1, +1, -1, 0, 0,
21 | -1, +1, -1, 1, 0,
22 | -1, +1, +1, 1, 1,
23 | +1, +1, -1, 0, 0,
24 | // face4
25 | -1, -1, +1, 1, 1,
26 | -1, +1, +1, 0, 1,
27 | -1, +1, -1, 0, 0,
28 | -1, -1, -1, 1, 0,
29 | -1, -1, +1, 1, 1,
30 | -1, +1, -1, 0, 0,
31 | // face5
32 | +1, +1, +1, 1, 1,
33 | -1, +1, +1, 0, 1,
34 | -1, -1, +1, 0, 0,
35 | -1, -1, +1, 0, 0,
36 | +1, -1, +1, 1, 0,
37 | +1, +1, +1, 1, 1,
38 | // face6
39 | +1, -1, -1, 1, 1,
40 | -1, -1, -1, 0, 1,
41 | -1, +1, -1, 0, 0,
42 | +1, +1, -1, 1, 0,
43 | +1, -1, -1, 1, 1,
44 | -1, +1, -1, 0, 0
45 | ])
46 |
47 | const vertexCount = 36
48 |
49 | export {vertex, vertexCount}
--------------------------------------------------------------------------------
/src/util/math.ts:
--------------------------------------------------------------------------------
1 | import { mat4, vec3 } from 'gl-matrix'
2 |
3 | // return mvp matrix from given aspect, position, rotation, scale
4 | function getMvpMatrix(
5 | aspect: number,
6 | position: {x:number, y:number, z:number},
7 | rotation: {x:number, y:number, z:number},
8 | scale: {x:number, y:number, z:number}
9 | ){
10 | // get modelView Matrix
11 | const modelViewMatrix = getModelViewMatrix(position, rotation, scale)
12 | // get projection Matrix
13 | const projectionMatrix = getProjectionMatrix(aspect)
14 | // get mvp matrix
15 | const mvpMatrix = mat4.create()
16 | mat4.multiply(mvpMatrix, projectionMatrix, modelViewMatrix)
17 |
18 | // return matrix as Float32Array
19 | return mvpMatrix as Float32Array
20 | }
21 |
22 | // return modelView matrix from given position, rotation, scale
23 | function getModelViewMatrix(
24 | position = {x:0, y:0, z:0},
25 | rotation = {x:0, y:0, z:0},
26 | scale = {x:1, y:1, z:1}
27 | ){
28 | // get modelView Matrix
29 | const modelViewMatrix = mat4.create()
30 | // translate position
31 | mat4.translate(modelViewMatrix, modelViewMatrix, vec3.fromValues(position.x, position.y, position.z))
32 | // rotate
33 | mat4.rotateX(modelViewMatrix, modelViewMatrix, rotation.x)
34 | mat4.rotateY(modelViewMatrix, modelViewMatrix, rotation.y)
35 | mat4.rotateZ(modelViewMatrix, modelViewMatrix, rotation.z)
36 | // scale
37 | mat4.scale(modelViewMatrix, modelViewMatrix, vec3.fromValues(scale.x, scale.y, scale.z))
38 |
39 | // return matrix as Float32Array
40 | return modelViewMatrix as Float32Array
41 | }
42 |
43 | const center = vec3.fromValues(0,0,0)
44 | const up = vec3.fromValues(0,1,0)
45 |
46 | function getProjectionMatrix(
47 | aspect: number,
48 | fov:number = 60 / 180 * Math.PI,
49 | near:number = 0.1,
50 | far:number = 100.0,
51 | position = {x:0, y:0, z:0}
52 | ){
53 | // create cameraview
54 | const cameraView = mat4.create()
55 | const eye = vec3.fromValues(position.x, position.y, position.z)
56 | mat4.translate(cameraView, cameraView, eye)
57 | mat4.lookAt(cameraView, eye, center, up)
58 | // get a perspective Matrix
59 | const projectionMatrix = mat4.create()
60 | mat4.perspective(projectionMatrix, fov, aspect, near, far)
61 | mat4.multiply(projectionMatrix, projectionMatrix, cameraView)
62 | // return matrix as Float32Array
63 | return projectionMatrix as Float32Array
64 | }
65 |
66 | export { getMvpMatrix, getModelViewMatrix, getProjectionMatrix }
--------------------------------------------------------------------------------
/src/util/sphere.ts:
--------------------------------------------------------------------------------
1 | const vertex = new Float32Array([
2 | // float3 position, float3 normal, float2 uv
3 | 0,1,0, 0,1,0, 0.05,1,
4 | 0,1,0, 0,1,0, 0.15,1,
5 | 0,1,0, 0,1,0, 0.25,1,
6 | 0,1,0, 0,1,0, 0.35,1,
7 | 0,1,0, 0,1,0, 0.45,1,
8 | 0,1,0, 0,1,0, 0.55,1,
9 | 0,1,0, 0,1,0, 0.65,1,
10 | 0,1,0, 0,1,0, 0.75,1,
11 | 0,1,0, 0,1,0, 0.85,1,
12 | 0,1,0, 0,1,0, 0.95,1,
13 | 0,1,0, 0,1,0, 1.05,1,
14 | -0.30902,0.95106,0, -0.30902,0.95106,0, 0,0.9,
15 | -0.25,0.95106,0.18164, -0.25,0.95106,0.18164, 0.1,0.9,
16 | -0.09549,0.95106,0.29389, -0.09549,0.95106,0.29389, 0.2,0.9,
17 | 0.09549,0.95106,0.29389, 0.09549,0.95106,0.29389, 0.3,0.9,
18 | 0.25,0.95106,0.18164, 0.25,0.95106,0.18164, 0.4,0.9,
19 | 0.30902,0.95106,0, 0.30902,0.95106,0, 0.5,0.9,
20 | 0.25,0.95106,-0.18164, 0.25,0.95106,-0.18164, 0.6,0.9,
21 | 0.09549,0.95106,-0.29389, 0.09549,0.95106,-0.29389, 0.7,0.9,
22 | -0.09549,0.95106,-0.29389, -0.09549,0.95106,-0.29389, 0.8,0.9,
23 | -0.25,0.95106,-0.18164, -0.25,0.95106,-0.18164, 0.9,0.9,
24 | -0.30902,0.95106,0, -0.30902,0.95106,0, 1,0.9,
25 | -0.58779,0.80902,0, -0.58779,0.80902,0, 0,0.8,
26 | -0.47553,0.80902,0.34549, -0.47553,0.80902,0.34549, 0.1,0.8,
27 | -0.18164,0.80902,0.55902, -0.18164,0.80902,0.55902, 0.2,0.8,
28 | 0.18164,0.80902,0.55902, 0.18164,0.80902,0.55902, 0.3,0.8,
29 | 0.47553,0.80902,0.34549, 0.47553,0.80902,0.34549, 0.4,0.8,
30 | 0.58779,0.80902,0, 0.58779,0.80902,0, 0.5,0.8,
31 | 0.47553,0.80902,-0.34549, 0.47553,0.80902,-0.34549, 0.6,0.8,
32 | 0.18164,0.80902,-0.55902, 0.18164,0.80902,-0.55902, 0.7,0.8,
33 | -0.18164,0.80902,-0.55902, -0.18164,0.80902,-0.55902, 0.8,0.8,
34 | -0.47553,0.80902,-0.34549, -0.47553,0.80902,-0.34549, 0.9,0.8,
35 | -0.58779,0.80902,0, -0.58779,0.80902,0, 1,0.8,
36 | -0.80902,0.58779,0, -0.80902,0.58779,0, 0,0.7,
37 | -0.65451,0.58779,0.47553, -0.65451,0.58779,0.47553, 0.1,0.7,
38 | -0.25,0.58779,0.76942, -0.25,0.58779,0.76942, 0.2,0.7,
39 | 0.25,0.58779,0.76942, 0.25,0.58779,0.76942, 0.3,0.7,
40 | 0.65451,0.58779,0.47553, 0.65451,0.58779,0.47553, 0.4,0.7,
41 | 0.80902,0.58779,0, 0.80902,0.58779,0, 0.5,0.7,
42 | 0.65451,0.58779,-0.47553, 0.65451,0.58779,-0.47553, 0.6,0.7,
43 | 0.25,0.58779,-0.76942, 0.25,0.58779,-0.76942, 0.7,0.7,
44 | -0.25,0.58779,-0.76942, -0.25,0.58779,-0.76942, 0.8,0.7,
45 | -0.65451,0.58779,-0.47553, -0.65451,0.58779,-0.47553, 0.9,0.7,
46 | -0.80902,0.58779,0, -0.80902,0.58779,0, 1,0.7,
47 | -0.95106,0.30902,0, -0.95106,0.30902,0, 0,0.6,
48 | -0.76942,0.30902,0.55902, -0.76942,0.30902,0.55902, 0.1,0.6,
49 | -0.29389,0.30902,0.90451, -0.29389,0.30902,0.90451, 0.2,0.6,
50 | 0.29389,0.30902,0.90451, 0.29389,0.30902,0.90451, 0.3,0.6,
51 | 0.76942,0.30902,0.55902, 0.76942,0.30902,0.55902, 0.4,0.6,
52 | 0.95106,0.30902,0, 0.95106,0.30902,0, 0.5,0.6,
53 | 0.76942,0.30902,-0.55902, 0.76942,0.30902,-0.55902, 0.6,0.6,
54 | 0.29389,0.30902,-0.90451, 0.29389,0.30902,-0.90451, 0.7,0.6,
55 | -0.29389,0.30902,-0.90451, -0.29389,0.30902,-0.90451, 0.8,0.6,
56 | -0.76942,0.30902,-0.55902, -0.76942,0.30902,-0.55902, 0.9,0.6,
57 | -0.95106,0.30902,0, -0.95106,0.30902,0, 1,0.6,
58 | -1,0,0, -1,0,0, 0,0.5,
59 | -0.80902,0,0.58779, -0.80902,0,0.58779, 0.1,0.5,
60 | -0.30902,0,0.95106, -0.30902,0,0.95106, 0.2,0.5,
61 | 0.30902,0,0.95106, 0.30902,0,0.95106, 0.3,0.5,
62 | 0.80902,0,0.58779, 0.80902,0,0.58779, 0.4,0.5,
63 | 1,0,0, 1,0,0, 0.5,0.5,
64 | 0.80902,0,-0.58779, 0.80902,0,-0.58779, 0.6,0.5,
65 | 0.30902,0,-0.95106, 0.30902,0,-0.95106, 0.7,0.5,
66 | -0.30902,0,-0.95106, -0.30902,0,-0.95106, 0.8,0.5,
67 | -0.80902,0,-0.58779, -0.80902,0,-0.58779, 0.9,0.5,
68 | -1,0,0, -1,0,0, 1,0.5,
69 | -0.95106,-0.30902,0, -0.95106,-0.30902,0, 0,0.4,
70 | -0.76942,-0.30902,0.55902, -0.76942,-0.30902,0.55902, 0.1,0.4,
71 | -0.29389,-0.30902,0.90451, -0.29389,-0.30902,0.90451, 0.2,0.4,
72 | 0.29389,-0.30902,0.90451, 0.29389,-0.30902,0.90451, 0.3,0.4,
73 | 0.76942,-0.30902,0.55902, 0.76942,-0.30902,0.55902, 0.4,0.4,
74 | 0.95106,-0.30902,0, 0.95106,-0.30902,0, 0.5,0.4,
75 | 0.76942,-0.30902,-0.55902, 0.76942,-0.30902,-0.55902, 0.6,0.4,
76 | 0.29389,-0.30902,-0.90451, 0.29389,-0.30902,-0.90451, 0.7,0.4,
77 | -0.29389,-0.30902,-0.90451, -0.29389,-0.30902,-0.90451, 0.8,0.4,
78 | -0.76942,-0.30902,-0.55902, -0.76942,-0.30902,-0.55902, 0.9,0.4,
79 | -0.95106,-0.30902,0, -0.95106,-0.30902,0, 1,0.4,
80 | -0.80902,-0.58779,0, -0.80902,-0.58779,0, 0,0.3,
81 | -0.65451,-0.58779,0.47553, -0.65451,-0.58779,0.47553, 0.1,0.3,
82 | -0.25,-0.58779,0.76942, -0.25,-0.58779,0.76942, 0.2,0.3,
83 | 0.25,-0.58779,0.76942, 0.25,-0.58779,0.76942, 0.3,0.3,
84 | 0.65451,-0.58779,0.47553, 0.65451,-0.58779,0.47553, 0.4,0.3,
85 | 0.80902,-0.58779,0, 0.80902,-0.58779,0, 0.5,0.3,
86 | 0.65451,-0.58779,-0.47553, 0.65451,-0.58779,-0.47553, 0.6,0.3,
87 | 0.25,-0.58779,-0.76942, 0.25,-0.58779,-0.76942, 0.7,0.3,
88 | -0.25,-0.58779,-0.76942, -0.25,-0.58779,-0.76942, 0.8,0.3,
89 | -0.65451,-0.58779,-0.47553, -0.65451,-0.58779,-0.47553, 0.9,0.3,
90 | -0.80902,-0.58779,0, -0.80902,-0.58779,0, 1,0.3,
91 | -0.58779,-0.80902,0, -0.58779,-0.80902,0, 0,0.2,
92 | -0.47553,-0.80902,0.34549, -0.47553,-0.80902,0.34549, 0.1,0.2,
93 | -0.18164,-0.80902,0.55902, -0.18164,-0.80902,0.55902, 0.2,0.2,
94 | 0.18164,-0.80902,0.55902, 0.18164,-0.80902,0.55902, 0.3,0.2,
95 | 0.47553,-0.80902,0.34549, 0.47553,-0.80902,0.34549, 0.4,0.2,
96 | 0.58779,-0.80902,0, 0.58779,-0.80902,0, 0.5,0.2,
97 | 0.47553,-0.80902,-0.34549, 0.47553,-0.80902,-0.34549, 0.6,0.2,
98 | 0.18164,-0.80902,-0.55902, 0.18164,-0.80902,-0.55902, 0.7,0.2,
99 | -0.18164,-0.80902,-0.55902, -0.18164,-0.80902,-0.55902, 0.8,0.2,
100 | -0.47553,-0.80902,-0.34549, -0.47553,-0.80902,-0.34549, 0.9,0.2,
101 | -0.58779,-0.80902,0, -0.58779,-0.80902,0, 1,0.2,
102 | -0.30902,-0.95106,0, -0.30902,-0.95106,0, 0,0.1,
103 | -0.25,-0.95106,0.18164, -0.25,-0.95106,0.18164, 0.1,0.1,
104 | -0.09549,-0.95106,0.29389, -0.09549,-0.95106,0.29389, 0.2,0.1,
105 | 0.09549,-0.95106,0.29389, 0.09549,-0.95106,0.29389, 0.3,0.1,
106 | 0.25,-0.95106,0.18164, 0.25,-0.95106,0.18164, 0.4,0.1,
107 | 0.30902,-0.95106,0, 0.30902,-0.95106,0, 0.5,0.1,
108 | 0.25,-0.95106,-0.18164, 0.25,-0.95106,-0.18164, 0.6,0.1,
109 | 0.09549,-0.95106,-0.29389, 0.09549,-0.95106,-0.29389, 0.7,0.1,
110 | -0.09549,-0.95106,-0.29389, -0.09549,-0.95106,-0.29389, 0.8,0.1,
111 | -0.25,-0.95106,-0.18164, -0.25,-0.95106,-0.18164, 0.9,0.1,
112 | -0.30902,-0.95106,0, -0.30902,-0.95106,0, 1,0.1,
113 | 0,-1,0, 0,-1,0, -0.05,0,
114 | 0,-1,0, 0,-1,0, 0.05,0,
115 | 0,-1,0, 0,-1,0, 0.15,0,
116 | 0,-1,0, 0,-1,0, 0.25,0,
117 | 0,-1,0, 0,-1,0, 0.35,0,
118 | 0,-1,0, 0,-1,0, 0.45,0,
119 | 0,-1,0, 0,-1,0, 0.55,0,
120 | 0,-1,0, 0,-1,0, 0.65,0,
121 | 0,-1,0, 0,-1,0, 0.75,0,
122 | 0,-1,0, 0,-1,0, 0.85,0,
123 | 0,-1,0, 0,-1,0, 0.95,0
124 | ])
125 |
126 | const index = new Uint16Array([
127 | 0,11,12,
128 | 1,12,13,
129 | 2,13,14,
130 | 3,14,15,
131 | 4,15,16,
132 | 5,16,17,
133 | 6,17,18,
134 | 7,18,19,
135 | 8,19,20,
136 | 9,20,21,
137 | 12,11,23,
138 | 11,22,23,
139 | 13,12,24,
140 | 12,23,24,
141 | 14,13,25,
142 | 13,24,25,
143 | 15,14,26,
144 | 14,25,26,
145 | 16,15,27,
146 | 15,26,27,
147 | 17,16,28,
148 | 16,27,28,
149 | 18,17,29,
150 | 17,28,29,
151 | 19,18,30,
152 | 18,29,30,
153 | 20,19,31,
154 | 19,30,31,
155 | 21,20,32,
156 | 20,31,32,
157 | 23,22,34,
158 | 22,33,34,
159 | 24,23,35,
160 | 23,34,35,
161 | 25,24,36,
162 | 24,35,36,
163 | 26,25,37,
164 | 25,36,37,
165 | 27,26,38,
166 | 26,37,38,
167 | 28,27,39,
168 | 27,38,39,
169 | 29,28,40,
170 | 28,39,40,
171 | 30,29,41,
172 | 29,40,41,
173 | 31,30,42,
174 | 30,41,42,
175 | 32,31,43,
176 | 31,42,43,
177 | 34,33,45,
178 | 33,44,45,
179 | 35,34,46,
180 | 34,45,46,
181 | 36,35,47,
182 | 35,46,47,
183 | 37,36,48,
184 | 36,47,48,
185 | 38,37,49,
186 | 37,48,49,
187 | 39,38,50,
188 | 38,49,50,
189 | 40,39,51,
190 | 39,50,51,
191 | 41,40,52,
192 | 40,51,52,
193 | 42,41,53,
194 | 41,52,53,
195 | 43,42,54,
196 | 42,53,54,
197 | 45,44,56,
198 | 44,55,56,
199 | 46,45,57,
200 | 45,56,57,
201 | 47,46,58,
202 | 46,57,58,
203 | 48,47,59,
204 | 47,58,59,
205 | 49,48,60,
206 | 48,59,60,
207 | 50,49,61,
208 | 49,60,61,
209 | 51,50,62,
210 | 50,61,62,
211 | 52,51,63,
212 | 51,62,63,
213 | 53,52,64,
214 | 52,63,64,
215 | 54,53,65,
216 | 53,64,65,
217 | 56,55,67,
218 | 55,66,67,
219 | 57,56,68,
220 | 56,67,68,
221 | 58,57,69,
222 | 57,68,69,
223 | 59,58,70,
224 | 58,69,70,
225 | 60,59,71,
226 | 59,70,71,
227 | 61,60,72,
228 | 60,71,72,
229 | 62,61,73,
230 | 61,72,73,
231 | 63,62,74,
232 | 62,73,74,
233 | 64,63,75,
234 | 63,74,75,
235 | 65,64,76,
236 | 64,75,76,
237 | 67,66,78,
238 | 66,77,78,
239 | 68,67,79,
240 | 67,78,79,
241 | 69,68,80,
242 | 68,79,80,
243 | 70,69,81,
244 | 69,80,81,
245 | 71,70,82,
246 | 70,81,82,
247 | 72,71,83,
248 | 71,82,83,
249 | 73,72,84,
250 | 72,83,84,
251 | 74,73,85,
252 | 73,84,85,
253 | 75,74,86,
254 | 74,85,86,
255 | 76,75,87,
256 | 75,86,87,
257 | 78,77,89,
258 | 77,88,89,
259 | 79,78,90,
260 | 78,89,90,
261 | 80,79,91,
262 | 79,90,91,
263 | 81,80,92,
264 | 80,91,92,
265 | 82,81,93,
266 | 81,92,93,
267 | 83,82,94,
268 | 82,93,94,
269 | 84,83,95,
270 | 83,94,95,
271 | 85,84,96,
272 | 84,95,96,
273 | 86,85,97,
274 | 85,96,97,
275 | 87,86,98,
276 | 86,97,98,
277 | 89,88,100,
278 | 88,99,100,
279 | 90,89,101,
280 | 89,100,101,
281 | 91,90,102,
282 | 90,101,102,
283 | 92,91,103,
284 | 91,102,103,
285 | 93,92,104,
286 | 92,103,104,
287 | 94,93,105,
288 | 93,104,105,
289 | 95,94,106,
290 | 94,105,106,
291 | 96,95,107,
292 | 95,106,107,
293 | 97,96,108,
294 | 96,107,108,
295 | 98,97,109,
296 | 97,108,109,
297 | 100,99,111,
298 | 101,100,112,
299 | 102,101,113,
300 | 103,102,114,
301 | 104,103,115,
302 | 105,104,116,
303 | 106,105,117,
304 | 107,106,118,
305 | 108,107,119,
306 | 109,108,120
307 | ])
308 |
309 | const vertexCount = 121
310 | const indexCount = 540
311 |
312 | export {vertex, index, vertexCount, indexCount}
--------------------------------------------------------------------------------
/src/util/triangle.ts:
--------------------------------------------------------------------------------
1 | const vertex = new Float32Array([
2 | 0.0, 0.5, 0.0,
3 | -0.5, -0.5, 0.0,
4 | 0.5, -0.5, 0.0
5 | ])
6 | const vertexCount = 3
7 |
8 | export {vertex, vertexCount}
--------------------------------------------------------------------------------
/src/videoTexture.ts:
--------------------------------------------------------------------------------
1 | import basicVert from './shaders/basic.vert.wgsl?raw'
2 | import videoTexture from './shaders/videoTexture.frag.wgsl?raw'
3 | import * as cube from './util/cube'
4 | import { getMvpMatrix } from './util/math'
5 | import videoUrl from '/video.mp4?url'
6 |
7 | // initialize webgpu device & config canvas context
8 | async function initWebGPU(canvas: HTMLCanvasElement) {
9 | if (!navigator.gpu)
10 | throw new Error('Not Support WebGPU')
11 | const adapter = await navigator.gpu.requestAdapter()
12 | if (!adapter)
13 | throw new Error('No Adapter Found')
14 | const device = await adapter.requestDevice()
15 | const context = canvas.getContext('webgpu') as GPUCanvasContext
16 | const format = navigator.gpu.getPreferredCanvasFormat()
17 | const devicePixelRatio = window.devicePixelRatio || 1
18 | canvas.width = canvas.clientWidth * devicePixelRatio
19 | canvas.height = canvas.clientHeight * devicePixelRatio
20 | const size = {width: canvas.width, height: canvas.height}
21 | context.configure({
22 | device, format,
23 | // prevent chrome warning after v102
24 | alphaMode: 'opaque'
25 | })
26 | return { device, context, format, size }
27 | }
28 |
29 | // create pipiline & buffers
30 | async function initPipeline(device: GPUDevice, format: GPUTextureFormat, size: { width: number, height: number }) {
31 | const pipeline = await device.createRenderPipelineAsync({
32 | label: 'Basic Pipline',
33 | layout: 'auto',
34 | vertex: {
35 | module: device.createShaderModule({
36 | code: basicVert,
37 | }),
38 | entryPoint: 'main',
39 | buffers: [{
40 | arrayStride: 5 * 4, // 3 position 2 uv,
41 | attributes: [
42 | {
43 | // position
44 | shaderLocation: 0,
45 | offset: 0,
46 | format: 'float32x3'
47 | },
48 | {
49 | // uv
50 | shaderLocation: 1,
51 | offset: 3 * 4,
52 | format: 'float32x2'
53 | }
54 | ]
55 | }]
56 | },
57 | fragment: {
58 | module: device.createShaderModule({
59 | code: videoTexture,
60 | }),
61 | entryPoint: 'main',
62 | targets: [
63 | {
64 | format: format
65 | }
66 | ]
67 | },
68 | primitive: {
69 | topology: 'triangle-list',
70 | // Culling backfaces pointing away from the camera
71 | cullMode: 'back',
72 | frontFace: 'ccw'
73 | },
74 | // Enable depth testing since we have z-level positions
75 | // Fragment closest to the camera is rendered in front
76 | depthStencil: {
77 | depthWriteEnabled: true,
78 | depthCompare: 'less',
79 | format: 'depth24plus'
80 | }
81 | } as GPURenderPipelineDescriptor)
82 | // create depthTexture for renderPass
83 | const depthTexture = device.createTexture({
84 | size, format: 'depth24plus',
85 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
86 | })
87 | const depthView = depthTexture.createView()
88 | // create vertex buffer
89 | const vertexBuffer = device.createBuffer({
90 | label: 'GPUBuffer store vertex',
91 | size: cube.vertex.byteLength,
92 | usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
93 | })
94 | device.queue.writeBuffer(vertexBuffer, 0, cube.vertex)
95 | // create a mvp matrix buffer
96 | const mvpBuffer = device.createBuffer({
97 | label: 'GPUBuffer store 4x4 matrix',
98 | size: 4 * 4 * 4, // 4 x 4 x float32
99 | usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
100 | })
101 | // create a uniform group contains matrix
102 | const uniformGroup = device.createBindGroup({
103 | label: 'Uniform Group with Matrix',
104 | layout: pipeline.getBindGroupLayout(0),
105 | entries: [
106 | {
107 | binding: 0,
108 | resource: {
109 | buffer: mvpBuffer
110 | }
111 | }
112 | ]
113 | })
114 |
115 | // return all vars
116 | return { pipeline, vertexBuffer, mvpBuffer, uniformGroup, depthTexture, depthView }
117 | }
118 |
119 | // create & submit device commands
120 | function draw(
121 | device: GPUDevice,
122 | context: GPUCanvasContext,
123 | pipelineObj: {
124 | pipeline: GPURenderPipeline
125 | vertexBuffer: GPUBuffer
126 | mvpBuffer: GPUBuffer
127 | uniformGroup: GPUBindGroup
128 | depthView: GPUTextureView
129 | },
130 | videoGroup: GPUBindGroup
131 | ) {
132 | // start encoder
133 | const commandEncoder = device.createCommandEncoder()
134 | const renderPassDescriptor: GPURenderPassDescriptor = {
135 | colorAttachments: [
136 | {
137 | view: context.getCurrentTexture().createView(),
138 | clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
139 | loadOp: 'clear',
140 | storeOp: 'store'
141 | }
142 | ],
143 | depthStencilAttachment: {
144 | view: pipelineObj.depthView,
145 | depthClearValue: 1.0,
146 | depthLoadOp: 'clear',
147 | depthStoreOp: 'store'
148 | }
149 | }
150 | const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
151 | passEncoder.setPipeline(pipelineObj.pipeline)
152 | // set uniformGroup & videoGroup
153 | passEncoder.setBindGroup(0, pipelineObj.uniformGroup)
154 | passEncoder.setBindGroup(1, videoGroup)
155 | // set vertex
156 | passEncoder.setVertexBuffer(0, pipelineObj.vertexBuffer)
157 | // draw vertex count of cube
158 | passEncoder.draw(cube.vertexCount)
159 | passEncoder.end()
160 | // webgpu run in a separate process, all the commands will be executed after submit
161 | device.queue.submit([commandEncoder.finish()])
162 | }
163 |
164 | async function run() {
165 | // set Video element and play in advanced
166 | const video = document.createElement('video');
167 | video.loop = true
168 | video.autoplay = true
169 | video.muted = true
170 | video.src = videoUrl
171 | await video.play()
172 |
173 | const canvas = document.querySelector('canvas')
174 | if (!canvas)
175 | throw new Error('No Canvas')
176 | const { device, context, format, size } = await initWebGPU(canvas)
177 | const pipelineObj = await initPipeline(device, format, size)
178 | // Create a sampler with linear filtering for smooth interpolation.
179 | const sampler = device.createSampler({
180 | // addressModeU: 'repeat',
181 | // addressModeV: 'repeat',
182 | magFilter: 'linear',
183 | minFilter: 'linear'
184 | })
185 | // default state
186 | let aspect = size.width / size.height
187 | const position = { x: 0, y: 0, z: -5 }
188 | const scale = { x: 1, y: 1, z: 1 }
189 | const rotation = { x: 0, y: 0, z: 0 }
190 | // start loop
191 | function frame() {
192 | // video frame rate may not different with page render rate
193 | // we can use VideoFrame to force video decoding current frame
194 | const videoFrame = new VideoFrame(video)
195 | // it can be imported to webgpu as texture source with the `webgpu-developer-features` flag enabled
196 | // const texture = device.importExternalTexture({
197 | // source: videoFrame // need `webgpu-developer-features`
198 | // })
199 | // but in this demo, we don't acctully use it, just close it
200 | videoFrame.close()
201 |
202 | // external texture will be automatically destroyed as soon as JS returns
203 | // cannot be interrupt by any async functions before renderring
204 | // e.g. event callbacks, or await functions
205 | // so need to re-load external video every frame
206 | const texture = device.importExternalTexture({
207 | source: video
208 | })
209 |
210 | // also need to re-create a bindGroup for external texture
211 | const videoGroup = device.createBindGroup({
212 | layout: pipelineObj.pipeline.getBindGroupLayout(1),
213 | entries: [
214 | {
215 | binding: 0,
216 | resource: sampler
217 | },
218 | {
219 | binding: 1,
220 | resource: texture
221 | }
222 | ]
223 | })
224 | // rotate by time, and update transform matrix
225 | const now = Date.now() / 1000
226 | rotation.x = Math.sin(now)
227 | rotation.y = Math.cos(now)
228 | const mvpMatrix = getMvpMatrix(aspect, position, rotation, scale)
229 | device.queue.writeBuffer(
230 | pipelineObj.mvpBuffer,
231 | 0,
232 | mvpMatrix.buffer
233 | )
234 | // then draw
235 | draw(device, context, pipelineObj, videoGroup)
236 | requestAnimationFrame(frame)
237 | }
238 | frame()
239 |
240 | // re-configure context on resize
241 | window.addEventListener('resize', () => {
242 | size.width = canvas.width = canvas.clientWidth * devicePixelRatio
243 | size.height = canvas.height = canvas.clientHeight * devicePixelRatio
244 | // don't need to recall context.configure() after v104
245 | // re-create depth texture
246 | pipelineObj.depthTexture.destroy()
247 | pipelineObj.depthTexture = device.createTexture({
248 | size, format: 'depth24plus',
249 | usage: GPUTextureUsage.RENDER_ATTACHMENT,
250 | })
251 | pipelineObj.depthView = pipelineObj.depthTexture.createView()
252 | // update aspect
253 | aspect = size.width / size.height
254 | })
255 | }
256 | run()
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ESNext",
4 | "useDefineForClassFields": true,
5 | "module": "ESNext",
6 | "lib": ["ESNext", "DOM"],
7 | "moduleResolution": "bundler",
8 | "strict": true,
9 | "sourceMap": true,
10 | "resolveJsonModule": true,
11 | "esModuleInterop": true,
12 | "noEmit": true,
13 | "noUnusedLocals": true,
14 | "noUnusedParameters": true,
15 | "noImplicitReturns": true,
16 | "types": ["vite/client", "@webgpu/types", "@types/dom-webcodecs"]
17 | },
18 | "include": ["src"]
19 | }
--------------------------------------------------------------------------------
/vite.config.github.js:
--------------------------------------------------------------------------------
1 | const { defineConfig } = require('vite')
2 | const { resolve } = require('path')
3 | const fs = require('fs')
4 |
5 | const input = {
6 | main: resolve(__dirname, 'index.html')
7 | }
8 | const samples = fs.readdirSync(resolve(__dirname, 'samples'))
9 | for(let file of samples){
10 | if(file.endsWith('.html'))
11 | input[file.slice(0, -5)] = resolve(__dirname, 'samples/'+ file)
12 | }
13 | module.exports = defineConfig({
14 | base: '/orillusion-webgpu-samples/',
15 | build: {
16 | rollupOptions: {
17 | input
18 | }
19 | }
20 | })
--------------------------------------------------------------------------------
/vite.config.js:
--------------------------------------------------------------------------------
1 | // vite.config.js
2 | import { defineConfig } from 'vite'
3 | import dns from 'dns'
4 | dns.setDefaultResultOrder('verbatim')
5 |
6 | module.exports = defineConfig({
7 | server:{
8 | host: 'localhost',
9 | port: 3000
10 | }
11 | })
--------------------------------------------------------------------------------