├── .gitignore ├── INSTRUCTION.md ├── README.md ├── index.html ├── lib └── minimal-gltf-loader.js ├── models └── sponza │ ├── buffer_0.bin │ ├── color.jpeg │ ├── fragmentShader0.glsl │ ├── normal.png │ ├── sponza.gltf │ └── vertexShader0.glsl ├── package.json ├── renders ├── cluster-1.gif ├── deferred-1.gif ├── effects-graph.png ├── optimization-chart.png ├── optimization-graph.png ├── renderer-comparison-graph.png └── scene.png ├── src ├── init.js ├── main.js ├── renderers │ ├── clustered.js │ ├── clusteredDeferred.js │ ├── clusteredForwardPlus.js │ ├── forward.js │ └── textureBuffer.js ├── scene.js ├── shaders │ ├── clusteredForward.frag.glsl.js │ ├── clusteredForward.vert.glsl │ ├── deferred.frag.glsl.js │ ├── deferredToTexture.frag.glsl │ ├── deferredToTexture.vert.glsl │ ├── forward.frag.glsl.js │ ├── forward.vert.glsl │ └── quad.vert.glsl └── utils.js └── webpack.config.js /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /INSTRUCTION.md: -------------------------------------------------------------------------------- 1 | WebGL Clustered Deferred and Forward+ Shading - Instructions 2 | ========================================================== 3 | 4 | **This is due Thursday 10/26** 5 | 6 | ## Running the code 7 | 8 | - Clone this repository 9 | - Download and install [Node.js](https://nodejs.org/en/) 10 | - Run `npm install` in the root directory of this project. This will download and install dependences 11 | - Run `npm start` and navigate to [http://localhost:5650](http://localhost:5650) 12 | 13 | This project requires a WebGL-capable browser with support for several extensions. 
You can check for support on [WebGL Report](http://webglreport.com/): 14 | - OES_texture_float 15 | - OES_texture_float_linear 16 | - OES_element_index_uint 17 | - EXT_frag_depth 18 | - WEBGL_depth_texture 19 | - WEBGL_draw_buffers 20 | 21 | Google Chrome seems to work best on all platforms. If you have problems running the starter code, use Chrome or Chromium, and make sure you have updated your browser and video drivers. 22 | 23 | ## Requirements 24 | **Ask on the mailing list for any clarifications** 25 | 26 | In this project, you are given code for: 27 | - Loading glTF models 28 | - Camera control 29 | - Simple forward renderer 30 | - Partial implementation and setup for Clustered Deferred and Forward+ shading 31 | - Many helpful helpers 32 | 33 | ## Required Tasks 34 | 35 | **Before doing performance analysis**, you must disable debug mode by changing `DEBUG` to false in `src/init.js`. Keep it enabled when developing - it helps find WebGL errors *much* more easily. 36 | 37 | **Clustered Forward+** 38 | - Build a data structure to keep track of how many lights are in each cluster and what their indices are 39 | - Render the scene using only the lights that overlap a given cluster 40 | 41 | **Clustered Deferred** 42 | - Reuse clustering logic from Clustered Forward+ 43 | - Store vertex attributes in g-buffer 44 | - Read g-buffer in a shader to produce final output 45 | 46 | **Effects** 47 | - Implement deferred Blinn-Phong shading (diffuse + specular) for point lights 48 | - OR 49 | - Implement one of the following effects: 50 | - Bloom using post-process blur (box or Gaussian) 51 | - Toon shading (with ramp shading + simple depth-edge detection for outlines) 52 | 53 | **Optimizations** 54 | - Optimized g-buffer format - reduce the number and size of g-buffers: 55 | - Ideas: 56 | - Pack values together into vec4s 57 | - Use 2-component normals 58 | - Quantize values by using smaller texture types instead of gl.FLOAT 59 | - Reduce number of properties passed via 
g-buffer, e.g. by: 60 | - Reconstructing world space position using camera matrices and X/Y/depth 61 | - For credit, you must show a good optimization effort and record the performance of each version you test, in a simple table. 62 | - It is expected that you won't need all 4 provided g-buffers for a basic pipeline; make sure you disable the unused ones. 63 | 64 | ## Performance & Analysis 65 | 66 | Compare your implementations of Clustered Forward+ and Clustered Deferred shading and analyze their differences. 67 | - Is one of them faster? 68 | - Is one of them better at certain types of workloads? 69 | - What are the benefits and tradeoffs of using one over the other? 70 | - For any differences in performance, briefly explain what may be causing the difference. 71 | 72 | **Before doing performance analysis**, you must disable debug mode by changing `DEBUG` to false in `src/init.js`. Keep it enabled when developing - it helps find WebGL errors *much* more easily. 73 | 74 | Optimize your JavaScript and/or GLSL code. Chrome/Firefox's profiling tools (see Resources section) will be useful for this. For each change that improves performance, show the before and after render times. 75 | 76 | For each new effect feature (required or extra), please provide the following analysis: 77 | - Concise overview write-up of the feature. 78 | - Performance change due to adding the feature. 79 | - If applicable, how do parameters (such as number of lights, etc.) affect performance? Show data with simple graphs. 80 | - Show timing in milliseconds, not FPS. 81 | - If you did something to accelerate the feature, what did you do and why? 82 | - How might this feature be optimized beyond your current implementation? 83 | 84 | For each performance feature (required or extra), please provide: 85 | - Concise overview write-up of the feature. 86 | - Detailed performance improvement analysis of adding the feature 87 | - What is the best case scenario for your performance improvement? 
What is the worst? Explain briefly. 88 | - Are there tradeoffs to this performance feature? Explain briefly. 89 | - How do parameters (such as number of lights, tile size, etc.) affect performance? Show data with graphs. 90 | - Show timing in milliseconds, not FPS. 91 | - Show debug views when possible. 92 | - If the debug view correlates with performance, explain how. 93 | 94 | ## Starter Code Tour 95 | 96 | Initialization happens in `src/init.js`. You don't need to worry about this; it is mostly initializing the gl context, debug modes, extensions, etc. 97 | 98 | `src/main.js` is configuration for the renderers. It sets up the gui for switching renderers and initializes the scene and render loop. The only important thing here are the arguments for `ClusteredForwardPlusRenderer` and `ClusteredDeferredRenderer`. These constructors take the number of x, y, and z slices to split the frustum into. 99 | 100 | `src/scene.js` handles loading a .gltf scene and initializes the lights. Here, you can modify the number of lights, their positions, and how they move around. Also, take a look at the `draw` function. This handles binding the vertex attributes, which are hardcoded to `a_position`, `a_normal`, and `a_uv`, as well as the color and normal maps to targets `gl.TEXTURE0` and `gl.TEXTURE1`. 101 | 102 | **Simple Forward Shading Pipeline** 103 | I've written a simple forward shading pipeline as an example for how everything works. Check out `src/forward.js`. 104 | 105 | The constructor for the renderer initializes a `TextureBuffer` to store the lights. This isn't totally necessary for a forward renderer, but you'll need this to do clustered shading. What we're trying to do here is upload to a shader all the positions of our lights. However, we unfortunately can't upload arbitrary data to the GPU with WebGL so we have to pack it as a texture. Figuring out how to do this is terribly painful so I did it for you. 
106 | 107 | The constructor for `TextureBuffer` takes two arguments, the number of elements, and the size of each element (in floats). It will allocate a floating point texture of dimension `numElements x ceil(elementSize / 4)`. This is because we pack every 4 adjacent values into a single pixel. 108 | 109 | Go to the `render` function to see how this is used in practice. Here, the buffer for the texture storing the lights is populated with the light positions. Notice that the first four values get stored at locations: `this._lightTexture.bufferIndex(i, 0) + 0` to `this._lightTexture.bufferIndex(i, 0) + 3` and then the next three are at `this._lightTexture.bufferIndex(i, 1) + 0` to `this._lightTexture.bufferIndex(i, 0) + 2`. Keep in mind that the data is stored as a texture, so the 5th element is actually the 1st element of the pixel in the second row. 110 | 111 | Look again at the constructor of `ForwardRenderer`. Also initialized here is the shader program. The shader program takes in a vertex source, a fragment source, and then a map of what uniform and vertex attributes should be extracted from the shader. In this code, the shader location for `u_viewProjectionMatrix` gets stored as `this._shaderProgram.u_viewProjectionMatrix`. If you look at `fsSource`, there's a strange thing happening there. `fsSource` is actually a function and it's being called with a configuration object containing the number of lights. What this is doing is creating a shader source string that is parameterized. We can't have dynamic loops in WebGL, but we can dynamically generate static shaders. If you take a look at `src/shaders/forward.frag.glsl.js`, you'll see that `${numLights}` is used throughout. 112 | 113 | Now go look inside `src/shaders/forward.frag.glsl.js`. Here, there is a simple loop which loops over the lights and applies shading for each one. I've written a helper called `UnpackLight(index)` which unpacks the `index`th light from the texture into a struct. 
Make sure you fully understand how this is working because you will need to implement something similar for clusters. Inside `UnpackLight` I use another helper called `ExtractFloat(texture, textureWidth, textureHeight, index, component)`. This pulls out the `component`th component from the `index`th value packed inside a `textureWidth x textureHeight` texture. Again, this is meant to be an example implementation. Using this function to pull out four values into a `vec4` will be unnecessarily slow. 114 | 115 | **Getting Started** 116 | Here are a few tips to get you started. 117 | 118 | 1. Complete `updateClusters` in `src/renderers/clustered.js`. This should update the cluster `TextureBuffer` with a mapping from cluster index to light count and light list (indices). 119 | 120 | 2. Update `src/shaders/clusteredForward.frag.glsl.js` to 121 | - Determine the cluster for a fragment 122 | - Read in the lights in that cluster from the populated data 123 | - Do shading for just those lights 124 | - You may find it necessary to bind additional uniforms in `src/renderers/clusteredForwardPlus.js` 125 | 126 | 3. Update `src/shaders/deferredToTexture.frag.glsl` to write desired data to the g-buffer 127 | 4. Update `src/deferred.frag.glsl` to read values from the g-buffer and perform simple forward rendering. (Right now it just outputs the screen xy coordinate) 128 | 5. Update it to use clustered shading. You should be able to reuse lots of stuff from Clustered Forward+ for this. You will also likely need to update shader inputs in `src/renderers/clusteredDeferred.js` 129 | 130 | ## README 131 | 132 | Replace the contents of the README.md in a clear manner with the following: 133 | - A brief description of the project and the specific features you implemented. 134 | - At least one screenshot of your project running. 135 | - A 30+ second video/gif of your project running showing all features. 
(Even though your demo can be seen online, using multiple render targets means it won't run on many computers. A video will work everywhere.) 136 | - Performance analysis (described above) 137 | 138 | **GitHub Pages** 139 | Since this assignment is in WebGL, you can make your project easily viewable by taking advantage of GitHub's project pages feature. 140 | 141 | Once you are done with the assignment, create a new branch: 142 | 143 | `git branch gh-pages` 144 | 145 | Run `npm run build` and commit the compiled files 146 | 147 | Push the branch to GitHub: 148 | 149 | `git push origin gh-pages` 150 | 151 | Now, you can go to `.github.io/` to see your renderer online from anywhere. Add this link to your README. 152 | 153 | ## Submit 154 | 155 | Beware of any build issues discussed on the Google Group. 156 | 157 | Open a GitHub pull request so that we can see that you have finished. The title should be "Project 5B: YOUR NAME". The template of the comment section of your pull request is attached below, you can do some copy and paste: 158 | 159 | - Repo Link 160 | - (Briefly) Mentions features that you've completed. Especially those bells and whistles you want to highlight 161 | - Feature 0 162 | - Feature 1 163 | - ... 164 | - Feedback on the project itself, if any. 165 | 166 | ### Third-Party Code Policy 167 | 168 | - Use of any third-party code must be approved by asking on our mailing list. 169 | - If it is approved, all students are welcome to use it. Generally, we approve use of third-party code that is not a core part of the project. For example, for the path tracer, we would approve using a third-party library for loading models, but would not approve copying and pasting a CUDA function for doing refraction. 170 | - Third-party code **MUST** be credited in README.md. 171 | - Using third-party code without its approval, including using another student's code, is an academic integrity violation, and will, at minimum, result in you receiving an F for the semester. 
172 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | WebGL Clustered Deferred and Forward+ Shading 2 | ====================== 3 | 4 | **Course project #5 for CIS 565: GPU Programming and Architecture, University of Pennsylvania** 5 | 6 | * (TODO) YOUR NAME HERE 7 | * Tested on: **Google Chrome 62.0.3202.62** on: 8 | - Mac OSX 10.10.5 9 | - Processor: 2.5 GHz Intel Core i7 10 | - Memory: 16 GB 1600 MHz DDR3 11 | - Graphics: Intel Iris Pro 1536 MB 12 | 13 | 14 | ## Project Overview 15 | The goal of this project was to get an introduction to Clustered Deferred and Clustered Forward+ Shading in WebGL. 16 | 17 | ### Live Online 18 | 19 | [![](./renders/scene.png)](http://MegSesh.github.io/Project5B-WebGL-Deferred-Shading) 20 | 21 | ### Demo Video/GIF 22 | #### Forward+ (100 lights) 23 | ![](./renders/cluster-1.gif) 24 | 25 | #### Deferred (250 lights, with Blinn-Phong shading and gamma correction) 26 | ![](./renders/deferred-1.gif) 27 | 28 | ### Features and Optimizations 29 | * Clustered Forward+ shading 30 | * Clustered Deferred shading with g-buffers 31 | * Blinn-Phong shading (diffuse + specular) for point lights 32 | * Gamma Correction 33 | * Optimized g-buffer format (by reducing the number and size of g-buffers) 34 | - Packing values together into vec4s 35 | - Using 2-component normals 36 | 37 | 38 | ### Algorithm Descriptions 39 | 40 | #### Forward Rendering 41 | Forward rendering works by rasterizing each geometric object in the scene. For each light in the scene, each object is shaded according to their material/light-type, which means there is one shader per material/light-type. This means that every geometric object has to consider every light in the scene. 42 | 43 | One optimization is to remove geometric objects that are occluded or do not appear in the view frustum of the camera. This can also be applied to lights as well. 
You can perform frustum culling on the light volumes before rendering the scene geometry. 44 | 45 | Object culling and light volume culling provide limited optimizations for this technique and light culling is often not practiced when using a forward rendering pipeline. It is better to limit the number of lights that affect the entire object. 46 | 47 | #### Clustered Forward+ 48 | 49 | Clustered Forward+ is a rendering technique that combines forward rendering with tiled light culling to reduce the number of lights that must be considered during shading. Forward+ primarily consists of two stages: light culling and forward rendering. 50 | 51 | The first pass of the Forward+ rendering technique uses a uniform grid of tiles in screen space to partition the lights into per-tile lists. 52 | 53 | Rather than using 2D tiles, we use 3D versions of them called "clusters". Lights in the scene are divided into these clusters. Each cluster represents a portion of the camera frustum that we currently see as we move around in the scene. Each cluster is stored as a 2D texture, which holds information about how many lights each cluster contains, and a list of which lights they are. 54 | 55 | The second pass uses a standard forward rendering pass to shade the objects in the scene but instead of looping over every dynamic light in the scene, the current pixel’s screen-space position is used to look-up the list of lights in the cluster that was computed in the previous pass. The light culling provides a significant performance improvement over the standard forward rendering technique as it greatly reduces the number of lights that must be iterated to correctly light the pixel. 56 | 57 | #### Clustered Deferred 58 | 59 | Clustered deferred works by rasterizing all of the scene objects (without lighting) into a series of 2D image buffers (g-buffers) that store the geometric information that is required to perform the lighting calculations in a later pass. 
The information that is stored into the 2D image buffers can be things like: 60 | 61 | * screen space depth 62 | * surface normals 63 | * diffuse color 64 | 65 | After the g-buffer has been generated, the geometric information can then be used to compute the lighting information in the lighting pass. The lighting pass is performed by rendering each light source as a geometric object in the scene. Each pixel that is touched by the light’s geometric representation is shaded using the desired lighting equation. This is done using the same clustering technique as described in the Forward+ section above. 66 | 67 | Advantages compared to forward rendering: 68 | 69 | - It decouples lighting from the scene complexity 70 | - You only transform and rasterize each object once 71 | - The expensive lighting calculations are only computed once per light per covered pixel. 72 | 73 | Disadvantages: 74 | - Memory bandwidth usage: must read g-buffer for each light 75 | - Must recalculate full lighting equation for each light 76 | - Can't handle transparent objects because we only have g-buffers for the front-most fragment 77 | 78 | More on transparency (from [Rendering Technique Comparisons](https://www.3dgep.com/forward-plus/)): 79 | 80 | One of the disadvantages of using deferred shading is that only opaque objects can be rasterized into the G-buffers. The reason for this is that multiple transparent objects may cover the same screen pixels but it is only possible to store a single value per pixel in the G-buffers. In the lighting pass the depth value, surface normal, diffuse and specular colors are sampled for the current screen pixel that is being lit. Since only a single value from each G-buffer is sampled, transparent objects cannot be supported in the lighting pass. 
81 | 82 | To circumvent this issue, transparent geometry must be rendered using the standard forward rendering technique which limits either the amount of transparent geometry in the scene or the number of dynamic lights in the scene. A scene which consists of only opaque objects can handle about 2000 dynamic lights before frame-rate issues start appearing. 83 | 84 | Another disadvantage of deferred shading is that only a single lighting model can be simulated in the lighting pass. This is due to the fact that it is only possible to bind a single pixel shader when rendering the light geometry. This is usually not an issue for pipelines that make use of übershaders as rendering with a single pixel shader is the norm, however if your rendering pipeline takes advantage of several different lighting models implemented in various pixel shaders then it will be problematic to switch your rendering pipeline to use deferred shading. 85 | 86 | 87 | ## Performance Analysis 88 | 89 | ### Rendering Analysis: Forward vs. Clustered Forward+ vs. Clustered Deferred 90 | 91 | ![](./renders/renderer-comparison-graph.png) 92 | 93 | As can be seen in the graph above, deferred shading is drastically faster than forward+ and forward rendering, starting from 10 lights in the scene. As explained in the section above, by decoupling lights from the scene complexity and storing geometry information in g-buffers, rasterization is done only once per object and expensive lighting calculations are only computed once per light per covered pixel. Light culling through cluster organization also offers a huge time advantage. 94 | 95 | 96 | ### Effects Analysis: Blinn-Phong shading with gamma correction 97 | 98 | This reflection model uses a combination of diffuse reflection, specular reflection (shiny surfaces), and ambient lighting (lighting in places which aren't lightened by direct light rays). 
This is a model of local lighting of points on a surface, where the result of lighting doesn't depend on other objects in the scene or on repeatedly reflected light rays. [More info here](http://sunandblackcat.com/tipFullView.php?l=eng&topicid=30&topic=Phong-Lighting) 99 | 100 | ![](./renders/effects-graph.png) 101 | 102 | As can be seen in the graph above, the extra computations needed to accomplish a Blinn-Phong shading model add time as the number of lights increases. 103 | 104 | 105 | ### Optimization Analysis 106 | 107 | In the first pass of the deferred shader, you want to send over the color, normals, and fragment position data. Rather than using 3 g-buffers, you can use 2 by compacting the x and y values of the normal into the first 2 buffers. Make sure to multiply the normal by the view matrix, which makes sure that the z values of the normals are all positive. You also know that then the magnitude of the vector is 1. With this information, you can use the equation of calculating the magnitude of a vector in order to decode the z value in the second shader pass. 108 | 109 | ![](./renders/optimization-graph.png) 110 | 111 | ![](./renders/optimization-chart.png) 112 | 113 | As can be seen in the graph and chart above, compacting normals creates somewhat of an advantage (about 10ms faster) especially when rendering above 500 lights in the scene. 
114 | 115 | 116 | #### Other optimizations to consider 117 | 118 | Some other optimizations that I would like to implement would be: 119 | 120 | * Using octahedron normal encoding 121 | * Calculating the fragment position in view/camera space in the vertex shader by multiplying it with the view matrix 122 | 123 | 124 | ### Credits and Resources 125 | 126 | * [Three.js](https://github.com/mrdoob/three.js) by [@mrdoob](https://github.com/mrdoob) and contributors 127 | * [stats.js](https://github.com/mrdoob/stats.js) by [@mrdoob](https://github.com/mrdoob) and contributors 128 | * [webgl-debug](https://github.com/KhronosGroup/WebGLDeveloperTools) by Khronos Group Inc. 129 | * [glMatrix](https://github.com/toji/gl-matrix) by [@toji](https://github.com/toji) and contributors 130 | * [minimal-gltf-loader](https://github.com/shrekshao/minimal-gltf-loader) by [@shrekshao](https://github.com/shrekshao) 131 | 132 | * [CIS 460 lecture notes on camera frustum](https://docs.google.com/presentation/d/e/2PACX-1vQrlrzC6XQCvRCQTr9k5dtUCpZFnbqlbcYXoFt1lcjBRdn_r4HD7GabLiGo7Ht0Dxvp4w_cWdV_ZaYh/pub?start=false&loop=false&delayms=60000&slide=id.g2492ec6f45_0_215) 133 | * [Blinn-Phong Shading Model](https://en.wikipedia.org/wiki/Blinn%E2%80%93Phong_shading_model) 134 | * [Foward vs Deferred Rendering](https://gamedevelopment.tutsplus.com/articles/forward-rendering-vs-deferred-rendering--gamedev-12342) 135 | * [glMatrix Documentation](http://glmatrix.net/docs/module-vec3.html) 136 | * [Intro to real-time shading of many lights SIGGRAPH course notes](https://newq.net/dl/pub/SA2014ManyLightIntro.pdf) 137 | * [Practical Clustered Shading - Avalanche Studios](http://www.humus.name/Articles/PracticalClusteredShading.pdf) 138 | * [Rendering Technique Comparisons](https://www.3dgep.com/forward-plus/) 139 | 140 | **Normal Compression** 141 | * [Compact Normals for g-buffers](https://aras-p.info/texts/CompactNormalStorage.html) 142 | * [(Not implemented) Octahedron Normal 
Encoding](https://knarkowicz.wordpress.com/2014/04/16/octahedron-normal-vector-encoding/) 143 | 144 | **Other good resources (unused)** 145 | * [Extracting View Frustum Plans From Projection Matrix](http://gamedevs.org/uploads/fast-extraction-viewing-frustum-planes-from-world-view-projection-matrix.pdf) 146 | * [BVH light storage](https://worldoffries.wordpress.com/2015/02/19/simple-alternative-to-clustered-shading-for-thousands-of-lights/) 147 | * [Deferred Rendering Tutorial](http://www.codinglabs.net/tutorial_simple_def_rendering.aspx) 148 | * [Deferred Lighting](https://www.opengl.org/discussion_boards/showthread.php/167687-Deferred-lighting) -------------------------------------------------------------------------------- /index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /lib/minimal-gltf-loader.js: -------------------------------------------------------------------------------- 1 | // From https://github.com/shrekshao/minimal-gltf-loader 2 | import {vec3, vec4, quat, mat4} from 'gl-matrix'; 3 | 4 | var MinimalGLTFLoader = MinimalGLTFLoader || {}; 5 | 6 | // Data classes 7 | var Scene = MinimalGLTFLoader.Scene = function () { 8 | // not 1-1 to meshes in json file 9 | // each mesh with a different node hierarchy is a new instance 10 | this.meshes = []; 11 | //this.meshes = {}; 12 | }; 13 | 14 | // Node 15 | 16 | var Mesh = MinimalGLTFLoader.Mesh = function () { 17 | this.meshID = ''; // mesh id name in glTF json meshes 18 | this.primitives = []; 19 | }; 20 | 21 | var Primitive = MinimalGLTFLoader.Primitive = function () { 22 | this.mode = 4; // default: gl.TRIANGLES 23 | 24 | this.matrix = mat4.create(); 25 | 26 | this.indices = null; 27 | this.indicesComponentType = 5123; // default: gl.UNSIGNED_SHORT 28 | 29 | // !!: assume vertex buffer is interleaved 30 | // see discussion 
https://github.com/KhronosGroup/glTF/issues/21 31 | this.vertexBuffer = null; 32 | 33 | // attribute info (stride, offset, etc) 34 | this.attributes = {}; 35 | 36 | // cur glTF spec supports only one material per primitive 37 | this.material = null; 38 | this.technique = null; 39 | 40 | 41 | 42 | // // Program gl buffer name 43 | // // ?? reconsider if it's suitable to put it here 44 | // this.indicesWebGLBufferName = null; 45 | // this.vertexWebGLBufferName = null; 46 | 47 | }; 48 | 49 | 50 | /** 51 | * 52 | */ 53 | var glTFModel = MinimalGLTFLoader.glTFModel = function () { 54 | this.defaultScene = ''; 55 | this.scenes = {}; 56 | 57 | this.nodeMatrix = {}; 58 | 59 | this.json = null; 60 | 61 | this.shaders = {}; 62 | this.programs = {}; 63 | 64 | this.images = {}; 65 | 66 | }; 67 | 68 | 69 | 70 | var gl; 71 | 72 | var glTFLoader = MinimalGLTFLoader.glTFLoader = function (glContext) { 73 | gl = glContext; 74 | this._init(); 75 | this.glTF = null; 76 | }; 77 | 78 | glTFLoader.prototype._init = function() { 79 | this._parseDone = false; 80 | this._loadDone = false; 81 | 82 | this._bufferRequested = 0; 83 | this._bufferLoaded = 0; 84 | this._buffers = {}; 85 | this._bufferTasks = {}; 86 | 87 | // ?? Move to glTFModel to avoid collected by GC ?? 
88 | this._bufferViews = {}; 89 | 90 | this._shaderRequested = 0; 91 | this._shaderLoaded = 0; 92 | 93 | this._imageRequested = 0; 94 | this._imageLoaded = 0; 95 | 96 | this._pendingTasks = 0; 97 | this._finishedPendingTasks = 0; 98 | 99 | this.onload = null; 100 | 101 | }; 102 | 103 | 104 | glTFLoader.prototype._getBufferViewData = function(json, bufferViewID, callback) { 105 | var bufferViewData = this._bufferViews[bufferViewID]; 106 | if(!bufferViewData) { 107 | // load bufferView for the first time 108 | var bufferView = json.bufferViews[bufferViewID]; 109 | var bufferData = this._buffers[bufferView.buffer]; 110 | if (bufferData) { 111 | // buffer already loaded 112 | //console.log("dependent buffer ready, create bufferView" + bufferViewID); 113 | this._bufferViews[bufferViewID] = bufferData.slice(bufferView.byteOffset, bufferView.byteOffset + bufferView.byteLength); 114 | callback(bufferViewData); 115 | } else { 116 | // buffer not yet loaded 117 | // add pending task to _bufferTasks 118 | //console.log("pending Task: wait for buffer to load bufferView " + bufferViewID); 119 | this._pendingTasks++; 120 | var bufferTask = this._bufferTasks[bufferView.buffer]; 121 | if (!bufferTask) { 122 | this._bufferTasks[bufferView.buffer] = []; 123 | bufferTask = this._bufferTasks[bufferView.buffer]; 124 | } 125 | var loader = this; 126 | bufferTask.push(function(newBufferData) { 127 | // share same bufferView 128 | // hierarchy needs to be post processed in the renderer 129 | var curBufferViewData = loader._bufferViews[bufferViewID]; 130 | if (!curBufferViewData) { 131 | console.log('create new BufferView Data for ' + bufferViewID); 132 | curBufferViewData = loader._bufferViews[bufferViewID] = newBufferData.slice(bufferView.byteOffset, bufferView.byteOffset + bufferView.byteLength); 133 | } 134 | loader._finishedPendingTasks++; 135 | callback(curBufferViewData); 136 | 137 | // // create new bufferView for each mesh access with a different hierarchy 138 | // // hierarchy 
transformation will be prepared in this way 139 | // console.log('create new BufferView Data for ' + bufferViewID); 140 | // loader._bufferViews[bufferViewID] = newBufferData.slice(bufferView.byteOffset, bufferView.byteOffset + bufferView.byteLength); 141 | // loader._finishedPendingTasks++; 142 | // callback(loader._bufferViews[bufferViewID]); 143 | }); 144 | } 145 | 146 | } else { 147 | // no need to load buffer from file 148 | // use cached ones 149 | //console.log("use cached bufferView " + bufferViewID); 150 | callback(bufferViewData); 151 | } 152 | }; 153 | 154 | // glTFLoader.prototype._doNextLoadTaskInList = function () { 155 | // }; 156 | 157 | glTFLoader.prototype._checkComplete = function () { 158 | if (this._bufferRequested == this._bufferLoaded && 159 | this._shaderRequested == this._shaderLoaded && 160 | this._imageRequested == this._imageLoaded 161 | // && other resources finish loading 162 | ) { 163 | this._loadDone = true; 164 | } 165 | 166 | if (this._loadDone && this._parseDone && this._pendingTasks == this._finishedPendingTasks) { 167 | this.onload(this.glTF); 168 | } 169 | }; 170 | 171 | 172 | glTFLoader.prototype._parseGLTF = function (json) { 173 | 174 | this.glTF.json = json; 175 | this.glTF.defaultScene = json.scene; 176 | 177 | // Iterate through every scene 178 | if (json.scenes) { 179 | for (var sceneID in json.scenes) { 180 | var newScene = new Scene(); 181 | this.glTF.scenes[sceneID] = newScene; 182 | 183 | var scene = json.scenes[sceneID]; 184 | var nodes = scene.nodes; 185 | var nodeLen = nodes.length; 186 | 187 | // Iterate through every node within scene 188 | for (var n = 0; n < nodeLen; ++n) { 189 | var nodeID = nodes[n]; 190 | //var node = json.nodes[nodeName]; 191 | 192 | // Traverse node 193 | this._parseNode(json, nodeID, newScene); 194 | } 195 | } 196 | } 197 | 198 | this._parseDone = true; 199 | this._checkComplete(); 200 | }; 201 | 202 | 203 | var translationVec3 = vec3.create(); 204 | var rotationQuat = quat.create(); 205 
| var scaleVec3 = vec3.create(); 206 | var TRMatrix = mat4.create(); 207 | 208 | glTFLoader.prototype._parseNode = function(json, nodeID, newScene, matrix) { 209 | var node = json.nodes[nodeID]; 210 | 211 | if (matrix === undefined) { 212 | matrix = mat4.create(); 213 | } 214 | 215 | var curMatrix = mat4.create(); 216 | 217 | if (node.hasOwnProperty('matrix')) { 218 | // matrix 219 | for(var i = 0; i < 16; ++i) { 220 | curMatrix[i] = node.matrix[i]; 221 | } 222 | mat4.multiply(curMatrix, matrix, curMatrix); 223 | //mat4.multiply(curMatrix, curMatrix, matrix); 224 | } else { 225 | // translation, rotation, scale (TRS) 226 | // TODO: these labels are optional 227 | vec3.set(translationVec3, node.translation[0], node.translation[1], node.translation[2]); 228 | quat.set(rotationQuat, node.rotation[0], node.rotation[1], node.rotation[2], node.rotation[3]); 229 | mat4.fromRotationTranslation(TRMatrix, rotationQuat, translationVec3); 230 | mat4.multiply(curMatrix, curMatrix, TRMatrix); 231 | vec3.set(scaleVec3, node.scale[0], node.scale[1], node.scale[2]); 232 | mat4.scale(curMatrix, curMatrix, scaleVec3); 233 | } 234 | 235 | // store node matrix 236 | this.glTF.nodeMatrix[nodeID] = curMatrix; 237 | 238 | 239 | 240 | // Iterate through every mesh within node 241 | var meshes = node.meshes; 242 | if(!!meshes) { 243 | var meshLen = meshes.length; 244 | for (var m = 0; m < meshLen; ++m) { 245 | var newMesh = new Mesh(); 246 | newScene.meshes.push(newMesh); 247 | 248 | var meshName = meshes[m]; 249 | var mesh = json.meshes[meshName]; 250 | 251 | newMesh.meshID = meshName; 252 | 253 | // Iterate through primitives 254 | var primitives = mesh.primitives; 255 | var primitiveLen = primitives.length; 256 | 257 | for (var p = 0; p < primitiveLen; ++p) { 258 | var newPrimitive = new Primitive(); 259 | newMesh.primitives.push(newPrimitive); 260 | 261 | var primitive = primitives[p]; 262 | 263 | if (primitive.indices) { 264 | this._parseIndices(json, primitive, newPrimitive); 265 | } 
266 | 267 | this._parseAttributes(json, primitive, newPrimitive, curMatrix); 268 | 269 | // required 270 | newPrimitive.material = json.materials[primitive.material]; 271 | 272 | if (newPrimitive.material.technique) { 273 | newPrimitive.technique = json.techniques[newPrimitive.material.technique]; 274 | } else { 275 | // TODO: use default technique in glTF spec Appendix A 276 | } 277 | 278 | } 279 | } 280 | } 281 | 282 | 283 | // Go through all the children recursively 284 | var children = node.children; 285 | var childreLen = children.length; 286 | for (var c = 0; c < childreLen; ++c) { 287 | var childNodeID = children[c]; 288 | this._parseNode(json, childNodeID, newScene, curMatrix); 289 | } 290 | 291 | }; 292 | 293 | 294 | glTFLoader.prototype._parseIndices = function(json, primitive, newPrimitive) { 295 | 296 | var accessorName = primitive.indices; 297 | var accessor = json.accessors[accessorName]; 298 | 299 | newPrimitive.mode = primitive.mode || 4; 300 | newPrimitive.indicesComponentType = accessor.componentType; 301 | 302 | var loader = this; 303 | this._getBufferViewData(json, accessor.bufferView, function(bufferViewData) { 304 | newPrimitive.indices = _getAccessorData(bufferViewData, accessor); 305 | loader._checkComplete(); 306 | }); 307 | }; 308 | 309 | 310 | 311 | 312 | //var tmpVec4 = vec4.create(); 313 | //var inverseTransposeMatrix = mat4.create(); 314 | 315 | glTFLoader.prototype._parseAttributes = function(json, primitive, newPrimitive, matrix) { 316 | // !! 
Assume interleaved vertex attributes 317 | // i.e., all attributes share one bufferView 318 | 319 | 320 | // vertex buffer processing 321 | var firstSemantic = Object.keys(primitive.attributes)[0]; 322 | var firstAccessor = json.accessors[primitive.attributes[firstSemantic]]; 323 | var vertexBufferViewID = firstAccessor.bufferView; 324 | var bufferView = json.bufferViews[vertexBufferViewID]; 325 | 326 | var loader = this; 327 | 328 | this._getBufferViewData(json, vertexBufferViewID, function(bufferViewData) { 329 | var data = newPrimitive.vertexBuffer = _arrayBuffer2TypedArray( 330 | bufferViewData, 331 | 0, 332 | bufferView.byteLength / ComponentType2ByteSize[firstAccessor.componentType], 333 | firstAccessor.componentType 334 | ); 335 | 336 | for (var attributeName in primitive.attributes) { 337 | var accessorName = primitive.attributes[attributeName]; 338 | var accessor = json.accessors[accessorName]; 339 | 340 | var componentTypeByteSize = ComponentType2ByteSize[accessor.componentType]; 341 | 342 | var stride = accessor.byteStride / componentTypeByteSize; 343 | var offset = accessor.byteOffset / componentTypeByteSize; 344 | var count = accessor.count; 345 | 346 | // // Matrix transformation 347 | // if (attributeName === 'POSITION') { 348 | // for (var i = 0; i < count; ++i) { 349 | // // TODO: add vec2 and other(needed?) 
support 350 | // vec4.set(tmpVec4, data[stride * i + offset] 351 | // , data[stride * i + offset + 1] 352 | // , data[stride * i + offset + 2] 353 | // , 1); 354 | // vec4.transformMat4(tmpVec4, tmpVec4, matrix); 355 | // vec4.scale(tmpVec4, tmpVec4, 1 / tmpVec4[3]); 356 | // data[stride * i + offset] = tmpVec4[0]; 357 | // data[stride * i + offset + 1] = tmpVec4[1]; 358 | // data[stride * i + offset + 2] = tmpVec4[2]; 359 | // } 360 | // } 361 | // else if (attributeName === 'NORMAL') { 362 | // mat4.invert(inverseTransposeMatrix, matrix); 363 | // mat4.transpose(inverseTransposeMatrix, inverseTransposeMatrix); 364 | 365 | // for (var i = 0; i < count; ++i) { 366 | // // @todo: add vec2 and other(needed?) support 367 | // vec4.set(tmpVec4, data[stride * i + offset] 368 | // , data[stride * i + offset + 1] 369 | // , data[stride * i + offset + 2] 370 | // , 0); 371 | // vec4.transformMat4(tmpVec4, tmpVec4, inverseTransposeMatrix); 372 | // vec4.normalize(tmpVec4, tmpVec4); 373 | // data[stride * i + offset] = tmpVec4[0]; 374 | // data[stride * i + offset + 1] = tmpVec4[1]; 375 | // data[stride * i + offset + 2] = tmpVec4[2]; 376 | // } 377 | // } 378 | 379 | 380 | // local transform matrix 381 | 382 | mat4.copy(newPrimitive.matrix, matrix); 383 | 384 | 385 | 386 | // for vertexAttribPointer 387 | newPrimitive.attributes[attributeName] = { 388 | //GLuint program location, 389 | size: Type2NumOfComponent[accessor.type], 390 | type: accessor.componentType, 391 | //GLboolean normalized 392 | stride: accessor.byteStride, 393 | offset: accessor.byteOffset 394 | }; 395 | 396 | } 397 | 398 | loader._checkComplete(); 399 | }); 400 | 401 | }; 402 | 403 | /** 404 | * load a glTF model 405 | * 406 | * @param {String} uri uri of the .glTF file. 
Other resources (bins, images) are assumed to be in the same base path 407 | * @param {Function} callback the onload callback function 408 | */ 409 | glTFLoader.prototype.loadGLTF = function (uri, callback) { 410 | 411 | this._init(); 412 | 413 | this.onload = callback || function(glTF) { 414 | console.log('glTF model loaded.'); 415 | console.log(glTF); 416 | }; 417 | 418 | 419 | this.glTF = new glTFModel(); 420 | 421 | this.baseUri = _getBaseUri(uri); 422 | 423 | var loader = this; 424 | 425 | _loadJSON(uri, function (response) { 426 | // Parse JSON string into object 427 | var json = JSON.parse(response); 428 | 429 | var bid; 430 | 431 | var loadArrayBufferCallback = function (resource) { 432 | 433 | loader._buffers[bid] = resource; 434 | loader._bufferLoaded++; 435 | if (loader._bufferTasks[bid]) { 436 | var i,len; 437 | for (i = 0, len = loader._bufferTasks[bid].length; i < len; ++i) { 438 | (loader._bufferTasks[bid][i])(resource); 439 | } 440 | } 441 | loader._checkComplete(); 442 | 443 | }; 444 | 445 | // Launch loading resources task: buffers, etc. 
446 | if (json.buffers) { 447 | for (bid in json.buffers) { 448 | 449 | loader._bufferRequested++; 450 | 451 | _loadArrayBuffer(loader.baseUri + json.buffers[bid].uri, loadArrayBufferCallback); 452 | 453 | } 454 | } 455 | 456 | // load images 457 | 458 | 459 | var loadImageCallback = function (img, iid) { 460 | loader._imageLoaded++; 461 | loader.glTF.images[iid] = img; 462 | loader._checkComplete(); 463 | }; 464 | 465 | var iid; 466 | 467 | if (json.images) { 468 | for (iid in json.images) { 469 | loader._imageRequested++; 470 | _loadImage(loader.baseUri + json.images[iid].uri, iid, loadImageCallback); 471 | } 472 | } 473 | 474 | 475 | // load shaders 476 | var pid; 477 | var newProgram; 478 | 479 | var loadVertexShaderFileCallback = function (resource) { 480 | loader._shaderLoaded++; 481 | newProgram.vertexShader = resource; 482 | if (newProgram.fragmentShader) { 483 | // create Program 484 | newProgram.program = _createProgram(gl, newProgram.vertexShader, newProgram.fragmentShader); 485 | loader._checkComplete(); 486 | } 487 | }; 488 | var loadFragmentShaderFileCallback = function (resource) { 489 | loader._shaderLoaded++; 490 | newProgram.fragmentShader = resource; 491 | if (newProgram.vertexShader) { 492 | // create Program 493 | newProgram.program = _createProgram(gl, newProgram.vertexShader, newProgram.fragmentShader); 494 | loader._checkComplete(); 495 | } 496 | }; 497 | 498 | if (json.programs) { 499 | for (pid in json.programs) { 500 | newProgram = loader.glTF.programs[pid] = { 501 | vertexShader: null, 502 | fragmentShader: null, 503 | program: null 504 | }; 505 | var program = json.programs[pid]; 506 | loader._shaderRequested += 2; 507 | 508 | _loadShaderFile(loader.baseUri + json.shaders[program.vertexShader].uri, loadVertexShaderFileCallback); 509 | _loadShaderFile(loader.baseUri + json.shaders[program.fragmentShader].uri, loadFragmentShaderFileCallback); 510 | } 511 | } 512 | 513 | 514 | 515 | 516 | // start glTF scene parsing 517 | 
loader._parseGLTF(json); 518 | }); 519 | }; 520 | 521 | 522 | 523 | 524 | // TODO: get from gl context 525 | var ComponentType2ByteSize = { 526 | 5120: 1, // BYTE 527 | 5121: 1, // UNSIGNED_BYTE 528 | 5122: 2, // SHORT 529 | 5123: 2, // UNSIGNED_SHORT 530 | 5126: 4 // FLOAT 531 | }; 532 | 533 | var Type2NumOfComponent = { 534 | 'SCALAR': 1, 535 | 'VEC2': 2, 536 | 'VEC3': 3, 537 | 'VEC4': 4, 538 | 'MAT2': 4, 539 | 'MAT3': 9, 540 | 'MAT4': 16 541 | }; 542 | 543 | MinimalGLTFLoader.Attributes = [ 544 | 'POSITION', 545 | 'NORMAL', 546 | 'TEXCOORD', 547 | 'COLOR', 548 | 'JOINT', 549 | 'WEIGHT' 550 | ]; 551 | 552 | // MinimalGLTFLoader.UniformFunctionsBind = { 553 | // 35676: gl.uniformMatrix4fv // FLOAT_MAT4 554 | // }; 555 | 556 | 557 | // ------ Scope limited private util functions--------------- 558 | 559 | function _arrayBuffer2TypedArray(resource, byteOffset, countOfComponentType, componentType) { 560 | switch(componentType) { 561 | // @todo: finish 562 | case 5122: return new Int16Array(resource, byteOffset, countOfComponentType); 563 | case 5123: return new Uint16Array(resource, byteOffset, countOfComponentType); 564 | case 5124: return new Int32Array(resource, byteOffset, countOfComponentType); 565 | case 5125: return new Uint32Array(resource, byteOffset, countOfComponentType); 566 | case 5126: return new Float32Array(resource, byteOffset, countOfComponentType); 567 | default: return null; 568 | } 569 | } 570 | 571 | function _getAccessorData(bufferViewData, accessor) { 572 | return _arrayBuffer2TypedArray( 573 | bufferViewData, 574 | accessor.byteOffset, 575 | accessor.count * Type2NumOfComponent[accessor.type], 576 | accessor.componentType 577 | ); 578 | } 579 | 580 | function _getBaseUri(uri) { 581 | 582 | // https://github.com/AnalyticalGraphicsInc/cesium/blob/master/Source/Core/getBaseUri.js 583 | 584 | var basePath = ''; 585 | var i = uri.lastIndexOf('/'); 586 | if(i !== -1) { 587 | basePath = uri.substring(0, i + 1); 588 | } 589 | 590 | return basePath; 
591 | } 592 | 593 | function _loadJSON(src, callback) { 594 | 595 | // native json loading technique from @KryptoniteDove: 596 | // http://codepen.io/KryptoniteDove/post/load-json-file-locally-using-pure-javascript 597 | 598 | var xobj = new XMLHttpRequest(); 599 | xobj.overrideMimeType("application/json"); 600 | xobj.open('GET', src, true); 601 | xobj.onreadystatechange = function () { 602 | if (xobj.readyState == 4 && // Request finished, response ready 603 | xobj.status == "200") { // Status OK 604 | callback(xobj.responseText, this); 605 | } 606 | }; 607 | xobj.send(null); 608 | } 609 | 610 | function _loadArrayBuffer(url, callback) { 611 | var xobj = new XMLHttpRequest(); 612 | xobj.responseType = 'arraybuffer'; 613 | xobj.open('GET', url, true); 614 | xobj.onreadystatechange = function () { 615 | if (xobj.readyState == 4 && // Request finished, response ready 616 | xobj.status == "200") { // Status OK 617 | var arrayBuffer = xobj.response; 618 | if (arrayBuffer && callback) { 619 | callback(arrayBuffer); 620 | } 621 | } 622 | }; 623 | xobj.send(null); 624 | } 625 | 626 | function _loadShaderFile(url, callback) { 627 | var xobj = new XMLHttpRequest(); 628 | xobj.responseType = 'text'; 629 | xobj.open('GET', url, true); 630 | xobj.onreadystatechange = function () { 631 | if (xobj.readyState == 4 && // Request finished, response ready 632 | xobj.status == "200") { // Status OK 633 | var file = xobj.response; 634 | if (file && callback) { 635 | callback(file); 636 | } 637 | } 638 | }; 639 | xobj.send(null); 640 | } 641 | 642 | function _loadImage(url, iid, onload) { 643 | var img = new Image(); 644 | img.src = url; 645 | img.onload = function() { 646 | onload(img, iid); 647 | }; 648 | } 649 | 650 | 651 | function _createShader(gl, source, type) { 652 | var shader = gl.createShader(type); 653 | gl.shaderSource(shader, source); 654 | gl.compileShader(shader); 655 | return shader; 656 | } 657 | 658 | function _createProgram(gl, vertexShaderSource, 
fragmentShaderSource) { 659 | var program = gl.createProgram(); 660 | var vshader = _createShader(gl, vertexShaderSource, gl.VERTEX_SHADER); 661 | var fshader = _createShader(gl, fragmentShaderSource, gl.FRAGMENT_SHADER); 662 | gl.attachShader(program, vshader); 663 | gl.deleteShader(vshader); 664 | gl.attachShader(program, fshader); 665 | gl.deleteShader(fshader); 666 | gl.linkProgram(program); 667 | 668 | var log = gl.getProgramInfoLog(program); 669 | if (log) { 670 | console.log(log); 671 | } 672 | 673 | log = gl.getShaderInfoLog(vshader); 674 | if (log) { 675 | console.log(log); 676 | } 677 | 678 | log = gl.getShaderInfoLog(fshader); 679 | if (log) { 680 | console.log(log); 681 | } 682 | 683 | return program; 684 | } 685 | 686 | export { glTFLoader }; -------------------------------------------------------------------------------- /models/sponza/buffer_0.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/models/sponza/buffer_0.bin -------------------------------------------------------------------------------- /models/sponza/color.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/models/sponza/color.jpeg -------------------------------------------------------------------------------- /models/sponza/fragmentShader0.glsl: -------------------------------------------------------------------------------- 1 | precision highp float; 2 | uniform vec4 u_ambient; 3 | uniform sampler2D u_diffuse; 4 | uniform sampler2D u_normal; 5 | uniform vec4 u_emission; 6 | uniform vec4 u_specular; 7 | uniform float u_shininess; 8 | uniform float u_transparency; 9 | varying vec3 v_positionEC; 10 | varying vec3 v_normal; 11 | varying vec2 v_texcoord_0; 12 | 13 
| vec3 applyNormalMap(vec3 geomnor, vec3 normap) { 14 | normap = normap * 2.0 - 1.0; 15 | vec3 up = normalize(vec3(0.001, 1, 0.001)); 16 | vec3 surftan = normalize(cross(geomnor, up)); 17 | vec3 surfbinor = cross(geomnor, surftan); 18 | return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; 19 | } 20 | 21 | void main(void) { 22 | vec3 normal = applyNormalMap(normalize(v_normal), texture2D(u_normal, v_texcoord_0).rgb); 23 | vec4 diffuse = texture2D(u_diffuse, v_texcoord_0); 24 | vec3 diffuseLight = vec3(0.0, 0.0, 0.0); 25 | vec3 specular = u_specular.rgb; 26 | vec3 specularLight = vec3(0.0, 0.0, 0.0); 27 | vec3 emission = u_emission.rgb; 28 | vec3 ambient = u_ambient.rgb; 29 | vec3 viewDir = -normalize(v_positionEC); 30 | vec3 ambientLight = vec3(0.0, 0.0, 0.0); 31 | ambientLight += vec3(0.2, 0.2, 0.2); 32 | vec3 l = vec3(0.0, 0.0, 1.0); 33 | diffuseLight += vec3(1.0, 1.0, 1.0) * max(dot(normal,l), 0.); 34 | vec3 h = normalize(l + viewDir); 35 | float specularIntensity = max(0., pow(max(dot(normal, h), 0.), u_shininess)); 36 | specularLight += vec3(1.0, 1.0, 1.0) * specularIntensity; 37 | vec3 color = vec3(0.0, 0.0, 0.0); 38 | color += diffuse.rgb * diffuseLight; 39 | color += specular * specularLight; 40 | color += emission; 41 | color += ambient * ambientLight; 42 | gl_FragColor = vec4(color * diffuse.a, diffuse.a * u_transparency); 43 | } 44 | -------------------------------------------------------------------------------- /models/sponza/normal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/models/sponza/normal.png -------------------------------------------------------------------------------- /models/sponza/sponza.gltf: -------------------------------------------------------------------------------- 1 | { 2 | "accessors": { 3 | "accessor_index_0": { 4 | "bufferView": 
"bufferView_1", 5 | "byteOffset": 0, 6 | "byteStride": 0, 7 | "componentType": 5125, 8 | "count": 199269, 9 | "type": "SCALAR", 10 | "min": [ 11 | 0 12 | ], 13 | "max": [ 14 | 199268 15 | ] 16 | }, 17 | "accessor_position": { 18 | "bufferView": "bufferView_0", 19 | "byteOffset": 0, 20 | "byteStride": 0, 21 | "componentType": 5126, 22 | "count": 148975, 23 | "min": [ 24 | -17.268321990966797, 25 | -0.006653999909758568, 26 | -7.7815141677856445 27 | ], 28 | "max": [ 29 | 17.551677703857422, 30 | 15.55334758758545, 31 | 7.818483829498291 32 | ], 33 | "type": "VEC3" 34 | }, 35 | "accessor_normal": { 36 | "bufferView": "bufferView_0", 37 | "byteOffset": 1787700, 38 | "byteStride": 0, 39 | "componentType": 5126, 40 | "count": 148975, 41 | "type": "VEC3", 42 | "min": [ 43 | null, 44 | null, 45 | null 46 | ], 47 | "max": [ 48 | null, 49 | null, 50 | null 51 | ] 52 | }, 53 | "accessor_uv": { 54 | "bufferView": "bufferView_0", 55 | "byteOffset": 3575400, 56 | "byteStride": 0, 57 | "componentType": 5126, 58 | "count": 148975, 59 | "type": "VEC2", 60 | "min": [ 61 | -57.04376983642578, 62 | -61.176544189453125 63 | ], 64 | "max": [ 65 | 57.97621536254883, 66 | 62.176544189453125 67 | ] 68 | } 69 | }, 70 | "asset": { 71 | "generator": "OBJ2GLTF", 72 | "premultipliedAlpha": true, 73 | "profile": { 74 | "api": "WebGL", 75 | "version": "1.0" 76 | }, 77 | "version": "1.0" 78 | }, 79 | "buffers": { 80 | "buffer_0": { 81 | "type": "arraybuffer", 82 | "byteLength": 5564276, 83 | "uri": "buffer_0.bin" 84 | } 85 | }, 86 | "bufferViews": { 87 | "bufferView_0": { 88 | "buffer": "buffer_0", 89 | "byteLength": 4767200, 90 | "byteOffset": 0, 91 | "target": 34962 92 | }, 93 | "bufferView_1": { 94 | "buffer": "buffer_0", 95 | "byteLength": 797076, 96 | "byteOffset": 4767200, 97 | "target": 34963 98 | } 99 | }, 100 | "images": { 101 | "color": { 102 | "uri": "color.jpeg" 103 | }, 104 | "normals": { 105 | "uri": "normal.png" 106 | } 107 | }, 108 | "materials": { 109 | "material_lambert2SG": { 
110 | "name": "lambert2SG", 111 | "extensions": {}, 112 | "values": { 113 | "ambient": [ 114 | 0, 115 | 0, 116 | 0, 117 | 1 118 | ], 119 | "diffuse": "texture_color", 120 | "normalMap": "texture_normal", 121 | "emission": [ 122 | 0, 123 | 0, 124 | 0, 125 | 1 126 | ], 127 | "specular": [ 128 | 0, 129 | 0, 130 | 0, 131 | 1 132 | ], 133 | "shininess": 0, 134 | "transparency": 1 135 | }, 136 | "technique": "technique0" 137 | } 138 | }, 139 | "meshes": { 140 | "mesh_sponza": { 141 | "name": "sponza", 142 | "primitives": [ 143 | { 144 | "attributes": { 145 | "POSITION": "accessor_position", 146 | "NORMAL": "accessor_normal", 147 | "TEXCOORD_0": "accessor_uv" 148 | }, 149 | "indices": "accessor_index_0", 150 | "material": "material_lambert2SG", 151 | "mode": 4 152 | } 153 | ] 154 | } 155 | }, 156 | "nodes": { 157 | "rootNode": { 158 | "children": [], 159 | "meshes": [ 160 | "mesh_sponza" 161 | ], 162 | "matrix": [ 163 | 1, 164 | 0, 165 | 0, 166 | 0, 167 | 0, 168 | 1, 169 | 0, 170 | 0, 171 | 0, 172 | 0, 173 | 1, 174 | 0, 175 | 0, 176 | 0, 177 | 0, 178 | 1 179 | ] 180 | } 181 | }, 182 | "samplers": { 183 | "sampler_0": { 184 | "magFilter": 9729, 185 | "minFilter": 9986, 186 | "wrapS": 10497, 187 | "wrapT": 10497 188 | } 189 | }, 190 | "scene": "scene_sponza", 191 | "scenes": { 192 | "scene_sponza": { 193 | "nodes": [ 194 | "rootNode" 195 | ] 196 | } 197 | }, 198 | "textures": { 199 | "texture_color": { 200 | "format": 6407, 201 | "internalFormat": 6407, 202 | "sampler": "sampler_0", 203 | "source": "color", 204 | "target": 3553, 205 | "type": 5121 206 | }, 207 | "texture_normal": { 208 | "format": 6407, 209 | "internalFormat": 6407, 210 | "sampler": "sampler_0", 211 | "source": "normals", 212 | "target": 3553, 213 | "type": 5121 214 | } 215 | }, 216 | "extensionsUsed": [], 217 | "animations": {}, 218 | "cameras": {}, 219 | "techniques": { 220 | "technique0": { 221 | "attributes": { 222 | "a_position": "position", 223 | "a_normal": "normal", 224 | "a_texcoord_0": 
"texcoord_0" 225 | }, 226 | "parameters": { 227 | "modelViewMatrix": { 228 | "semantic": "MODELVIEW", 229 | "type": 35676 230 | }, 231 | "projectionMatrix": { 232 | "semantic": "PROJECTION", 233 | "type": 35676 234 | }, 235 | "normalMatrix": { 236 | "semantic": "MODELVIEWINVERSETRANSPOSE", 237 | "type": 35675 238 | }, 239 | "ambient": { 240 | "type": 35666 241 | }, 242 | "diffuse": { 243 | "type": 35678 244 | }, 245 | "normalMap": { 246 | "type": 35678 247 | }, 248 | "emission": { 249 | "type": 35666 250 | }, 251 | "specular": { 252 | "type": 35666 253 | }, 254 | "shininess": { 255 | "type": 5126 256 | }, 257 | "transparency": { 258 | "type": 5126 259 | }, 260 | "position": { 261 | "semantic": "POSITION", 262 | "type": 35665 263 | }, 264 | "normal": { 265 | "semantic": "NORMAL", 266 | "type": 35665 267 | }, 268 | "texcoord_0": { 269 | "semantic": "TEXCOORD_0", 270 | "type": 35664 271 | } 272 | }, 273 | "program": "program0", 274 | "states": { 275 | "enable": [ 276 | 2884, 277 | 2929 278 | ] 279 | }, 280 | "uniforms": { 281 | "u_modelViewMatrix": "modelViewMatrix", 282 | "u_projectionMatrix": "projectionMatrix", 283 | "u_normalMatrix": "normalMatrix", 284 | "u_ambient": "ambient", 285 | "u_diffuse": "diffuse", 286 | "u_normal": "normalMap", 287 | "u_emission": "emission", 288 | "u_specular": "specular", 289 | "u_shininess": "shininess", 290 | "u_transparency": "transparency" 291 | } 292 | } 293 | }, 294 | "programs": { 295 | "program0": { 296 | "attributes": [ 297 | "a_position", 298 | "a_normal", 299 | "a_texcoord_0" 300 | ], 301 | "fragmentShader": "fragmentShader0", 302 | "vertexShader": "vertexShader0" 303 | } 304 | }, 305 | "shaders": { 306 | "vertexShader0": { 307 | "type": 35633, 308 | "uri": "vertexShader0.glsl" 309 | }, 310 | "fragmentShader0": { 311 | "type": 35632, 312 | "uri": "fragmentShader0.glsl" 313 | } 314 | }, 315 | "skins": {}, 316 | "extensions": {} 317 | } 318 | -------------------------------------------------------------------------------- 
/models/sponza/vertexShader0.glsl: -------------------------------------------------------------------------------- 1 | precision highp float; 2 | uniform mat4 u_modelViewMatrix; 3 | uniform mat4 u_projectionMatrix; 4 | uniform mat3 u_normalMatrix; 5 | attribute vec3 a_position; 6 | varying vec3 v_positionEC; 7 | attribute vec3 a_normal; 8 | varying vec3 v_normal; 9 | attribute vec2 a_texcoord_0; 10 | varying vec2 v_texcoord_0; 11 | void main(void) { 12 | vec4 pos = u_modelViewMatrix * vec4(a_position,1.0); 13 | v_positionEC = pos.xyz; 14 | gl_Position = u_projectionMatrix * pos; 15 | v_normal = u_normalMatrix * a_normal; 16 | v_texcoord_0 = a_texcoord_0; 17 | } 18 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "start": "webpack-dev-server", 4 | "start:production": "webpack-dev-server --env.production", 5 | "build": "webpack --env.production" 6 | }, 7 | "dependencies": { 8 | "dat-gui": "^0.5.0", 9 | "gl-matrix": "^2.4.0", 10 | "spectorjs": "^0.9.0", 11 | "stats-js": "^1.0.0-alpha1", 12 | "three": "^0.87.1", 13 | "three-js": "^79.0.0", 14 | "three-orbitcontrols": "^1.2.1", 15 | "webgl-debug": "^1.0.2" 16 | }, 17 | "devDependencies": { 18 | "babel-core": "^6.26.0", 19 | "babel-loader": "^7.1.2", 20 | "babel-minify-webpack-plugin": "^0.2.0", 21 | "babel-preset-env": "^1.6.0", 22 | "webpack": "^3.7.1", 23 | "webpack-dev-server": "^2.9.2", 24 | "webpack-glsl-loader": "^1.0.1" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /renders/cluster-1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/cluster-1.gif -------------------------------------------------------------------------------- 
/renders/deferred-1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/deferred-1.gif -------------------------------------------------------------------------------- /renders/effects-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/effects-graph.png -------------------------------------------------------------------------------- /renders/optimization-chart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/optimization-chart.png -------------------------------------------------------------------------------- /renders/optimization-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/optimization-graph.png -------------------------------------------------------------------------------- /renders/renderer-comparison-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/renderer-comparison-graph.png -------------------------------------------------------------------------------- /renders/scene.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MegSesh/Project5-WebGL-Clustered-Deferred-Forward-Plus/7eb0d086295073902ce2f1920d426291247c6c7d/renders/scene.png -------------------------------------------------------------------------------- /src/init.js: -------------------------------------------------------------------------------- 1 | // TODO: Change this to enable / disable debug mode 2 | export const DEBUG = false && process.env.NODE_ENV === 'development'; 3 | 4 | import DAT from 'dat-gui'; 5 | import WebGLDebug from 'webgl-debug'; 6 | import Stats from 'stats-js'; 7 | import { PerspectiveCamera } from 'three'; 8 | import OrbitControls from 'three-orbitcontrols'; 9 | import { Spector } from 'spectorjs'; 10 | 11 | export var ABORTED = false; 12 | export function abort(message) { 13 | ABORTED = true; 14 | throw message; 15 | } 16 | 17 | // Get the canvas element 18 | export const canvas = document.getElementById('canvas'); 19 | 20 | // Initialize the WebGL context 21 | const glContext = canvas.getContext('webgl'); 22 | 23 | // Get a debug context 24 | export const gl = DEBUG ? 
WebGLDebug.makeDebugContext(glContext, (err, funcName, args) => { 25 | abort(WebGLDebug.glEnumToString(err) + ' was caused by call to: ' + funcName); 26 | }) : glContext; 27 | 28 | const supportedExtensions = gl.getSupportedExtensions(); 29 | const requiredExtensions = [ 30 | 'OES_texture_float', 31 | 'OES_texture_float_linear', 32 | 'OES_element_index_uint', 33 | 'WEBGL_depth_texture', 34 | 'WEBGL_draw_buffers', 35 | ]; 36 | 37 | // Check that all required extensions are supported 38 | for (let i = 0; i < requiredExtensions.length; ++i) { 39 | if (supportedExtensions.indexOf(requiredExtensions[i]) < 0) { 40 | throw 'Unable to load extension ' + requiredExtensions[i]; 41 | } 42 | } 43 | 44 | // Get the maximum number of draw buffers 45 | gl.getExtension('OES_texture_float'); 46 | gl.getExtension('OES_texture_float_linear'); 47 | gl.getExtension('OES_element_index_uint'); 48 | gl.getExtension('WEBGL_depth_texture'); 49 | export const WEBGL_draw_buffers = gl.getExtension('WEBGL_draw_buffers'); 50 | export const MAX_DRAW_BUFFERS_WEBGL = gl.getParameter(WEBGL_draw_buffers.MAX_DRAW_BUFFERS_WEBGL); 51 | 52 | export const gui = new DAT.GUI(); 53 | 54 | // initialize statistics widget 55 | const stats = new Stats(); 56 | stats.setMode(1); // 0: fps, 1: ms 57 | stats.domElement.style.position = 'absolute'; 58 | stats.domElement.style.left = '0px'; 59 | stats.domElement.style.top = '0px'; 60 | document.body.appendChild(stats.domElement); 61 | 62 | // Initialize camera 63 | export const camera = new PerspectiveCamera(75, canvas.clientWidth / canvas.clientHeight, 0.1, 1000); 64 | 65 | // Initialize camera controls 66 | export const cameraControls = new OrbitControls(camera, canvas); 67 | cameraControls.enableDamping = true; 68 | cameraControls.enableZoom = true; 69 | cameraControls.rotateSpeed = 0.3; 70 | cameraControls.zoomSpeed = 1.0; 71 | cameraControls.panSpeed = 2.0; 72 | 73 | function setSize(width, height) { 74 | canvas.width = width; 75 | canvas.height = height; 76 | 
camera.aspect = width / height; 77 | camera.updateProjectionMatrix(); 78 | } 79 | 80 | setSize(canvas.clientWidth, canvas.clientHeight); 81 | window.addEventListener('resize', () => setSize(canvas.clientWidth, canvas.clientHeight)); 82 | 83 | if (DEBUG) { 84 | const spector = new Spector(); 85 | spector.displayUI(); 86 | } 87 | 88 | // Creates a render loop that is wrapped with camera update and stats logging 89 | export function makeRenderLoop(render) { 90 | return function tick() { 91 | cameraControls.update(); 92 | stats.begin(); 93 | render(); 94 | stats.end(); 95 | if (!ABORTED) { 96 | requestAnimationFrame(tick) 97 | } 98 | } 99 | } 100 | 101 | // import the main application 102 | require('./main'); 103 | -------------------------------------------------------------------------------- /src/main.js: -------------------------------------------------------------------------------- 1 | import { makeRenderLoop, camera, cameraControls, gui, gl } from './init'; 2 | import ForwardRenderer from './renderers/forward'; 3 | import ClusteredForwardPlusRenderer from './renderers/clusteredForwardPlus'; 4 | import ClusteredDeferredRenderer from './renderers/clusteredDeferred'; 5 | import Scene from './scene'; 6 | 7 | const FORWARD = 'Forward'; 8 | const CLUSTERED_FORWARD_PLUS = 'Clustered Forward+'; 9 | const CLUSTERED_DEFFERED = 'Clustered Deferred'; 10 | 11 | const params = { 12 | renderer: CLUSTERED_FORWARD_PLUS, 13 | _renderer: null, 14 | }; 15 | 16 | setRenderer(params.renderer); 17 | 18 | //Inputs into renderers: number of x, y, and z slices to split the frustum into 19 | function setRenderer(renderer) { 20 | switch(renderer) { 21 | case FORWARD: 22 | params._renderer = new ForwardRenderer(); 23 | break; 24 | case CLUSTERED_FORWARD_PLUS: 25 | params._renderer = new ClusteredForwardPlusRenderer(15, 15, 15); 26 | break; 27 | case CLUSTERED_DEFFERED: 28 | params._renderer = new ClusteredDeferredRenderer(15, 15, 15); 29 | break; 30 | } 31 | } 32 | 33 | gui.add(params, 
'renderer', [FORWARD, CLUSTERED_FORWARD_PLUS, CLUSTERED_DEFFERED]).onChange(setRenderer); 34 | 35 | const scene = new Scene(); 36 | scene.loadGLTF('models/sponza/sponza.gltf'); 37 | 38 | camera.position.set(-10, 8, 0); 39 | cameraControls.target.set(0, 2, 0); 40 | gl.enable(gl.DEPTH_TEST); 41 | 42 | function render() { 43 | scene.update(); 44 | params._renderer.render(camera, scene); 45 | } 46 | 47 | makeRenderLoop(render)(); 48 | -------------------------------------------------------------------------------- /src/renderers/clustered.js: -------------------------------------------------------------------------------- 1 | import { mat4, vec4, vec3 } from 'gl-matrix'; 2 | import { NUM_LIGHTS } from '../scene'; 3 | import TextureBuffer from './textureBuffer'; 4 | 5 | import { MAX_LIGHTS_PER_CLUSTER } from '../scene'; 6 | 7 | // I've defined this in scene.js now 8 | // export const MAX_LIGHTS_PER_CLUSTER = 100; 9 | 10 | const YZ_PLANE = false; 11 | const XZ_PLANE = false; 12 | 13 | export default class ClusteredRenderer { 14 | constructor(xSlices, ySlices, zSlices) { 15 | // Create a texture to store cluster data. Each cluster stores the number of lights followed by the light indices 16 | this._clusterTexture = new TextureBuffer(xSlices * ySlices * zSlices, MAX_LIGHTS_PER_CLUSTER + 1); 17 | this._xSlices = xSlices; 18 | this._ySlices = ySlices; 19 | this._zSlices = zSlices; 20 | } 21 | 22 | clamp(val, minVal, maxVal) 23 | { 24 | return Math.max(minVal, Math.min(val, maxVal)); 25 | // return Math.min(Math.max(val, minVal), maxVal); 26 | } 27 | 28 | updateClusters(camera, viewMatrix, scene) { 29 | // TODO: Update the cluster texture with the count and indices of the lights in each cluster 30 | // This will take some time. The math is nontrivial... 

    // AKA: For all lights, figure out the clusters that the light overlaps

    // Pass 1: zero the per-cluster light count (component 0 of each element).
    for (let z = 0; z < this._zSlices; ++z) {
      for (let y = 0; y < this._ySlices; ++y) {
        for (let x = 0; x < this._xSlices; ++x) {
          let i = x + y * this._xSlices + z * this._xSlices * this._ySlices;
          // Reset the light count to 0 for every cluster
          this._clusterTexture.buffer[this._clusterTexture.bufferIndex(i, 0)] = 0;
        }//end for x
      }//end for y
    }//end for z

    // =================================== BEGIN CLUSTERING ===================================


    var totalNumLightsInCluster = 0;

    for(let lightIdx = 0; lightIdx < NUM_LIGHTS; lightIdx++)
    {
      // Get light's AABB
      // Multiply by viewMatrix to get point in camera space
      var _lightPos = vec3.fromValues(scene.lights[lightIdx].position[0],
                                      scene.lights[lightIdx].position[1],
                                      scene.lights[lightIdx].position[2]);
      vec3.transformMat4(_lightPos, _lightPos, viewMatrix);
      // Negate view-space z so depth grows positive away from the camera.
      var lightPos = vec3.fromValues(_lightPos[0], _lightPos[1], -1.0 * _lightPos[2]);
      var lightRadius = scene.lights[lightIdx].radius;

      // Get min & max AABB positions
      // NOTE: Consider doing the 45deg boundary check to include those edge case clusters too
      var minBB = vec3.fromValues(lightPos[0] - lightRadius, lightPos[1] - lightRadius, lightPos[2] - lightRadius);
      var maxBB = vec3.fromValues(lightPos[0] + lightRadius, lightPos[1] + lightRadius, lightPos[2] + lightRadius);

      // console.log("minBB: ", minBB[0], ", ", minBB[1], ", ", minBB[2]);
      // console.log("maxBB: ", maxBB[0], ", ", maxBB[1], ", ", maxBB[2]);

      // Calculate frustum width and height
      // Note: camera.fov = vertical fov
      var half_fov_rad = ((camera.fov / 2.0) * Math.PI) / 180.0;

      // console.log("half_fov_rad", half_fov_rad);

      // var frustum_width = 2.0 * Math.atan(Math.tan(half_fov_rad) * camera.aspect);
      // var frustum_height = 2.0 * Math.tan(half_fov_rad) * camera.far;

      // Calculate the frustum width and height according to lightPos
      // (size of the frustum cross-section at the light's depth)
      var light_frustum_height = Math.abs(2 * lightPos[2] * Math.tan(half_fov_rad));
      var light_frustum_width = Math.abs(light_frustum_height * camera.aspect);

      // console.log("light_frustum_height: ", light_frustum_height);
      // console.log("light_frustum_width: ", light_frustum_width);

      // Stride = total distance of frustum / slice size (aka 15)
      var z_stride = (camera.far - camera.near) / this._zSlices;
      var y_stride = light_frustum_height / this._ySlices;
      var x_stride = light_frustum_width / this._xSlices;

      // console.log("zstride: ", z_stride);
      // console.log("ystride: ", y_stride);
      // console.log("xstride: ", x_stride);

      // Divide height and width by 2 because we have 0,0 in the middle
      // ADDING -1 and +1 TO MIN AND MAX TO Z OR X?
      // NOTE(review): z indices divide depth measured from 0 while z_stride
      // spans (far - near); if camera.near is significant the z binning is
      // shifted by near/z_stride — confirm the intended z origin.
      var z_min = Math.floor(minBB[2] / z_stride);// - 1;
      var z_max = Math.floor(maxBB[2] / z_stride);// + 1;
      var y_min = Math.floor((minBB[1] + (light_frustum_height / 2.0)) / y_stride);
      var y_max = Math.floor((maxBB[1] + (light_frustum_height / 2.0)) / y_stride);
      // NOTE(review): only x is padded by one extra slice on each side; y is
      // not — verify this asymmetry is intentional.
      var x_min = Math.floor((minBB[0] + (light_frustum_width / 2.0)) / x_stride) - 1;
      var x_max = Math.floor((maxBB[0] + (light_frustum_width / 2.0)) / x_stride) + 1;

      // console.log("z_min to max: ", z_min, ", ", z_max);
      // console.log("y_min to max: ", y_min, ", ", y_max);
      // console.log("x_min to max: ", x_min, ", ", x_max);

      // NOTE & TODO: If the min and max index in any dimension is out of frustum bounds, then don't include light in any cluster
      // if((z_min < 0 && z_max < 0) || (z_min > this._zSlices - 1 && z_max > this._zSlices - 1)) continue;
      // if((y_min < 0 && y_max < 0) || (y_min > this._ySlices - 1 && y_max > this._ySlices - 1)) continue;
      // if((x_min < 0 && x_max < 0) || (x_min > this._xSlices - 1 && x_max > this._xSlices - 1)) continue;

      // Note: Make sure to clamp b/c you might get a light that's outside of frustum, hence index out of bounds
      if((z_min < 0 && z_max < 0) || (z_min >= this._zSlices && z_max >= this._zSlices)) continue;
      if((y_min < 0 && y_max < 0) || (y_min >= this._ySlices && y_max >= this._ySlices)) continue;
      if((x_min < 0 && x_max < 0) || (x_min >= this._xSlices && x_max >= this._xSlices)) continue;

      z_min = this.clamp(z_min, 0, this._zSlices - 1);
      z_max = this.clamp(z_max, 0, this._zSlices - 1);
      y_min = this.clamp(y_min, 0, this._ySlices - 1);
      y_max = this.clamp(y_max, 0, this._ySlices - 1);
      x_min = this.clamp(x_min, 0, this._xSlices - 1);
      x_max = this.clamp(x_max, 0, this._xSlices - 1);


      // Pass 2: append this light's index to every overlapped cluster.
      for (let _z = z_min; _z <= z_max; ++_z) {
        for (let _y = y_min; _y <= y_max; ++_y) {
          for (let _x = x_min; _x <= x_max; ++_x) {

            // for (let _z = 0; _z < 15; ++_z) {
            // for (let _y = 0; _y < 15; ++_y) {
            // for (let _x = 0; _x < 15; ++_x) {

            // Index into texture buffer
            let u = _x + (_y * this._xSlices) + (_z * this._xSlices * this._ySlices);

            var v0 = this._clusterTexture.bufferIndex(u, 0);
            totalNumLightsInCluster = this._clusterTexture.buffer[v0 + 0];

            if(totalNumLightsInCluster + 1 <= MAX_LIGHTS_PER_CLUSTER)
            {
              this._clusterTexture.buffer[v0] = totalNumLightsInCluster + 1;

              // New slot's flat component index is (count + 1); split it into
              // the pixel row (v) and the channel within that pixel (pixelIdx).
              let v = Math.floor((totalNumLightsInCluster + 1) / 4);

              let pixelIdx = (totalNumLightsInCluster + 1) % 4;
              // let pixelIdx = (totalNumLightsInCluster + 1) - (v * 4);

              // Allocate lights into the cluster texture
              // bufferIndex(u, 0) + 0] = LIGHT COUNT and bufferIndex(u, v) + 0...3] = LIGHT ID'S;
              this._clusterTexture.buffer[this._clusterTexture.bufferIndex(u, v) + pixelIdx] = lightIdx;
            }//end light if check

          }//end for x
        }//end for y
      }//end for z
    }//end for all lights

    // =================================== END CLUSTERING ===================================

    // Upload the freshly packed cluster data to the GPU texture.
    this._clusterTexture.update();
  }//end updateClusters
}//end ClusteredRenderer
-------------------------------------------------------------------------------- /src/renderers/clusteredDeferred.js: --------------------------------------------------------------------------------
import { gl, WEBGL_draw_buffers, canvas } from '../init';
import { mat4, vec4 } from 'gl-matrix';
import { loadShaderProgram, renderFullscreenQuad } from '../utils';
import { NUM_LIGHTS } from '../scene';
import toTextureVert from '../shaders/deferredToTexture.vert.glsl';
import toTextureFrag from '../shaders/deferredToTexture.frag.glsl';
import QuadVertSource from '../shaders/quad.vert.glsl';
import fsSource from '../shaders/deferred.frag.glsl.js';
import TextureBuffer from './textureBuffer';
import ClusteredRenderer from './clustered';

import { MAX_LIGHTS_PER_CLUSTER } from '../scene';


// Number of g-buffer color attachments (optimized down from 3).
export const NUM_GBUFFERS = 2;
// export const NUM_GBUFFERS = 3;


// Clustered deferred renderer: pass 1 writes geometry attributes into the
// g-buffers, pass 2 shades a fullscreen quad using the cluster/light textures.
export default class ClusteredDeferredRenderer extends ClusteredRenderer {
  constructor(xSlices, ySlices, zSlices) {
    super(xSlices, ySlices, zSlices);

    this.setupDrawBuffers(canvas.width, canvas.height);

    // Create a texture to store light data
    // 8 floats per light -> 2 RGBA texels: [pos.xyz, radius] and [color.rgb, unused].
    this._lightTexture = new TextureBuffer(NUM_LIGHTS, 8);

    // Pass 1 program: rasterize geometry into the g-buffers.
    this._progCopy = loadShaderProgram(toTextureVert, toTextureFrag, {
      uniforms: ['u_viewProjectionMatrix', 'u_colmap', 'u_normap', 'u_viewMatrix'],
      attribs: ['a_position', 'a_normal', 'a_uv'],
    });

    // Pass 2 program: fullscreen shading; slice counts are baked into the
    // generated fragment shader source.
    this._progShade = loadShaderProgram(QuadVertSource, fsSource({
      numLights: NUM_LIGHTS,
      numGBuffers: NUM_GBUFFERS,
      maxLightsPerCluster : MAX_LIGHTS_PER_CLUSTER,
      x_slices : xSlices,
      y_slices : ySlices,
      z_slices : zSlices,
    }), {
      uniforms: ['u_gbuffers[0]', 'u_gbuffers[1]', 'u_gbuffers[2]', 'u_gbuffers[3]',
                 'u_viewProjectionMatrix', 'u_viewMatrix', 'u_inverseViewMatrix',
                 'u_screenHeight','u_screenWidth', 'u_camNear', 'u_camFar',
                 'u_lightbuffer', 'u_clusterbuffer'],
      attribs: ['a_uv'],
    });

    this._projectionMatrix = mat4.create();
    this._viewMatrix = mat4.create();
    this._viewProjectionMatrix = mat4.create();
    this._inverseViewMatrix = mat4.create();
  }

  // Create the FBO: one depth texture plus NUM_GBUFFERS float RGBA targets.
  setupDrawBuffers(width, height) {
    this._width = width;
    this._height = height;

    this._fbo = gl.createFramebuffer();

    //Create, bind, and store a depth target texture for the FBO
    this._depthTex = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, this._depthTex);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.DEPTH_COMPONENT, width, height, 0, gl.DEPTH_COMPONENT, gl.UNSIGNED_SHORT, null);
    gl.bindTexture(gl.TEXTURE_2D, null);

    gl.bindFramebuffer(gl.FRAMEBUFFER, this._fbo);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.TEXTURE_2D, this._depthTex, 0);

    // Create, bind, and store "color" target textures for the FBO
    this._gbuffers = new Array(NUM_GBUFFERS);
    let attachments = new Array(NUM_GBUFFERS);
    for (let i = 0; i < NUM_GBUFFERS; i++) {
      attachments[i] = WEBGL_draw_buffers[`COLOR_ATTACHMENT${i}_WEBGL`];
      this._gbuffers[i] = gl.createTexture();
      gl.bindTexture(gl.TEXTURE_2D, this._gbuffers[i]);
      gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
      gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
      gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, 
gl.CLAMP_TO_EDGE);
      gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
      gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.FLOAT, null);
      gl.bindTexture(gl.TEXTURE_2D, null);

      gl.framebufferTexture2D(gl.FRAMEBUFFER, attachments[i], gl.TEXTURE_2D, this._gbuffers[i], 0);
    }

    // NOTE(review): throwing a bare string loses the stack trace; prefer
    // `throw new Error(...)`.
    if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE) {
      throw "Framebuffer incomplete";
    }

    // Tell the WEBGL_draw_buffers extension which FBO attachments are
    // being used. (This extension allows for multiple render targets.)
    WEBGL_draw_buffers.drawBuffersWEBGL(attachments);

    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
  }

  // Reallocate the depth and g-buffer textures when the canvas size changes.
  resize(width, height) {
    this._width = width;
    this._height = height;

    gl.bindTexture(gl.TEXTURE_2D, this._depthTex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.DEPTH_COMPONENT, width, height, 0, gl.DEPTH_COMPONENT, gl.UNSIGNED_SHORT, null);
    for (let i = 0; i < NUM_GBUFFERS; i++) {
      gl.bindTexture(gl.TEXTURE_2D, this._gbuffers[i]);
      gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.FLOAT, null);
    }
    gl.bindTexture(gl.TEXTURE_2D, null);
  }

  // Per-frame: g-buffer pass, cluster/light texture update, then the
  // fullscreen shading pass.
  render(camera, scene) {
    if (canvas.width != this._width || canvas.height != this._height) {
      this.resize(canvas.width, canvas.height);
    }

    // Update the camera matrices
    camera.updateMatrixWorld();
    mat4.invert(this._viewMatrix, camera.matrixWorld.elements);
    mat4.copy(this._projectionMatrix, camera.projectionMatrix.elements);
    mat4.multiply(this._viewProjectionMatrix, this._projectionMatrix, this._viewMatrix);

    // Invert the view matrix
    mat4.invert(this._inverseViewMatrix, this._viewMatrix);

    // Render to the whole screen
    gl.viewport(0, 0, canvas.width, canvas.height);

    // Bind the framebuffer
    gl.bindFramebuffer(gl.FRAMEBUFFER, this._fbo);

    // Clear the frame
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Use the shader program to copy to the draw buffers
    gl.useProgram(this._progCopy.glShaderProgram);

    // Upload the camera matrix
    gl.uniformMatrix4fv(this._progCopy.u_viewProjectionMatrix, false, this._viewProjectionMatrix);
    // Upload the view matrix
    gl.uniformMatrix4fv(this._progCopy.u_viewMatrix, false, this._viewMatrix);

    // Draw the scene. This function takes the shader program so that the model's textures can be bound to the right inputs
    scene.draw(this._progCopy);

    // Update the clusters for the frame
    this.updateClusters(camera, this._viewMatrix, scene);

    // Update the buffer used to populate the texture packed with light data
    for (let i = 0; i < NUM_LIGHTS; ++i) {
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 0] = scene.lights[i].position[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 1] = scene.lights[i].position[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 2] = scene.lights[i].position[2];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 3] = scene.lights[i].radius;

      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 0] = scene.lights[i].color[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 1] = scene.lights[i].color[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 2] = scene.lights[i].color[2];
    }
    // Update the light texture
    this._lightTexture.update();

    // Bind the default null framebuffer which is the screen
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);

    // Clear the frame
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Use this shader program
    gl.useProgram(this._progShade.glShaderProgram);


    // Upload the camera matrix
    gl.uniformMatrix4fv(this._progShade.u_viewProjectionMatrix, false, this._viewProjectionMatrix);

    // Set the light texture as a uniform input to the shader
    gl.activeTexture(gl.TEXTURE2);
    gl.bindTexture(gl.TEXTURE_2D, this._lightTexture.glTexture);
    gl.uniform1i(this._progShade.u_lightbuffer, 2);

    // Set the cluster texture as a uniform input to the shader
    gl.activeTexture(gl.TEXTURE3);
    gl.bindTexture(gl.TEXTURE_2D, this._clusterTexture.glTexture);
    gl.uniform1i(this._progShade.u_clusterbuffer, 3);


    // TODO: Bind any other shader inputs
    gl.uniformMatrix4fv(this._progShade.u_viewMatrix, false, this._viewMatrix);
    gl.uniformMatrix4fv(this._progShade.u_inverseViewMatrix, false, this._inverseViewMatrix);
    gl.uniform1f(this._progShade.u_camNear, camera.near);
    gl.uniform1f(this._progShade.u_camFar, camera.far);
    gl.uniform1f(this._progShade.u_screenHeight, canvas.height);
    gl.uniform1f(this._progShade.u_screenWidth, canvas.width);


    // Bind g-buffers
    // Since I have light and cluster textures at 2 and 3, start this at 4
    const firstGBufferBinding = 4;//0; // You may have to change this if you use other texture slots

    for (let i = 0; i < NUM_GBUFFERS; i++) {
      gl.activeTexture(gl[`TEXTURE${i + firstGBufferBinding}`]);
      gl.bindTexture(gl.TEXTURE_2D, this._gbuffers[i]);
      gl.uniform1i(this._progShade[`u_gbuffers[${i}]`], i + firstGBufferBinding);
    }

    renderFullscreenQuad(this._progShade);
  }
};
-------------------------------------------------------------------------------- /src/renderers/clusteredForwardPlus.js: --------------------------------------------------------------------------------
import { gl } from '../init';
import { mat4, vec4, vec3 } from 'gl-matrix';
import { 
loadShaderProgram } from '../utils';
import { NUM_LIGHTS } from '../scene';
import vsSource from '../shaders/clusteredForward.vert.glsl';
import fsSource from '../shaders/clusteredForward.frag.glsl.js';
import TextureBuffer from './textureBuffer';
import ClusteredRenderer from './clustered';

import { MAX_LIGHTS_PER_CLUSTER } from '../scene';

// Clustered Forward+ renderer: clusters lights each frame, then forward-shades
// geometry reading only the lights assigned to each fragment's cluster.
export default class ClusteredForwardPlusRenderer extends ClusteredRenderer {
  constructor(xSlices, ySlices, zSlices) {
    super(xSlices, ySlices, zSlices);

    // Create a texture to store light data
    // 8 floats per light -> 2 RGBA texels: [pos.xyz, radius] and [color.rgb, unused].
    this._lightTexture = new TextureBuffer(NUM_LIGHTS, 8);

    // Slice counts and light limits are baked into the generated shader source.
    this._shaderProgram = loadShaderProgram(vsSource, fsSource({
      numLights: NUM_LIGHTS,
      maxLightsPerCluster : MAX_LIGHTS_PER_CLUSTER,
      x_slices : xSlices,
      y_slices : ySlices,
      z_slices : zSlices,
    }), {
      uniforms: ['u_viewProjectionMatrix', 'u_colmap', 'u_normap', 'u_lightbuffer', 'u_clusterbuffer',
                 'u_screenHeight','u_screenWidth', 'u_camNear', 'u_camFar', 'u_viewMatrix'],
      attribs: ['a_position', 'a_normal', 'a_uv'],
    });

    this._projectionMatrix = mat4.create();
    this._viewMatrix = mat4.create();
    this._viewProjectionMatrix = mat4.create();
  }

  // Per-frame: refresh cluster + light textures, then forward-render the scene.
  render(camera, scene) {
    // Update the camera matrices
    camera.updateMatrixWorld();
    mat4.invert(this._viewMatrix, camera.matrixWorld.elements);
    mat4.copy(this._projectionMatrix, camera.projectionMatrix.elements);
    mat4.multiply(this._viewProjectionMatrix, this._projectionMatrix, this._viewMatrix);

    // Update cluster texture which maps from cluster index to light list
    this.updateClusters(camera, this._viewMatrix, scene);

    // Update the buffer used to populate the texture packed with light data
    for (let i = 0; i < NUM_LIGHTS; ++i) {
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 0] = scene.lights[i].position[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 1] = scene.lights[i].position[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 2] = scene.lights[i].position[2];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 3] = scene.lights[i].radius;

      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 0] = scene.lights[i].color[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 1] = scene.lights[i].color[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 2] = scene.lights[i].color[2];
    }
    // Update the light texture
    this._lightTexture.update();

    // Bind the default null framebuffer which is the screen
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);

    // Render to the whole screen
    // NOTE(review): `canvas` is not imported from '../init' in this file; this
    // relies on a global `canvas` binding. Import it explicitly.
    gl.viewport(0, 0, canvas.width, canvas.height);

    // Clear the frame
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Use this shader program
    gl.useProgram(this._shaderProgram.glShaderProgram);

    // Upload the camera matrix
    gl.uniformMatrix4fv(this._shaderProgram.u_viewProjectionMatrix, false, this._viewProjectionMatrix);

    // Set the light texture as a uniform input to the shader
    gl.activeTexture(gl.TEXTURE2);
    gl.bindTexture(gl.TEXTURE_2D, this._lightTexture.glTexture);
    gl.uniform1i(this._shaderProgram.u_lightbuffer, 2);

    // Set the cluster texture as a uniform input to the shader
    gl.activeTexture(gl.TEXTURE3);
    gl.bindTexture(gl.TEXTURE_2D, this._clusterTexture.glTexture);
    gl.uniform1i(this._shaderProgram.u_clusterbuffer, 3);

    // TODO: Bind any other shader inputs
    gl.uniformMatrix4fv(this._shaderProgram.u_viewMatrix, false, this._viewMatrix);
    gl.uniform1f(this._shaderProgram.u_camNear, camera.near);
    gl.uniform1f(this._shaderProgram.u_camFar, camera.far);

    // var half_fov_rad = ((camera.fov / 2.0) * Math.PI) / 180.0;
    // var frustum_width = 2.0 * Math.atan(Math.tan(half_fov_rad) * camera.aspect);
    // var frustum_height = 2.0 * Math.tan(half_fov_rad) * camera.far;
    gl.uniform1f(this._shaderProgram.u_screenHeight, canvas.height);
    gl.uniform1f(this._shaderProgram.u_screenWidth, canvas.width);

    // Draw the scene. This function takes the shader program so that the model's textures can be bound to the right inputs
    scene.draw(this._shaderProgram);
  }
};
-------------------------------------------------------------------------------- /src/renderers/forward.js: --------------------------------------------------------------------------------
import { gl } from '../init';
import { mat4, vec4 } from 'gl-matrix';
import { loadShaderProgram } from '../utils';
import { NUM_LIGHTS } from '../scene';
import vsSource from '../shaders/forward.vert.glsl';
import fsSource from '../shaders/forward.frag.glsl.js';
import TextureBuffer from './textureBuffer';

// Baseline forward renderer: every fragment iterates over all NUM_LIGHTS.
export default class ForwardRenderer {
  constructor() {
    // Create a texture to store light data
    this._lightTexture = new TextureBuffer(NUM_LIGHTS, 8);

    // Initialize a shader program. 
The fragment shader source is compiled based on the number of lights
    this._shaderProgram = loadShaderProgram(vsSource, fsSource({
      numLights: NUM_LIGHTS,
    }), {
      uniforms: ['u_viewProjectionMatrix', 'u_colmap', 'u_normap', 'u_lightbuffer'],
      attribs: ['a_position', 'a_normal', 'a_uv'],
    });

    this._projectionMatrix = mat4.create();
    this._viewMatrix = mat4.create();
    this._viewProjectionMatrix = mat4.create();
  }

  // Per-frame: pack all lights into the light texture and draw the scene.
  render(camera, scene) {
    // Update the camera matrices
    camera.updateMatrixWorld();
    mat4.invert(this._viewMatrix, camera.matrixWorld.elements);
    mat4.copy(this._projectionMatrix, camera.projectionMatrix.elements);
    mat4.multiply(this._viewProjectionMatrix, this._projectionMatrix, this._viewMatrix);

    // Update the buffer used to populate the texture packed with light data
    for (let i = 0; i < NUM_LIGHTS; ++i) {
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 0] = scene.lights[i].position[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 1] = scene.lights[i].position[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 2] = scene.lights[i].position[2];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 0) + 3] = scene.lights[i].radius;

      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 0] = scene.lights[i].color[0];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 1] = scene.lights[i].color[1];
      this._lightTexture.buffer[this._lightTexture.bufferIndex(i, 1) + 2] = scene.lights[i].color[2];
    }
    // Update the light texture
    this._lightTexture.update();

    // Bind the default null framebuffer which is the screen
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);

    // Render to the whole screen
    // NOTE(review): `canvas` is not imported from '../init' in this file; this
    // relies on a global `canvas` binding. Import it explicitly.
    gl.viewport(0, 0, canvas.width, canvas.height);

    // Clear the frame
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Use this shader program
    gl.useProgram(this._shaderProgram.glShaderProgram);

    // Upload the camera matrix
    gl.uniformMatrix4fv(this._shaderProgram.u_viewProjectionMatrix, false, this._viewProjectionMatrix);

    // Set the light texture as a uniform input to the shader
    gl.activeTexture(gl.TEXTURE2);
    gl.bindTexture(gl.TEXTURE_2D, this._lightTexture.glTexture);
    gl.uniform1i(this._shaderProgram.u_lightbuffer, 2);

    // Draw the scene. This function takes the shader program so that the model's textures can be bound to the right inputs
    scene.draw(this._shaderProgram);
  }
};
-------------------------------------------------------------------------------- /src/renderers/textureBuffer.js: --------------------------------------------------------------------------------
import { gl } from '../init';

export default class TextureBuffer {
  /**
   * This class represents a buffer in a shader. Unfortunately we can't bind arbitrary buffers so we need to pack the data as a texture
   * @param {Number} elementCount The number of items in the buffer
   * @param {Number} elementSize The number of values in each item of the buffer
   */
  constructor(elementCount, elementSize) {
    // Initialize the texture. We use gl.NEAREST for texture filtering because we don't want to blend between values in the buffer. We want the exact value
    this._glTexture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, this._glTexture);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

    // The texture stores 4 values in each "pixel". 
Thus, the texture we create is elementCount x ceil(elementSize / 4)
    this._pixelsPerElement = Math.ceil(elementSize / 4);
    this._elementCount = elementCount;
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, elementCount, this._pixelsPerElement, 0, gl.RGBA, gl.FLOAT, null);
    gl.bindTexture(gl.TEXTURE_2D, null);

    // Create a buffer to use to upload to the texture
    this._buffer = new Float32Array(elementCount * 4 * this._pixelsPerElement);
  }

  // The underlying WebGL texture handle.
  get glTexture() {
    return this._glTexture;
  }

  // The CPU-side staging Float32Array; write here, then call update().
  get buffer() {
    return this._buffer;
  }

  /**
   * Computes the starting buffer index to a particular item.
   * @param {*} index The index of the item
   * @param {*} component The ith float of an element is located in the (i/4)th pixel
   */
  bufferIndex(index, component) {
    return 4 * index + 4 * component * this._elementCount;
  }

  /**
   * Update the texture with the data in the buffer
   */
  update() {
    gl.bindTexture(gl.TEXTURE_2D, this._glTexture);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, this._elementCount, this._pixelsPerElement, gl.RGBA, gl.FLOAT, this._buffer);
    gl.bindTexture(gl.TEXTURE_2D, null);
  }
};
-------------------------------------------------------------------------------- /src/scene.js: --------------------------------------------------------------------------------
const MinimalGLTFLoader = require('../lib/minimal-gltf-loader');
import { gl } from './init';

// TODO: Edit if you want to change the light initial positions
export const LIGHT_MIN = [-14, 0, -6];
export const LIGHT_MAX = [14, 20, 6];
export const LIGHT_RADIUS = 5.0;
export const LIGHT_DT = -0.03;

// TODO: This controls the number of lights
export const NUM_LIGHTS = 100;
export const MAX_LIGHTS_PER_CLUSTER = 100;

// Holds the animated point lights and the loaded glTF model primitives.
class Scene {
  constructor() {
    this.lights = [];
    this.models = [];

    for (let i 
= 0; i < NUM_LIGHTS; ++i) {
      // Random position inside the light volume, random-ish bright color.
      this.lights.push({
        position: new Float32Array([
          Math.random() * (LIGHT_MAX[0] - LIGHT_MIN[0]) + LIGHT_MIN[0],
          Math.random() * (LIGHT_MAX[1] - LIGHT_MIN[1]) + LIGHT_MIN[1],
          Math.random() * (LIGHT_MAX[2] - LIGHT_MIN[2]) + LIGHT_MIN[2],
        ]),
        color: new Float32Array([
          0.5 + 0.5 * Math.random(),
          0.5 + 0.5 * Math.random(),
          // NOTE(review): blue channel uses `0.5 + Math.random()` (up to 1.5)
          // unlike the 0.5 + 0.5*rand of the other channels — confirm intent.
          0.5 + Math.random(),
        ]),
        radius: LIGHT_RADIUS,
      });
    }
  }

  // Load a glTF file, upload its textures and vertex/index buffers to WebGL,
  // and append one drawable record per primitive to this.models (async).
  loadGLTF(url) {
    var glTFLoader = new MinimalGLTFLoader.glTFLoader(gl);
    glTFLoader.loadGLTF(url, glTF => {
      var curScene = glTF.scenes[glTF.defaultScene];

      var webGLTextures = {};

      // temp var
      var i,len;
      var primitiveOrderID;

      var mesh;
      var primitive;
      var vertexBuffer;
      var indicesBuffer;

      // textures setting
      var textureID = 0;
      var textureInfo;
      var samplerInfo;
      var target, format, internalFormat, type; // texture info
      var magFilter, minFilter, wrapS, wrapT;
      var image;
      var texture;

      // temp for sponza
      var colorTextureName = 'texture_color';
      var normalTextureName = 'texture_normal';

      // Upload every glTF texture to its own texture unit.
      for (var tid in glTF.json.textures) {
        textureInfo = glTF.json.textures[tid];
        target = textureInfo.target || gl.TEXTURE_2D;
        format = textureInfo.format || gl.RGBA;
        internalFormat = textureInfo.format || gl.RGBA;
        type = textureInfo.type || gl.UNSIGNED_BYTE;

        image = glTF.images[textureInfo.source];

        texture = gl.createTexture();
        gl.activeTexture(gl.TEXTURE0 + textureID);
        gl.bindTexture(target, texture);

        switch(target) {
          case 3553: // gl.TEXTURE_2D
            gl.texImage2D(target, 0, internalFormat, format, type, image);
            break;
        }

        // !! Sampler
        // raw WebGL 1, no sampler object, set magfilter, wrapS, etc
        samplerInfo = glTF.json.samplers[textureInfo.sampler];
        minFilter = samplerInfo.minFilter || gl.NEAREST_MIPMAP_LINEAR;
        magFilter = samplerInfo.magFilter || gl.LINEAR;
        wrapS = samplerInfo.wrapS || gl.REPEAT;
        wrapT = samplerInfo.wrapT || gl.REPEAT;
        gl.texParameteri(target, gl.TEXTURE_MIN_FILTER, minFilter);
        gl.texParameteri(target, gl.TEXTURE_MAG_FILTER, magFilter);
        gl.texParameteri(target, gl.TEXTURE_WRAP_S, wrapS);
        gl.texParameteri(target, gl.TEXTURE_WRAP_T, wrapT);
        // Mipmaps are only needed when the min filter samples them.
        if (minFilter == gl.NEAREST_MIPMAP_NEAREST ||
            minFilter == gl.NEAREST_MIPMAP_LINEAR ||
            minFilter == gl.LINEAR_MIPMAP_NEAREST ||
            minFilter == gl.LINEAR_MIPMAP_LINEAR ) {
          gl.generateMipmap(target);
        }


        gl.bindTexture(target, null);

        webGLTextures[tid] = {
          texture: texture,
          target: target,
          id: textureID
        };

        textureID++;
      }

      // vertex attributes
      for (var mid in curScene.meshes) {
        mesh = curScene.meshes[mid];

        for (i = 0, len = mesh.primitives.length; i < len; ++i) {
          primitive = mesh.primitives[i];

          vertexBuffer = gl.createBuffer();
          indicesBuffer = gl.createBuffer();

          // initialize buffer
          var vertices = primitive.vertexBuffer;
          gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
          gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
          gl.bindBuffer(gl.ARRAY_BUFFER, null);

          var indices = primitive.indices;
          gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indicesBuffer);
          gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
          gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null);

          // Attribute layout info comes from the glTF technique parameters.
          var posInfo = primitive.attributes[primitive.technique.parameters['position'].semantic];
          var norInfo = primitive.attributes[primitive.technique.parameters['normal'].semantic];
          var uvInfo = primitive.attributes[primitive.technique.parameters['texcoord_0'].semantic];

          this.models.push({
            gltf: primitive,

            idx: indicesBuffer,

            attributes: vertexBuffer,
            posInfo: {size: posInfo.size, type: posInfo.type, stride: posInfo.stride, offset: posInfo.offset},
            norInfo: {size: norInfo.size, type: norInfo.type, stride: norInfo.stride, offset: norInfo.offset},
            uvInfo: {size: uvInfo.size, type: uvInfo.type, stride: uvInfo.stride, offset: uvInfo.offset},

            // specific textures temp test
            colmap: webGLTextures[colorTextureName].texture,
            normap: webGLTextures[normalTextureName].texture
          });
        }
      }

    });
  }

  // Advance the light animation: drift lights downward and wrap around.
  update() {
    for (let i = 0; i < NUM_LIGHTS; i++) {
      // OPTIONAL TODO: Edit if you want to change how lights move
      this.lights[i].position[1] += LIGHT_DT;
      // wrap lights from bottom to top
      this.lights[i].position[1] = (this.lights[i].position[1] + LIGHT_MAX[1] - LIGHT_MIN[1]) % LIGHT_MAX[1] + LIGHT_MIN[1];
    }
  }

  // Bind each model's textures and vertex attributes, then issue its
  // indexed draw call with the supplied shader program.
  draw(shaderProgram) {
    for (let i = 0; i < this.models.length; ++i) {
      const model = this.models[i];
      if (model.colmap) {
        gl.activeTexture(gl.TEXTURE0);
        gl.bindTexture(gl.TEXTURE_2D, model.colmap);
        gl.uniform1i(shaderProgram.u_colmap, 0);
      }

      if (model.normap) {
        gl.activeTexture(gl.TEXTURE1);
        gl.bindTexture(gl.TEXTURE_2D, model.normap);
        gl.uniform1i(shaderProgram.u_normap, 1);
      }

      gl.bindBuffer(gl.ARRAY_BUFFER, model.attributes);

      gl.enableVertexAttribArray(shaderProgram.a_position);
      gl.vertexAttribPointer(shaderProgram.a_position, model.posInfo.size, model.posInfo.type, false, model.posInfo.stride, model.posInfo.offset);

      gl.enableVertexAttribArray(shaderProgram.a_normal);
      gl.vertexAttribPointer(shaderProgram.a_normal, model.norInfo.size, model.norInfo.type, false, 
model.norInfo.stride, model.norInfo.offset); 190 | 191 | gl.enableVertexAttribArray(shaderProgram.a_uv); 192 | gl.vertexAttribPointer(shaderProgram.a_uv, model.uvInfo.size, model.uvInfo.type, false, model.uvInfo.stride, model.uvInfo.offset); 193 | 194 | gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, model.idx); 195 | 196 | gl.drawElements(model.gltf.mode, model.gltf.indices.length, model.gltf.indicesComponentType, 0); 197 | } 198 | } 199 | 200 | } 201 | 202 | export default Scene; -------------------------------------------------------------------------------- /src/shaders/clusteredForward.frag.glsl.js: -------------------------------------------------------------------------------- 1 | export default function (params) { 2 | return ` 3 | /* 4 | TODO: 5 | - Determine the cluster for a fragment 6 | - Read in the lights in that cluster from the populated data 7 | - Do shading for just those lights 8 | - You may find it necessary to bind additional uniforms in src/renderers/clusteredForwardPlus.js 9 | */ 10 | 11 | // TODO: This is pretty much just a clone of forward.frag.glsl.js 12 | 13 | #version 100 14 | precision highp float; 15 | 16 | uniform sampler2D u_colmap; 17 | uniform sampler2D u_normap; 18 | uniform sampler2D u_lightbuffer; 19 | 20 | // TODO: Read this buffer to determine the lights influencing a cluster 21 | uniform sampler2D u_clusterbuffer; 22 | 23 | // More uniforms 24 | uniform float u_screenHeight; 25 | uniform float u_screenWidth; 26 | uniform float u_camFar; 27 | uniform float u_camNear; 28 | uniform mat4 u_viewMatrix; 29 | 30 | varying vec3 v_position; 31 | varying vec3 v_normal; 32 | varying vec2 v_uv; 33 | 34 | 35 | // ========================================================================== 36 | 37 | vec3 applyNormalMap(vec3 geomnor, vec3 normap) { 38 | normap = normap * 2.0 - 1.0; 39 | vec3 up = normalize(vec3(0.001, 1, 0.001)); 40 | vec3 surftan = normalize(cross(geomnor, up)); 41 | vec3 surfbinor = cross(geomnor, surftan); 42 | return normap.y * 
surftan + normap.x * surfbinor + normap.z * geomnor; 43 | } 44 | 45 | // -------------------------------------------------------------------------- 46 | 47 | struct Light { 48 | vec3 position; 49 | float radius; 50 | vec3 color; 51 | }; 52 | 53 | // -------------------------------------------------------------------------- 54 | 55 | float ExtractFloat(sampler2D texture, int textureWidth, int textureHeight, int index, int component) { 56 | float u = float(index + 1) / float(textureWidth + 1); 57 | int pixel = component / 4; 58 | float v = float(pixel + 1) / float(textureHeight + 1); 59 | vec4 texel = texture2D(texture, vec2(u, v)); 60 | int pixelComponent = component - pixel * 4; 61 | if (pixelComponent == 0) { 62 | return texel[0]; 63 | } else if (pixelComponent == 1) { 64 | return texel[1]; 65 | } else if (pixelComponent == 2) { 66 | return texel[2]; 67 | } else if (pixelComponent == 3) { 68 | return texel[3]; 69 | } 70 | } 71 | 72 | // -------------------------------------------------------------------------- 73 | 74 | Light UnpackLight(int index) { 75 | Light light; 76 | float u = float(index + 1) / float(${params.numLights + 1}); 77 | vec4 v1 = texture2D(u_lightbuffer, vec2(u, 0.3)); 78 | vec4 v2 = texture2D(u_lightbuffer, vec2(u, 0.6)); 79 | light.position = v1.xyz; 80 | 81 | // LOOK: This extracts the 4th float (radius) of the (index)th light in the buffer 82 | // Note that this is just an example implementation to extract one float. 
83 | // There are more efficient ways if you need adjacent values 84 | light.radius = ExtractFloat(u_lightbuffer, ${params.numLights}, 2, index, 3); 85 | 86 | light.color = v2.rgb; 87 | return light; 88 | } 89 | 90 | // -------------------------------------------------------------------------- 91 | 92 | // Cubic approximation of gaussian curve so we falloff to exactly 0 at the light radius 93 | float cubicGaussian(float h) { 94 | if (h < 1.0) { 95 | return 0.25 * pow(2.0 - h, 3.0) - pow(1.0 - h, 3.0); 96 | } else if (h < 2.0) { 97 | return 0.25 * pow(2.0 - h, 3.0); 98 | } else { 99 | return 0.0; 100 | } 101 | } 102 | 103 | // -------------------------------------------------------------------------- 104 | 105 | // float ExtractLightInfoFromCluster(int u, int lightIdx) 106 | // { 107 | // int v = int( floor( (float(lightIdx) + 1.0) / 4.0 ) ); 108 | // vec4 pixelComponent = texture2D(u_clusterbuffer, vec2(u, v)); 109 | // int lightIdxInTexture = int(mod(float(lightIdx) + 1.0, 4.0)); 110 | 111 | // if (lightIdxInTexture == 0) { 112 | // return pixelComponent[0]; 113 | // } else if (lightIdxInTexture == 1) { 114 | // return pixelComponent[1]; 115 | // } else if (lightIdxInTexture == 2) { 116 | // return pixelComponent[2]; 117 | // } else if (lightIdxInTexture == 3) { 118 | // return pixelComponent[3]; 119 | // } 120 | // } 121 | 122 | // -------------------------------------------------------------------------- 123 | 124 | void main() 125 | { 126 | vec4 _fragPos = u_viewMatrix * vec4(v_position, 1.0); 127 | vec3 fragPos = vec3(_fragPos[0], _fragPos[1], -1.0 * _fragPos[2]); // Make sure to negate z 128 | 129 | float z_stride = float(u_camFar - u_camNear) / float(${params.z_slices}); 130 | float y_stride = float(u_screenHeight) / float(${params.y_slices}); 131 | float x_stride = float(u_screenWidth) / float(${params.x_slices}); 132 | 133 | // gl_FragCoord.xy are in pixel/screen space, .z is in [0, 1] 134 | int z_cluster_idx = int(floor((fragPos[2] - u_camNear) / 
z_stride)); 135 | 136 | int y_cluster_idx = int(floor(gl_FragCoord.y / y_stride)); 137 | int x_cluster_idx = int(floor(gl_FragCoord.x / x_stride)); 138 | // int y_cluster_idx = int(floor((gl_FragCoord.y + (u_screenHeight / 2.0)) / y_stride)); 139 | // int x_cluster_idx = int(floor((gl_FragCoord.x + (u_screenWidth / 2.0)) / x_stride)); 140 | 141 | //Test to see if you have 15 slices 142 | // gl_FragColor = vec4(float(x_cluster_idx)/15.0, float(x_cluster_idx)/15.0, float(x_cluster_idx)/15.0, 1.0); 143 | 144 | 145 | // Calculate u index into cluster texture 146 | int clusterIdx = x_cluster_idx + 147 | (y_cluster_idx * ${params.x_slices}) + 148 | (z_cluster_idx * ${params.x_slices} * ${params.y_slices}); 149 | 150 | int totalNumClusters = ${params.x_slices} * ${params.y_slices} * ${params.z_slices}; 151 | int texWidth = int(ceil(float(${params.maxLightsPerCluster} + 1) / 4.0)); 152 | const int maxLightsPerCluster = int(${params.maxLightsPerCluster}); 153 | 154 | // Get the light count from cluster texture, iterate through that 155 | // Note: Textures go from [0, 1], so divide u and v by width and height before calling texture2D 156 | // "+ 1" to make sure you hit inside pixel 157 | float u = float(clusterIdx + 1) / float(totalNumClusters + 1); 158 | vec4 firstVComponent = texture2D(u_clusterbuffer, vec2(u, 0)); 159 | int numLightsInCluster = int(firstVComponent.r); 160 | 161 | // Light Calculation 162 | vec3 albedo = texture2D(u_colmap, v_uv).rgb; 163 | vec3 normap = texture2D(u_normap, v_uv).xyz; 164 | vec3 normal = applyNormalMap(v_normal, normap); 165 | vec3 fragColor = vec3(0.0); 166 | 167 | for (int i = 0; i < maxLightsPerCluster; ++i) 168 | { 169 | if(i >= numLightsInCluster) break; 170 | 171 | // Get the light index in cluster texture 172 | int lightIdxInTexture = int(ExtractFloat(u_clusterbuffer, totalNumClusters, texWidth, clusterIdx, i+1)); 173 | // int lightIdxInTexture = int(ExtractLightInfoFromCluster(u, i)); 174 | 175 | Light light = 
UnpackLight(lightIdxInTexture); 176 | float lightDistance = distance(light.position, v_position); 177 | vec3 L = (light.position - v_position) / lightDistance; 178 | float lightIntensity = cubicGaussian(2.0 * lightDistance / light.radius); 179 | float lambertTerm = max(dot(L, normal), 0.0); 180 | fragColor += albedo * lambertTerm * light.color * vec3(lightIntensity); 181 | } 182 | 183 | const vec3 ambientLight = vec3(0.025); 184 | fragColor += albedo * ambientLight; 185 | gl_FragColor = vec4(fragColor, 1.0); 186 | 187 | } 188 | `; 189 | 190 | 191 | 192 | /* 193 | TODO: 194 | 195 | - Determine the cluster this fragment belongs to 196 | - Get current fragment with glFragCoord 197 | - ??? Make a cluster struct (like Lights). It would have numLights and listList 198 | - Read data inside u_clusterbuffer 199 | - Loop over light indices 200 | 201 | index -> pixel index 202 | pixel index => v coord of texture 203 | get texel from texture 204 | access correct component of texture 205 | 206 | pixel 0, component z 207 | pixel = (index + 1)/4 208 | component = (index + 1) - 4 * pixel 209 | 210 | v = (pixel + 1) / (max lights per cluter + 1 + 1) 211 | texel = texture2d(buffer, vec2(uv)) 212 | lightindex = texel[component] 213 | if(compoenet == 0) 214 | lightindex = texel[0] 215 | 216 | if(compoenet == 1) 217 | lightindex = texel[1] 218 | 219 | if(compoenet == 2) 220 | lightindex = texel[2] 221 | 222 | if(compoenet == 3) 223 | lightindex = texel[3] 224 | 225 | 226 | dividing by texture 227 | (2 + 1)/ (5 + 1) 228 | 229 | 230 | 231 | 232 | ------------------ OTHER NOTES ------------------ 233 | 234 | // send inverse projection matrix and number of slices to fragment shader to bring into view space 235 | // position is in world space (v_position from vertex shader), and you want it in eye space. 
multiply by view matrix 236 | //do this to get z coord 237 | //glfragcoord.xy to get x and y (this is your pixel position) 238 | // you need to send height and width of screen 239 | 240 | 241 | ------------------ WHITEBOARD NOTES ------------------ 242 | you get v_position from vertex shader. this is in world space 243 | multiply this by view matrix to bring the position into camera space 244 | this means you need to pass in the view matrix into the fragment shader 245 | 246 | get the z value from this, floor it to the chunks of slice size 247 | if in camera space, it goes from 0 to farPlaneZ (FIGURE OUT HOW TO GET THIS) 248 | divide the space by 15 (slice size), and find which slice of 15th the z value floors to 249 | 250 | identify the cluster the fragment is located in 251 | 252 | get the x and y value for the fragment with glFragCoord.xy. this is in pixel space 253 | need to pass in screen width and height 254 | divide x and y by 15 (or whatever your cluster logic is), then floor or ceil it 255 | 256 | find index value into cluster texture using x, y, z 257 | get uv coord 258 | get light info 259 | iterate through those lights for that current cluster 260 | */ 261 | 262 | 263 | }//end export default function 264 | -------------------------------------------------------------------------------- /src/shaders/clusteredForward.vert.glsl: -------------------------------------------------------------------------------- 1 | #version 100 2 | precision highp float; 3 | 4 | uniform mat4 u_viewProjectionMatrix; 5 | 6 | attribute vec3 a_position; 7 | attribute vec3 a_normal; 8 | attribute vec2 a_uv; 9 | 10 | varying vec3 v_position; 11 | varying vec3 v_normal; 12 | varying vec2 v_uv; 13 | 14 | void main() { 15 | gl_Position = u_viewProjectionMatrix * vec4(a_position, 1.0); 16 | v_position = a_position; 17 | v_normal = a_normal; 18 | v_uv = a_uv; 19 | } -------------------------------------------------------------------------------- /src/shaders/deferred.frag.glsl.js: 
-------------------------------------------------------------------------------- 1 | export default function(params) { 2 | return ` 3 | #version 100 4 | precision highp float; 5 | 6 | uniform sampler2D u_gbuffers[${params.numGBuffers}]; 7 | 8 | uniform sampler2D u_lightbuffer; 9 | uniform sampler2D u_clusterbuffer; 10 | 11 | uniform float u_screenHeight; 12 | uniform float u_screenWidth; 13 | uniform float u_camFar; 14 | uniform float u_camNear; 15 | 16 | uniform mat4 u_viewMatrix; 17 | uniform mat4 u_inverseViewMatrix; 18 | 19 | varying vec2 v_uv; 20 | 21 | const vec3 specColor = vec3(0.1, 0.1, 0.1); 22 | const float shininess = 800.0; 23 | const float screenGamma = 2.0; 24 | 25 | #define GAMMA_CORRECTION true 26 | #define BLINN_PHONG true 27 | 28 | 29 | // ========================================================================== 30 | 31 | struct Light { 32 | vec3 position; 33 | float radius; 34 | vec3 color; 35 | }; 36 | 37 | // -------------------------------------------------------------------------- 38 | 39 | float ExtractFloat(sampler2D texture, int textureWidth, int textureHeight, int index, int component) { 40 | float u = float(index + 1) / float(textureWidth + 1); 41 | int pixel = component / 4; 42 | float v = float(pixel + 1) / float(textureHeight + 1); 43 | vec4 texel = texture2D(texture, vec2(u, v)); 44 | int pixelComponent = component - pixel * 4; 45 | if (pixelComponent == 0) { 46 | return texel[0]; 47 | } else if (pixelComponent == 1) { 48 | return texel[1]; 49 | } else if (pixelComponent == 2) { 50 | return texel[2]; 51 | } else if (pixelComponent == 3) { 52 | return texel[3]; 53 | } 54 | } 55 | 56 | // -------------------------------------------------------------------------- 57 | 58 | Light UnpackLight(int index) { 59 | Light light; 60 | float u = float(index + 1) / float(${params.numLights + 1}); 61 | vec4 v1 = texture2D(u_lightbuffer, vec2(u, 0.3)); 62 | vec4 v2 = texture2D(u_lightbuffer, vec2(u, 0.6)); 63 | light.position = v1.xyz; 64 | 65 | 
// LOOK: This extracts the 4th float (radius) of the (index)th light in the buffer 66 | // Note that this is just an example implementation to extract one float. 67 | // There are more efficient ways if you need adjacent values 68 | light.radius = ExtractFloat(u_lightbuffer, ${params.numLights}, 2, index, 3); 69 | 70 | light.color = v2.rgb; 71 | return light; 72 | } 73 | 74 | // -------------------------------------------------------------------------- 75 | 76 | // Cubic approximation of gaussian curve so we falloff to exactly 0 at the light radius 77 | float cubicGaussian(float h) { 78 | if (h < 1.0) { 79 | return 0.25 * pow(2.0 - h, 3.0) - pow(1.0 - h, 3.0); 80 | } else if (h < 2.0) { 81 | return 0.25 * pow(2.0 - h, 3.0); 82 | } else { 83 | return 0.0; 84 | } 85 | } 86 | 87 | // -------------------------------------------------------------------------- 88 | 89 | // https://aras-p.info/texts/CompactNormalStorage.html 90 | vec3 getUnreducedNormal(vec2 reducedNorm) 91 | { 92 | vec3 normal; 93 | normal.xy = (2.0 * reducedNorm) - 1.0; 94 | normal.z = sqrt(1.0 - ((normal.x * normal.x) + (normal.y * normal.y))); 95 | return normal; 96 | } 97 | 98 | // -------------------------------------------------------------------------- 99 | 100 | void main() { 101 | 102 | // ============================== Normal Compression ============================== 103 | 104 | // TODO: extract data from g buffers and do lighting 105 | vec4 gb0 = texture2D(u_gbuffers[0], v_uv); 106 | vec4 gb1 = texture2D(u_gbuffers[1], v_uv); 107 | // vec4 gb2 = texture2D(u_gbuffers[2], v_uv); 108 | // vec4 gb3 = texture2D(u_gbuffers[3], v_uv); 109 | 110 | 111 | // Get data from g-buffers 112 | vec3 albedo = gb0.rgb; 113 | vec3 v_position = gb1.rgb; 114 | 115 | // Calculate unreduced normal 116 | // Make sure to multiply inverse view matrix to bring back to world space 117 | vec2 reducedNormal = vec2(gb0[3], gb1[3]); 118 | vec3 unReducedNormal = getUnreducedNormal(reducedNormal); 119 | vec4 _normal = 
u_inverseViewMatrix * vec4(unReducedNormal, 0.0); 120 | vec3 normal = vec3(_normal[0], _normal[1], _normal[2]); //using world space normal 121 | // vec3 normal = vec3(unReducedNormal[0], unReducedNormal[1], unReducedNormal[2]); //using view space normal 122 | 123 | // ============================== No Normal Compression ============================== 124 | 125 | // // TODO: extract data from g buffers and do lighting 126 | // vec4 gb0 = texture2D(u_gbuffers[0], v_uv); 127 | // vec4 gb1 = texture2D(u_gbuffers[1], v_uv); 128 | // vec4 gb2 = texture2D(u_gbuffers[2], v_uv); 129 | 130 | // // Get data from g-buffers 131 | // vec3 albedo = gb0.rgb; 132 | // vec3 v_position = gb1.rgb; 133 | // vec3 normal = gb2.rgb; 134 | 135 | // ============================== End No Normal Compression ============================== 136 | 137 | 138 | vec3 fragColor = vec3(0.0); 139 | 140 | vec4 _fragPos = u_viewMatrix * vec4(v_position, 1.0); 141 | vec3 fragPos = vec3(_fragPos[0], _fragPos[1], -1.0 * _fragPos[2]); // Make sure to negate z 142 | 143 | float z_stride = float(u_camFar - u_camNear) / float(${params.z_slices}); 144 | float y_stride = float(u_screenHeight) / float(${params.y_slices}); 145 | float x_stride = float(u_screenWidth) / float(${params.x_slices}); 146 | 147 | // gl_FragCoord.xy are in pixel/screen space, .z is in [0, 1] 148 | int z_cluster_idx = int(floor((fragPos[2] - u_camNear) / z_stride)); 149 | 150 | int y_cluster_idx = int(floor(gl_FragCoord.y / y_stride)); 151 | int x_cluster_idx = int(floor(gl_FragCoord.x / x_stride)); 152 | 153 | // Calculate u index into cluster texture 154 | int clusterIdx = x_cluster_idx + 155 | (y_cluster_idx * ${params.x_slices}) + 156 | (z_cluster_idx * ${params.x_slices} * ${params.y_slices}); 157 | 158 | int totalNumClusters = ${params.x_slices} * ${params.y_slices} * ${params.z_slices}; 159 | int texWidth = int(ceil(float(${params.maxLightsPerCluster} + 1) / 4.0)); 160 | const int maxLightsPerCluster = 
int(${params.maxLightsPerCluster}); 161 | 162 | // Get the light count from cluster texture, iterate through that 163 | // Note: Textures go from [0, 1], so divide u and v by width and height before calling texture2D 164 | // "+ 1" to make sure you hit inside pixel 165 | float u = float(clusterIdx + 1) / float(totalNumClusters + 1); 166 | vec4 firstVComponent = texture2D(u_clusterbuffer, vec2(u, 0)); 167 | int numLightsInCluster = int(firstVComponent.r); 168 | 169 | // Light Calculation 170 | for (int i = 0; i < maxLightsPerCluster; ++i) 171 | { 172 | if(i >= numLightsInCluster) break; 173 | 174 | // Get the light index in cluster texture 175 | int lightIdxInTexture = int(ExtractFloat(u_clusterbuffer, totalNumClusters, texWidth, clusterIdx, i+1)); 176 | 177 | Light light = UnpackLight(lightIdxInTexture); 178 | float lightDistance = distance(light.position, v_position); 179 | vec3 L = (light.position - v_position) / lightDistance; 180 | float lightIntensity = cubicGaussian(2.0 * lightDistance / light.radius); 181 | float lambertTerm = max(dot(L, normal), 0.0); 182 | 183 | if(BLINN_PHONG) 184 | { 185 | // Blinn Phong Model ---------------------- 186 | // https://en.wikipedia.org/wiki/Blinn%E2%80%93Phong_shading_model 187 | 188 | float specular = 0.0; 189 | if(lambertTerm > 0.0) 190 | { 191 | //Blinn Phong 192 | vec3 lightDir = light.position - v_position; 193 | vec3 viewDir = normalize(-v_position); 194 | 195 | // vec3 halfDir = normalize(L + viewDir); 196 | vec3 halfDir = normalize(lightDir + viewDir); 197 | 198 | float specAngle = max(dot(halfDir, normal), 0.0); 199 | specular = pow(specAngle, shininess); 200 | } 201 | 202 | fragColor += albedo * lambertTerm * light.color * vec3(lightIntensity) + specular * specColor; 203 | } 204 | else 205 | { 206 | // Lambert Only ---------------------- 207 | fragColor += albedo * lambertTerm * light.color * vec3(lightIntensity); 208 | } 209 | 210 | } 211 | 212 | const vec3 ambientLight = vec3(0.025); 213 | fragColor += albedo * 
ambientLight; 214 | 215 | // --------------------- Gamma Correction --------------------- 216 | 217 | if(GAMMA_CORRECTION) 218 | { 219 | vec3 gammaCorrectedColor = pow(fragColor, vec3(1.0 / screenGamma)); 220 | gl_FragColor = vec4(gammaCorrectedColor, 1.0); 221 | } 222 | else 223 | { 224 | gl_FragColor = vec4(fragColor, 1.0); 225 | } 226 | 227 | 228 | 229 | 230 | } 231 | `; 232 | } -------------------------------------------------------------------------------- /src/shaders/deferredToTexture.frag.glsl: -------------------------------------------------------------------------------- 1 | #version 100 2 | #extension GL_EXT_draw_buffers: enable 3 | precision highp float; 4 | 5 | uniform sampler2D u_colmap; 6 | uniform sampler2D u_normap; 7 | 8 | uniform mat4 u_viewMatrix; 9 | 10 | varying vec3 v_position; 11 | varying vec3 v_normal; 12 | varying vec2 v_uv; 13 | 14 | 15 | vec3 applyNormalMap(vec3 geomnor, vec3 normap) { 16 | normap = normap * 2.0 - 1.0; 17 | vec3 up = normalize(vec3(0.001, 1, 0.001)); 18 | vec3 surftan = normalize(cross(geomnor, up)); 19 | vec3 surfbinor = cross(geomnor, surftan); 20 | return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; 21 | } 22 | 23 | void main() { 24 | vec3 norm = applyNormalMap(v_normal, vec3(texture2D(u_normap, v_uv))); 25 | vec3 col = vec3(texture2D(u_colmap, v_uv)); 26 | 27 | // TODO: populate your g buffer 28 | // Populate g buffer with depth, normal, color, and anything else 29 | 30 | // ============================== Non-optimized way ============================== 31 | //gl_FragData[0] = vec4(col, 0.0); 32 | //gl_FragData[1] = vec4(v_position, 0.0); 33 | //gl_FragData[2] = vec4(norm, 0.0); 34 | 35 | // ============================== Optimized way ============================== 36 | // Use less g buffers 37 | // Reduce normals to only 2 components and pack them into already used g buffers 38 | // If you reduce normals, must bring to camera space by multiplying view matrix 39 | 40 | // Compacting normals, 
resconstructing z --> https://aras-p.info/texts/CompactNormalStorage.html 41 | // You want to adjust the normal post normalization so that you can adjust it again in the frag shader 42 | // OpenGL apparently clamps values that are stored in a texture because it reads everything as a color 43 | 44 | // You know the magnitude of the normal is 1, and you have x and y 45 | // Use the magnitude formula in the frag shader to reconstruct and find z 46 | 47 | vec3 reducedNorm = 0.5 + (0.5 * normalize(vec3(u_viewMatrix * vec4(norm, 0.0)))); 48 | gl_FragData[0] = vec4(col, reducedNorm.x); 49 | gl_FragData[1] = vec4(v_position, reducedNorm.y); 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | } -------------------------------------------------------------------------------- /src/shaders/deferredToTexture.vert.glsl: -------------------------------------------------------------------------------- 1 | #version 100 2 | precision highp float; 3 | 4 | uniform mat4 u_viewProjectionMatrix; 5 | 6 | attribute vec3 a_position; 7 | attribute vec3 a_normal; 8 | attribute vec2 a_uv; 9 | 10 | varying vec3 v_position; 11 | varying vec3 v_normal; 12 | varying vec2 v_uv; 13 | 14 | void main() { 15 | gl_Position = u_viewProjectionMatrix * vec4(a_position, 1.0); 16 | v_position = a_position; 17 | v_normal = a_normal; 18 | v_uv = a_uv; 19 | } -------------------------------------------------------------------------------- /src/shaders/forward.frag.glsl.js: -------------------------------------------------------------------------------- 1 | export default function(params) { 2 | return ` 3 | #version 100 4 | precision highp float; 5 | 6 | uniform sampler2D u_colmap; 7 | uniform sampler2D u_normap; 8 | uniform sampler2D u_lightbuffer; 9 | 10 | varying vec3 v_position; 11 | varying vec3 v_normal; 12 | varying vec2 v_uv; 13 | 14 | vec3 applyNormalMap(vec3 geomnor, vec3 normap) { 15 | normap = normap * 2.0 - 1.0; 16 | vec3 up = normalize(vec3(0.001, 1, 0.001)); 17 | vec3 surftan = normalize(cross(geomnor, up)); 
18 | vec3 surfbinor = cross(geomnor, surftan); 19 | return normap.y * surftan + normap.x * surfbinor + normap.z * geomnor; 20 | } 21 | 22 | struct Light { 23 | vec3 position; 24 | float radius; 25 | vec3 color; 26 | }; 27 | 28 | float ExtractFloat(sampler2D texture, int textureWidth, int textureHeight, int index, int component) { 29 | float u = float(index + 1) / float(textureWidth + 1); 30 | int pixel = component / 4; 31 | float v = float(pixel + 1) / float(textureHeight + 1); 32 | vec4 texel = texture2D(texture, vec2(u, v)); 33 | int pixelComponent = component - pixel * 4; 34 | if (pixelComponent == 0) { 35 | return texel[0]; 36 | } else if (pixelComponent == 1) { 37 | return texel[1]; 38 | } else if (pixelComponent == 2) { 39 | return texel[2]; 40 | } else if (pixelComponent == 3) { 41 | return texel[3]; 42 | } 43 | } 44 | 45 | Light UnpackLight(int index) { 46 | Light light; 47 | float u = float(index + 1) / float(${params.numLights + 1}); 48 | vec4 v1 = texture2D(u_lightbuffer, vec2(u, 0.0)); 49 | vec4 v2 = texture2D(u_lightbuffer, vec2(u, 0.5)); 50 | light.position = v1.xyz; 51 | 52 | // LOOK: This extracts the 4th float (radius) of the (index)th light in the buffer 53 | // Note that this is just an example implementation to extract one float. 
54 | // There are more efficient ways if you need adjacent values 55 | light.radius = ExtractFloat(u_lightbuffer, ${params.numLights}, 2, index, 3); 56 | 57 | light.color = v2.rgb; 58 | return light; 59 | } 60 | 61 | // Cubic approximation of gaussian curve so we falloff to exactly 0 at the light radius 62 | float cubicGaussian(float h) { 63 | if (h < 1.0) { 64 | return 0.25 * pow(2.0 - h, 3.0) - pow(1.0 - h, 3.0); 65 | } else if (h < 2.0) { 66 | return 0.25 * pow(2.0 - h, 3.0); 67 | } else { 68 | return 0.0; 69 | } 70 | } 71 | 72 | void main() { 73 | vec3 albedo = texture2D(u_colmap, v_uv).rgb; 74 | vec3 normap = texture2D(u_normap, v_uv).xyz; 75 | vec3 normal = applyNormalMap(v_normal, normap); 76 | 77 | vec3 fragColor = vec3(0.0); 78 | 79 | for (int i = 0; i < ${params.numLights}; ++i) { 80 | Light light = UnpackLight(i); 81 | float lightDistance = distance(light.position, v_position); 82 | vec3 L = (light.position - v_position) / lightDistance; 83 | 84 | float lightIntensity = cubicGaussian(2.0 * lightDistance / light.radius); 85 | float lambertTerm = max(dot(L, normal), 0.0); 86 | 87 | fragColor += albedo * lambertTerm * light.color * vec3(lightIntensity); 88 | } 89 | 90 | const vec3 ambientLight = vec3(0.025); 91 | fragColor += albedo * ambientLight; 92 | 93 | gl_FragColor = vec4(fragColor, 1.0); 94 | } 95 | `; 96 | } 97 | -------------------------------------------------------------------------------- /src/shaders/forward.vert.glsl: -------------------------------------------------------------------------------- 1 | #version 100 2 | precision highp float; 3 | 4 | uniform mat4 u_viewProjectionMatrix; 5 | 6 | attribute vec3 a_position; 7 | attribute vec3 a_normal; 8 | attribute vec2 a_uv; 9 | 10 | varying vec3 v_position; 11 | varying vec3 v_normal; 12 | varying vec2 v_uv; 13 | 14 | void main() { 15 | gl_Position = u_viewProjectionMatrix * vec4(a_position, 1.0); 16 | v_position = a_position; 17 | v_normal = a_normal; 18 | v_uv = a_uv; 19 | } 
-------------------------------------------------------------------------------- /src/shaders/quad.vert.glsl: -------------------------------------------------------------------------------- 1 | #version 100 2 | precision highp float; 3 | 4 | attribute vec3 a_position; 5 | 6 | varying vec2 v_uv; 7 | 8 | void main() { 9 | gl_Position = vec4(a_position, 1.0); 10 | v_uv = a_position.xy * 0.5 + 0.5; 11 | } -------------------------------------------------------------------------------- /src/utils.js: -------------------------------------------------------------------------------- 1 | import { gl, canvas, abort } from './init'; 2 | import QuadVertSource from './shaders/quad.vert.glsl'; 3 | 4 | function downloadURI(uri, name) { 5 | var link = document.createElement('a'); 6 | link.download = name; 7 | link.href = uri; 8 | document.body.appendChild(link); 9 | link.click(); 10 | document.body.removeChild(link); 11 | }; 12 | 13 | export function saveCanvas() { 14 | downloadURI(canvas.toDataURL('image/png'), 'webgl-canvas-' + Date.now() + '.png'); 15 | } 16 | 17 | function compileShader(shaderSource, shaderType) { 18 | var shader = gl.createShader(shaderType); 19 | gl.shaderSource(shader, shaderSource); 20 | gl.compileShader(shader); 21 | if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) { 22 | console.error(shaderSource); 23 | abort('shader compiler error:\n' + gl.getShaderInfoLog(shader)); 24 | } 25 | 26 | return shader; 27 | }; 28 | 29 | function linkShader(vs, fs) { 30 | var prog = gl.createProgram(); 31 | gl.attachShader(prog, vs); 32 | gl.attachShader(prog, fs); 33 | gl.linkProgram(prog); 34 | if (!gl.getProgramParameter(prog, gl.LINK_STATUS)) { 35 | abort('shader linker error:\n' + gl.getProgramInfoLog(prog)); 36 | } 37 | return prog; 38 | }; 39 | 40 | function addShaderLocations(result, shaderLocations) { 41 | if (shaderLocations && shaderLocations.uniforms && shaderLocations.uniforms.length) { 42 | for (let i = 0; i < shaderLocations.uniforms.length; ++i) { 43 | 
result = Object.assign(result, { 44 | [shaderLocations.uniforms[i]]: gl.getUniformLocation(result.glShaderProgram, shaderLocations.uniforms[i]), 45 | }); 46 | } 47 | } 48 | if (shaderLocations && shaderLocations.attribs && shaderLocations.attribs.length) { 49 | for (let i = 0; i < shaderLocations.attribs.length; ++i) { 50 | result = Object.assign(result, { 51 | [shaderLocations.attribs[i]]: gl.getAttribLocation(result.glShaderProgram, shaderLocations.attribs[i]), 52 | }); 53 | } 54 | } 55 | return result; 56 | } 57 | 58 | export function loadShaderProgram(vsSource, fsSource, shaderLocations) { 59 | const vs = compileShader(vsSource, gl.VERTEX_SHADER); 60 | const fs = compileShader(fsSource, gl.FRAGMENT_SHADER); 61 | return addShaderLocations({ 62 | glShaderProgram: linkShader(vs, fs), 63 | }, shaderLocations); 64 | } 65 | 66 | const quadPositions = new Float32Array([ 67 | -1.0, -1.0, 0.0, 68 | 1.0, -1.0, 0.0, 69 | -1.0, 1.0, 0.0, 70 | 1.0, 1.0, 0.0 71 | ]); 72 | 73 | const quadBuffer = gl.createBuffer(); 74 | gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer); 75 | gl.bufferData(gl.ARRAY_BUFFER, quadPositions, gl.STATIC_DRAW); 76 | 77 | export function renderFullscreenQuad(program) { 78 | // Bind the program to use to draw the quad 79 | gl.useProgram(program.glShaderProgram); 80 | 81 | // Bind the VBO as the gl.ARRAY_BUFFER 82 | gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer); 83 | 84 | // Enable the bound buffer as the vertex attrib array for 85 | // program.a_position, using gl.enableVertexAttribArray 86 | gl.enableVertexAttribArray(program.a_position); 87 | 88 | // Use gl.vertexAttribPointer to tell WebGL the type/layout for 89 | // program.a_position's access pattern. 
90 | gl.vertexAttribPointer(program.a_position, 3, gl.FLOAT, gl.FALSE, 0, 0); 91 | 92 | // Use gl.drawArrays to draw the quad 93 | gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4); 94 | 95 | // Disable the enabled vertex attrib array 96 | gl.disableVertexAttribArray(program.a_position); 97 | 98 | // Unbind the array buffer. 99 | gl.bindBuffer(gl.ARRAY_BUFFER, null); 100 | } -------------------------------------------------------------------------------- /webpack.config.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | const webpack = require('webpack'); 3 | const MinifyPlugin = require('babel-minify-webpack-plugin'); 4 | 5 | module.exports = function(env) { 6 | const isProduction = env && env.production === true; 7 | 8 | return { 9 | entry: path.join(__dirname, 'src/init'), 10 | output: { 11 | path: path.join(__dirname, 'build'), 12 | filename: 'bundle.js', 13 | }, 14 | module: { 15 | loaders: [ 16 | { 17 | test: /\.js$/, 18 | exclude: /(node_modules|bower_components)/, 19 | loader: 'babel-loader', 20 | query: { 21 | presets: [['env', { 22 | targets: { 23 | browsers: ['> 1%', 'last 2 major versions'], 24 | }, 25 | loose: true, 26 | modules: false, 27 | }]], 28 | }, 29 | }, 30 | { 31 | test: /\.glsl$/, 32 | loader: 'webpack-glsl-loader' 33 | }, 34 | ], 35 | }, 36 | plugins: [ 37 | isProduction ? new MinifyPlugin({ 38 | keepFnName: true, 39 | keepClassName: true, 40 | }) : undefined, 41 | new webpack.DefinePlugin({ 42 | 'process.env': { 43 | 'NODE_ENV': (isProduction ? JSON.stringify('production'): JSON.stringify('development')), 44 | } 45 | }), 46 | ].filter(p => p), 47 | devtool: 'source-map', 48 | devServer: { 49 | port: 5650, 50 | publicPath: '/build/' 51 | }, 52 | }; 53 | }; 54 | --------------------------------------------------------------------------------