├── .gitignore ├── LICENSE ├── MetalTexturedMesh.xcodeproj ├── project.pbxproj ├── project.xcworkspace │ └── contents.xcworkspacedata └── xcshareddata │ └── xcschemes │ └── MetalTexturedMesh OSX.xcscheme ├── MetalTexturedMesh ├── AAPLMathUtilities.h ├── AAPLMathUtilities.m ├── AppDelegate.swift ├── Base.lproj │ └── MainMenu.xib ├── Bridging-Header.h ├── Images.xcassets │ ├── Contents.json │ └── checkerboard.dataset │ │ ├── Contents.json │ │ └── checkerboard.png ├── Info.plist ├── Mesh.swift ├── Renderer.swift ├── Shaders.metal └── ViewController.swift ├── README.md ├── doc └── implementing-deferred-shading-in-metal.md └── img ├── deferred.png └── posts └── implementing-deferred-shading-in-metal ├── albedo-fixed.png ├── albedo.png ├── final.png ├── gbuffer-data.png ├── lights-albedo.png ├── lights-colour-albedo.png ├── lights-colour-flat.png ├── lights-normal-fake.png ├── lights-normal.png ├── lights-sphere.png ├── normal-fixed.png ├── normal.png ├── position-fixed.png ├── position.png └── stencil-buffer.png /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/xcode,macos 2 | 3 | ### Xcode ### 4 | # Xcode 5 | # 6 | # gitignore contributors: remember to update Global/Xcode.gitignore, 7 | # Objective-C.gitignore & Swift.gitignore 8 | 9 | ## Build generated 10 | build/ 11 | DerivedData/ 12 | 13 | ## Various settings 14 | *.pbxuser 15 | !default.pbxuser 16 | *.mode1v3 17 | !default.mode1v3 18 | *.mode2v3 19 | !default.mode2v3 20 | *.perspectivev3 21 | !default.perspectivev3 22 | xcuserdata/ 23 | 24 | ## Other 25 | *.moved-aside 26 | *.xccheckout 27 | *.xcscmblueprint 28 | 29 | 30 | ### macOS ### 31 | *.DS_Store 32 | .AppleDouble 33 | .LSOverride 34 | 35 | # Icon must end with two \r 36 | Icon 37 | # Thumbnails 38 | ._* 39 | # Files that might appear in the root of a volume 40 | .DocumentRevisions-V100 41 | .fseventsd 42 | .Spotlight-V100 43 | .TemporaryItems 44 | .Trashes 45 | .VolumeIcon.icns 46 | .com.apple.timemachine.donotpresent 47 | # Directories potentially created on remote AFP share 48 | .AppleDB 49 | .AppleDesktop 50 | Network Trash Folder 51 | Temporary Items 52 | .apdisk 53 | 54 | # End of https://www.gitignore.io/api/xcode,macos 55 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Samuel Evans-Powell 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MetalTexturedMesh.xcodeproj/project.pbxproj: -------------------------------------------------------------------------------- 1 | // !$*UTF8*$! 2 | { 3 | archiveVersion = 1; 4 | classes = { 5 | }; 6 | objectVersion = 46; 7 | objects = { 8 | 9 | /* Begin PBXBuildFile section */ 10 | 833681371D02A5DF0096C63A /* AAPLMathUtilities.m in Sources */ = {isa = PBXBuildFile; fileRef = 8336812F1D02A5DF0096C63A /* AAPLMathUtilities.m */; }; 11 | 833681381D02A5DF0096C63A /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 833681301D02A5DF0096C63A /* AppDelegate.swift */; }; 12 | 833681391D02A5DF0096C63A /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 833681321D02A5DF0096C63A /* Images.xcassets */; }; 13 | 8336813A1D02A5DF0096C63A /* Mesh.swift in Sources */ = {isa = PBXBuildFile; fileRef = 833681331D02A5DF0096C63A /* Mesh.swift */; }; 14 | 8336813B1D02A5DF0096C63A /* Renderer.swift in Sources */ = {isa = PBXBuildFile; fileRef = 833681341D02A5DF0096C63A /* Renderer.swift */; }; 15 | 8336813C1D02A5DF0096C63A /* Shaders.metal in Sources */ = {isa = PBXBuildFile; fileRef = 833681351D02A5DF0096C63A /* Shaders.metal */; }; 16 | 8336813D1D02A5DF0096C63A /* ViewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = 833681361D02A5DF0096C63A /* ViewController.swift */; }; 17 | 833681401D02A5E90096C63A /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = 8336813E1D02A5E90096C63A /* MainMenu.xib */; }; 18 | /* End PBXBuildFile section */ 19 | 20 | /* Begin PBXFileReference section */ 21 | 6E23171D1D04DDE7002FBCB0 /* README.md */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = ""; }; 22 | 8336812E1D02A5DF0096C63A /* AAPLMathUtilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AAPLMathUtilities.h; path = MetalTexturedMesh/AAPLMathUtilities.h; sourceTree = ""; }; 23 | 8336812F1D02A5DF0096C63A /* AAPLMathUtilities.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = AAPLMathUtilities.m; path = MetalTexturedMesh/AAPLMathUtilities.m; sourceTree = ""; }; 24 | 833681301D02A5DF0096C63A /* AppDelegate.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; name = AppDelegate.swift; path = MetalTexturedMesh/AppDelegate.swift; sourceTree = ""; }; 25 | 833681311D02A5DF0096C63A /* Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "Bridging-Header.h"; path = "MetalTexturedMesh/Bridging-Header.h"; sourceTree = ""; }; 26 | 833681321D02A5DF0096C63A /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; name = Images.xcassets; path = MetalTexturedMesh/Images.xcassets; sourceTree = ""; }; 27 | 833681331D02A5DF0096C63A /* Mesh.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; name = Mesh.swift; path = MetalTexturedMesh/Mesh.swift; sourceTree = ""; }; 28 | 833681341D02A5DF0096C63A /* Renderer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; 
name = Renderer.swift; path = MetalTexturedMesh/Renderer.swift; sourceTree = ""; }; 29 | 833681351D02A5DF0096C63A /* Shaders.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; name = Shaders.metal; path = MetalTexturedMesh/Shaders.metal; sourceTree = ""; }; 30 | 833681361D02A5DF0096C63A /* ViewController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; name = ViewController.swift; path = MetalTexturedMesh/ViewController.swift; sourceTree = ""; }; 31 | 8336813F1D02A5E90096C63A /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = MetalTexturedMesh/Base.lproj/MainMenu.xib; sourceTree = ""; }; 32 | 833681411D02A5F20096C63A /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; name = Info.plist; path = MetalTexturedMesh/Info.plist; sourceTree = ""; }; 33 | 838E9C321C287FC300A022DD /* MetalTexturedMesh OSX.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "MetalTexturedMesh OSX.app"; sourceTree = BUILT_PRODUCTS_DIR; }; 34 | /* End PBXFileReference section */ 35 | 36 | /* Begin PBXFrameworksBuildPhase section */ 37 | 838E9C2F1C287FC300A022DD /* Frameworks */ = { 38 | isa = PBXFrameworksBuildPhase; 39 | buildActionMask = 2147483647; 40 | files = ( 41 | ); 42 | runOnlyForDeploymentPostprocessing = 0; 43 | }; 44 | /* End PBXFrameworksBuildPhase section */ 45 | 46 | /* Begin PBXGroup section */ 47 | 838E9C291C287FC300A022DD = { 48 | isa = PBXGroup; 49 | children = ( 50 | 6E23171D1D04DDE7002FBCB0 /* README.md */, 51 | 838E9C671C28817700A022DD /* MetalTexturedMesh */, 52 | 838E9C331C287FC300A022DD /* Products */, 53 | ); 54 | sourceTree = ""; 55 | }; 56 | 838E9C331C287FC300A022DD /* Products */ = { 57 | isa = PBXGroup; 58 | children = ( 59 | 838E9C321C287FC300A022DD /* MetalTexturedMesh OSX.app */, 60 | ); 61 | name = Products; 62 | sourceTree = ""; 63 | }; 64 | 838E9C671C28817700A022DD /* MetalTexturedMesh */ = { 65 | isa = PBXGroup; 66 | children = ( 67 | 833681311D02A5DF0096C63A /* Bridging-Header.h */, 68 | 8336812E1D02A5DF0096C63A /* AAPLMathUtilities.h */, 69 | 8336812F1D02A5DF0096C63A /* AAPLMathUtilities.m */, 70 | 833681301D02A5DF0096C63A /* AppDelegate.swift */, 71 | 833681331D02A5DF0096C63A /* Mesh.swift */, 72 | 833681341D02A5DF0096C63A /* Renderer.swift */, 73 | 833681361D02A5DF0096C63A /* ViewController.swift */, 74 | 833681351D02A5DF0096C63A /* Shaders.metal */, 75 | 833681321D02A5DF0096C63A /* Images.xcassets */, 76 | 8336813E1D02A5E90096C63A /* MainMenu.xib */, 77 | 833681411D02A5F20096C63A /* Info.plist */, 78 | ); 79 | name = MetalTexturedMesh; 80 | sourceTree = ""; 81 | }; 82 | /* End PBXGroup section */ 83 | 84 | /* Begin PBXNativeTarget section */ 85 | 838E9C311C287FC300A022DD /* MetalTexturedMesh OSX */ = { 86 | isa = PBXNativeTarget; 87 | buildConfigurationList = 838E9C491C287FC300A022DD /* Build configuration list for PBXNativeTarget "MetalTexturedMesh OSX" */; 88 | buildPhases = ( 89 | 838E9C2E1C287FC300A022DD /* Sources */, 90 | 838E9C2F1C287FC300A022DD /* Frameworks */, 91 | 838E9C301C287FC300A022DD /* Resources */, 92 | ); 93 | buildRules = ( 94 | ); 95 | dependencies = ( 96 | ); 97 | name = "MetalTexturedMesh OSX"; 98 | productName = MetalTemplate; 99 | productReference = 838E9C321C287FC300A022DD /* MetalTexturedMesh OSX.app */; 100 | productType = "com.apple.product-type.application"; 101 | }; 102 | /* End PBXNativeTarget section */ 103 | 104 | /* Begin PBXProject section */ 105 
| 838E9C2A1C287FC300A022DD /* Project object */ = { 106 | isa = PBXProject; 107 | attributes = { 108 | LastUpgradeCheck = 0800; 109 | ORGANIZATIONNAME = "Apple, Inc."; 110 | TargetAttributes = { 111 | 838E9C311C287FC300A022DD = { 112 | CreatedOnToolsVersion = 7.3; 113 | DevelopmentTeam = 4P3Z7HWQQE; 114 | LastSwiftMigration = 0800; 115 | ProvisioningStyle = Automatic; 116 | }; 117 | }; 118 | }; 119 | buildConfigurationList = 838E9C2D1C287FC300A022DD /* Build configuration list for PBXProject "MetalTexturedMesh" */; 120 | compatibilityVersion = "Xcode 3.2"; 121 | developmentRegion = English; 122 | hasScannedForEncodings = 0; 123 | knownRegions = ( 124 | en, 125 | Base, 126 | ); 127 | mainGroup = 838E9C291C287FC300A022DD; 128 | productRefGroup = 838E9C331C287FC300A022DD /* Products */; 129 | projectDirPath = ""; 130 | projectRoot = ""; 131 | targets = ( 132 | 838E9C311C287FC300A022DD /* MetalTexturedMesh OSX */, 133 | ); 134 | }; 135 | /* End PBXProject section */ 136 | 137 | /* Begin PBXResourcesBuildPhase section */ 138 | 838E9C301C287FC300A022DD /* Resources */ = { 139 | isa = PBXResourcesBuildPhase; 140 | buildActionMask = 2147483647; 141 | files = ( 142 | 833681391D02A5DF0096C63A /* Images.xcassets in Resources */, 143 | 833681401D02A5E90096C63A /* MainMenu.xib in Resources */, 144 | ); 145 | runOnlyForDeploymentPostprocessing = 0; 146 | }; 147 | /* End PBXResourcesBuildPhase section */ 148 | 149 | /* Begin PBXSourcesBuildPhase section */ 150 | 838E9C2E1C287FC300A022DD /* Sources */ = { 151 | isa = PBXSourcesBuildPhase; 152 | buildActionMask = 2147483647; 153 | files = ( 154 | 833681381D02A5DF0096C63A /* AppDelegate.swift in Sources */, 155 | 833681371D02A5DF0096C63A /* AAPLMathUtilities.m in Sources */, 156 | 8336813D1D02A5DF0096C63A /* ViewController.swift in Sources */, 157 | 8336813A1D02A5DF0096C63A /* Mesh.swift in Sources */, 158 | 8336813C1D02A5DF0096C63A /* Shaders.metal in Sources */, 159 | 8336813B1D02A5DF0096C63A /* Renderer.swift in Sources */, 160 | ); 161 | runOnlyForDeploymentPostprocessing = 0; 162 | }; 163 | /* End PBXSourcesBuildPhase section */ 164 | 165 | /* Begin PBXVariantGroup section */ 166 | 8336813E1D02A5E90096C63A /* MainMenu.xib */ = { 167 | isa = PBXVariantGroup; 168 | children = ( 169 | 8336813F1D02A5E90096C63A /* Base */, 170 | ); 171 | name = MainMenu.xib; 172 | sourceTree = ""; 173 | }; 174 | /* End PBXVariantGroup section */ 175 | 176 | /* Begin XCBuildConfiguration section */ 177 | 838E9C471C287FC300A022DD /* Debug */ = { 178 | isa = XCBuildConfiguration; 179 | buildSettings = { 180 | ALWAYS_SEARCH_USER_PATHS = NO; 181 | ASSETCATALOG_COMPRESSION = lossless; 182 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; 183 | CLANG_CXX_LIBRARY = "libc++"; 184 | CLANG_ENABLE_MODULES = YES; 185 | CLANG_ENABLE_OBJC_ARC = YES; 186 | CLANG_WARN_BOOL_CONVERSION = YES; 187 | CLANG_WARN_CONSTANT_CONVERSION = YES; 188 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 189 | CLANG_WARN_EMPTY_BODY = YES; 190 | CLANG_WARN_ENUM_CONVERSION = YES; 191 | CLANG_WARN_INT_CONVERSION = YES; 192 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 193 | CLANG_WARN_UNREACHABLE_CODE = YES; 194 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 195 | CODE_SIGN_IDENTITY = "-"; 196 | COPY_PHASE_STRIP = NO; 197 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 198 | ENABLE_STRICT_OBJC_MSGSEND = YES; 199 | ENABLE_TESTABILITY = YES; 200 | GCC_C_LANGUAGE_STANDARD = gnu99; 201 | GCC_DYNAMIC_NO_PIC = NO; 202 | GCC_NO_COMMON_BLOCKS = YES; 203 | GCC_OPTIMIZATION_LEVEL = 0; 204 | GCC_PREPROCESSOR_DEFINITIONS = ( 205 | 
"DEBUG=1", 206 | "$(inherited)", 207 | ); 208 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 209 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 210 | GCC_WARN_UNDECLARED_SELECTOR = YES; 211 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 212 | GCC_WARN_UNUSED_FUNCTION = YES; 213 | GCC_WARN_UNUSED_VARIABLE = YES; 214 | MACOSX_DEPLOYMENT_TARGET = 10.11; 215 | MTL_ENABLE_DEBUG_INFO = YES; 216 | ONLY_ACTIVE_ARCH = YES; 217 | SDKROOT = macosx; 218 | }; 219 | name = Debug; 220 | }; 221 | 838E9C481C287FC300A022DD /* Release */ = { 222 | isa = XCBuildConfiguration; 223 | buildSettings = { 224 | ALWAYS_SEARCH_USER_PATHS = NO; 225 | ASSETCATALOG_COMPRESSION = "respect-asset-catalog"; 226 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; 227 | CLANG_CXX_LIBRARY = "libc++"; 228 | CLANG_ENABLE_MODULES = YES; 229 | CLANG_ENABLE_OBJC_ARC = YES; 230 | CLANG_WARN_BOOL_CONVERSION = YES; 231 | CLANG_WARN_CONSTANT_CONVERSION = YES; 232 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 233 | CLANG_WARN_EMPTY_BODY = YES; 234 | CLANG_WARN_ENUM_CONVERSION = YES; 235 | CLANG_WARN_INT_CONVERSION = YES; 236 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 237 | CLANG_WARN_UNREACHABLE_CODE = YES; 238 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 239 | CODE_SIGN_IDENTITY = "-"; 240 | COPY_PHASE_STRIP = NO; 241 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 242 | ENABLE_NS_ASSERTIONS = NO; 243 | ENABLE_STRICT_OBJC_MSGSEND = YES; 244 | GCC_C_LANGUAGE_STANDARD = gnu99; 245 | GCC_NO_COMMON_BLOCKS = YES; 246 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 247 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 248 | GCC_WARN_UNDECLARED_SELECTOR = YES; 249 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 250 | GCC_WARN_UNUSED_FUNCTION = YES; 251 | GCC_WARN_UNUSED_VARIABLE = YES; 252 | MACOSX_DEPLOYMENT_TARGET = 10.11; 253 | MTL_ENABLE_DEBUG_INFO = NO; 254 | SDKROOT = macosx; 255 | }; 256 | name = Release; 257 | }; 258 | 838E9C4A1C287FC300A022DD /* Debug */ = { 259 | isa = XCBuildConfiguration; 260 | buildSettings = { 261 | CLANG_ENABLE_MODULES = YES; 262 | CODE_SIGN_IDENTITY = "Mac Developer"; 263 | COMBINE_HIDPI_IMAGES = YES; 264 | DEVELOPMENT_TEAM = 4P3Z7HWQQE; 265 | INFOPLIST_FILE = "$(SRCROOT)/MetalTexturedMesh/Info.plist"; 266 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/../Frameworks"; 267 | PRODUCT_BUNDLE_IDENTIFIER = "com.example.apple-samplecode.MetalHillside"; 268 | PRODUCT_NAME = "$(TARGET_NAME)"; 269 | SWIFT_OBJC_BRIDGING_HEADER = "MetalTexturedMesh/Bridging-Header.h"; 270 | SWIFT_OPTIMIZATION_LEVEL = "-Onone"; 271 | SWIFT_VERSION = 3.0; 272 | }; 273 | name = Debug; 274 | }; 275 | 838E9C4B1C287FC300A022DD /* Release */ = { 276 | isa = XCBuildConfiguration; 277 | buildSettings = { 278 | CLANG_ENABLE_MODULES = YES; 279 | CODE_SIGN_IDENTITY = "Mac Developer"; 280 | COMBINE_HIDPI_IMAGES = YES; 281 | DEVELOPMENT_TEAM = 4P3Z7HWQQE; 282 | INFOPLIST_FILE = "$(SRCROOT)/MetalTexturedMesh/Info.plist"; 283 | LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/../Frameworks"; 284 | PRODUCT_BUNDLE_IDENTIFIER = "com.example.apple-samplecode.MetalHillside"; 285 | PRODUCT_NAME = "$(TARGET_NAME)"; 286 | SWIFT_OBJC_BRIDGING_HEADER = "MetalTexturedMesh/Bridging-Header.h"; 287 | SWIFT_VERSION = 3.0; 288 | }; 289 | name = Release; 290 | }; 291 | /* End XCBuildConfiguration section */ 292 | 293 | /* Begin XCConfigurationList section */ 294 | 838E9C2D1C287FC300A022DD /* Build configuration list for PBXProject "MetalTexturedMesh" */ = { 295 | isa = XCConfigurationList; 296 | buildConfigurations = ( 297 | 838E9C471C287FC300A022DD /* Debug */, 298 | 
838E9C481C287FC300A022DD /* Release */, 299 | ); 300 | defaultConfigurationIsVisible = 0; 301 | defaultConfigurationName = Release; 302 | }; 303 | 838E9C491C287FC300A022DD /* Build configuration list for PBXNativeTarget "MetalTexturedMesh OSX" */ = { 304 | isa = XCConfigurationList; 305 | buildConfigurations = ( 306 | 838E9C4A1C287FC300A022DD /* Debug */, 307 | 838E9C4B1C287FC300A022DD /* Release */, 308 | ); 309 | defaultConfigurationIsVisible = 0; 310 | defaultConfigurationName = Release; 311 | }; 312 | /* End XCConfigurationList section */ 313 | }; 314 | rootObject = 838E9C2A1C287FC300A022DD /* Project object */; 315 | } 316 | -------------------------------------------------------------------------------- /MetalTexturedMesh.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /MetalTexturedMesh.xcodeproj/xcshareddata/xcschemes/MetalTexturedMesh OSX.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 8 | 9 | 15 | 21 | 22 | 23 | 24 | 25 | 30 | 31 | 32 | 33 | 39 | 40 | 41 | 42 | 43 | 44 | 56 | 58 | 64 | 65 | 66 | 67 | 68 | 69 | 75 | 77 | 83 | 84 | 85 | 86 | 88 | 89 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /MetalTexturedMesh/AAPLMathUtilities.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | Collection of vector, matrix, and quaternion utility functions. 7 | */ 8 | 9 | #import 10 | 11 | #define AAPL_SIMD_OVERLOAD __attribute__((__overloadable__)) 12 | 13 | /// A single-precision quaternion type 14 | typedef vector_float4 quaternion_float; 15 | 16 | /// Returns the number of degrees in the specified number of radians 17 | float degrees_from_radians(float radians); 18 | 19 | /// Returns the number of radians in the specified number of degrees 20 | float radians_from_degrees(float degrees); 21 | 22 | /// Returns a vector that is linearly interpolated between the two provided vectors 23 | vector_float3 AAPL_SIMD_OVERLOAD vector_lerp(vector_float3 v0, vector_float3 v1, float t); 24 | 25 | /// Returns a vector that is linearly interpolated between the two provided vectors 26 | vector_float4 AAPL_SIMD_OVERLOAD vector_lerp(vector_float4 v0, vector_float4 v1, float t); 27 | 28 | /// Converts a unit-norm quaternion into its corresponding rotation matrix 29 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_from_quaternion(quaternion_float q); 30 | 31 | /// Constructs a rotation matrix from the provided angle and axis 32 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_rotation(float radians, vector_float3 axis); 33 | 34 | /// Constructs a rotation matrix from the provided angle and the axis (x, y, z) 35 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_rotation(float radians, float x, float y, float z); 36 | 37 | /// Constructs a scaling matrix with the specified scaling factors 38 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_scale(float x, float y, float z); 39 | 40 | /// Constructs a scaling matrix, using the provided vector as an array of scaling factors 41 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_scale(vector_float3 s); 42 | 43 | /// Returns the inverse of the transpose of the provided matrix 44 | matrix_float3x3 
AAPL_SIMD_OVERLOAD matrix_inverse_transpose(matrix_float3x3 m); 45 | 46 | /// Constructs a (homogeneous) rotation matrix from the provided angle and axis 47 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_from_quaternion(quaternion_float q); 48 | 49 | /// Constructs a rotation matrix from the provided angle and axis 50 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_rotation(float radians, vector_float3 axis); 51 | 52 | /// Constructs a rotation matrix from the provided angle and the axis (x, y, z) 53 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_rotation(float radians, float x, float y, float z); 54 | 55 | /// Constructs a scaling matrix with the specified scaling factors 56 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_scale(float sx, float sy, float sz); 57 | 58 | /// Constructs a scaling matrix, using the provided vector as an array of scaling factors 59 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_scale(vector_float3 s); 60 | 61 | /// Constructs a translation matrix that translates by the vector (tx, ty, tz) 62 | matrix_float4x4 matrix4x4_translation(float tx, float ty, float tz); 63 | 64 | /// Constructs a view matrix that is positioned at (eyeX, eyeY, eyeZ) and looks toward 65 | /// (centerX, centerY, centerZ), with the vector (upX, upY, upZ) pointing up 66 | matrix_float4x4 matrix_look_at(float eyeX, float eyeY, float eyeZ, 67 | float centerX, float centerY, float centerZ, 68 | float upX, float upY, float upZ); 69 | 70 | /// Constructs a symmetric orthographic projection matrix that maps (left, top) to (-1, 1), 71 | /// (right, bottom) to (1, -1), and (nearZ, farZ) to (0, 1) 72 | matrix_float4x4 matrix_ortho(float left, float right, float bottom, float top, float nearZ, float farZ); 73 | 74 | /// Constructs a symmetric perspective projection matrix with a vertical viewing angle of fovyRadians, 75 | /// the specified aspect ratio, and the provided near and far Z distances 76 | matrix_float4x4 matrix_perspective(float fovyRadians, float aspect, float nearZ, float farZ); 77 | 78 | /// Extracts the upper-left 3x3 submatrix of the provided 4x4 matrix 79 | matrix_float3x3 matrix_upper_left_3x3(matrix_float4x4 m); 80 | 81 | /// Returns the inverse of the transpose of the provided matrix 82 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix_inverse_transpose(matrix_float4x4 m); 83 | 84 | /// Constructs a quaternion of the form w + xi + yj + zk 85 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(float x, float y, float z, float w); 86 | 87 | /// Constructs a quaternion of the form w + v.x*i + v.y*j + v.z*k 88 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(vector_float3 v, float w); 89 | 90 | /// Constructs a unit-norm quaternion that represents rotation by the specified angle about the axis (x, y, z) 91 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(float radians, float x, float y, float z); 92 | 93 | /// Constructs a unit-norm quaternion that represents rotation by the specified angle about the specified axis 94 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(float radians, vector_float3 axis); 95 | 96 | /// Constructs a unit-norm quaternion from the provided matrix. 97 | /// The result is undefined if the matrix does not represent a pure rotation. 98 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(matrix_float3x3 m); 99 | 100 | /// Constructs a unit-norm quaternion from the provided matrix. 101 | /// The result is undefined if the matrix does not represent a pure rotation. 
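/// (This 4x4 overload uses only the upper-left 3x3 rotation portion of the matrix; the implementation delegates to the matrix_float3x3 overload above.)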
102 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(matrix_float4x4 m); 103 | 104 | /// Returns the length of the specified quaternion 105 | float quaternion_length(quaternion_float q); 106 | 107 | /// Returns the rotation axis of the specified unit-norm quaternion 108 | vector_float3 quaternion_axis(quaternion_float q); 109 | 110 | /// Returns the rotation angle of the specified unit-norm quaternion 111 | float quaternion_angle(quaternion_float q); 112 | 113 | /// Returns a unit-norm quaternion 114 | quaternion_float quaternion_normalize(quaternion_float q); 115 | 116 | /// Returns the inverse quaternion of the provided quaternion 117 | quaternion_float quaternion_inverse(quaternion_float q); 118 | 119 | /// Returns the conjugate quaternion of the provided quaternion 120 | quaternion_float quaternion_conjugate(quaternion_float q); 121 | 122 | /// Returns the product of two quaternions 123 | quaternion_float quaternion_multiply(quaternion_float q0, quaternion_float q1); 124 | 125 | /// Returns the quaternion that results from spherically interpolating between the two provided quaternions 126 | quaternion_float quaternion_slerp(quaternion_float q0, quaternion_float q1, float t); 127 | 128 | /// Returns the vector that results from rotating the provided vector by the provided unit-norm quaternion 129 | vector_float3 quaternion_rotate_vector(quaternion_float q, vector_float3 v); 130 | -------------------------------------------------------------------------------- /MetalTexturedMesh/AAPLMathUtilities.m: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | Implementation of vector, matrix, and quaternion utility functions. 
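A minimal usage sketch (hypothetical values; the functions below are declared in AAPLMathUtilities.h and are also callable from Swift through Bridging-Header.h):

    matrix_float4x4 projection = matrix_perspective(radians_from_degrees(60), aspect, 0.1f, 100.0f);
    matrix_float4x4 model      = matrix4x4_rotation(angle, 0, 1, 0);
    matrix_float3x3 normalMat  = matrix_upper_left_3x3(matrix_inverse_transpose(model));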
7 | */ 8 | 9 | #import "AAPLMathUtilities.h" 10 | #include 11 | 12 | float degrees_from_radians(float radians) { 13 | return (radians / M_PI) * 180; 14 | } 15 | 16 | float radians_from_degrees(float degrees) { 17 | return (degrees / 180) * M_PI; 18 | } 19 | 20 | static vector_float3 AAPL_SIMD_OVERLOAD vector_make(float x, float y, float z) { 21 | return (vector_float3){ x, y, z }; 22 | } 23 | 24 | vector_float3 AAPL_SIMD_OVERLOAD vector_lerp(vector_float3 v0, vector_float3 v1, float t) { 25 | return ((1 - t) * v0) + (t * v1); 26 | } 27 | 28 | vector_float4 AAPL_SIMD_OVERLOAD vector_lerp(vector_float4 v0, vector_float4 v1, float t) { 29 | return ((1 - t) * v0) + (t * v1); 30 | } 31 | 32 | static matrix_float3x3 AAPL_SIMD_OVERLOAD matrix_make(float m00, float m10, float m20, 33 | float m01, float m11, float m21, 34 | float m02, float m12, float m22) 35 | { 36 | return (matrix_float3x3){ { { m00, m10, m20 }, { m01, m11, m21 }, { m02, m12, m22 } } }; 37 | } 38 | 39 | static matrix_float3x3 AAPL_SIMD_OVERLOAD matrix_make(vector_float3 col0, vector_float3 col1, vector_float3 col2) { 40 | return (matrix_float3x3){ col0, col1, col2 }; 41 | } 42 | 43 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_from_quaternion(quaternion_float q) { 44 | float xx = q.x * q.x; 45 | float xy = q.x * q.y; 46 | float xz = q.x * q.z; 47 | float xw = q.x * q.w; 48 | float yy = q.y * q.y; 49 | float yz = q.y * q.z; 50 | float yw = q.y * q.w; 51 | float zz = q.z * q.z; 52 | float zw = q.z * q.w; 53 | 54 | float m00 = 1 - 2 * (yy + zz); 55 | float m01 = 2 * (xy - zw); 56 | float m02 = 2 * (xz + yw); 57 | 58 | float m10 = 2 * (xy + zw); 59 | float m11 = 1 - 2 * (xx + zz); 60 | float m12 = 2 * (yz - xw); 61 | 62 | float m20 = 2 * (xz - yw); 63 | float m21 = 2 * (yz + xw); 64 | float m22 = 1 - 2 * (xx + yy); 65 | 66 | return matrix_make(m00, m10, m20, 67 | m01, m11, m21, 68 | m02, m12, m22); 69 | } 70 | 71 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_rotation(float radians, vector_float3 axis) { 72 | axis = vector_normalize(axis); 73 | float ct = cosf(radians); 74 | float st = sinf(radians); 75 | float ci = 1 - ct; 76 | float x = axis.x, y = axis.y, z = axis.z; 77 | return matrix_make(ct + x * x * ci, y * x * ci + z * st, z * x * ci - y * st, 78 | x * y * ci - z * st, ct + y * y * ci, z * y * ci + x * st, 79 | x * z * ci + y * st, y * z * ci - x * st, ct + z * z * ci); 80 | } 81 | 82 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_rotation(float radians, float x, float y, float z) { 83 | return matrix3x3_rotation(radians, vector_make(x, y, z)); 84 | } 85 | 86 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_scale(float sx, float sy, float sz) { 87 | return matrix_make(sx, 0, 0, 0, sy, 0, 0, 0, sz); 88 | } 89 | 90 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix3x3_scale(vector_float3 s) { 91 | return matrix_make(s.x, 0, 0, 0, s.y, 0, 0, 0, s.z); 92 | } 93 | 94 | matrix_float3x3 AAPL_SIMD_OVERLOAD matrix_inverse_transpose(matrix_float3x3 m) { 95 | return matrix_invert(matrix_transpose(m)); 96 | } 97 | 98 | static matrix_float4x4 AAPL_SIMD_OVERLOAD matrix_make(float m00, float m10, float m20, float m30, 99 | float m01, float m11, float m21, float m31, 100 | float m02, float m12, float m22, float m32, 101 | float m03, float m13, float m23, float m33) 102 | { 103 | return (matrix_float4x4){ { 104 | { m00, m10, m20, m30 }, 105 | { m01, m11, m21, m31 }, 106 | { m02, m12, m22, m32 }, 107 | { m03, m13, m23, m33 } } }; 108 | } 109 | 110 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_from_quaternion(quaternion_float q) { 111 | float xx = q.x 
* q.x; 112 | float xy = q.x * q.y; 113 | float xz = q.x * q.z; 114 | float xw = q.x * q.w; 115 | float yy = q.y * q.y; 116 | float yz = q.y * q.z; 117 | float yw = q.y * q.w; 118 | float zz = q.z * q.z; 119 | float zw = q.z * q.w; 120 | 121 | float m00 = 1 - 2 * (yy + zz); 122 | float m01 = 2 * (xy - zw); 123 | float m02 = 2 * (xz + yw); 124 | 125 | float m10 = 2 * (xy + zw); 126 | float m11 = 1 - 2 * (xx + zz); 127 | float m12 = 2 * (yz - xw); 128 | 129 | float m20 = 2 * (xz - yw); 130 | float m21 = 2 * (yz + xw); 131 | float m22 = 1 - 2 * (xx + yy); 132 | 133 | return matrix_make(m00, m10, m20, 0, 134 | m01, m11, m21, 0, 135 | m02, m12, m22, 0, 136 | 0, 0, 0, 1); 137 | } 138 | 139 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_rotation(float radians, vector_float3 axis) { 140 | axis = vector_normalize(axis); 141 | float ct = cosf(radians); 142 | float st = sinf(radians); 143 | float ci = 1 - ct; 144 | float x = axis.x, y = axis.y, z = axis.z; 145 | return matrix_make(ct + x * x * ci, y * x * ci + z * st, z * x * ci - y * st, 0, 146 | x * y * ci - z * st, ct + y * y * ci, z * y * ci + x * st, 0, 147 | x * z * ci + y * st, y * z * ci - x * st, ct + z * z * ci, 0, 148 | 0, 0, 0, 1); 149 | } 150 | 151 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_rotation(float radians, float x, float y, float z) { 152 | return matrix4x4_rotation(radians, vector_make(x, y, z)); 153 | } 154 | 155 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_scale(float sx, float sy, float sz) { 156 | return matrix_make(sx, 0, 0, 0, 157 | 0, sy, 0, 0, 158 | 0, 0, sz, 0, 159 | 0, 0, 0, 1); 160 | } 161 | 162 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix4x4_scale(vector_float3 s) { 163 | return matrix_make(s.x, 0, 0, 0, 164 | 0, s.y, 0, 0, 165 | 0, 0, s.z, 0, 166 | 0, 0, 0, 1); 167 | } 168 | 169 | matrix_float4x4 matrix4x4_translation(float tx, float ty, float tz) { 170 | return matrix_make(1, 0, 0, 0, 171 | 0, 1, 0, 0, 172 | 0, 0, 1, 0, 173 | tx, ty, tz, 1); 174 | } 175 | 176 | matrix_float4x4 matrix_look_at(float eyeX, float eyeY, float eyeZ, 177 | float centerX, float centerY, float centerZ, 178 | float upX, float upY, float upZ) 179 | { 180 | vector_float3 eye = vector_make(eyeX, eyeY, eyeZ); 181 | vector_float3 center = vector_make(centerX, centerY, centerZ); 182 | vector_float3 up = vector_make(upX, upY, upZ); 183 | 184 | vector_float3 z = vector_normalize(eye - center); 185 | vector_float3 x = vector_normalize(vector_cross(up, z)); 186 | vector_float3 y = vector_cross(z, x); 187 | vector_float3 t = vector_make(-vector_dot(x, eye), -vector_dot(y, eye), -vector_dot(z, eye)); 188 | 189 | return matrix_make(x.x, y.x, z.x, 0, 190 | x.y, y.y, z.y, 0, 191 | x.z, y.z, z.z, 0, 192 | t.x, t.y, t.z, 1); 193 | } 194 | 195 | matrix_float4x4 matrix_ortho(float left, float right, float bottom, float top, float nearZ, float farZ) { 196 | return matrix_make(2 / (right - left), 0, 0, 0, 197 | 0, 2 / (top - bottom), 0, 0, 198 | 0, 0, 1 / (farZ - nearZ), 0, 199 | (left + right) / (left - right), (top + bottom) / (bottom - top), nearZ / (nearZ - farZ), 1); 200 | } 201 | 202 | matrix_float4x4 matrix_perspective(float fovyRadians, float aspect, float nearZ, float farZ) { 203 | float ys = 1 / tanf(fovyRadians * 0.5); 204 | float xs = ys / aspect; 205 | float zs = farZ / (nearZ - farZ); 206 | return matrix_make(xs, 0, 0, 0, 207 | 0, ys, 0, 0, 208 | 0, 0, zs, -1, 209 | 0, 0, zs * nearZ, 0); 210 | } 211 | 212 | matrix_float3x3 matrix_upper_left_3x3(matrix_float4x4 m) { 213 | vector_float3 x = m.columns[0].xyz; 214 | vector_float3 y = 
m.columns[1].xyz; 215 | vector_float3 z = m.columns[2].xyz; 216 | return matrix_make(x, y, z); 217 | } 218 | 219 | matrix_float4x4 AAPL_SIMD_OVERLOAD matrix_inverse_transpose(matrix_float4x4 m) { 220 | return matrix_invert(matrix_transpose(m)); 221 | } 222 | 223 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(float x, float y, float z, float w) { 224 | return (quaternion_float){ x, y, z, w }; 225 | } 226 | 227 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(vector_float3 v, float w) { 228 | return (quaternion_float){ v.x, v.y, v.z, w }; 229 | } 230 | 231 | quaternion_float AAPL_SIMD_OVERLOAD quaternion_from_axis_angle(vector_float3 axis, float radians) { 232 | float t = radians * 0.5; 233 | return quaternion(axis.x * sinf(t), axis.y * sinf(t), axis.z * sinf(t), cosf(t)); 234 | } 235 | 236 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(matrix_float3x3 m) { 237 | float m00 = m.columns[0].x; 238 | float m11 = m.columns[1].y; 239 | float m22 = m.columns[2].z; 240 | float x = sqrtf(1 + m00 - m11 - m22) * 0.5; 241 | float y = sqrtf(1 - m00 + m11 - m22) * 0.5; 242 | float z = sqrtf(1 - m00 - m11 + m22) * 0.5; 243 | float w = sqrtf(1 + m00 + m11 + m22) * 0.5; 244 | return quaternion(x, y, z, w); 245 | } 246 | 247 | quaternion_float AAPL_SIMD_OVERLOAD quaternion(matrix_float4x4 m) { 248 | return quaternion(matrix_upper_left_3x3(m)); 249 | } 250 | 251 | float quaternion_length(quaternion_float q) { 252 | return vector_length(q); 253 | } 254 | 255 | float quaternion_length_squared(quaternion_float q) { 256 | return vector_length_squared(q); 257 | } 258 | 259 | vector_float3 quaternion_axis(quaternion_float q) { 260 | // This query doesn't make sense if w > 1, but we do our best by 261 | // forcing q to be a unit quaternion if it obviously isn't 262 | if (q.w > 1.0) { 263 | q = quaternion_normalize(q); 264 | } 265 | 266 | float axisLen = sqrtf(1 - q.w * q.w); 267 | 268 | if (axisLen < 1e-5) { 269 | // At lengths this small, direction is arbitrary 270 | return vector_make(1, 0, 0); 271 | } else { 272 | return vector_make(q.x / axisLen, q.y / axisLen, q.z / axisLen); 273 | } 274 | } 275 | 276 | float quaternion_angle(quaternion_float q) { 277 | return 2 * acosf(q.w); 278 | } 279 | 280 | quaternion_float quaternion_normalize(quaternion_float q) { 281 | return vector_normalize(q); 282 | } 283 | 284 | quaternion_float quaternion_inverse(quaternion_float q) { 285 | return quaternion_conjugate(q) / quaternion_length_squared(q); 286 | } 287 | 288 | quaternion_float quaternion_conjugate(quaternion_float q) { 289 | return quaternion(-q.x, -q.y, -q.z, q.w); 290 | } 291 | 292 | quaternion_float quaternion_multiply(quaternion_float q0, quaternion_float q1) { 293 | return quaternion(q0.y * q1.x + q0.x * q1.y + q0.z * q1.w - q0.w * q1.z, 294 | q0.x * q1.z - q0.y * q1.w + q0.z * q1.x + q0.w * q1.y, 295 | q0.x * q1.w + q0.y * q1.z - q0.z * q1.y + q0.w * q1.x, 296 | q0.x * q1.x - q0.y * q1.y - q0.z * q1.z - q0.w * q1.w); 297 | } 298 | 299 | quaternion_float quaternion_slerp(quaternion_float q0, quaternion_float q1, float t) { 300 | float dot = vector_dot(q0, q1); 301 | 302 | if ((1 - dot) < 1e-5) { 303 | return vector_normalize(vector_lerp(q0, q1, t)); 304 | } 305 | 306 | dot = fminf(fmaxf(-1, dot), 1); 307 | 308 | float angle = acosf(dot); 309 | float angleInc = t * angle; 310 | 311 | quaternion_float q2 = q1 + q0 * dot; 312 | q2 = vector_normalize(q2); 313 | 314 | return q0 * cosf(angleInc) + q2 * sinf(angleInc); 315 | } 316 | 317 | vector_float3 AAPL_SIMD_OVERLOAD quaternion_rotate_vector(quaternion_float q, 
vector_float3 v) { 318 | 319 | vector_float3 qp = vector_make(q.x, q.y, q.z); 320 | float w = q.w; 321 | return 2 * vector_dot(qp, v) * qp + 322 | ((w * w) - vector_dot(qp, qp)) * v + 323 | 2 * w * vector_cross(qp, v); 324 | } 325 | -------------------------------------------------------------------------------- /MetalTexturedMesh/AppDelegate.swift: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | Application delegate class 7 | */ 8 | 9 | import Cocoa 10 | 11 | @NSApplicationMain 12 | class AppDelegate: NSObject, NSApplicationDelegate { 13 | 14 | var window: NSWindow! 15 | 16 | @objc func applicationShouldTerminateAfterLastWindowClosed(_ sender: NSApplication) -> Bool { 17 | return true 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Base.lproj/MainMenu.xib: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | 316 | 317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 | 329 | 330 | 331 | 332 | 333 | 334 | 335 | 336 | 337 | 338 | 339 | 340 | 341 | 342 | 343 | 344 | 345 | 346 | 347 | 348 | 349 | 350 | 351 | 352 | 353 | 354 | 355 | 356 | 357 | 358 | 359 | 360 | 361 | 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | 381 | 382 | 383 | 384 | 385 | 386 | 387 | 388 | 389 | 390 | 391 | 392 | 393 | 394 | 395 | 396 | 397 | 398 | 399 | 400 | 401 | 402 | 403 | 404 | 405 | 406 | 407 | 408 | 409 | 410 | 411 | 412 | 413 | 414 | 415 | 416 | 417 | 418 | 419 | 420 | 421 | 422 | 423 | 424 | 425 | 426 | 427 | 428 
| 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | 439 | 440 | 441 | 442 | 443 | 444 | 445 | 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 | 458 | 459 | 460 | 461 | 462 | 463 | 464 | 465 | 466 | 467 | 468 | 469 | 470 | 471 | 472 | 473 | 474 | 475 | 476 | 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | 488 | 489 | 490 | 491 | 492 | 493 | 494 | 495 | 496 | 497 | 498 | 499 | 500 | 501 | 502 | 503 | 504 | 505 | 506 | 507 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 515 | 516 | 517 | 518 | 519 | 520 | 521 | 522 | 523 | 524 | 525 | 526 | 527 | 528 | 529 | 530 | 531 | 532 | 533 | 534 | 535 | 536 | 537 | 538 | 539 | 540 | 541 | 542 | 543 | 544 | 545 | 546 | 547 | 548 | 549 | 550 | 551 | 552 | 553 | 554 | Default 555 | 556 | 557 | 558 | 559 | 560 | 561 | Left to Right 562 | 563 | 564 | 565 | 566 | 567 | 568 | Right to Left 569 | 570 | 571 | 572 | 573 | 574 | 575 | 576 | 577 | 578 | 579 | Default 580 | 581 | 582 | 583 | 584 | 585 | 586 | Left to Right 587 | 588 | 589 | 590 | 591 | 592 | 593 | Right to Left 594 | 595 | 596 | 597 | 598 | 599 | 600 | 601 | 602 | 603 | 604 | 605 | 606 | 607 | 608 | 609 | 610 | 611 | 612 | 613 | 614 | 615 | 616 | 617 | 618 | 619 | 620 | 621 | 622 | 623 | 624 | 625 | 626 | 627 | 628 | 629 | 630 | 631 | 632 | 633 | 634 | 635 | 636 | 637 | 638 | 639 | 640 | 641 | 642 | 643 | 644 | 645 | 646 | 647 | 648 | 649 | 650 | 651 | 652 | 653 | 654 | 655 | 656 | 657 | 658 | 659 | 660 | 661 | 662 | 663 | 664 | 665 | 666 | 667 | 668 | 669 | 670 | 671 | 672 | 673 | 674 | 675 | 676 | 677 | 678 | 679 | 680 | 681 | 682 | 683 | 684 | 685 | 686 | 687 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Bridging-Header.h: -------------------------------------------------------------------------------- 1 | 2 | #import "AAPLMathUtilities.h" 3 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Images.xcassets/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "version" : 1, 4 | "author" : "xcode" 5 | } 6 | } -------------------------------------------------------------------------------- /MetalTexturedMesh/Images.xcassets/checkerboard.dataset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "version" : 1, 4 | "author" : "xcode" 5 | }, 6 | "data" : [ 7 | { 8 | "idiom" : "universal", 9 | "filename" : "checkerboard.png", 10 | "universal-type-identifier" : "public.png" 11 | } 12 | ] 13 | } -------------------------------------------------------------------------------- /MetalTexturedMesh/Images.xcassets/checkerboard.dataset/checkerboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/MetalTexturedMesh/Images.xcassets/checkerboard.dataset/checkerboard.png -------------------------------------------------------------------------------- /MetalTexturedMesh/Info.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | en 7 | CFBundleExecutable 8 | $(EXECUTABLE_NAME) 9 | CFBundleIdentifier 10 | $(PRODUCT_BUNDLE_IDENTIFIER) 11 | CFBundleInfoDictionaryVersion 12 | 6.0 13 | CFBundleName 14 | $(PRODUCT_NAME) 15 | CFBundlePackageType 16 | APPL 17 | CFBundleShortVersionString 18 | 1.0 19 | 
CFBundleSignature 20 | ???? 21 | CFBundleVersion 22 | 1 23 | LSMinimumSystemVersion 24 | $(MACOSX_DEPLOYMENT_TARGET) 25 | NSHumanReadableCopyright 26 | Copyright © 2015 Apple, Inc. All rights reserved. 27 | NSMainNibFile 28 | MainMenu 29 | NSPrincipalClass 30 | NSApplication 31 | 32 | 33 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Mesh.swift: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | Basic mesh class that exposes buffers generated by Model I/O for rendering with Metal 7 | */ 8 | 9 | import Foundation 10 | import Metal 11 | import MetalKit 12 | import ModelIO 13 | 14 | class Mesh { 15 | var vertexBuffer: MTLBuffer 16 | var vertexDescriptor: MTLVertexDescriptor 17 | var primitiveType: MTLPrimitiveType 18 | var indexBuffer: MTLBuffer 19 | var indexCount: Int 20 | var indexType: MTLIndexType 21 | 22 | init?(cubeWithSize size: Float, device: MTLDevice) 23 | { 24 | let allocator = MTKMeshBufferAllocator(device: device) 25 | 26 | let mdlMesh = MDLMesh(boxWithExtent: vector_float3(size, size, size), 27 | segments: vector_uint3(10, 10, 10), 28 | inwardNormals: false, 29 | geometryType: .triangles, 30 | allocator: allocator) 31 | 32 | do { 33 | let mtkMesh = try MTKMesh(mesh: mdlMesh, device: device) 34 | let mtkVertexBuffer = mtkMesh.vertexBuffers[0] 35 | let submesh = mtkMesh.submeshes[0] 36 | let mtkIndexBuffer = submesh.indexBuffer 37 | 38 | vertexBuffer = mtkVertexBuffer.buffer 39 | vertexBuffer.label = "Mesh Vertices" 40 | 41 | vertexDescriptor = MTKMetalVertexDescriptorFromModelIO(mdlMesh.vertexDescriptor) 42 | primitiveType = submesh.primitiveType 43 | indexBuffer = mtkIndexBuffer.buffer 44 | indexBuffer.label = "Mesh Indices" 45 | 46 | indexCount = submesh.indexCount 47 | indexType = submesh.indexType 48 | } catch _ { 49 | return nil // Unable to create MTK mesh from MDL mesh 50 | } 51 | } 52 | 53 | init?(sphereWithSize size: Float, device: MTLDevice) 54 | { 55 | let allocator = MTKMeshBufferAllocator(device: device) 56 | 57 | let mdlMesh = MDLMesh(sphereWithExtent: vector_float3(size, size, size), segments: vector_uint2(30, 30), inwardNormals: false, geometryType: .triangles, allocator: allocator) 58 | 59 | do { 60 | let mtkMesh = try MTKMesh(mesh: mdlMesh, device: device) 61 | let mtkVertexBuffer = mtkMesh.vertexBuffers[0] 62 | let submesh = mtkMesh.submeshes[0] 63 | let mtkIndexBuffer = submesh.indexBuffer 64 | 65 | vertexBuffer = mtkVertexBuffer.buffer 66 | vertexBuffer.label = "Mesh Vertices" 67 | 68 | vertexDescriptor = MTKMetalVertexDescriptorFromModelIO(mdlMesh.vertexDescriptor) 69 | primitiveType = submesh.primitiveType 70 | indexBuffer = mtkIndexBuffer.buffer 71 | indexBuffer.label = "Mesh Indices" 72 | 73 | indexCount = submesh.indexCount 74 | indexType = submesh.indexType 75 | } catch _ { 76 | return nil // Unable to create MTK mesh from MDL mesh 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Renderer.swift: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | The Renderer class. This is the reason for the sample. 
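In this tutorial the renderer is extended with a deferred shading pipeline: a G-buffer pass that writes albedo, normal and position textures, a stencil pass that marks the pixels covered by each light volume, and a light-volume pass that accumulates lighting into a composite texture which is finally blitted to the screen.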
Here you'll find all the detail about how to setup and interact with Metal types to render content to the screen. This type conforms to MTKViewDelegate and performs the rendering in the appropriate call backs. It is created in the ViewController.viewDidLoad() method. 7 | */ 8 | 9 | import Metal 10 | import simd 11 | import MetalKit 12 | 13 | struct Constants { 14 | var modelViewProjectionMatrix = matrix_identity_float4x4 15 | var normalMatrix = matrix_identity_float3x3 16 | var modelMatrix = matrix_identity_float4x4 17 | } 18 | 19 | struct PointLight { 20 | var worldPosition = float3(0.0, 0.0, 0.0) 21 | var radius = Float(1.0) 22 | var color = float3(1, 1, 1) 23 | } 24 | 25 | struct LightFragmentInput { 26 | var screenSize = float2(1, 1) 27 | var camWorldPos = float3(0.0, 0.0, 2.5) 28 | } 29 | 30 | @objc 31 | class Renderer : NSObject, MTKViewDelegate 32 | { 33 | weak var view: MTKView! 34 | 35 | let device: MTLDevice 36 | let commandQueue: MTLCommandQueue 37 | let renderPipelineState: MTLRenderPipelineState 38 | let depthStencilState: MTLDepthStencilState 39 | let sampler: MTLSamplerState 40 | let texture: MTLTexture 41 | let mesh: Mesh 42 | 43 | var time = TimeInterval(0.0) 44 | var constants = Constants() 45 | 46 | var gBufferAlbedoTexture: MTLTexture 47 | var gBufferNormalTexture: MTLTexture 48 | var gBufferPositionTexture: MTLTexture 49 | var gBufferDepthTexture: MTLTexture 50 | let gBufferDepthStencilState: MTLDepthStencilState 51 | var gBufferRenderPassDescriptor: MTLRenderPassDescriptor 52 | let gBufferRenderPipeline: MTLRenderPipelineState 53 | 54 | let lightSphere: Mesh 55 | let lightNumber = 2 56 | var lightConstants = [Constants]() 57 | var lightProperties = [PointLight]() 58 | var lightFragmentInput = LightFragmentInput() 59 | 60 | let stencilPassDepthStencilState: MTLDepthStencilState 61 | let stencilRenderPassDescriptor: MTLRenderPassDescriptor 62 | let stencilRenderPipeline: MTLRenderPipelineState 63 | 64 | let lightVolumeDepthStencilState: MTLDepthStencilState 65 | var lightVolumeRenderPassDescriptor: MTLRenderPassDescriptor = MTLRenderPassDescriptor() 66 | let lightVolumeRenderPipeline: MTLRenderPipelineState 67 | // The final texture we'll blit to the screen 68 | var compositeTexture: MTLTexture 69 | 70 | init?(mtkView: MTKView) { 71 | 72 | view = mtkView 73 | 74 | // Use 4x MSAA multisampling 75 | view.sampleCount = 4 76 | // Clear to solid white 77 | view.clearColor = MTLClearColorMake(1, 1, 1, 1) 78 | // Use a BGRA 8-bit normalized texture for the drawable 79 | view.colorPixelFormat = .bgra8Unorm 80 | // Use a 32-bit depth buffer 81 | view.depthStencilPixelFormat = .depth32Float 82 | 83 | // Ask for the default Metal device; this represents our GPU. 84 | if let defaultDevice = MTLCreateSystemDefaultDevice() { 85 | device = defaultDevice 86 | } 87 | else { 88 | print("Metal is not supported") 89 | return nil 90 | } 91 | 92 | // Create the command queue we will be using to submit work to the GPU. 93 | commandQueue = device.makeCommandQueue() 94 | 95 | // Compile the functions and other state into a pipeline object. 96 | do { 97 | renderPipelineState = try Renderer.buildRenderPipelineWithDevice(device, view: mtkView) 98 | } 99 | catch { 100 | print("Unable to compile render pipeline state") 101 | return nil 102 | } 103 | 104 | mesh = Mesh(sphereWithSize: 1.0, device: device)! 
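// Load the checkerboard texture from the asset catalog (the checkerboard data set in Images.xcassets).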
105 | 106 | do { 107 | texture = try Renderer.buildTexture(name: "checkerboard", device) 108 | } 109 | catch { 110 | print("Unable to load texture from main bundle") 111 | return nil 112 | } 113 | 114 | // Make a depth-stencil state that passes when fragments are nearer to the camera than previous fragments 115 | depthStencilState = Renderer.buildDepthStencilStateWithDevice(device, compareFunc: .less, isWriteEnabled: true) 116 | 117 | // Make a texture sampler that wraps in both directions and performs bilinear filtering 118 | sampler = Renderer.buildSamplerStateWithDevice(device, addressMode: .repeat, filter: .linear) 119 | 120 | // To be used for the size of the render textures 121 | let drawableWidth = Int(self.view.drawableSize.width) 122 | let drawableHeight = Int(self.view.drawableSize.height) 123 | // We create our shaders from here 124 | let library = device.newDefaultLibrary()! 125 | 126 | // ---- BEGIN GBUFFER PASS PREP ---- // 127 | 128 | // Create GBuffer albedo texture 129 | // First we create a descriptor that describes the texture we're about to create 130 | let gBufferAlbedoTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 131 | gBufferAlbedoTextureDescriptor.sampleCount = 1 132 | gBufferAlbedoTextureDescriptor.storageMode = .private 133 | gBufferAlbedoTextureDescriptor.textureType = .type2D 134 | gBufferAlbedoTextureDescriptor.usage = [.renderTarget, .shaderRead] 135 | 136 | // Then we make the texture 137 | gBufferAlbedoTexture = device.makeTexture(descriptor: gBufferAlbedoTextureDescriptor) 138 | 139 | // Create GBuffer normal texture 140 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 141 | gBufferNormalTextureDescriptor.sampleCount = 1 142 | gBufferNormalTextureDescriptor.storageMode = .private 143 | gBufferNormalTextureDescriptor.textureType = .type2D 144 | gBufferNormalTextureDescriptor.usage = [.renderTarget, .shaderRead] 145 | 146 | gBufferNormalTexture = device.makeTexture(descriptor: gBufferNormalTextureDescriptor) 147 | 148 | // Create GBuffer position texture 149 | let gBufferPositionTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 150 | gBufferPositionTextureDescriptor.sampleCount = 1 151 | gBufferPositionTextureDescriptor.storageMode = .private 152 | gBufferPositionTextureDescriptor.textureType = .type2D 153 | gBufferPositionTextureDescriptor.usage = [.renderTarget, .shaderRead] 154 | 155 | gBufferPositionTexture = device.makeTexture(descriptor: gBufferPositionTextureDescriptor) 156 | 157 | // Create GBuffer depth (and stencil) texture 158 | let gBufferDepthDesc: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8, width: drawableWidth, height: drawableHeight, mipmapped: false) 159 | gBufferDepthDesc.sampleCount = 1 160 | gBufferDepthDesc.storageMode = .private 161 | gBufferDepthDesc.textureType = .type2D 162 | gBufferDepthDesc.usage = [.renderTarget, .shaderRead] 163 | 164 | gBufferDepthTexture = device.makeTexture(descriptor: gBufferDepthDesc) 165 | 166 | // Build GBuffer depth/stencil state 167 | // Again we create a descriptor that describes the object we're about to create 168 | let gBufferDepthStencilStateDescriptor: 
MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 169 | gBufferDepthStencilStateDescriptor.isDepthWriteEnabled = true 170 | gBufferDepthStencilStateDescriptor.depthCompareFunction = .lessEqual 171 | gBufferDepthStencilStateDescriptor.frontFaceStencil = nil 172 | gBufferDepthStencilStateDescriptor.backFaceStencil = nil 173 | 174 | // Then we create the depth/stencil state 175 | gBufferDepthStencilState = device.makeDepthStencilState(descriptor: gBufferDepthStencilStateDescriptor) 176 | 177 | // Create GBuffer render pass descriptor 178 | gBufferRenderPassDescriptor = MTLRenderPassDescriptor() 179 | // Specify the properties of the first color attachment (our albedo texture) 180 | gBufferRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0) 181 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 182 | gBufferRenderPassDescriptor.colorAttachments[0].loadAction = .clear 183 | gBufferRenderPassDescriptor.colorAttachments[0].storeAction = .store 184 | // Specify the properties of the second color attachment (our normal texture) 185 | gBufferRenderPassDescriptor.colorAttachments[1].clearColor = MTLClearColorMake(0, 0, 0, 1) 186 | gBufferRenderPassDescriptor.colorAttachments[1].texture = gBufferNormalTexture 187 | gBufferRenderPassDescriptor.colorAttachments[1].loadAction = .clear 188 | gBufferRenderPassDescriptor.colorAttachments[1].storeAction = .store 189 | // Specify the properties of the third color attachment (our position texture) 190 | gBufferRenderPassDescriptor.colorAttachments[2].clearColor = MTLClearColorMake(0, 0, 0, 1) 191 | gBufferRenderPassDescriptor.colorAttachments[2].texture = gBufferPositionTexture 192 | gBufferRenderPassDescriptor.colorAttachments[2].loadAction = .clear 193 | gBufferRenderPassDescriptor.colorAttachments[2].storeAction = .store 194 | 195 | // Specify the properties of the depth attachment 196 | gBufferRenderPassDescriptor.depthAttachment.loadAction = .clear 197 | gBufferRenderPassDescriptor.depthAttachment.storeAction = .store 198 | gBufferRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 199 | gBufferRenderPassDescriptor.depthAttachment.clearDepth = 1.0 200 | 201 | // Create GBuffer render pipeline 202 | let gBufferRenderPipelineDesc = MTLRenderPipelineDescriptor() 203 | gBufferRenderPipelineDesc.colorAttachments[0].pixelFormat = .rgba8Unorm 204 | 205 | gBufferRenderPipelineDesc.colorAttachments[0].isBlendingEnabled = true 206 | gBufferRenderPipelineDesc.colorAttachments[0].rgbBlendOperation = .add 207 | gBufferRenderPipelineDesc.colorAttachments[0].sourceRGBBlendFactor = .one 208 | gBufferRenderPipelineDesc.colorAttachments[0].destinationRGBBlendFactor = .one 209 | gBufferRenderPipelineDesc.colorAttachments[0].alphaBlendOperation = .add 210 | gBufferRenderPipelineDesc.colorAttachments[0].sourceAlphaBlendFactor = .one 211 | gBufferRenderPipelineDesc.colorAttachments[0].destinationAlphaBlendFactor = .one 212 | 213 | gBufferRenderPipelineDesc.colorAttachments[1].pixelFormat = .rgba16Float 214 | gBufferRenderPipelineDesc.colorAttachments[2].pixelFormat = .rgba16Float 215 | gBufferRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 216 | gBufferRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 217 | gBufferRenderPipelineDesc.sampleCount = 1 218 | gBufferRenderPipelineDesc.label = "GBuffer Render" 219 | gBufferRenderPipelineDesc.vertexFunction = library.makeFunction(name: "gBufferVert") 220 | gBufferRenderPipelineDesc.fragmentFunction 
= library.makeFunction(name: "gBufferFrag") 221 | do { 222 | try gBufferRenderPipeline = device.makeRenderPipelineState(descriptor: gBufferRenderPipelineDesc) 223 | } catch let error { 224 | fatalError("Failed to create GBuffer pipeline state, error \(error)") 225 | } 226 | 227 | // ---- END GBUFFER PASS PREP ---- // 228 | 229 | lightSphere = Mesh(sphereWithSize: 1.0, device: device)! 230 | 231 | // Add space for each light's data 232 | for _ in 0...(lightNumber - 1) { 233 | lightProperties.append(PointLight()) 234 | lightConstants.append(Constants()) 235 | } 236 | 237 | // Hard-code position and radius 238 | lightProperties[0].worldPosition = float3(1, 1, 1.5) 239 | lightProperties[0].radius = 3.0 240 | lightProperties[0].color = float3(1, 0, 0) 241 | 242 | lightProperties[1].worldPosition = float3(-1, 1, 1.5) 243 | lightProperties[1].radius = 3.0 244 | lightProperties[1].color = float3(0, 1, 0) 245 | 246 | // ---- BEGIN STENCIL PASS PREP ---- // 247 | 248 | /* Be very careful with these operations, I clear the stencil buffer to a value of 0, so it's 249 | * very important that I set the depthFailureOperation to 'decrementWRAP' and 'incrementWRAP' 250 | * for the front and back face stencil operations (respectively) rather than 'decrementClamp' 251 | * and 'incrementClamp'. This is because we don't know in which order these operations will 252 | * occur. Let's say we use clamping: 253 | * 254 | * - Back then front order - two failures, expected stencil buffer value: 0 255 | * - Stencil buffer starts at 0 256 | * - Back face depth test fails first: stencil buffer incremented to 1 257 | * - Front face depth test fails second: stencil buffer decremented to 0 258 | * - Stencil buffer final value = 0 (== expected value) - all good! 259 | * 260 | * - Front then back order - two failures, expected stencil buffer value: 0 261 | * - Stencil buffer starts at 0 262 | * - Front face depth test fails first: stencil buffer decremented and clamped to 0 263 | * - Back face depth test fails second: stencil buffer incremented to 1 264 | * - Stencil buffer final value = 1 (!= expected value) - problem here! 265 | * 266 | * Wrapping does not have this issue. There are of course other ways to avoid this problem. 
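 * (An aside on the wrapping arithmetic, added for clarity: on an 8-bit stencil buffer 'decrementWrap'
 * turns 0 into 255, and a later 'incrementWrap' wraps 255 back to 0, so the two depth-test failures
 * cancel out no matter which face happens to be processed first.)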
267 | */ 268 | // Decrement when front faces depth fail 269 | let frontFaceStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 270 | frontFaceStencilOp.stencilCompareFunction = .always // Stencil test always succeeds, only concerned about depth test 271 | frontFaceStencilOp.stencilFailureOperation = .keep // Stencil test always succeeds 272 | frontFaceStencilOp.depthStencilPassOperation = .keep // Do nothing if depth test passes 273 | frontFaceStencilOp.depthFailureOperation = .decrementWrap // Decrement if depth test fails 274 | 275 | // Increment when back faces depth fail 276 | let backFaceStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 277 | backFaceStencilOp.stencilCompareFunction = .always // Stencil test always succeeds, only concerned about depth test 278 | backFaceStencilOp.stencilFailureOperation = .keep // Stencil test always succeeds 279 | backFaceStencilOp.depthStencilPassOperation = .keep // Do nothing if depth test passes 280 | backFaceStencilOp.depthFailureOperation = .incrementWrap // Increment if depth test fails 281 | 282 | let stencilPassDepthStencilStateDesc: MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 283 | stencilPassDepthStencilStateDesc.isDepthWriteEnabled = false // Only concerned with modifying stencil buffer 284 | stencilPassDepthStencilStateDesc.depthCompareFunction = .lessEqual // Only perform stencil op when depth function fails 285 | stencilPassDepthStencilStateDesc.frontFaceStencil = frontFaceStencilOp // For front-facing polygons 286 | stencilPassDepthStencilStateDesc.backFaceStencil = backFaceStencilOp // For back-facing polygons 287 | stencilPassDepthStencilState = device.makeDepthStencilState(descriptor: stencilPassDepthStencilStateDesc) 288 | 289 | let stencilRenderPipelineDesc = MTLRenderPipelineDescriptor() 290 | stencilRenderPipelineDesc.label = "Stencil Pipeline" 291 | stencilRenderPipelineDesc.sampleCount = view.sampleCount 292 | stencilRenderPipelineDesc.vertexFunction = library.makeFunction(name: "stencilPassVert") 293 | stencilRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "stencilPassNullFrag") 294 | stencilRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 295 | stencilRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 296 | do { 297 | try stencilRenderPipeline = device.makeRenderPipelineState(descriptor: stencilRenderPipelineDesc) 298 | } catch let error { 299 | fatalError("Failed to create Stencil pipeline state, error \(error)") 300 | } 301 | 302 | stencilRenderPassDescriptor = MTLRenderPassDescriptor() 303 | stencilRenderPassDescriptor.depthAttachment.loadAction = .load // Load up depth information from GBuffer pass 304 | stencilRenderPassDescriptor.depthAttachment.storeAction = .store // We'll use depth information in later passes 305 | stencilRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 306 | stencilRenderPassDescriptor.stencilAttachment.loadAction = .clear // Contents of stencil buffer unkown at this point, clear it 307 | stencilRenderPassDescriptor.stencilAttachment.storeAction = .store // Store the stencil buffer so that the next pass can use it 308 | stencilRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 309 | 310 | // ---- END STENCIL PASS PREP ---- // 311 | 312 | // ---- BEGIN LIGHTING PASS PREP ---- // 313 | 314 | lightFragmentInput.screenSize.x = Float(view.drawableSize.width) 315 | lightFragmentInput.screenSize.y = Float(view.drawableSize.height) 316 | 317 | // Create composite texture 318 | let 
compositeTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 319 | compositeTextureDescriptor.sampleCount = 1 320 | compositeTextureDescriptor.storageMode = .private 321 | compositeTextureDescriptor.textureType = .type2D 322 | compositeTextureDescriptor.usage = [.renderTarget] 323 | 324 | compositeTexture = device.makeTexture(descriptor: compositeTextureDescriptor) 325 | 326 | // Build light volume depth-stencil state 327 | let lightVolumeStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 328 | lightVolumeStencilOp.stencilCompareFunction = .notEqual // Only pass if not equal to reference value (ref. value is 0 by default) 329 | lightVolumeStencilOp.stencilFailureOperation = .keep // Don't modify stencil value at all 330 | lightVolumeStencilOp.depthStencilPassOperation = .keep 331 | lightVolumeStencilOp.depthFailureOperation = .keep // Depth test is set to always succeed 332 | 333 | let lightVolumeDepthStencilStateDesc: MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 334 | lightVolumeDepthStencilStateDesc.isDepthWriteEnabled = false // Don't modify depth buffer 335 | lightVolumeDepthStencilStateDesc.depthCompareFunction = .always // Stencil buffer will be used to determine if we should light this fragment, ignore depth value (always pass) 336 | lightVolumeDepthStencilStateDesc.backFaceStencil = lightVolumeStencilOp 337 | lightVolumeDepthStencilStateDesc.frontFaceStencil = lightVolumeStencilOp 338 | lightVolumeDepthStencilState = device.makeDepthStencilState(descriptor: lightVolumeDepthStencilStateDesc) 339 | 340 | // Build light volume render pass descriptor 341 | // Get current render pass descriptor instead 342 | lightVolumeRenderPassDescriptor = MTLRenderPassDescriptor() 343 | lightVolumeRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1) 344 | lightVolumeRenderPassDescriptor.colorAttachments[0].texture = compositeTexture 345 | lightVolumeRenderPassDescriptor.colorAttachments[0].loadAction = .clear 346 | lightVolumeRenderPassDescriptor.colorAttachments[0].storeAction = .store // Store for blitting 347 | lightVolumeRenderPassDescriptor.depthAttachment.clearDepth = 1.0 348 | // Aren't using depth 349 | /* 350 | lightVolumeRenderPassDescriptor.depthAttachment.loadAction = .load 351 | lightVolumeRenderPassDescriptor.depthAttachment.storeAction = .store 352 | lightVolumeRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 353 | */ 354 | lightVolumeRenderPassDescriptor.stencilAttachment.loadAction = .load 355 | lightVolumeRenderPassDescriptor.stencilAttachment.storeAction = .dontCare // Aren't using stencil buffer after this point 356 | lightVolumeRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 357 | 358 | // Build light volume render pipeline 359 | let lightVolumeRenderPipelineDesc = MTLRenderPipelineDescriptor() 360 | lightVolumeRenderPipelineDesc.colorAttachments[0].pixelFormat = .bgra8Unorm 361 | // We need to enable blending as each light volume is additive (it 'adds' to the contribution of the previous one) 362 | lightVolumeRenderPipelineDesc.colorAttachments[0].isBlendingEnabled = true 363 | lightVolumeRenderPipelineDesc.colorAttachments[0].rgbBlendOperation = .add 364 | lightVolumeRenderPipelineDesc.colorAttachments[0].sourceRGBBlendFactor = .one 365 | lightVolumeRenderPipelineDesc.colorAttachments[0].destinationRGBBlendFactor = .one 366 | 
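        // Spelled out, this blend configuration computes newColor = (1 * fragmentColor) + (1 * destinationColor),
        // i.e. plain additive accumulation: every light volume adds its contribution on top of the previous ones.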
lightVolumeRenderPipelineDesc.colorAttachments[0].alphaBlendOperation = .add 367 | lightVolumeRenderPipelineDesc.colorAttachments[0].sourceAlphaBlendFactor = .one 368 | lightVolumeRenderPipelineDesc.colorAttachments[0].destinationAlphaBlendFactor = .one 369 | lightVolumeRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 370 | lightVolumeRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 371 | lightVolumeRenderPipelineDesc.sampleCount = 1 372 | lightVolumeRenderPipelineDesc.label = "Light Volume Render" 373 | lightVolumeRenderPipelineDesc.vertexFunction = library.makeFunction(name: "stencilPassVert") 374 | lightVolumeRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "lightVolumeFrag") 375 | do { 376 | try lightVolumeRenderPipeline = device.makeRenderPipelineState(descriptor: lightVolumeRenderPipelineDesc) 377 | } catch let error { 378 | fatalError("Failed to create lightVolume pipeline state, error \(error)") 379 | } 380 | 381 | // ---- END LIGHTING PASS PREP ---- // 382 | 383 | 384 | super.init() 385 | 386 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 387 | view.delegate = self 388 | view.device = device 389 | } 390 | 391 | class func buildRenderPipelineWithDevice(_ device: MTLDevice, view: MTKView) throws -> MTLRenderPipelineState { 392 | // The default library contains all of the shader functions that were compiled into our app bundle 393 | let library = device.newDefaultLibrary()! 394 | 395 | // Retrieve the functions that will comprise our pipeline 396 | let vertexFunction = library.makeFunction(name: "vertex_transform") 397 | let fragmentFunction = library.makeFunction(name: "fragment_lit_textured") 398 | 399 | // A render pipeline descriptor describes the configuration of our programmable pipeline 400 | let pipelineDescriptor = MTLRenderPipelineDescriptor() 401 | pipelineDescriptor.label = "Render Pipeline" 402 | pipelineDescriptor.sampleCount = view.sampleCount 403 | pipelineDescriptor.vertexFunction = vertexFunction 404 | pipelineDescriptor.fragmentFunction = fragmentFunction 405 | pipelineDescriptor.colorAttachments[0].pixelFormat = view.colorPixelFormat 406 | pipelineDescriptor.depthAttachmentPixelFormat = view.depthStencilPixelFormat 407 | 408 | return try device.makeRenderPipelineState(descriptor: pipelineDescriptor) 409 | } 410 | 411 | class func buildTexture(name: String, _ device: MTLDevice) throws -> MTLTexture { 412 | let textureLoader = MTKTextureLoader(device: device) 413 | let asset = NSDataAsset.init(name: name) 414 | if let data = asset?.data { 415 | return try textureLoader.newTexture(with: data, options: [:]) 416 | } else { 417 | fatalError("Could not load image \(name) from an asset catalog in the main bundle") 418 | } 419 | } 420 | 421 | class func buildSamplerStateWithDevice(_ device: MTLDevice, 422 | addressMode: MTLSamplerAddressMode, 423 | filter: MTLSamplerMinMagFilter) -> MTLSamplerState 424 | { 425 | let samplerDescriptor = MTLSamplerDescriptor() 426 | samplerDescriptor.sAddressMode = addressMode 427 | samplerDescriptor.tAddressMode = addressMode 428 | samplerDescriptor.minFilter = filter 429 | samplerDescriptor.magFilter = filter 430 | return device.makeSamplerState(descriptor: samplerDescriptor) 431 | } 432 | 433 | class func buildDepthStencilStateWithDevice(_ device: MTLDevice, 434 | compareFunc: MTLCompareFunction, 435 | isWriteEnabled: Bool) -> MTLDepthStencilState 436 | { 437 | let desc = MTLDepthStencilDescriptor() 438 | 
desc.depthCompareFunction = compareFunc 439 | desc.isDepthWriteEnabled = isWriteEnabled 440 | return device.makeDepthStencilState(descriptor: desc) 441 | } 442 | 443 | func updateWithTimestep(_ timestep: TimeInterval) 444 | { 445 | // We keep track of time so we can animate the various transformations 446 | time = time + timestep 447 | let modelToWorldMatrix = matrix4x4_rotation(Float(time) * 0.5, vector_float3(0.7, 1, 0)) 448 | 449 | // So that the figure doesn't get distorted when the window changes size or rotates, 450 | // we factor the current aspect ration into our projection matrix. We also select 451 | // sensible values for the vertical view angle and the distances to the near and far planes. 452 | let viewSize = self.view.bounds.size 453 | let aspectRatio = Float(viewSize.width / viewSize.height) 454 | let verticalViewAngle = radians_from_degrees(65) 455 | let nearZ: Float = 0.1 456 | let farZ: Float = 100.0 457 | let projectionMatrix = matrix_perspective(verticalViewAngle, aspectRatio, nearZ, farZ) 458 | 459 | let viewMatrix = matrix_look_at(lightFragmentInput.camWorldPos.x, lightFragmentInput.camWorldPos.y, lightFragmentInput.camWorldPos.z, 0, 0, 0, 0, 1, 0) 460 | 461 | // The combined model-view-projection matrix moves our vertices from model space into clip space 462 | let mvMatrix = matrix_multiply(viewMatrix, modelToWorldMatrix); 463 | constants.modelViewProjectionMatrix = matrix_multiply(projectionMatrix, mvMatrix) 464 | constants.normalMatrix = matrix_inverse_transpose(matrix_upper_left_3x3(mvMatrix)) 465 | constants.modelMatrix = modelToWorldMatrix 466 | 467 | // Update light constants 468 | for i in 0...(lightNumber-1) { 469 | let lightModelToWorldMatrix = matrix_multiply(matrix4x4_translation(lightProperties[i].worldPosition.x, lightProperties[i].worldPosition.y, lightProperties[i].worldPosition.z), matrix4x4_scale(vector3(lightProperties[i].radius, lightProperties[i].radius, lightProperties[i].radius))) 470 | let lightMvMatrix = matrix_multiply(viewMatrix, lightModelToWorldMatrix); 471 | lightConstants[i].modelViewProjectionMatrix = matrix_multiply(projectionMatrix, lightMvMatrix) 472 | lightConstants[i].normalMatrix = matrix_inverse_transpose(matrix_upper_left_3x3(lightMvMatrix)) 473 | lightConstants[i].modelMatrix = lightModelToWorldMatrix; 474 | } 475 | } 476 | 477 | func render(_ view: MTKView) { 478 | // Our animation will be dependent on the frame time, so that regardless of how 479 | // fast we're animating, the speed of the transformations will be roughly constant. 480 | let timestep = 1.0 / TimeInterval(view.preferredFramesPerSecond) 481 | updateWithTimestep(timestep) 482 | 483 | // Our command buffer is a container for the work we want to perform with the GPU. 
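        // (Overview) One frame is encoded as four passes on this single command buffer:
        //   1. GBuffer pass  - render albedo/normal/position/depth of the scene to textures
        //   2. Stencil pass  - mark, per pixel, whether it falls inside any light volume
        //   3. Lighting pass - accumulate each light's contribution into compositeTexture
        //   4. Blit          - copy compositeTexture into the drawable for display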
484 | let commandBuffer = commandQueue.makeCommandBuffer() 485 | 486 | let currDrawable = view.currentDrawable 487 | 488 | // ---- GBUFFER ---- // 489 | // Draw our scene to texture 490 | // We use an encoder to 'encode' commands into a command buffer 491 | let gBufferEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: gBufferRenderPassDescriptor) 492 | gBufferEncoder.pushDebugGroup("GBuffer") // For debugging 493 | gBufferEncoder.label = "GBuffer" 494 | // Use the depth stencil state we created earlier 495 | gBufferEncoder.setDepthStencilState(gBufferDepthStencilState) 496 | gBufferEncoder.setCullMode(.back) 497 | // Set winding order 498 | gBufferEncoder.setFrontFacing(.counterClockwise) 499 | // Use the render pipeline state we created earlier 500 | gBufferEncoder.setRenderPipelineState(gBufferRenderPipeline) 501 | // Upload vertex data 502 | gBufferEncoder.setVertexBuffer(mesh.vertexBuffer, offset:0, at:0) 503 | // Upload uniforms 504 | gBufferEncoder.setVertexBytes(&constants, length: MemoryLayout.size, at: 1) 505 | // Bind the checkerboard texture (for the cube) 506 | gBufferEncoder.setFragmentTexture(texture, at: 0) 507 | // Draw our mesh 508 | gBufferEncoder.drawIndexedPrimitives(type: mesh.primitiveType, 509 | indexCount: mesh.indexCount, 510 | indexType: mesh.indexType, 511 | indexBuffer: mesh.indexBuffer, 512 | indexBufferOffset: 0) 513 | gBufferEncoder.popDebugGroup() // For debugging 514 | // Finish encoding commands in this encoder 515 | gBufferEncoder.endEncoding() 516 | 517 | // ---- STENCIL ---- // 518 | let stencilPassEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: stencilRenderPassDescriptor) 519 | stencilPassEncoder.pushDebugGroup("Stencil Pass") 520 | stencilPassEncoder.label = "Stencil Pass" 521 | stencilPassEncoder.setDepthStencilState(stencilPassDepthStencilState) 522 | // We want to draw back-facing AND front-facing polygons 523 | stencilPassEncoder.setCullMode(.none) 524 | stencilPassEncoder.setFrontFacing(.counterClockwise) 525 | stencilPassEncoder.setRenderPipelineState(stencilRenderPipeline) 526 | stencilPassEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 527 | 528 | for i in 0...(lightNumber-1) { 529 | stencilPassEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 530 | stencilPassEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, indexCount: lightSphere.indexCount, indexType: lightSphere.indexType, indexBuffer: lightSphere.indexBuffer, indexBufferOffset: 0) 531 | } 532 | 533 | stencilPassEncoder.popDebugGroup() 534 | stencilPassEncoder.endEncoding() 535 | 536 | // ---- LIGHTING ---- // 537 | let lightPassEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: lightVolumeRenderPassDescriptor) 538 | lightPassEncoder.pushDebugGroup("Light Volume Pass") 539 | lightPassEncoder.label = "Light Volume Pass" 540 | // Use our previously configured depth stencil state 541 | lightPassEncoder.setDepthStencilState(lightVolumeDepthStencilState) 542 | // Set our stencil reference value to 0 (in the depth stencil state we configured fragments to pass only if they are NOT EQUAL to the reference value 543 | lightPassEncoder.setStencilReferenceValue(0) 544 | // We cull the front of the spherical light volume and not the back, in-case we are inside the light volume. I'm not 100% certain this is the best way to do this, but it seems to work. 
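        // (One reason this works: if the camera ends up inside a light volume, the volume's front faces lie
        // behind the camera and are clipped away, so a back-face-culled sphere would never be rasterized there.
        // Culling front faces and drawing only the back faces keeps the volume visible from inside and outside.)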
545 | lightPassEncoder.setCullMode(.front) 546 | lightPassEncoder.setFrontFacing(.counterClockwise) 547 | lightPassEncoder.setRenderPipelineState(lightVolumeRenderPipeline) 548 | // Bind our GBuffer textures 549 | lightPassEncoder.setFragmentTexture(gBufferAlbedoTexture, at: 0) 550 | lightPassEncoder.setFragmentTexture(gBufferNormalTexture, at: 1) 551 | lightPassEncoder.setFragmentTexture(gBufferPositionTexture, at: 2) 552 | lightPassEncoder.setFragmentTexture(gBufferDepthTexture, at: 3) 553 | lightPassEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 554 | // Upload our screen size 555 | lightPassEncoder.setFragmentBytes(&lightFragmentInput, length: MemoryLayout.size, at: 0) 556 | // Render light volumes 557 | for i in 0...(lightNumber - 1) { 558 | lightPassEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 559 | lightPassEncoder.setFragmentBytes(&lightProperties[i], length: MemoryLayout.size, at: 1) 560 | lightPassEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, indexCount: lightSphere.indexCount, indexType: lightSphere.indexType, indexBuffer: lightSphere.indexBuffer, indexBufferOffset: 0) 561 | } 562 | 563 | lightPassEncoder.popDebugGroup() 564 | lightPassEncoder.endEncoding() 565 | 566 | // ---- BLIT ---- // 567 | // Blit our texture to the screen 568 | let blitEncoder = commandBuffer.makeBlitCommandEncoder() 569 | blitEncoder.pushDebugGroup("Blit") 570 | 571 | // Create a region that covers the entire texture we want to blit to the screen 572 | let origin: MTLOrigin = MTLOriginMake(0, 0, 0) 573 | let size: MTLSize = MTLSizeMake(Int(self.view.drawableSize.width), Int(self.view.drawableSize.height), 1) 574 | 575 | // Encode copy command, copying from our albedo texture to the 'current drawable' texture 576 | // The 'current drawable' is essentially a render target that can be displayed on the screen 577 | blitEncoder.copy(from: compositeTexture, sourceSlice: 0, sourceLevel: 0, sourceOrigin: origin, sourceSize: size, to: (currDrawable?.texture)!, destinationSlice: 0, destinationLevel: 0, destinationOrigin: origin) 578 | 579 | blitEncoder.endEncoding() 580 | blitEncoder.popDebugGroup() 581 | 582 | if let drawable = currDrawable 583 | { 584 | // Display our drawable to the screen 585 | commandBuffer.present(drawable) 586 | } 587 | 588 | // Finish encoding commands 589 | commandBuffer.commit() 590 | } 591 | 592 | func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) { 593 | // respond to resize 594 | let drawableWidth = Int(size.width) 595 | let drawableHeight = Int(size.height) 596 | 597 | lightFragmentInput.screenSize.x = Float(size.width) 598 | lightFragmentInput.screenSize.y = Float(size.height) 599 | 600 | // Create resized GBuffer albedo texture 601 | let gBufferAlbedoTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 602 | gBufferAlbedoTextureDescriptor.sampleCount = 1 603 | gBufferAlbedoTextureDescriptor.storageMode = .private 604 | gBufferAlbedoTextureDescriptor.textureType = .type2D 605 | gBufferAlbedoTextureDescriptor.usage = [.renderTarget, .shaderRead] 606 | 607 | gBufferAlbedoTexture = device.makeTexture(descriptor: gBufferAlbedoTextureDescriptor) 608 | 609 | // Create resized GBuffer normal texture 610 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: 
false) 611 | gBufferNormalTextureDescriptor.sampleCount = 1 612 | gBufferNormalTextureDescriptor.storageMode = .private 613 | gBufferNormalTextureDescriptor.textureType = .type2D 614 | gBufferNormalTextureDescriptor.usage = [.renderTarget, .shaderRead] 615 | 616 | gBufferNormalTexture = device.makeTexture(descriptor: gBufferNormalTextureDescriptor) 617 | 618 | // Create resized GBuffer position texture 619 | let gBufferPositionTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 620 | gBufferPositionTextureDescriptor.sampleCount = 1 621 | gBufferPositionTextureDescriptor.storageMode = .private 622 | gBufferPositionTextureDescriptor.textureType = .type2D 623 | gBufferPositionTextureDescriptor.usage = [.renderTarget, .shaderRead] 624 | 625 | gBufferPositionTexture = device.makeTexture(descriptor: gBufferPositionTextureDescriptor) 626 | 627 | // Create resized GBuffer depth (and stencil) texture 628 | let gBufferDepthDesc: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8, width: drawableWidth, height: drawableHeight, mipmapped: false) 629 | gBufferDepthDesc.sampleCount = 1 630 | gBufferDepthDesc.storageMode = .private 631 | gBufferDepthDesc.textureType = .type2D 632 | gBufferDepthDesc.usage = [.renderTarget, .shaderRead] 633 | 634 | gBufferDepthTexture = device.makeTexture(descriptor: gBufferDepthDesc) 635 | 636 | // Create resized composite texture 637 | let compositeTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 638 | compositeTextureDescriptor.sampleCount = 1 639 | compositeTextureDescriptor.storageMode = .private 640 | compositeTextureDescriptor.textureType = .type2D 641 | compositeTextureDescriptor.usage = [.renderTarget] 642 | 643 | compositeTexture = device.makeTexture(descriptor: compositeTextureDescriptor) 644 | 645 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 646 | gBufferRenderPassDescriptor.colorAttachments[1].texture = gBufferNormalTexture 647 | gBufferRenderPassDescriptor.colorAttachments[2].texture = gBufferPositionTexture 648 | gBufferRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 649 | 650 | stencilRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 651 | stencilRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 652 | 653 | lightVolumeRenderPassDescriptor.colorAttachments[0].texture = compositeTexture 654 | lightVolumeRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 655 | } 656 | 657 | @objc(drawInMTKView:) 658 | func draw(in metalView: MTKView) 659 | { 660 | render(metalView) 661 | } 662 | } 663 | -------------------------------------------------------------------------------- /MetalTexturedMesh/Shaders.metal: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | Shader file with functions for rendering lit, textured geometry. 
7 | */ 8 | 9 | #include <metal_stdlib> 10 | using namespace metal; 11 | 12 | struct Constants { 13 | float4x4 modelViewProjectionMatrix; 14 | float3x3 normalMatrix; 15 | float4x4 modelMatrix; 16 | }; 17 | 18 | constant half3 ambientLightIntensity(0.1, 0.1, 0.1); 19 | constant half3 diffuseLightIntensity(0.9, 0.9, 0.9); 20 | constant half3 lightDirection(-0.577, -0.577, -0.577); 21 | 22 | struct VertexIn { 23 | packed_float3 position; 24 | packed_float3 normal; 25 | packed_float2 texCoords; 26 | }; 27 | 28 | struct VertexOut { 29 | float4 position [[position]]; 30 | float3 normal; 31 | float2 texCoords; 32 | float4 worldPosition; 33 | }; 34 | 35 | struct StencilPassOut { 36 | float4 position [[position]]; 37 | }; 38 | 39 | vertex StencilPassOut stencilPassVert(const device VertexIn *vertices [[buffer(0)]], 40 | const device Constants &uniforms [[buffer(1)]], 41 | unsigned int vid [[vertex_id]]) { 42 | StencilPassOut out; 43 | 44 | out.position = uniforms.modelViewProjectionMatrix * float4(vertices[vid].position, 1.0); 45 | 46 | return out; 47 | } 48 | 49 | fragment void stencilPassNullFrag(StencilPassOut in [[stage_in]]) 50 | { 51 | } 52 | 53 | struct LightFragmentInput { 54 | float2 screenSize; 55 | float3 camWorldPos; 56 | }; 57 | 58 | struct PointLight { 59 | float3 worldPosition; 60 | float radius; 61 | float3 color; 62 | }; 63 | 64 | fragment float4 lightVolumeFrag(StencilPassOut in [[stage_in]], 65 | constant LightFragmentInput *lightData [[ buffer(0) ]], 66 | constant PointLight *pointLight [[ buffer(1) ]], 67 | texture2d<float> albedoTexture [[ texture(0) ]], 68 | texture2d<float> normalsTexture [[ texture(1) ]], 69 | texture2d<float> positionTexture [[ texture(2) ]]) 70 | { 71 | // We sample albedo, normals and position from the position of this fragment, normalized to be 0-1 within screen space 72 | float2 sampleCoords = in.position.xy / lightData->screenSize; 73 | 74 | constexpr sampler texSampler; 75 | 76 | // Extract data for this fragment from GBuffer textures 77 | const float3 albedo = float3(albedoTexture.sample(texSampler, sampleCoords)); 78 | const float3 worldPosition = float3(positionTexture.sample(texSampler, sampleCoords)); 79 | const float3 normal = normalize(float3(normalsTexture.sample(texSampler, sampleCoords))); 80 | 81 | const float3 lightDir = normalize(pointLight->worldPosition - worldPosition); 82 | 83 | // Diffuse 84 | const float nDotL = max(dot(normal, lightDir), 0.0); 85 | const float3 diffuse = nDotL * albedo * pointLight->color; 86 | 87 | float3 result = diffuse; 88 | 89 | // Specular - if you want 90 | //const float3 viewDir = normalize(lightData->camWorldPos - worldPosition); 91 | //const float3 halfwayDir = normalize(lightDir + viewDir); 92 | //const float3 specular = pow(max(dot(normal, halfwayDir), 0.0), 60.0) * 0.2; 93 | //result = (diffuse + specular); 94 | 95 | const float3 gammaCorrect = pow(float3(result), (1.0/2.2)); 96 | return float4(gammaCorrect, 1.0); 97 | } 98 | 99 | struct GBufferOut { 100 | float4 albedo [[color(0)]]; 101 | float4 normal [[color(1)]]; 102 | float4 position [[color(2)]]; 103 | }; 104 | 105 | vertex VertexOut gBufferVert(const device VertexIn *vertices [[buffer(0)]], 106 | const device Constants &uniforms [[buffer(1)]], 107 | unsigned int vid [[vertex_id]]) { 108 | VertexOut out; 109 | VertexIn vin = vertices[vid]; 110 | 111 | float4 inPosition = float4(vin.position, 1.0); 112 | out.position = uniforms.modelViewProjectionMatrix * inPosition; 113 | float3 normal = vin.normal; 114 | float3 eyeNormal = normalize(uniforms.normalMatrix * normal); 115 | 116 | 
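    // Note: normalMatrix is built from the model-view matrix on the CPU side, so eyeNormal is in eye space,
    // while the position written to the GBuffer below is in world space. For a camera sitting on the +Z axis
    // looking at the origin the two frames coincide, but if the camera is ever rotated, deriving the normal
    // matrix from the model matrix instead would keep the lighting pass consistently in world space.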
out.normal = eyeNormal; 117 | out.texCoords = vin.texCoords; 118 | out.worldPosition = uniforms.modelMatrix * inPosition; 119 | 120 | return out; 121 | } 122 | 123 | fragment GBufferOut gBufferFrag(VertexOut in [[stage_in]], 124 | texture2d albedo_texture [[texture(0)]]) 125 | { 126 | constexpr sampler linear_sampler(min_filter::linear, mag_filter::linear); 127 | float4 albedo = albedo_texture.sample(linear_sampler, in.texCoords); 128 | 129 | GBufferOut output; 130 | 131 | output.albedo = albedo; 132 | output.normal = float4(in.normal, 1.0); 133 | output.position = in.worldPosition; 134 | 135 | return output; 136 | } 137 | 138 | vertex VertexOut vertex_transform(device VertexIn *vertices [[buffer(0)]], 139 | constant Constants &uniforms [[buffer(1)]], 140 | uint vertexId [[vertex_id]]) 141 | { 142 | float3 modelPosition = vertices[vertexId].position; 143 | float3 modelNormal = vertices[vertexId].normal; 144 | 145 | VertexOut out; 146 | // Multiplying the model position by the model-view-projection matrix moves us into clip space 147 | out.position = uniforms.modelViewProjectionMatrix * float4(modelPosition, 1); 148 | // Copy the vertex normal and texture coordinates 149 | out.normal = uniforms.normalMatrix * modelNormal; 150 | out.texCoords = vertices[vertexId].texCoords; 151 | return out; 152 | } 153 | 154 | fragment half4 fragment_lit_textured(VertexOut fragmentIn [[stage_in]], 155 | texture2d tex2d [[texture(0)]], 156 | sampler sampler2d [[sampler(0)]]) 157 | { 158 | // Sample the texture to get the surface color at this point 159 | half3 surfaceColor = half3(tex2d.sample(sampler2d, fragmentIn.texCoords).rrr); 160 | // Re-normalize the interpolated surface normal 161 | half3 normal = normalize(half3(fragmentIn.normal)); 162 | // Compute the ambient color contribution 163 | half3 color = ambientLightIntensity * surfaceColor; 164 | // Calculate the diffuse factor as the dot product of the normal and light direction 165 | float diffuseFactor = saturate(dot(normal, -lightDirection)); 166 | // Add in the diffuse contribution from the light 167 | color += diffuseFactor * diffuseLightIntensity * surfaceColor; 168 | return half4(color, 1); 169 | } 170 | -------------------------------------------------------------------------------- /MetalTexturedMesh/ViewController.swift: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (C) 2016 Apple Inc. All Rights Reserved. 3 | See LICENSE.txt for this sample’s licensing information 4 | 5 | Abstract: 6 | View controller class that manages the MTKView and renderer. 7 | */ 8 | 9 | import MetalKit 10 | import Cocoa 11 | 12 | class ViewController: NSViewController { 13 | 14 | var renderer: Renderer! 15 | 16 | override func viewDidLoad() { 17 | super.viewDidLoad() 18 | 19 | let metalView = self.view as! MTKView 20 | 21 | // We initialize our renderer object with the MTKView it will be drawing into 22 | renderer = Renderer(mtkView:metalView) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MetalDeferredLightingTutorial 2 | Lit Sphere 3 | 4 | Tutorial source for implementing deferred lighting in Metal. 5 | 6 | Tutorial [here](https://github.com/sevanspowell/MetalDeferredLightingTutorial/blob/master/doc/implementing-deferred-shading-in-metal.md). 
7 | -------------------------------------------------------------------------------- /doc/implementing-deferred-shading-in-metal.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | [< Home](../index.org) 4 | 5 | Implementing Deferred Shading in Metal 6 | ====================================== 7 | 8 | Introduction 9 | ============ 10 | 11 | Deferred shading is a rendering technique usually used to optimize scenes with a large number of light sources. Rather than perform expensive lighting calculations on every fragment in the scene, we 'defer' the lighting calculations to a later time, when we know which fragments are affected by which lights. 12 | 13 | Process 14 | ======= 15 | 16 | The process of my particular implementation of deferred shading is as follows: 17 | 18 | We first render the scene in what is known as the 'Geometry Buffer' or 'GBuffer' pass, where we gather information about a scene (such as albedo, normal, depth and position information) and render it to separate textures. 19 | 20 | *The position, normal, world position and depth of a simple scene are shown below. Note that the normal and position textures look the same because the sphere is positioned at (0, 0, 0) and hence the position of each fragment is equivalent to that fragment's normal.* 21 | 22 | ![](../img/posts/implementing-deferred-shading-in-metal/gbuffer-data.png) 23 | 24 | Next, we render the light volumes in the scene. The light volumes are pieces of geometry chosen to mimic the area-of-effect of a particular light. For example, a point light can be represented by a sphere mesh. The radius of the sphere mesh should be chosen so that the contribution provided by that light at the edge of the mesh is 0. 25 | 26 | *A visualization of the lights in the example scene:* 27 | 28 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-normal-fake.png) 29 | 30 | When we render the light volumes, we use the GBuffer pass's unmodified depth texture as the render pass's depth texture; this allows us to compare the depth of the light volumes with the scene depth. We also render both the front and back faces of the light volume. If a front face fails the depth test, we decrement the stencil buffer value at that point. If a back face fails the depth test, we increment the stencil buffer value at that point. This way, fragments within a light volume have a value of 1 in the stencil buffer and all other fragments have a value of 0 (a visual explanation of this stencil buffer algorithm can be found [here](http://ogldev.atspace.co.uk/www/tutorial37/tutorial37.html)). 31 | 32 | *Red represents pixels that are within the light volume:* 33 | 34 | ![](../img/posts/implementing-deferred-shading-in-metal/stencil-buffer.png) 35 | 36 | Finally, we combine all the information we've gathered so far and perform the lighting calculations. The depth test has already determined which fragments can be seen by the camera, and the stencil pass has allowed us to determine which fragments are actually within (and so affected by) the lights. As a result, the number of expensive lighting calculations we have to perform is minimized, helping us to improve our render performance. 
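To make the stencil counting above concrete, here is a tiny, self-contained model of it. This is plain Swift rather than Metal and is purely illustrative (it is not part of the project); it only encodes the convention used by the code later in this tutorial: a front face of the light volume that fails the depth test decrements the stencil value, a back face that fails increments it, and only fragments inside the volume end up marked.

``` swift
// Toy model of the stencil counting along one view ray.
// Depth convention: smaller values are closer to the camera; a light-volume face
// "fails" the depth test when it lies behind the scene fragment already in the depth buffer.
func stencilAfterLightVolume(fragmentDepth: Double,
                             volumeFrontDepth: Double,
                             volumeBackDepth: Double) -> Int {
    var stencil = 0
    if volumeFrontDepth > fragmentDepth { stencil -= 1 } // front face fails -> decrement
    if volumeBackDepth > fragmentDepth { stencil += 1 }  // back face fails -> increment
    return stencil
}

print(stencilAfterLightVolume(fragmentDepth: 0.5, volumeFrontDepth: 0.4, volumeBackDepth: 0.6)) // 1: fragment inside the volume (lit)
print(stencilAfterLightVolume(fragmentDepth: 0.3, volumeFrontDepth: 0.4, volumeBackDepth: 0.6)) // 0: fragment in front of the volume (not lit)
print(stencilAfterLightVolume(fragmentDepth: 0.8, volumeFrontDepth: 0.4, volumeBackDepth: 0.6)) // 0: fragment behind the volume (not lit)
```

The lighting pass then shades only those pixels whose stencil value is not equal to the reference value of 0, which is exactly the comparison configured later on.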
37 | 38 | The final result looks something like this: 39 | 40 | ![](../img/posts/implementing-deferred-shading-in-metal/final.png) 41 | 42 | Implementation 43 | ============== 44 | 45 | The concept is actually quite simple; implementing it can be a little tricky, especially when using a framework you're not familiar with. 46 | 47 | *Disclaimer: There might be some bugs with this implementation that I haven't discovered yet.* 48 | 49 | Note that I've tried to code this as explicitly as I can; I've avoided encapsulating some of these things into functions so that you can see what's going on more clearly. I also do only a nominal amount of error-checking. 50 | 51 | To follow along with the rest of this tutorial, do the following: 52 | 53 | - Download the [sample code](https://developer.apple.com/library/content/samplecode/AdoptingMetalI/AdoptingMetalIApracticalapproachtoyourfirstMetalapp.zip) (I started with the sample code from [Apple's Adopting Metal I](https://developer.apple.com/library/content/samplecode/AdoptingMetalI/Introduction/Intro.html) sample). 54 | - Open the project in Xcode. 55 | - Click on the "Metal Textured Mesh" project in the Project Navigator. 56 | - Choose a development team under "General > Signing". 57 | - Run the project. 58 | 59 | You should see a rotating cube with a checkerboard texture on a white background. You might also find it easier to follow along with the finished program in hand: [MetalDeferredLightingTutorial](https://github.com/sevanspowell/MetalDeferredLightingTutorial). 60 | 61 | GBuffer pass 62 | ------------ 63 | 64 | ### Albedo and depth 65 | 66 | Let's set up our GBuffer pass. For now, we'll just render the scene albedo and depth. 67 | 68 | We'll need a few resources to start us off: 69 | 70 | **Renderer.swift** 71 | 72 | ``` swift 73 | @objc 74 | class Renderer : NSObject, MTKViewDelegate 75 | { 76 | // ... 77 | var time = TimeInterval(0.0) 78 | var constants = Constants() 79 | 80 | var gBufferAlbedoTexture: MTLTexture 81 | var gBufferDepthTexture: MTLTexture 82 | let gBufferDepthStencilState: MTLDepthStencilState 83 | var gBufferRenderPassDescriptor: MTLRenderPassDescriptor 84 | let gBufferRenderPipeline: MTLRenderPipelineState 85 | 86 | init?(mtkView: MTKView) { 87 | // ... 88 | // To be used for the size of the render textures 89 | let drawableWidth = Int(self.view.drawableSize.width) 90 | let drawableHeight = Int(self.view.drawableSize.height) 91 | // We create our shaders from here 92 | let library = device.newDefaultLibrary()! 
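        // (Note: newDefaultLibrary() is the Swift 3-era API used throughout this tutorial; on newer
        // SDKs the equivalent call is makeDefaultLibrary().)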
93 | 94 | // Create GBuffer albedo texture 95 | // First we create a descriptor that describes the texture we're about to create 96 | let gBufferAlbedoTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 97 | gBufferAlbedoTextureDescriptor.sampleCount = 1 98 | gBufferAlbedoTextureDescriptor.storageMode = .private 99 | gBufferAlbedoTextureDescriptor.textureType = .type2D 100 | gBufferAlbedoTextureDescriptor.usage = [.renderTarget, .shaderRead] 101 | 102 | // Then we make the texture 103 | gBufferAlbedoTexture = device.makeTexture(descriptor: gBufferAlbedoTextureDescriptor) 104 | 105 | // Create GBuffer depth texture 106 | let gBufferDepthDesc: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 107 | gBufferDepthDesc.sampleCount = 1 108 | gBufferDepthDesc.storageMode = .private 109 | gBufferDepthDesc.textureType = .type2D 110 | gBufferDepthDesc.usage = [.renderTarget, .shaderRead] 111 | 112 | gBufferDepthTexture = device.makeTexture(descriptor: gBufferDepthDesc) 113 | 114 | // Build GBuffer depth/stencil state 115 | // Again we create a descriptor that describes the object we're about to create 116 | let gBufferDepthStencilStateDescriptor: MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 117 | gBufferDepthStencilStateDescriptor.isDepthWriteEnabled = true 118 | gBufferDepthStencilStateDescriptor.depthCompareFunction = .lessEqual 119 | gBufferDepthStencilStateDescriptor.frontFaceStencil = nil 120 | gBufferDepthStencilStateDescriptor.backFaceStencil = nil 121 | 122 | // Then we create the depth/stencil state 123 | gBufferDepthStencilState = device.makeDepthStencilState(descriptor: gBufferDepthStencilStateDescriptor) 124 | 125 | // Create GBuffer render pass descriptor 126 | gBufferRenderPassDescriptor = MTLRenderPassDescriptor() 127 | // Specify the properties of the first color attachment (our albedo texture) 128 | gBufferRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0) 129 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 130 | gBufferRenderPassDescriptor.colorAttachments[0].loadAction = .clear 131 | gBufferRenderPassDescriptor.colorAttachments[0].storeAction = .store 132 | 133 | // Specify the properties of the depth attachment 134 | gBufferRenderPassDescriptor.depthAttachment.loadAction = .clear 135 | gBufferRenderPassDescriptor.depthAttachment.storeAction = .store 136 | gBufferRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 137 | gBufferRenderPassDescriptor.depthAttachment.clearDepth = 1.0 138 | 139 | // Create GBuffer render pipeline 140 | let gBufferRenderPipelineDesc = MTLRenderPipelineDescriptor() 141 | gBufferRenderPipelineDesc.colorAttachments[0].pixelFormat = .rgba8Unorm 142 | gBufferRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 143 | gBufferRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 144 | gBufferRenderPipelineDesc.sampleCount = 1 145 | gBufferRenderPipelineDesc.label = "GBuffer Render" 146 | gBufferRenderPipelineDesc.vertexFunction = library.makeFunction(name: "gBufferVert") 147 | gBufferRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "gBufferFrag") 148 | do { 149 | try gBufferRenderPipeline = device.makeRenderPipelineState(descriptor: gBufferRenderPipelineDesc) 150 | } catch let error { 151 
| fatalError("Failed to create GBuffer pipeline state, error \(error)") 152 | } 153 | 154 | super.init() 155 | 156 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 157 | view.delegate = self 158 | view.device = device 159 | } 160 | // ... 161 | } 162 | ``` 163 | 164 | You'll also want to add the following lines to your "Shaders.metal" file: 165 | 166 | **Shaders.metal** 167 | 168 | 169 | struct GBufferOut { 170 | float4 albedo [[color(0)]]; 171 | }; 172 | 173 | vertex VertexOut gBufferVert(const device VertexIn *vertices [[buffer(0)]], 174 | const device Constants &uniforms [[buffer(1)]], 175 | unsigned int vid [[vertex_id]]) { 176 | VertexOut out; 177 | VertexIn vin = vertices[vid]; 178 | 179 | float4 inPosition = float4(vin.position, 1.0); 180 | out.position = uniforms.modelViewProjectionMatrix * inPosition; 181 | float3 normal = vin.normal; 182 | float3 eyeNormal = normalize(uniforms.normalMatrix * normal); 183 | 184 | out.normal = eyeNormal; 185 | out.texCoords = vin.texCoords; 186 | 187 | return out; 188 | } 189 | 190 | fragment GBufferOut gBufferFrag(VertexOut in [[stage_in]], 191 | texture2d albedo_texture [[texture(0)]]) 192 | { 193 | // Sample from checkerboard texture 194 | constexpr sampler linear_sampler(min_filter::linear, mag_filter::linear); 195 | float4 albedo = albedo_texture.sample(linear_sampler, in.texCoords); 196 | 197 | GBufferOut output; 198 | 199 | // Output to our GBuffer albedo texture 200 | output.albedo = albedo; 201 | 202 | return output; 203 | } 204 | 205 | To actually do anything with all this, we'll need to almost completely replace our render function: 206 | 207 | **Renderer.swift** 208 | 209 | ``` swift 210 | func render(_ view: MTKView) { 211 | // Our animation will be dependent on the frame time, so that regardless of how 212 | // fast we're animating, the speed of the transformations will be roughly constant. 213 | let timestep = 1.0 / TimeInterval(view.preferredFramesPerSecond) 214 | updateWithTimestep(timestep) 215 | 216 | // A command buffer is a container for the work we want to perform with the GPU. 
217 | let commandBuffer = commandQueue.makeCommandBuffer() 218 | 219 | // ---- GBUFFER ---- // 220 | // Draw our scene to texture 221 | // We use an encoder to 'encode' commands into a command buffer 222 | let gBufferEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: gBufferRenderPassDescriptor) 223 | gBufferEncoder.pushDebugGroup("GBuffer") // For debugging 224 | gBufferEncoder.label = "GBuffer" 225 | // Use the depth stencil state we created earlier 226 | gBufferEncoder.setDepthStencilState(gBufferDepthStencilState) 227 | gBufferEncoder.setCullMode(.back) 228 | // Set winding order 229 | gBufferEncoder.setFrontFacing(.counterClockwise) 230 | // Use the render pipeline state we created earlier 231 | gBufferEncoder.setRenderPipelineState(gBufferRenderPipeline) 232 | // Upload vertex data 233 | gBufferEncoder.setVertexBuffer(mesh.vertexBuffer, offset:0, at:0) 234 | // Upload uniforms 235 | gBufferEncoder.setVertexBytes(&constants, length: MemoryLayout.size, at: 1) 236 | // Bind the checkerboard texture (for the cube) 237 | gBufferEncoder.setFragmentTexture(texture, at: 0) 238 | // Draw our mesh 239 | gBufferEncoder.drawIndexedPrimitives(type: mesh.primitiveType, 240 | indexCount: mesh.indexCount, 241 | indexType: mesh.indexType, 242 | indexBuffer: mesh.indexBuffer, 243 | indexBufferOffset: 0) 244 | gBufferEncoder.popDebugGroup() // For debugging 245 | // Finish encoding commands in this encoder 246 | gBufferEncoder.endEncoding() 247 | 248 | // ---- BLIT ---- // 249 | // A 'drawable' is essentially a render target that can be displayed on the screen 250 | let currDrawable = view.currentDrawable 251 | 252 | // Blit our texture to the screen 253 | let blitEncoder = commandBuffer.makeBlitCommandEncoder() 254 | blitEncoder.pushDebugGroup("Blit") 255 | 256 | // Create a region that covers the entire texture we want to blit to the screen 257 | let origin: MTLOrigin = MTLOriginMake(0, 0, 0) 258 | let size: MTLSize = MTLSizeMake(Int(self.view.drawableSize.width), Int(self.view.drawableSize.height), 1) 259 | 260 | // Encode copy command, copying from our albedo texture to the 'current drawable' texture 261 | blitEncoder.copy(from: gBufferAlbedoTexture, sourceSlice: 0, sourceLevel: 0, sourceOrigin: origin, sourceSize: size, to: (currDrawable?.texture)!, destinationSlice: 0, destinationLevel: 0, destinationOrigin: origin) 262 | 263 | blitEncoder.endEncoding() 264 | blitEncoder.popDebugGroup() 265 | 266 | if let drawable = currDrawable 267 | { 268 | // Display our drawable to the screen 269 | commandBuffer.present(drawable) 270 | } 271 | 272 | // Finish encoding commands 273 | commandBuffer.commit() 274 | } 275 | ``` 276 | 277 | You should see the following image on screen: 278 | 279 | ![](../img/posts/implementing-deferred-shading-in-metal/albedo.png) 280 | 281 | Great, that's step 1. Let's move on to rendering the normal and position data to a texture. 282 | 283 | ### Normal and position 284 | 285 | First, the normal data. Add a normal texture to the renderer: 286 | 287 | **Renderer.swift** 288 | 289 | ``` swift 290 | // ... 291 | var time = TimeInterval(0.0) 292 | var constants = Constants() 293 | 294 | var gBufferAlbedoTexture: MTLTexture 295 | var gBufferDepthTexture: MTLTexture 296 | let gBufferDepthStencilState: MTLDepthStencilState 297 | var gBufferRenderPassDescriptor: MTLRenderPassDescriptor 298 | let gBufferRenderPipeline: MTLRenderPipelineState 299 | 300 | var gBufferNormalTexture: MTLTexture 301 | 302 | init?(mtkView: MTKView) { 303 | // ... 
304 | // Create GBuffer albedo texture 305 | // First we create a descriptor that describes the texture we're about to create 306 | let gBufferAlbedoTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 307 | gBufferAlbedoTextureDescriptor.sampleCount = 1 308 | gBufferAlbedoTextureDescriptor.storageMode = .private 309 | gBufferAlbedoTextureDescriptor.textureType = .type2D 310 | gBufferAlbedoTextureDescriptor.usage = [.renderTarget, .shaderRead] 311 | 312 | // Then we make the texture 313 | gBufferAlbedoTexture = device.makeTexture(descriptor: gBufferAlbedoTextureDescriptor) 314 | 315 | // Create GBuffer normal texture 316 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 317 | gBufferNormalTextureDescriptor.sampleCount = 1 318 | gBufferNormalTextureDescriptor.storageMode = .private 319 | gBufferNormalTextureDescriptor.textureType = .type2D 320 | gBufferNormalTextureDescriptor.usage = [.renderTarget, .shaderRead] 321 | 322 | gBufferNormalTexture = device.makeTexture(descriptor: gBufferNormalTextureDescriptor) 323 | 324 | // ... 325 | // Create GBuffer render pass descriptor 326 | gBufferRenderPassDescriptor = MTLRenderPassDescriptor() 327 | // Specify the properties of the first color attachment (our albedo texture) 328 | gBufferRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0) 329 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 330 | gBufferRenderPassDescriptor.colorAttachments[0].loadAction = .clear 331 | gBufferRenderPassDescriptor.colorAttachments[0].storeAction = .store 332 | // Specify the properties of the second color attachment (our normal texture) 333 | gBufferRenderPassDescriptor.colorAttachments[1].clearColor = MTLClearColorMake(0, 0, 0, 1) 334 | gBufferRenderPassDescriptor.colorAttachments[1].texture = gBufferNormalTexture 335 | gBufferRenderPassDescriptor.colorAttachments[1].loadAction = .clear 336 | gBufferRenderPassDescriptor.colorAttachments[1].storeAction = .store 337 | 338 | // ... 339 | // Create GBuffer render pipeline 340 | let gBufferRenderPipelineDesc = MTLRenderPipelineDescriptor() 341 | gBufferRenderPipelineDesc.colorAttachments[0].pixelFormat = .rgba8Unorm 342 | // Add the following line to describe the pixel format of the normal texture 343 | gBufferRenderPipelineDesc.colorAttachments[1].pixelFormat = .rgba16Float 344 | gBufferRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 345 | gBufferRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 346 | gBufferRenderPipelineDesc.sampleCount = 1 347 | gBufferRenderPipelineDesc.label = "GBuffer Render" 348 | gBufferRenderPipelineDesc.vertexFunction = library.makeFunction(name: "gBufferVert") 349 | gBufferRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "gBufferFrag") 350 | do { 351 | try gBufferRenderPipeline = device.makeRenderPipelineState(descriptor: gBufferRenderPipelineDesc) 352 | } catch let error { 353 | fatalError("Failed to create GBuffer pipeline state, error \(error)") 354 | } 355 | 356 | // ... 357 | } 358 | ``` 359 | 360 | **Shaders.metal** 361 | 362 | ``` swift 363 | // ... 364 | struct GBufferOut { 365 | float4 albedo [[color(0)]]; 366 | float4 normal [[color(1)]]; // Add normal texture output 367 | }; 368 | 369 | // ... 
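// (Note: each [[color(n)]] member of GBufferOut is written to colorAttachments[n] of the current
// render pass, so this new 'normal' member ends up in the gBufferNormalTexture we attached at index 1.)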
370 | 371 | fragment GBufferOut gBufferFrag(VertexOut in [[stage_in]], 372 | texture2d albedo_texture [[texture(0)]]) 373 | { 374 | constexpr sampler linear_sampler(min_filter::linear, mag_filter::linear); 375 | float4 albedo = albedo_texture.sample(linear_sampler, in.texCoords); 376 | 377 | GBufferOut output; 378 | 379 | output.albedo = albedo; 380 | output.normal = float4(in.normal, 1.0); // Add the following line to the fragment function 381 | 382 | return output; 383 | } 384 | ``` 385 | 386 | Now, change the blit copy command to copy from the normal texture, rather than the albedo texture. This will display the normal texture to the screen: 387 | 388 | **Renderer.swift** 389 | 390 | ``` swift 391 | func render(_ view: MTKView) { 392 | // ... 393 | blitEncoder.copy(from: gBufferNormalTexture, sourceSlice: 0, sourceLevel: 0, sourceOrigin: origin, sourceSize: size, to: (currDrawable?.texture)!, destinationSlice: 0, destinationLevel: 0, destinationOrigin: origin) 394 | // ... 395 | } 396 | ``` 397 | 398 | ![](../img/posts/implementing-deferred-shading-in-metal/normal.png) 399 | 400 | You might notice some artifacts, this has to do with the fact that we're blitting a 16-bit float directly to an 8-bit drawable texture. You can replace ".rgba16Float" to ".rgba8Unorm" in your code to confirm this. 401 | 402 | **Renderer.swift** 403 | 404 | ``` swift 405 | // ... 406 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 407 | // ... 408 | gBufferRenderPipelineDesc.colorAttachments[1].pixelFormat = .rgba8Unorm 409 | // ... 410 | ``` 411 | 412 | To finish up the GBuffer pass, let's render the world position of our scene to texture. 413 | 414 | **Renderer.swift** 415 | 416 | ``` swift 417 | @objc 418 | class Renderer : NSObject, MTKViewDelegate 419 | { 420 | // ... 421 | var gBufferAlbedoTexture: MTLTexture 422 | var gBufferNormalTexture: MTLTexture 423 | // Add position texture 424 | var gBufferPositionTexture: MTLTexture 425 | var gBufferDepthTexture: MTLTexture 426 | let gBufferDepthStencilState: MTLDepthStencilState 427 | var gBufferRenderPassDescriptor: MTLRenderPassDescriptor 428 | let gBufferRenderPipeline: MTLRenderPipelineState 429 | 430 | 431 | init?(mtkView: MTKView) { 432 | // ... 
433 | // Create GBuffer normal texture 434 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 435 | gBufferNormalTextureDescriptor.sampleCount = 1 436 | gBufferNormalTextureDescriptor.storageMode = .private 437 | gBufferNormalTextureDescriptor.textureType = .type2D 438 | gBufferNormalTextureDescriptor.usage = [.renderTarget, .shaderRead] 439 | 440 | gBufferNormalTexture = device.makeTexture(descriptor: gBufferNormalTextureDescriptor) 441 | 442 | // Create GBuffer position texture 443 | let gBufferPositionTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 444 | gBufferPositionTextureDescriptor.sampleCount = 1 445 | gBufferPositionTextureDescriptor.storageMode = .private 446 | gBufferPositionTextureDescriptor.textureType = .type2D 447 | gBufferPositionTextureDescriptor.usage = [.renderTarget, .shaderRead] 448 | 449 | gBufferPositionTexture = device.makeTexture(descriptor: gBufferPositionTextureDescriptor) 450 | 451 | // ... 452 | 453 | // Create GBuffer render pass descriptor 454 | gBufferRenderPassDescriptor = MTLRenderPassDescriptor() 455 | // Specify the properties of the first color attachment (our albedo texture) 456 | gBufferRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1.0) 457 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 458 | gBufferRenderPassDescriptor.colorAttachments[0].loadAction = .clear 459 | gBufferRenderPassDescriptor.colorAttachments[0].storeAction = .store 460 | // Specify the properties of the second color attachment (our normal texture) 461 | gBufferRenderPassDescriptor.colorAttachments[1].clearColor = MTLClearColorMake(0, 0, 0, 1) 462 | gBufferRenderPassDescriptor.colorAttachments[1].texture = gBufferNormalTexture 463 | gBufferRenderPassDescriptor.colorAttachments[1].loadAction = .clear 464 | gBufferRenderPassDescriptor.colorAttachments[1].storeAction = .store 465 | // Specify the properties of the third color attachment (our position texture) 466 | gBufferRenderPassDescriptor.colorAttachments[2].clearColor = MTLClearColorMake(0, 0, 0, 1) 467 | gBufferRenderPassDescriptor.colorAttachments[2].texture = gBufferPositionTexture 468 | gBufferRenderPassDescriptor.colorAttachments[2].loadAction = .clear 469 | gBufferRenderPassDescriptor.colorAttachments[2].storeAction = .store 470 | 471 | // ... 472 | 473 | // Create GBuffer render pipeline 474 | let gBufferRenderPipelineDesc = MTLRenderPipelineDescriptor() 475 | gBufferRenderPipelineDesc.colorAttachments[0].pixelFormat = .rgba8Unorm 476 | gBufferRenderPipelineDesc.colorAttachments[1].pixelFormat = .rgba16Float 477 | // Add this line to describe the pixel format of the position texture 478 | gBufferRenderPipelineDesc.colorAttachments[2].pixelFormat = .rgba16Float 479 | 480 | // ... 481 | } 482 | 483 | // ... 484 | } 485 | ``` 486 | 487 | Again, we'll have to make a few adjustments to our shader. Things get a little bit more complicated. 488 | 489 | **Shaders.metal** 490 | 491 | ``` swift 492 | // ... 493 | #include 494 | using namespace metal; 495 | 496 | struct Constants { 497 | float4x4 modelViewProjectionMatrix; 498 | float3x3 normalMatrix; 499 | // Add space for a model matrix in our constants structs 500 | float4x4 modelMatrix; 501 | }; 502 | 503 | // ... 
504 | struct VertexOut { 505 | float4 position [[position]]; 506 | float3 normal; 507 | float2 texCoords; 508 | // Add world position to our vertex shader out struct 509 | float4 worldPosition; 510 | }; 511 | 512 | struct GBufferOut { 513 | float4 albedo [[color(0)]]; 514 | float4 normal [[color(1)]]; 515 | // Add another texture output to our GBuffer struct 516 | float4 position [[color(2)]]; 517 | }; 518 | 519 | vertex VertexOut gBufferVert(const device VertexIn *vertices [[buffer(0)]], 520 | const device Constants &uniforms [[buffer(1)]], 521 | unsigned int vid [[vertex_id]]) { 522 | VertexOut out; 523 | VertexIn vin = vertices[vid]; 524 | 525 | float4 inPosition = float4(vin.position, 1.0); 526 | out.position = uniforms.modelViewProjectionMatrix * inPosition; 527 | float3 normal = vin.normal; 528 | float3 eyeNormal = normalize(uniforms.normalMatrix * normal); 529 | 530 | out.normal = eyeNormal; 531 | out.texCoords = vin.texCoords; 532 | // Calculate the world position of this vertex 533 | out.worldPosition = uniforms.modelMatrix * inPosition; 534 | 535 | return out; 536 | } 537 | 538 | fragment GBufferOut gBufferFrag(VertexOut in [[stage_in]], 539 | texture2d albedo_texture [[texture(0)]]) 540 | { 541 | constexpr sampler linear_sampler(min_filter::linear, mag_filter::linear); 542 | float4 albedo = albedo_texture.sample(linear_sampler, in.texCoords); 543 | 544 | GBufferOut output; 545 | 546 | output.albedo = albedo; 547 | output.normal = float4(in.normal, 1.0); 548 | // Output this fragment's world position 549 | output.position = in.worldPosition; 550 | 551 | return output; 552 | } 553 | ``` 554 | 555 | Because we changed the Constants struct in the shader, we'll have to change it in 'Renderer.swift' too: 556 | 557 | **Renderer.swift** 558 | 559 | ``` swift 560 | // ... 561 | import Metal 562 | import simd 563 | import MetalKit 564 | 565 | struct Constants { 566 | var modelViewProjectionMatrix = matrix_identity_float4x4 567 | var normalMatrix = matrix_identity_float3x3 568 | // Add model matrix to Constants struct 569 | var modelMatrix = matrix_identity_float4x4 570 | } 571 | 572 | // ... 573 | ``` 574 | 575 | We also need to make sure that we update the model matrix for the cube. 576 | 577 | **Renderer.swift** 578 | 579 | ``` swift 580 | func updateWithTimestep(_ timestep: TimeInterval) 581 | { 582 | // We keep track of time so we can animate the various transformations 583 | time = time + timestep 584 | let modelToWorldMatrix = matrix4x4_rotation(Float(time) * 0.5, vector_float3(0.7, 1, 0)) 585 | 586 | // So that the figure doesn't get distorted when the window changes size or rotates, 587 | // we factor the current aspect ration into our projection matrix. We also select 588 | // sensible values for the vertical view angle and the distances to the near and far planes. 
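        // (Aside: bounds.size is in points while drawableSize is in pixels; the aspect
        // ratio is the same either way, so using the view bounds here is fine.)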
589 | let viewSize = self.view.bounds.size 590 | let aspectRatio = Float(viewSize.width / viewSize.height) 591 | let verticalViewAngle = radians_from_degrees(65) 592 | let nearZ: Float = 0.1 593 | let farZ: Float = 100.0 594 | let projectionMatrix = matrix_perspective(verticalViewAngle, aspectRatio, nearZ, farZ) 595 | 596 | let viewMatrix = matrix_look_at(0, 0, 2.5, 0, 0, 0, 0, 1, 0) 597 | 598 | // The combined model-view-projection matrix moves our vertices from model space into clip space 599 | let mvMatrix = matrix_multiply(viewMatrix, modelToWorldMatrix); 600 | constants.modelViewProjectionMatrix = matrix_multiply(projectionMatrix, mvMatrix) 601 | constants.normalMatrix = matrix_inverse_transpose(matrix_upper_left_3x3(mvMatrix)) 602 | 603 | // Make sure to update model matrix 604 | constants.modelMatrix = modelToWorldMatrix 605 | } 606 | ``` 607 | 608 | Finally, we can change our blit copy command to copy from the position texture and view the results. Again, you will notice some artifacts and again you can temporarily change the format of the texture to "rgba8Unorm" to avoid these artifacts. 609 | 610 | **Renderer.swift** 611 | 612 | ``` swift 613 | func render(_ view: MTKView) { 614 | // ... 615 | blitEncoder.copy(from: gBufferPositionTexture, sourceSlice: 0, sourceLevel: 0, sourceOrigin: origin, sourceSize: size, to: (currDrawable?.texture)!, destinationSlice: 0, destinationLevel: 0, destinationOrigin: origin) 616 | // ... 617 | } 618 | ``` 619 | 620 | ![](../img/posts/implementing-deferred-shading-in-metal/position.png) 621 | 622 | Great! We've finished rendering the GBuffer data from our scene. Now onto the stencil pass. 623 | 624 | Stencil pass 625 | ------------ 626 | 627 | ### Prepare the light volumes 628 | 629 | Before we can render any lights, we need to prepare the meshes that will represent the light volumes. For now we're only going to represent point lights using spheres. It's possible to represent other light types using different volumes (e.g. represent directional lights using a quad that covers the entire screen). 630 | 631 | The easiest way to do this is to modify the Mesh class that is included in the example: 632 | 633 | **Mesh.swift** 634 | 635 | ``` swift 636 | // ... 637 | class Mesh { 638 | var vertexBuffer: MTLBuffer 639 | var vertexDescriptor: MTLVertexDescriptor 640 | var primitiveType: MTLPrimitiveType 641 | var indexBuffer: MTLBuffer 642 | var indexCount: Int 643 | var indexType: MTLIndexType 644 | 645 | init?(cubeWithSize size: Float, device: MTLDevice) 646 | { 647 | // ... 
648 | } 649 | 650 | // Add a new init method for spheres 651 | init?(sphereWithSize size: Float, device: MTLDevice) 652 | { 653 | let allocator = MTKMeshBufferAllocator(device: device) 654 | 655 | let mdlMesh = MDLMesh(sphereWithExtent: vector_float3(size, size, size), segments: vector_uint2(30, 30), inwardNormals: false, geometryType: .triangles, allocator: allocator) 656 | 657 | do { 658 | let mtkMesh = try MTKMesh(mesh: mdlMesh, device: device) 659 | let mtkVertexBuffer = mtkMesh.vertexBuffers[0] 660 | let submesh = mtkMesh.submeshes[0] 661 | let mtkIndexBuffer = submesh.indexBuffer 662 | 663 | vertexBuffer = mtkVertexBuffer.buffer 664 | vertexBuffer.label = "Mesh Vertices" 665 | 666 | vertexDescriptor = MTKMetalVertexDescriptorFromModelIO(mdlMesh.vertexDescriptor) 667 | primitiveType = submesh.primitiveType 668 | indexBuffer = mtkIndexBuffer.buffer 669 | indexBuffer.label = "Mesh Indices" 670 | 671 | indexCount = submesh.indexCount 672 | indexType = submesh.indexType 673 | } catch _ { 674 | return nil // Unable to create MTK mesh from MDL mesh 675 | } 676 | } 677 | } 678 | ``` 679 | 680 | Now we can create a light volume mesh in our renderer: 681 | 682 | **Renderer.swift** 683 | 684 | ``` swift 685 | // ... 686 | let gBufferRenderPipeline: MTLRenderPipelineState 687 | 688 | let lightSphere: Mesh 689 | 690 | init?(mtkView: MTKView) { 691 | // ... 692 | // Make a unit sphere, we'll scale each light volume by it's radius in the vertex shader 693 | lightSphere = Mesh(sphereWithSize: 1.0, device: device)! 694 | 695 | super.init() 696 | 697 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 698 | view.delegate = self 699 | view.device = device 700 | } 701 | ``` 702 | 703 | And add a number of properties for our lights: 704 | 705 | **Renderer.swift** 706 | 707 | ``` swift 708 | // ... 709 | struct Constants { 710 | var modelViewProjectionMatrix = matrix_identity_float4x4 711 | var normalMatrix = matrix_identity_float3x3 712 | var modelMatrix = matrix_identity_float4x4 713 | } 714 | 715 | struct PointLight { 716 | var worldPosition = float3(0.0, 0.0, 0.0) 717 | var radius = Float(1.0) 718 | } 719 | 720 | @objc 721 | class Renderer : NSObject, MTKViewDelegate 722 | { 723 | // ... 724 | let lightSphere: Mesh 725 | 726 | let lightNumber = 2 727 | var lightConstants = [Constants]() 728 | var lightProperties = [PointLight]() 729 | } 730 | 731 | init?(mtkView: MTKView) { 732 | // ... 733 | lightSphere = Mesh(sphereWithSize: 1.0, device: device)! 734 | 735 | // Add space for each light's data 736 | for _ in 0...(lightNumber - 1) { 737 | lightProperties.append(PointLight()) 738 | lightConstants.append(Constants()) 739 | } 740 | 741 | // Hard-code position and radius 742 | lightProperties[0].worldPosition = float3(0.0, 0.4, 0.0) 743 | lightProperties[0].radius = 0.7 744 | 745 | lightProperties[1].worldPosition = float3(-0.4, 0.0, 0.0) 746 | lightProperties[1].radius = 0.6 747 | 748 | super.init() 749 | 750 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 751 | view.delegate = self 752 | view.device = device 753 | } 754 | ``` 755 | 756 | Before we do anything further, let's render these lights in the GBuffer pass so that we can ensure they're working as we'd expect. 757 | 758 | **Renderer.swift** 759 | 760 | ``` swift 761 | // ... 762 | func updateWithTimestep(_ timestep: TimeInterval) 763 | { 764 | // ... 
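        // Note: the viewMatrix and projectionMatrix used below are assumed to be the
        // same ones computed earlier in this function for the cube (elided by "// ...").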
765 | 766 | // Update light constants 767 | for i in 0...(lightNumber-1) { 768 | let lightModelToWorldMatrix = matrix_multiply(matrix4x4_translation(lightProperties[i].worldPosition.x, lightProperties[i].worldPosition.y, lightProperties[i].worldPosition.z), matrix4x4_scale(vector3(lightProperties[i].radius, lightProperties[i].radius, lightProperties[i].radius))) 769 | let lightMvMatrix = matrix_multiply(viewMatrix, lightModelToWorldMatrix); 770 | lightConstants[i].modelViewProjectionMatrix = matrix_multiply(projectionMatrix, lightMvMatrix) 771 | lightConstants[i].normalMatrix = matrix_inverse_transpose(matrix_upper_left_3x3(lightMvMatrix)) 772 | lightConstants[i].modelMatrix = lightModelToWorldMatrix; 773 | } 774 | } 775 | 776 | func render(_ view: MTKView) { 777 | // ... 778 | 779 | // ---- GBUFFER ---- // 780 | // Draw our scene to texture 781 | // We use an encoder to 'encode' commands into a command buffer 782 | let gBufferEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: gBufferRenderPassDescriptor) 783 | gBufferEncoder.pushDebugGroup("GBuffer") // For debugging 784 | gBufferEncoder.label = "GBuffer" 785 | // Use the depth stencil state we created earlier 786 | gBufferEncoder.setDepthStencilState(gBufferDepthStencilState) 787 | gBufferEncoder.setCullMode(.back) 788 | // Set winding order 789 | gBufferEncoder.setFrontFacing(.counterClockwise) 790 | // Use the render pipeline state we created earlier 791 | gBufferEncoder.setRenderPipelineState(gBufferRenderPipeline) 792 | // Upload vertex data 793 | gBufferEncoder.setVertexBuffer(mesh.vertexBuffer, offset:0, at:0) 794 | // Upload uniforms 795 | gBufferEncoder.setVertexBytes(&constants, length: MemoryLayout.size, at: 1) 796 | // Bind the checkerboard texture (for the cube) 797 | gBufferEncoder.setFragmentTexture(texture, at: 0) 798 | // Draw our mesh 799 | gBufferEncoder.drawIndexedPrimitives(type: mesh.primitiveType, 800 | indexCount: mesh.indexCount, 801 | indexType: mesh.indexType, 802 | indexBuffer: mesh.indexBuffer, 803 | indexBufferOffset: 0) 804 | // Draw our light meshes 805 | // Upload light vertex data 806 | gBufferEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 807 | for i in 0...(lightNumber - 1) { 808 | // Upload uniforms 809 | gBufferEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 810 | gBufferEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, 811 | indexCount: lightSphere.indexCount, 812 | indexType: lightSphere.indexType, 813 | indexBuffer: lightSphere.indexBuffer, 814 | indexBufferOffset: 0) 815 | } 816 | gBufferEncoder.popDebugGroup() // For debugging 817 | // Finish encoding commands in this encoder 818 | gBufferEncoder.endEncoding() 819 | 820 | // ... 821 | } 822 | ``` 823 | 824 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-normal.png) 825 | 826 | Looks alright to me! Note that I'm visualizing the normals in the above image. 827 | 828 | When you're happy with the light volumes, remove them from the above drawing code. 
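If you'd like a quick sanity check that the per-light constants really are rebuilt every frame, one optional experiment (not part of the original code, and using the `time`, `lightProperties` and `lightConstants` members introduced above) is to animate a light inside `updateWithTimestep(_:)`, just before the light-constants loop, and watch its debug volume move:

``` swift
// Hypothetical debugging tweak, placed just before the "Update light constants" loop:
// swing light 0 back and forth along the x-axis so its volume visibly moves each frame.
lightProperties[0].worldPosition.x = 0.5 * sin(Float(time))
```

Because the loop recomputes each light's model matrix from `lightProperties` every frame, the sphere should slide from side to side. Remember to remove this along with the rest of the debug drawing.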
829 | 830 | ### Make the stencil pass 831 | 832 | Let's start with the shaders: 833 | 834 | **Shaders.metal** 835 | 836 | struct StencilPassOut { 837 | float4 position [[position]]; 838 | }; 839 | 840 | vertex StencilPassOut stencilPassVert(const device VertexIn *vertices [[buffer(0)]], 841 | const device Constants &uniforms [[buffer(1)]], 842 | unsigned int vid [[vertex_id]]) { 843 | StencilPassOut out; 844 | 845 | out.position = uniforms.modelViewProjectionMatrix * float4(vertices[vid].position, 1.0); 846 | 847 | return out; 848 | } 849 | 850 | fragment void stencilPassNullFrag(StencilPassOut in [[stage_in]]) 851 | { 852 | } 853 | 854 | Note that the fragment shader doesn't output anything, it has no need to. This pass will only populate the stencil buffer. 855 | 856 | Instead of creating a new texture for our stencil buffer, let's piggyback on an existing one: 857 | 858 | **Renderer.swift** 859 | 860 | ``` swift 861 | init?(mtkView: MTKView) { 862 | // ... 863 | 864 | // Create GBuffer depth (and stencil) texture 865 | let gBufferDepthDesc: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8, width: drawableWidth, height: drawableHeight, mipmapped: false) 866 | gBufferDepthDesc.sampleCount = 1 867 | gBufferDepthDesc.storageMode = .private 868 | gBufferDepthDesc.textureType = .type2D 869 | gBufferDepthDesc.usage = [.renderTarget, .shaderRead] 870 | 871 | gBufferDepthTexture = device.makeTexture(descriptor: gBufferDepthDesc) 872 | 873 | // ... 874 | } 875 | ``` 876 | 877 | Ok, next we'll need to configure the behaviour of the stencil pass: 878 | 879 | **Renderer.swift** 880 | 881 | ``` swift 882 | // ... 883 | let stencilPassDepthStencilState: MTLDepthStencilState 884 | let stencilRenderPassDescriptor: MTLRenderPassDescriptor 885 | let stencilRenderPipeline: MTLRenderPipelineState 886 | 887 | init?(mtkView: MTKView) { 888 | // ... 889 | 890 | /* Be very careful with these operations, I clear the stencil buffer to a value of 0, so it's 891 | * very important that I set the depthFailureOperation to 'decrementWRAP' and 'incrementWRAP' 892 | * for the front and back face stencil operations (respectively) rather than 'decrementClamp' 893 | * and 'incrementClamp'. This is because we don't know in which order these operations will 894 | * occur. Let's say we use clamping: 895 | * 896 | * - Back then front order - two failures, expected stencil buffer value: 0 897 | * - Stencil buffer starts at 0 898 | * - Back face depth test fails first: stencil buffer incremented to 1 899 | * - Front face depth test fails second: stencil buffer decremented to 0 900 | * - Stencil buffer final value = 0 (== expected value) - all good! 901 | * 902 | * - Front then back order - two failures, expected stencil buffer value: 0 903 | * - Stencil buffer starts at 0 904 | * - Front face depth test fails first: stencil buffer decremented and clamped to 0 905 | * - Back face depth test fails second: stencil buffer incremented to 1 906 | * - Stencil buffer final value = 1 (!= expected value) - problem here! 907 | * 908 | * Wrapping does not have this issue. There are of course other ways to avoid this problem. 
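 * (One such alternative, not used here: clear the stencil buffer to a mid-range value
 * such as 128 and set the stencil reference value to match, so clamped increment /
 * decrement operations never saturate at 0 or 255 for ordinary scenes.)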
909 | */ 910 | // Decrement when front faces depth fail 911 | let frontFaceStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 912 | frontFaceStencilOp.stencilCompareFunction = .always // Stencil test always succeeds, only concerned about depth test 913 | frontFaceStencilOp.stencilFailureOperation = .keep // Stencil test always succeeds 914 | frontFaceStencilOp.depthStencilPassOperation = .keep // Do nothing if depth test passes 915 | frontFaceStencilOp.depthFailureOperation = .decrementWrap // Decrement if depth test fails 916 | 917 | // Increment when back faces depth fail 918 | let backFaceStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 919 | backFaceStencilOp.stencilCompareFunction = .always // Stencil test always succeeds, only concerned about depth test 920 | backFaceStencilOp.stencilFailureOperation = .keep // Stencil test always succeeds 921 | backFaceStencilOp.depthStencilPassOperation = .keep // Do nothing if depth test passes 922 | backFaceStencilOp.depthFailureOperation = .incrementWrap // Increment if depth test fails 923 | 924 | let stencilPassDepthStencilStateDesc: MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 925 | stencilPassDepthStencilStateDesc.isDepthWriteEnabled = false // Only concerned with modifying stencil buffer 926 | stencilPassDepthStencilStateDesc.depthCompareFunction = .lessEqual // Only perform stencil op when depth function fails 927 | stencilPassDepthStencilStateDesc.frontFaceStencil = frontFaceStencilOp // For front-facing polygons 928 | stencilPassDepthStencilStateDesc.backFaceStencil = backFaceStencilOp // For back-facing polygons 929 | stencilPassDepthStencilState = device.makeDepthStencilState(descriptor: stencilPassDepthStencilStateDesc) 930 | 931 | let stencilRenderPipelineDesc = MTLRenderPipelineDescriptor() 932 | stencilRenderPipelineDesc.label = "Stencil Pipeline" 933 | stencilRenderPipelineDesc.sampleCount = view.sampleCount 934 | stencilRenderPipelineDesc.vertexFunction = library.makeFunction(name: "stencilPassVert") 935 | stencilRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "stencilPassNullFrag") 936 | stencilRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 937 | stencilRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 938 | do { 939 | try stencilRenderPipeline = device.makeRenderPipelineState(descriptor: stencilRenderPipelineDesc) 940 | } catch let error { 941 | fatalError("Failed to create Stencil pipeline state, error \(error)") 942 | } 943 | 944 | stencilRenderPassDescriptor = MTLRenderPassDescriptor() 945 | stencilRenderPassDescriptor.depthAttachment.loadAction = .load // Load up depth information from GBuffer pass 946 | stencilRenderPassDescriptor.depthAttachment.storeAction = .store // We'll use depth information in later passes 947 | stencilRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 948 | stencilRenderPassDescriptor.stencilAttachment.loadAction = .clear // Contents of stencil buffer unkown at this point, clear it 949 | stencilRenderPassDescriptor.stencilAttachment.storeAction = .store // Store the stencil buffer so that the next pass can use it 950 | stencilRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 951 | 952 | super.init() 953 | 954 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 955 | view.delegate = self 956 | view.device = device 957 | } 958 | ``` 959 | 960 | Finally, we do the stencil pass: 961 | 962 | **Renderer.swift** 963 | 964 | ``` swift 
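// Overview of this listing: the frame is now encoded in three stages, in order:
// the GBuffer pass, the new stencil pass, and finally the existing blit to the drawable.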
965 | func render(_ view: MTKView) { 966 | // ... 967 | 968 | // ---- GBUFFER ---- // 969 | // Draw our scene to texture 970 | // We use an encoder to 'encode' commands into a command buffer 971 | let gBufferEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: gBufferRenderPassDescriptor) 972 | gBufferEncoder.pushDebugGroup("GBuffer") // For debugging 973 | gBufferEncoder.label = "GBuffer" 974 | // Use the depth stencil state we created earlier 975 | gBufferEncoder.setDepthStencilState(gBufferDepthStencilState) 976 | gBufferEncoder.setCullMode(.back) 977 | // Set winding order 978 | gBufferEncoder.setFrontFacing(.counterClockwise) 979 | // Use the render pipeline state we created earlier 980 | gBufferEncoder.setRenderPipelineState(gBufferRenderPipeline) 981 | // Upload vertex data 982 | gBufferEncoder.setVertexBuffer(mesh.vertexBuffer, offset:0, at:0) 983 | // Upload uniforms 984 | gBufferEncoder.setVertexBytes(&constants, length: MemoryLayout.size, at: 1) 985 | // Bind the checkerboard texture (for the cube) 986 | gBufferEncoder.setFragmentTexture(texture, at: 0) 987 | // Draw our mesh 988 | gBufferEncoder.drawIndexedPrimitives(type: mesh.primitiveType, 989 | indexCount: mesh.indexCount, 990 | indexType: mesh.indexType, 991 | indexBuffer: mesh.indexBuffer, 992 | indexBufferOffset: 0) 993 | gBufferEncoder.popDebugGroup() // For debugging 994 | // Finish encoding commands in this encoder 995 | gBufferEncoder.endEncoding() 996 | 997 | // ---- STENCIL ---- // 998 | let stencilPassEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: stencilRenderPassDescriptor) 999 | stencilPassEncoder.pushDebugGroup("Stencil Pass") 1000 | stencilPassEncoder.label = "Stencil Pass" 1001 | stencilPassEncoder.setDepthStencilState(stencilPassDepthStencilState) 1002 | // We want to draw back-facing AND front-facing polygons 1003 | stencilPassEncoder.setCullMode(.none) 1004 | stencilPassEncoder.setFrontFacing(.counterClockwise) 1005 | stencilPassEncoder.setRenderPipelineState(stencilRenderPipeline) 1006 | stencilPassEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 1007 | 1008 | for i in 0...(lightNumber-1) { 1009 | stencilPassEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 1010 | stencilPassEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, indexCount: lightSphere.indexCount, indexType: lightSphere.indexType, indexBuffer: lightSphere.indexBuffer, indexBufferOffset: 0) 1011 | } 1012 | 1013 | stencilPassEncoder.popDebugGroup() 1014 | stencilPassEncoder.endEncoding() 1015 | 1016 | // ---- BLIT ---- // 1017 | // Blit our texture to the screen 1018 | let blitEncoder = commandBuffer.makeBlitCommandEncoder() 1019 | blitEncoder.pushDebugGroup("Blit") 1020 | 1021 | // ... 1022 | } 1023 | ``` 1024 | 1025 | Of course we won't see anything yet, we'll have to trust that the stencil buffer is filled for now. 1026 | 1027 | Lighting pass 1028 | ------------- 1029 | 1030 | ### A first pass 1031 | 1032 | Let's start off with some simplified shaders for our lighting pass: 1033 | 1034 | **Shaders.metal** 1035 | 1036 | ``` swift 1037 | // ... 
1038 | vertex StencilPassOut stencilPassVert(const device VertexIn *vertices [[buffer(0)]], 1039 | const device Constants &uniforms [[buffer(1)]], 1040 | unsigned int vid [[vertex_id]]) { 1041 | StencilPassOut out; 1042 | 1043 | out.position = uniforms.modelViewProjectionMatrix * float4(vertices[vid].position, 1.0); 1044 | 1045 | return out; 1046 | } 1047 | 1048 | fragment void stencilPassNullFrag(StencilPassOut in [[stage_in]]) 1049 | { 1050 | } 1051 | 1052 | // Used to calculate texture sampling co-ordinates 1053 | struct LightFragmentInput { 1054 | float2 screenSize; 1055 | }; 1056 | 1057 | fragment float4 lightVolumeFrag(StencilPassOut in [[stage_in]], 1058 | constant LightFragmentInput *lightData [[ buffer(0) ]], 1059 | texture2d albedoTexture [[ texture(0) ]], 1060 | texture2d normalsTexture [[ texture(1) ]], 1061 | texture2d positionTexture [[ texture(2) ]]) 1062 | { 1063 | // We sample albedo, normals and position from the position of this fragment, normalized to be 0-1 within screen space 1064 | float2 sampleCoords = in.position.xy / lightData->screenSize; 1065 | 1066 | constexpr sampler texSampler; 1067 | 1068 | // Multiply by 0.5 so we can see the lights blending 1069 | float3 albedo = float3(0.5) * float3(albedoTexture.sample(texSampler, sampleCoords)); 1070 | 1071 | // Gamma correct the texture 1072 | float3 gammaCorrect = pow(albedo, (1.0/2.2)); 1073 | return float4(gammaCorrect, 1.0); 1074 | } 1075 | ``` 1076 | 1077 | We can actually re-use the vertex shader from our stencil pass. In the fragment shader, we use the screen size to normalize the fragment position. The result of this calculation we use as the texture co-ordinates for our sampling of the various input textures from the GBuffer pass. 1078 | 1079 | **Renderer.swift** 1080 | 1081 | ``` swift 1082 | // Add light fragment input struct 1083 | struct LightFragmentInput { 1084 | var screenSize = float2(1, 1) 1085 | } 1086 | 1087 | @objc 1088 | class Renderer : NSObject, MTKViewDelegate 1089 | { 1090 | let lightSphere: Mesh 1091 | let lightNumber = 2 1092 | var lightConstants = [Constants]() 1093 | var lightProperties = [PointLight]() 1094 | // Add light fragment input property 1095 | var lightFragmentInput = LightFragmentInput() 1096 | 1097 | let stencilPassDepthStencilState: MTLDepthStencilState 1098 | let stencilRenderPassDescriptor: MTLRenderPassDescriptor 1099 | let stencilRenderPipeline: MTLRenderPipelineState 1100 | 1101 | init?(mtkView: MTKView) { 1102 | // ... 1103 | 1104 | lightFragmentInput.screenSize.x = Float(view.drawableSize.width) 1105 | lightFragmentInput.screenSize.y = Float(view.drawableSize.height) 1106 | 1107 | super.init() 1108 | 1109 | // Now that all of our members are initialized, set ourselves as the drawing delegate of the view 1110 | view.delegate = self 1111 | view.device = device 1112 | } 1113 | ``` 1114 | 1115 | Now we can prepare the lighting pass: 1116 | 1117 | **Renderer.swift** 1118 | 1119 | ``` swift 1120 | // ... 1121 | let lightVolumeDepthStencilState: MTLDepthStencilState 1122 | var lightVolumeRenderPassDescriptor: MTLRenderPassDescriptor = MTLRenderPassDescriptor() 1123 | let lightVolumeRenderPipeline: MTLRenderPipelineState 1124 | // The final texture we'll blit to the screen 1125 | var compositeTexture: MTLTexture 1126 | 1127 | init?(mtkView: MTKView) { 1128 | // ... 
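        // screenSize must match the dimensions of the GBuffer textures: the lighting
        // fragment shader divides each fragment's position by it to get 0-1 sampling
        // coordinates, which is why the view's drawableSize (in pixels) is used here.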
1129 | lightFragmentInput.screenSize.x = Float(view.drawableSize.width) 1130 | lightFragmentInput.screenSize.y = Float(view.drawableSize.height) 1131 | 1132 | // Create composite texture 1133 | let compositeTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 1134 | compositeTextureDescriptor.sampleCount = 1 1135 | compositeTextureDescriptor.storageMode = .private 1136 | compositeTextureDescriptor.textureType = .type2D 1137 | compositeTextureDescriptor.usage = [.renderTarget] 1138 | 1139 | compositeTexture = device.makeTexture(descriptor: compositeTextureDescriptor) 1140 | 1141 | // Build light volume depth-stencil state 1142 | let lightVolumeStencilOp: MTLStencilDescriptor = MTLStencilDescriptor() 1143 | lightVolumeStencilOp.stencilCompareFunction = .notEqual // Only pass if not equal to reference value (ref. value is 0 by default) 1144 | lightVolumeStencilOp.stencilFailureOperation = .keep // Don't modify stencil value at all 1145 | lightVolumeStencilOp.depthStencilPassOperation = .keep 1146 | lightVolumeStencilOp.depthFailureOperation = .keep // Depth test is set to always succeed 1147 | 1148 | let lightVolumeDepthStencilStateDesc: MTLDepthStencilDescriptor = MTLDepthStencilDescriptor() 1149 | lightVolumeDepthStencilStateDesc.isDepthWriteEnabled = false // Don't modify depth buffer 1150 | lightVolumeDepthStencilStateDesc.depthCompareFunction = .always // Stencil buffer will be used to determine if we should light this fragment, ignore depth value (always pass) 1151 | lightVolumeDepthStencilStateDesc.backFaceStencil = lightVolumeStencilOp 1152 | lightVolumeDepthStencilStateDesc.frontFaceStencil = lightVolumeStencilOp 1153 | lightVolumeDepthStencilState = device.makeDepthStencilState(descriptor: lightVolumeDepthStencilStateDesc) 1154 | 1155 | // Build light volume render pass descriptor 1156 | // Get current render pass descriptor instead 1157 | lightVolumeRenderPassDescriptor = MTLRenderPassDescriptor() 1158 | lightVolumeRenderPassDescriptor.colorAttachments[0].clearColor = MTLClearColorMake(0.0, 0.0, 0.0, 1) 1159 | lightVolumeRenderPassDescriptor.colorAttachments[0].texture = compositeTexture 1160 | lightVolumeRenderPassDescriptor.colorAttachments[0].loadAction = .clear 1161 | lightVolumeRenderPassDescriptor.colorAttachments[0].storeAction = .store // Store for blitting 1162 | lightVolumeRenderPassDescriptor.depthAttachment.clearDepth = 1.0 1163 | // Aren't using depth 1164 | /* 1165 | lightVolumeRenderPassDescriptor.depthAttachment.loadAction = .load 1166 | lightVolumeRenderPassDescriptor.depthAttachment.storeAction = .store 1167 | lightVolumeRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 1168 | */ 1169 | lightVolumeRenderPassDescriptor.stencilAttachment.loadAction = .load 1170 | lightVolumeRenderPassDescriptor.stencilAttachment.storeAction = .dontCare // Aren't using stencil buffer after this point 1171 | lightVolumeRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 1172 | 1173 | // Build light volume render pipeline 1174 | let lightVolumeRenderPipelineDesc = MTLRenderPipelineDescriptor() 1175 | lightVolumeRenderPipelineDesc.colorAttachments[0].pixelFormat = .bgra8Unorm 1176 | // We need to enable blending as each light volume is additive (it 'adds' to the contribution of the previous one) 1177 | lightVolumeRenderPipelineDesc.colorAttachments[0].isBlendingEnabled = true 1178 | 
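        // Additive blending: result = (source * 1) + (destination * 1) for both colour
        // and alpha, so each light volume adds its contribution on top of the last.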
lightVolumeRenderPipelineDesc.colorAttachments[0].rgbBlendOperation = .add 1179 | lightVolumeRenderPipelineDesc.colorAttachments[0].sourceRGBBlendFactor = .one 1180 | lightVolumeRenderPipelineDesc.colorAttachments[0].destinationRGBBlendFactor = .one 1181 | lightVolumeRenderPipelineDesc.colorAttachments[0].alphaBlendOperation = .add 1182 | lightVolumeRenderPipelineDesc.colorAttachments[0].sourceAlphaBlendFactor = .one 1183 | lightVolumeRenderPipelineDesc.colorAttachments[0].destinationAlphaBlendFactor = .one 1184 | lightVolumeRenderPipelineDesc.depthAttachmentPixelFormat = .depth32Float_stencil8 1185 | lightVolumeRenderPipelineDesc.stencilAttachmentPixelFormat = .depth32Float_stencil8 1186 | lightVolumeRenderPipelineDesc.sampleCount = 1 1187 | lightVolumeRenderPipelineDesc.label = "Light Volume Render" 1188 | lightVolumeRenderPipelineDesc.vertexFunction = library.makeFunction(name: "stencilPassVert") 1189 | lightVolumeRenderPipelineDesc.fragmentFunction = library.makeFunction(name: "lightVolumeFrag") 1190 | do { 1191 | try lightVolumeRenderPipeline = device.makeRenderPipelineState(descriptor: lightVolumeRenderPipelineDesc) 1192 | } catch let error { 1193 | fatalError("Failed to create lightVolume pipeline state, error \(error)") 1194 | } 1195 | 1196 | super.init() 1197 | // ... 1198 | } 1199 | ``` 1200 | 1201 | There's alot going on there, so read through it carefully. 1202 | 1203 | Finally, let's perform the lighting pass: 1204 | 1205 | **Renderer.swift** 1206 | 1207 | ``` swift 1208 | func render(_ view: MTKView) { 1209 | // ... 1210 | stencilPassEncoder.popDebugGroup() 1211 | stencilPassEncoder.endEncoding() 1212 | 1213 | // ---- LIGHTING ---- // 1214 | let lightEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: lightVolumeRenderPassDescriptor) 1215 | lightEncoder.pushDebugGroup("Light Volume Pass") 1216 | lightEncoder.label = "Light Volume Pass" 1217 | lightEncoder.setDepthStencilState(lightVolumeDepthStencilState) 1218 | lightEncoder.setStencilReferenceValue(0) 1219 | lightEncoder.setCullMode(.front) 1220 | lightEncoder.setFrontFacing(.counterClockwise) 1221 | lightEncoder.setRenderPipelineState(lightVolumeRenderPipeline) 1222 | lightEncoder.setFragmentTexture(gBufferAlbedoTexture, at: 0) 1223 | lightEncoder.setFragmentTexture(gBufferNormalTexture, at: 1) 1224 | lightEncoder.setFragmentTexture(gBufferPositionTexture, at: 2) 1225 | lightEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 1226 | lightEncoder.setFragmentBytes(&lightFragmentInput, length: MemoryLayout.size, at: 0) 1227 | for i in 0...(lightNumber - 1) { 1228 | lightEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 1229 | lightEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, indexCount: lightSphere.indexCount, indexType: lightSphere.indexType, indexBuffer: lightSphere.indexBuffer, indexBufferOffset: 0) 1230 | } 1231 | 1232 | lightEncoder.popDebugGroup() 1233 | lightEncoder.endEncoding() 1234 | } 1235 | ``` 1236 | 1237 | We also need to blit the final composite texture to the screen. 1238 | 1239 | **Renderer.swift** 1240 | 1241 | ``` swift 1242 | func render(_ view: MTKView) { 1243 | // ... 1244 | blitEncoder.copy(from: compositeTexture, sourceSlice: 0, sourceLevel: 0, sourceOrigin: origin, sourceSize: size, to: (currDrawable?.texture)!, destinationSlice: 0, destinationLevel: 0, destinationOrigin: origin) 1245 | // ... 
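        // Note: `origin` and `size` are assumed to be the same values used by the earlier
        // blits (a zero MTLOrigin and an MTLSize covering the drawable); they are elided
        // from these excerpts.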
1246 | } 
1247 | ``` 
1248 | 
1249 | You should see something like this: 
1250 | 
1251 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-albedo.png) 
1252 | 
1253 | You can see the lights blending nicely in the middle. Notice how the side of the cube facing away from the lights is still getting shaded? That might seem erroneous, but that case will be handled by our improved shaders, which won't light surfaces facing away from the light source. 
1254 | 
1255 | Obviously these are not the kind of shaders you'd normally use; they're only for visualization purposes. 
1256 | 
1257 | ### Improving those shaders 
1258 | 
1259 | We have all that information from the GBuffer available to us in our shaders, so let's use it! 
1260 | 
1261 | At this point I decided to change the cube mesh to a sphere so I could better see the effect of the lights. I also tweaked the light positions: 
1262 | 
1263 | **Renderer.swift** 
1264 | 
1265 | ``` swift 
1266 | init?(mtkView: MTKView) { 
1267 |     // ... 
1268 | 
1269 |     // Compile the functions and other state into a pipeline object. 
1270 |     do { 
1271 |         renderPipelineState = try Renderer.buildRenderPipelineWithDevice(device, view: mtkView) 
1272 |     } 
1273 |     catch { 
1274 |         print("Unable to compile render pipeline state") 
1275 |         return nil 
1276 |     } 
1277 | 
1278 |     mesh = Mesh(sphereWithSize: 1.0, device: device)! // Swap the cube for the new sphere mesh 
1279 | 
1280 |     // ... 
1281 | 
1282 |     // Hard-code position and radius 
1283 |     lightProperties[0].worldPosition = float3(1, 1, 1.5) 
1284 |     lightProperties[0].radius = 1.0 
1285 | 
1286 |     lightProperties[1].worldPosition = float3(-0.4, 0.0, 0.0) 
1287 |     lightProperties[1].radius = 1.0 
1288 | } 
1289 | ``` 
1290 | 
1291 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-sphere.png) 
1292 | 
1293 | Notice the wobbly edges of the light volume? They look like that because of the low number of segments we're using in our sphere mesh. You can increase the number of segments in your 'Mesh.swift' file, but the wobbly edges won't be noticeable once we do the real lighting calculations (lighting contributions at the edge of the light volume should approach 0). 
1294 | 
1295 | I am hard-coding the radii here. You can calculate the radius of the sphere based on fall-off, but I think this article is big enough for now. A good explanation of how to calculate the radius of a light volume is given in [this](http://ogldev.atspace.co.uk/www/tutorial36/tutorial36.html) OGLDev tutorial; a short sketch of that calculation also appears as a comment at the top of the next listing. 
1296 | 
1297 | **Shaders.metal** 
1298 | 
1299 | ``` swift 
1300 | // ...
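// (Aside, a hedged sketch rather than code from this project: to derive a light volume
//  radius from attenuation instead of hard-coding it, solve
//      1 / (Kc + Kl*d + Kq*d*d) = minIntensity
//  for the distance d, which gives
//      d = (-Kl + sqrt(Kl*Kl - 4*Kq*(Kc - 1/minIntensity))) / (2*Kq)
//  where Kc, Kl and Kq are illustrative attenuation constants, as in the OGLDev
//  tutorial linked above.)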
1301 | struct LightFragmentInput { 1302 | float2 screenSize; 1303 | // We're going to need the camera's position to calculate specular lighting 1304 | float3 camWorldPos; 1305 | }; 1306 | 1307 | struct PointLight { 1308 | float3 worldPosition; 1309 | float radius; 1310 | float3 color; 1311 | }; 1312 | 1313 | fragment float4 lightVolumeFrag(StencilPassOut in [[stage_in]], 1314 | constant LightFragmentInput *lightData [[ buffer(0) ]], 1315 | constant PointLight *pointLight [[ buffer(1) ]], 1316 | texture2d albedoTexture [[ texture(0) ]], 1317 | texture2d normalsTexture [[ texture(1) ]], 1318 | texture2d positionTexture [[ texture(2) ]]) 1319 | { 1320 | // We sample albedo, normals and position from the position of this fragment, normalized to be 0-1 within screen space 1321 | float2 sampleCoords = in.position.xy / lightData->screenSize; 1322 | 1323 | constexpr sampler texSampler; 1324 | 1325 | // Extract data for this fragment from GBuffer textures 1326 | const float3 albedo = float3(albedoTexture.sample(texSampler, sampleCoords)); 1327 | const float3 worldPosition = float3(positionTexture.sample(texSampler, sampleCoords)); 1328 | const float3 normal = normalize(float3(normalsTexture.sample(texSampler, sampleCoords))); 1329 | 1330 | const float3 lightDir = normalize(pointLight->worldPosition - worldPosition); 1331 | 1332 | // Diffuse 1333 | const float nDotL = max(dot(normal, lightDir), 0.0); 1334 | const float3 diffuse = nDotL * albedo * pointLight->color; 1335 | 1336 | float3 result = diffuse; 1337 | 1338 | // Specular - if you want 1339 | //const float3 viewDir = normalize(lightData->camWorldPos - worldPosition); 1340 | //const float3 halfwayDir = normalize(lightDir + viewDir); 1341 | //const float3 specular = pow(max(dot(normal, halfwayDir), 0.0), 60.0) * 0.2; 1342 | //result = (diffuse + specular); 1343 | 1344 | const float3 gammaCorrect = pow(float3(result), (1.0/2.2)); 1345 | return float4(gammaCorrect, 1.0); 1346 | } 1347 | ``` 1348 | 1349 | **Renderer.swift** 1350 | 1351 | ``` swift 1352 | struct PointLight { 1353 | var worldPosition = float3(0.0, 0.0, 0.0) 1354 | var radius = Float(1.0) 1355 | var color = float3(1, 1, 1) 1356 | } 1357 | 1358 | struct LightFragmentInput { 1359 | var screenSize = float2(1, 1) 1360 | var camWorldPos = float3(0.0, 0.0, 2.5) 1361 | } 1362 | 1363 | @objc 1364 | class Renderer : NSObject, MTKViewDelegate 1365 | { 1366 | // ... 1367 | init?(mtkView: MTKView) { 1368 | // ... 1369 | 1370 | // Add space for each light's data 1371 | for _ in 0...(lightNumber - 1) { 1372 | lightProperties.append(PointLight()) 1373 | lightConstants.append(Constants()) 1374 | } 1375 | 1376 | // Hard-code position, radius, color 1377 | lightProperties[0].worldPosition = float3(1, 1, 1.5) 1378 | lightProperties[0].radius = 3 1379 | lightProperties[0].color = float3(1, 0, 0) 1380 | 1381 | lightProperties[1].worldPosition = float3(-1, 1, 1.5) 1382 | lightProperties[1].radius = 3 1383 | lightProperties[1].color = float3(0, 1, 0) 1384 | } 1385 | } 1386 | 1387 | // ... 1388 | func render(_ view: MTKView) { 1389 | // ... 
1390 | 1391 | // ---- LIGHTING ---- // 1392 | let lightPassEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: lightVolumeRenderPassDescriptor) 1393 | lightPassEncoder.pushDebugGroup("Light Volume Pass") 1394 | lightPassEncoder.label = "Light Volume Pass" 1395 | // Use our previously configured depth stencil state 1396 | lightPassEncoder.setDepthStencilState(lightVolumeDepthStencilState) 1397 | // Set our stencil reference value to 0 (in the depth stencil state we configured fragments to pass only if they are NOT EQUAL to the reference value 1398 | lightPassEncoder.setStencilReferenceValue(0) 1399 | // We cull the front of the spherical light volume and not the back, in-case we are inside the light volume. I'm not 100% certain this is the best way to do this, but it seems to work. 1400 | lightPassEncoder.setCullMode(.front) 1401 | lightPassEncoder.setFrontFacing(.counterClockwise) 1402 | lightPassEncoder.setRenderPipelineState(lightVolumeRenderPipeline) 1403 | // Bind our GBuffer textures 1404 | lightPassEncoder.setFragmentTexture(gBufferAlbedoTexture, at: 0) 1405 | lightPassEncoder.setFragmentTexture(gBufferNormalTexture, at: 1) 1406 | lightPassEncoder.setFragmentTexture(gBufferPositionTexture, at: 2) 1407 | lightPassEncoder.setVertexBuffer(lightSphere.vertexBuffer, offset:0, at:0) 1408 | // Upload our screen size 1409 | lightPassEncoder.setFragmentBytes(&lightFragmentInput, length: MemoryLayout.size, at: 0) 1410 | // Render light volumes 1411 | for i in 0...(lightNumber - 1) { 1412 | lightPassEncoder.setVertexBytes(&lightConstants[i], length: MemoryLayout.size, at: 1) 1413 | // Upload light property data too 1414 | lightPassEncoder.setFragmentBytes(&lightProperties[i], length: MemoryLayout.size, at: 1) 1415 | lightPassEncoder.drawIndexedPrimitives(type: lightSphere.primitiveType, indexCount: lightSphere.indexCount, indexType: lightSphere.indexType, indexBuffer: lightSphere.indexBuffer, indexBufferOffset: 0) 1416 | } 1417 | 1418 | lightPassEncoder.popDebugGroup() 1419 | lightPassEncoder.endEncoding() 1420 | 1421 | // ... 1422 | } 1423 | ``` 1424 | 1425 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-colour-albedo.png) 1426 | 1427 | And another with no texture, so that you can see the lights better: 1428 | 1429 | ![](../img/posts/implementing-deferred-shading-in-metal/lights-colour-flat.png) 1430 | 1431 | ### An aside on gamma correction 1432 | 1433 | Gamma correction, it's really important: 1434 | 1435 | Further reading: 1436 | 1437 | - [GPU Gems 3: The Importance of Being Linear](http://http.developer.nvidia.com/GPUGems3/gpugems3_ch24.html) 1438 | - [Coding Labs: Gamma and Linear Spaces](http://www.codinglabs.net/article_gamma_vs_linear.aspx) 1439 | - [Gamasutra: Gamma-Correct Lighting](http://www.gamasutra.com/blogs/DavidRosen/20100204/4322/GammaCorrect_Lighting.php) 1440 | 1441 | [*Thanks Marcin Ignac*](http://marcinignac.com/blog/pragmatic-pbr-setup-and-gamma/) 1442 | 1443 | Handling window resizing 1444 | ------------------------ 1445 | 1446 | Up to now, we haven't been handling resize events, let's fix this: 1447 | 1448 | **Renderer.swift** 1449 | 1450 | ``` swift 1451 | @objc 1452 | class Renderer : NSObject, MTKViewDelegate 1453 | { 1454 | // ... 
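    // MTKView calls this delegate method whenever the drawable size changes (window
    // resize, or moving to a display with a different scale factor). Every
    // size-dependent texture is recreated here and re-attached to its render pass
    // descriptor.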
1455 | 1456 | func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) { 1457 | // respond to resize 1458 | let drawableWidth = Int(size.width) 1459 | let drawableHeight = Int(size.height) 1460 | 1461 | lightFragmentInput.screenSize.x = Float(size.width) 1462 | lightFragmentInput.screenSize.y = Float(size.height) 1463 | 1464 | // Create resized GBuffer albedo texture 1465 | let gBufferAlbedoTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 1466 | gBufferAlbedoTextureDescriptor.sampleCount = 1 1467 | gBufferAlbedoTextureDescriptor.storageMode = .private 1468 | gBufferAlbedoTextureDescriptor.textureType = .type2D 1469 | gBufferAlbedoTextureDescriptor.usage = [.renderTarget, .shaderRead] 1470 | 1471 | gBufferAlbedoTexture = device.makeTexture(descriptor: gBufferAlbedoTextureDescriptor) 1472 | 1473 | // Create resized GBuffer normal texture 1474 | let gBufferNormalTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 1475 | gBufferNormalTextureDescriptor.sampleCount = 1 1476 | gBufferNormalTextureDescriptor.storageMode = .private 1477 | gBufferNormalTextureDescriptor.textureType = .type2D 1478 | gBufferNormalTextureDescriptor.usage = [.renderTarget, .shaderRead] 1479 | 1480 | gBufferNormalTexture = device.makeTexture(descriptor: gBufferNormalTextureDescriptor) 1481 | 1482 | // Create resized GBuffer position texture 1483 | let gBufferPositionTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .rgba16Float, width: drawableWidth, height: drawableHeight, mipmapped: false) 1484 | gBufferPositionTextureDescriptor.sampleCount = 1 1485 | gBufferPositionTextureDescriptor.storageMode = .private 1486 | gBufferPositionTextureDescriptor.textureType = .type2D 1487 | gBufferPositionTextureDescriptor.usage = [.renderTarget, .shaderRead] 1488 | 1489 | gBufferPositionTexture = device.makeTexture(descriptor: gBufferPositionTextureDescriptor) 1490 | 1491 | // Create resized GBuffer depth (and stencil) texture 1492 | let gBufferDepthDesc: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .depth32Float_stencil8, width: drawableWidth, height: drawableHeight, mipmapped: false) 1493 | gBufferDepthDesc.sampleCount = 1 1494 | gBufferDepthDesc.storageMode = .private 1495 | gBufferDepthDesc.textureType = .type2D 1496 | gBufferDepthDesc.usage = [.renderTarget, .shaderRead] 1497 | 1498 | gBufferDepthTexture = device.makeTexture(descriptor: gBufferDepthDesc) 1499 | 1500 | // Create resized composite texture 1501 | let compositeTextureDescriptor: MTLTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: drawableWidth, height: drawableHeight, mipmapped: false) 1502 | compositeTextureDescriptor.sampleCount = 1 1503 | compositeTextureDescriptor.storageMode = .private 1504 | compositeTextureDescriptor.textureType = .type2D 1505 | compositeTextureDescriptor.usage = [.renderTarget] 1506 | 1507 | compositeTexture = device.makeTexture(descriptor: compositeTextureDescriptor) 1508 | 1509 | // Hook the new textures up to their descriptors 1510 | gBufferRenderPassDescriptor.colorAttachments[0].texture = gBufferAlbedoTexture 1511 | gBufferRenderPassDescriptor.colorAttachments[1].texture = gBufferNormalTexture 1512 | gBufferRenderPassDescriptor.colorAttachments[2].texture = 
gBufferPositionTexture 1513 | gBufferRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 1514 | 1515 | stencilRenderPassDescriptor.depthAttachment.texture = gBufferDepthTexture 1516 | stencilRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 1517 | 1518 | lightVolumeRenderPassDescriptor.colorAttachments[0].texture = compositeTexture 1519 | lightVolumeRenderPassDescriptor.stencilAttachment.texture = gBufferDepthTexture 1520 | } 1521 | 1522 | // ... 1523 | } 1524 | ``` 1525 | 1526 | Conclusion 1527 | ========== 1528 | 1529 | That demonstrates the core concepts of deferred shading. There's plenty more we could cover, but I'll save that for a future tutorial. 1530 | 1531 | Related links 1532 | ============= 1533 | 1534 | - [OGLDev - Deferred Shading Part 1](http://ogldev.atspace.co.uk/www/tutorial35/tutorial35.html) 1535 | - [Learn OpenGL - Deferred Shading](https://learnopengl.com/#!Advanced-Lighting/Deferred-Shading) 1536 | - [Coding Labs - Deferred Rendering](http://www.codinglabs.net/tutorial_simple_def_rendering.aspx) 1537 | - [Marcin Ignac's Blog - Gamma and Lighting](http://marcinignac.com/blog/pragmatic-pbr-setup-and-gamma/) 1538 | - [GPU Gems 3: The Importance of Being Linear](http://http.developer.nvidia.com/GPUGems3/gpugems3_ch24.html) 1539 | - [Coding Labs: Gamma and Linear Spaces](http://www.codinglabs.net/article_gamma_vs_linear.aspx) 1540 | - [Gamasutra: Gamma-Correct Lighting](http://www.gamasutra.com/blogs/DavidRosen/20100204/4322/GammaCorrect_Lighting.php) 1541 | 1542 | [^ Back to Top](#top) 1543 | -------------------------------------------------------------------------------- /img/deferred.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/deferred.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/albedo-fixed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/albedo-fixed.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/albedo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/albedo.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/final.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/final.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/gbuffer-data.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/gbuffer-data.png 
-------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-albedo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-albedo.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-colour-albedo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-colour-albedo.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-colour-flat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-colour-flat.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-normal-fake.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-normal-fake.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-normal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-normal.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/lights-sphere.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/lights-sphere.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/normal-fixed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/normal-fixed.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/normal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/normal.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/position-fixed.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/position-fixed.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/position.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/position.png -------------------------------------------------------------------------------- /img/posts/implementing-deferred-shading-in-metal/stencil-buffer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sevanspowell/MetalDeferredLightingTutorial/32a1e9db8acd8fc7b489e97471da5cd3bd7463eb/img/posts/implementing-deferred-shading-in-metal/stencil-buffer.png --------------------------------------------------------------------------------