├── .github └── workflows │ └── build.yml ├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── build.zig.zon ├── debug-env.sh ├── examples ├── build.zig ├── build.zig.zon ├── c.zig ├── graphics_context.zig ├── shaders │ ├── fragment.zig │ ├── triangle.frag │ ├── triangle.vert │ └── vertex.zig ├── swapchain.zig └── triangle.zig ├── src ├── id_render.zig ├── main.zig ├── vulkan │ ├── c_parse.zig │ ├── generator.zig │ ├── parse.zig │ ├── registry.zig │ └── render.zig └── xml.zig └── test └── ref_all_decls.zig /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | schedule: 9 | - cron: '0 6 * * *' 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-22.04 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Setup Zig 19 | uses: mlugg/setup-zig@v2 20 | with: 21 | version: master 22 | 23 | - name: Check formatting 24 | run: zig fmt --check . 
25 | 26 | - name: Fetch latest Vulkan SDK 27 | run: | 28 | wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - 29 | sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list 30 | sudo apt update 31 | sudo apt install shaderc libglfw3 libglfw3-dev 32 | 33 | - name: Fetch latest vk.xml 34 | run: | 35 | wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/vk.xml 36 | wget https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/main/xml/video.xml 37 | 38 | - name: Test and install with latest zig & latest vk.xml 39 | run: zig build test install -Dregistry=$(pwd)/vk.xml 40 | 41 | - name: Test and install with latest zig & latest vk.xml & latest video.xml 42 | run: zig build test install -p zig-out-video -Dregistry=$(pwd)/vk.xml -Dvideo=$(pwd)/video.xml 43 | 44 | - name: Build example with latest zig & vk.xml from dependency 45 | run: zig build --build-file $(pwd)/examples/build.zig 46 | 47 | - name: Build example with latest zig & latest vk.xml 48 | run: zig build --build-file $(pwd)/examples/build.zig -Doverride-registry=$(pwd)/vk.xml 49 | 50 | - name: Build example with latest zig & vk.xml from dependency & use zig shaders 51 | run: zig build --build-file $(pwd)/examples/build.zig -Dzig-shader 52 | 53 | - name: Archive vk.zig 54 | uses: actions/upload-artifact@v4 55 | with: 56 | name: vk.zig 57 | path: | 58 | zig-out/src/vk.zig 59 | zig-out-video/src/vk.zig 60 | if-no-files-found: error 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | zig-out/ 3 | .vscode/.zig-cache/ 4 | .zig-cache/ 5 | examples/.zig-cache 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright © Robin 
Voetter 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vulkan-zig 2 | 3 | A Vulkan binding generator for Zig. 4 | 5 | [![Actions Status](https://github.com/Snektron/vulkan-zig/workflows/Build/badge.svg)](https://github.com/Snektron/vulkan-zig/actions) 6 | 7 | ## Overview 8 | 9 | vulkan-zig attempts to provide a better experience to programming Vulkan applications in Zig, by providing features such as integration of vulkan errors with Zig's error system, function pointer loading, renaming fields to standard Zig style, better bitfield handling, turning out parameters into return values and more. 10 | 11 | vulkan-zig is automatically tested daily against the latest vk.xml and zig, and supports vk.xml from version 1.x.163. 
12 | 13 | ## Example 14 | 15 | A partial implementation of https://vulkan-tutorial.com is implemented in [examples/triangle.zig](examples/triangle.zig). This example can be ran by executing `zig build --build-file $(pwd)/examples/build.zig run-triangle` in vulkan-zig's root. See in particular the [build file](examples/build.zig), which contains a concrete example of how to use vulkan-zig as a dependency. 16 | 17 | ### Zig versions 18 | 19 | vulkan-zig aims to be always compatible with the ever-changing Zig master branch (however, development may lag a few days behind). Sometimes, the Zig master branch breaks a bunch of functionality however, which may make the latest version vulkan-zig incompatible with older releases of Zig. This repository aims to have a version compatible for both the latest Zig master, and the latest Zig release. The `master` branch is compatible with the `master` branch of Zig, and versions for older versions of Zig are maintained in the `zig--compat` branch. 20 | 21 | `master` is compatible and tested with the Zig self-hosted compiler. The `zig-stage1-compat` branch contains a version which is compatible with the Zig stage 1 compiler. 22 | 23 | ## Features 24 | ### CLI-interface 25 | 26 | A CLI-interface is provided to generate vk.zig from the [Vulkan XML registry](https://github.com/KhronosGroup/Vulkan-Docs/blob/main/xml), which is built by default when invoking `zig build` in the project root. To generate vk.zig, simply invoke the program as follows: 27 | ``` 28 | $ zig-out/bin/vulkan-zig-generator path/to/vk.xml output/path/to/vk.zig 29 | ``` 30 | This reads the xml file, parses its contents, renders the Vulkan bindings, and formats file, before writing the result to the output path. While the intended usage of vulkan-zig is through direct generation from build.zig (see below), the CLI-interface can be used for one-off generation and vendoring the result. 
31 | 32 | `path/to/vk.xml` can be obtained from several sources: 33 | - From the LunarG Vulkan SDK. This can either be obtained from [LunarG](https://www.lunarg.com/vulkan-sdk) or usually using the package manager. The registry can then be found at `$VULKAN_SDK/share/vulkan/registry/vk.xml`. 34 | - Directly from the [Vulkan-Headers GitHub repository](https://github.com/KhronosGroup/Vulkan-Headers/blob/main/registry/vk.xml). 35 | 36 | ### Generation with the package manager from build.zig 37 | 38 | There is also support for adding this project as a dependency through zig package manager in its current form. In order to do this, add this repo as a dependency in your build.zig.zon: 39 | ```zig 40 | .{ 41 | // -- snip -- 42 | .dependencies = .{ 43 | // -- snip -- 44 | .vulkan_zig = .{ 45 | .url = "https://github.com/Snektron/vulkan-zig/archive/.tar.gz", 46 | .hash = "", 47 | }, 48 | }, 49 | } 50 | ``` 51 | And then in your build.zig file, you'll need to add a line like this to your build function: 52 | ```zig 53 | const vulkan = b.dependency("vulkan_zig", .{ 54 | .registry = b.path("path/to/vk.xml"), 55 | }).module("vulkan-zig"); 56 | exe.root_module.addImport("vulkan", vulkan); 57 | ``` 58 | That will allow you to `@import("vulkan")` in your executable's source. 
59 | 60 | #### Generating bindings directly from Vulkan-Headers 61 | 62 | Bindings can be generated directly from the Vulkan-Headers repository by adding Vulkan-Headers as a dependency, and then passing the path to `vk.xml` from that dependency: 63 | ```zig 64 | .{ 65 | // -- snip -- 66 | .dependencies = .{ 67 | // -- snip -- 68 | .vulkan_headers = .{ 69 | .url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/v1.3.283.tar.gz", 70 | .hash = "", 71 | }, 72 | }, 73 | } 74 | ``` 75 | ```zig 76 | const vulkan = b.dependency("vulkan_zig", .{ 77 | .registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml"), 78 | }).module("vulkan-zig"); 79 | exe.root_module.addImport("vulkan", vulkan); 80 | ``` 81 | 82 | ### Manual generation with the package manager from build.zig 83 | 84 | Bindings can also be generated by invoking the generator directly. This may be useful is some special cases, for example, it integrates particularly well with fetching the registry via the package manager. This can be done by adding the Vulkan-Headers repository to your dependencies, and then passing the `vk.xml` inside it to vulkan-zig-generator: 85 | ```zig 86 | .{ 87 | // -- snip -- 88 | .depdendencies = .{ 89 | // -- snip -- 90 | .vulkan_headers = .{ 91 | .url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/.tar.gz", 92 | .hash = "", 93 | }, 94 | }, 95 | } 96 | 97 | ``` 98 | And then pass `vk.xml` to vulkan-zig-generator as follows: 99 | ```zig 100 | // Get the (lazy) path to vk.xml: 101 | const registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml"); 102 | // Get generator executable reference 103 | const vk_gen = b.dependency("vulkan_zig", .{}).artifact("vulkan-zig-generator"); 104 | // Set up a run step to generate the bindings 105 | const vk_generate_cmd = b.addRunArtifact(vk_gen); 106 | // Pass the registry to the generator 107 | vk_generate_cmd.addFileArg(registry); 108 | // Create a module from the generator's output... 
109 | const vulkan_zig = b.addModule("vulkan-zig", .{ 110 | .root_source_file = vk_generate_cmd.addOutputFileArg("vk.zig"), 111 | }); 112 | // ... and pass it as a module to your executable's build command 113 | exe.root_module.addImport("vulkan", vulkan_zig); 114 | ``` 115 | 116 | See [examples/build.zig](examples/build.zig) and [examples/build.zig.zon](examples/build.zig.zon) for a concrete example. 117 | 118 | ### Function & field renaming 119 | 120 | Functions and fields are renamed to be more or less in line with [Zig's standard library style](https://ziglang.org/documentation/master/#Style-Guide): 121 | * The vk prefix is removed everywhere 122 | * Structs like `VkInstanceCreateInfo` are renamed to `InstanceCreateInfo`. 123 | * Handles like `VkSwapchainKHR` are renamed to `SwapchainKHR` (note that the tag is retained in caps). 124 | * Functions like `vkCreateInstance` are generated as `createInstance` as wrapper and as `PfnCreateInstance` as function pointer. 125 | * API constants like `VK_WHOLE_SIZE` retain screaming snake case, and are generates as `WHOLE_SIZE`. 126 | * The type name is stripped from enumeration fields and bitflags, and they are generated in (lower) snake case. For example, `VK_IMAGE_LAYOUT_GENERAL` is generated as just `general`. Note that author tags are also generated to lower case: `VK_SURFACE_TRANSFORM_FLAGS_IDENTITY_BIT_KHR` is translated to `identity_bit_khr`. 127 | * Container fields and function parameter names are generated in (lower) snake case in a similar manner: `ppEnabledLayerNames` becomes `pp_enabled_layer_names`. 128 | * Any name which is either an illegal Zig name or a reserved identifier is rendered using `@"name"` syntax. For example, `VK_IMAGE_TYPE_2D` is translated to `@"2d"`. 129 | 130 | ### Dispatch Tables 131 | 132 | Vulkan-zig provides no integration for statically linking libvulkan, and these symbols are not generated at all. Instead, vulkan functions are to be loaded dynamically. 
For each Vulkan function, a function pointer type is generated using the exact parameters and return types as defined by the Vulkan specification: 133 | ```zig 134 | pub const PfnCreateInstance = fn ( 135 | p_create_info: *const InstanceCreateInfo, 136 | p_allocator: ?*const AllocationCallbacks, 137 | p_instance: *Instance, 138 | ) callconv(vulkan_call_conv) Result; 139 | ``` 140 | 141 | A set of _dispatch table_ structures is generated. A dispatch table simply contains a set of (optional) function pointers to Vulkan API functions, and not much else. Function pointers grouped by the nature of the function as follows: 142 | * Vulkan functions which are loaded by `vkGetInstanceProcAddr` without the need for passing an instance are placed in `BaseDispatch`. 143 | * Vulkan functions which are loaded by `vkGetInstanceProcAddr` but do need an instance are placed in `InstanceDispatch`. 144 | * Vulkan functions which are loaded by `vkGetDeviceProcAddr` are placed in `DeviceDispatch`. 145 | 146 | ### Wrappers 147 | 148 | To provide more interesting functionality, a set of _wrapper_ types is also generated, one for each dispatch table type. These contain the Zig-versions of each Vulkan API function, along with corresponding error set definitions, return type definitions, etc, where appropriate. 
149 | 150 | The wrapper struct then provides wrapper functions for each function pointer in the dispatch struct: 151 | ```zig 152 | pub const BaseWrapper = struct { 153 | const Self = @This(); 154 | const Dispatch = CreateDispatchStruct(cmds); 155 | 156 | dispatch: Dispatch, 157 | 158 | pub const CreateInstanceError = error{ 159 | OutOfHostMemory, 160 | OutOfDeviceMemory, 161 | InitializationFailed, 162 | LayerNotPresent, 163 | ExtensionNotPresent, 164 | IncompatibleDriver, 165 | Unknown, 166 | }; 167 | pub fn createInstance( 168 | self: Self, 169 | create_info: InstanceCreateInfo, 170 | p_allocator: ?*const AllocationCallbacks, 171 | ) CreateInstanceError!Instance { 172 | var instance: Instance = undefined; 173 | const result = self.dispatch.vkCreateInstance.?( 174 | &create_info, 175 | p_allocator, 176 | &instance, 177 | ); 178 | switch (result) { 179 | .success => {}, 180 | .error_out_of_host_memory => return error.OutOfHostMemory, 181 | .error_out_of_device_memory => return error.OutOfDeviceMemory, 182 | .error_initialization_failed => return error.InitializationFailed, 183 | .error_layer_not_present => return error.LayerNotPresent, 184 | .error_extension_not_present => return error.ExtensionNotPresent, 185 | .error_incompatible_driver => return error.IncompatibleDriver, 186 | else => return error.Unknown, 187 | } 188 | return instance; 189 | } 190 | 191 | ... 192 | }; 193 | ``` 194 | Wrappers are generated according to the following rules: 195 | * The return type is determined from the original return type and the parameters. 196 | * Any non-const, non-optional single-item pointer is interpreted as an out parameter. 197 | * If a command returns a non-error `VkResult` other than `VK_SUCCESS` it is also returned. 198 | * If there are multiple return values selected, an additional struct is generated. The original call's return value is called `return_value`, `VkResult` is named `result`, and the out parameters are called the same except `p_` is removed. 
They are generated in this order. 199 | * Any const non-optional single-item pointer is interpreted as an in-parameter. For these, one level of indirection is removed so that create info structure pointers can now be passed as values, enabling the ability to use struct literals for these parameters. 200 | * Error codes are translated into Zig errors. 201 | * As of yet, there is no specific handling of enumeration style commands or other commands which accept slices. 202 | 203 | #### Initializing Wrappers 204 | 205 | Wrapper types are initialized by the `load` function, which must be passed a _loader_: A function which loads a function pointer by name. 206 | * For `BaseWrapper`, this function has signature `fn load(loader: anytype) Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr` (with optionally having a different calling convention). 207 | * For `InstanceWrapper`, this function has signature `fn load(instance: Instance, loader: anytype) Self`, where the type of `loader` must resemble `PfnGetInstanceProcAddr`. 208 | * For `DeviceWrapper`, this function has signature `fn load(device: Device, loader: anytype) Self`, where the type of `loader` must resemble `PfnGetDeviceProcAddr`. 209 | 210 | Note that these functions accepts a loader with the signature of `anytype` instead of `PfnGetInstanceProcAddr`. This is because it is valid for `vkGetInstanceProcAddr` to load itself, in which case the returned function is to be called with the vulkan calling convention. This calling convention is not required for loading vulkan-zig itself, though, and a loader to be called with any calling convention with the target architecture may be passed in. This is particularly useful when interacting with C libraries that provide `vkGetInstanceProcAddr`. 211 | 212 | ```zig 213 | // vkGetInstanceProcAddr as provided by GLFW. 
214 | // Note that vk.Instance and vk.PfnVoidFunction are ABI compatible with VkInstance, 215 | // and that `extern` implies the C calling convention. 216 | pub extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; 217 | 218 | // Or provide a custom implementation. 219 | // This function is called with the unspecified Zig-internal calling convention. 220 | fn customGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction { 221 | ... 222 | } 223 | 224 | // Both calls are valid. 225 | const vkb = BaseWrapper.load(glfwGetInstanceProcAddress); 226 | const vkb = BaseWrapper.load(customGetInstanceProcAddress); 227 | ``` 228 | 229 | The `load` function tries to load all function pointers unconditionally, regardless of enabled extensions or platform. If a function pointer could not be loaded, its entry in the dispatch table is set to `null`. When invoking a function on a wrapper table, the function pointer is checked for null, and there will be a crash or undefined behavior if it was not loaded properly. That means that **it is up to the programmer to ensure that a function pointer is valid for the platform before calling it**, either by checking whether the associated extension or Vulkan version is supported or simply by checking whether the function pointer is non-null. 230 | 231 | One can access the underlying unwrapped C functions by doing `wrapper.dispatch.vkFuncYouWant.?(..)`. 232 | 233 | #### Proxying Wrappers 234 | 235 | Proxying wrappers wrap a wrapper and a pointer to the associated handle in a single struct, and automatically passes this handle to commands as appropriate. Besides the proxying wrappers for instances and devices, there are also proxying wrappers for queues and command buffers. Proxying wrapper type are constructed in the same way as a regular wrapper, by passing an api specification to them. 
To initialize a proxying wrapper, it must be passed a handle and a pointer to an appropriate wrapper. For queue and command buffer proxying wrappers, a pointer to a device wrapper must be passed. 236 | 237 | ```zig 238 | const InstanceWrapper = vk.InstanceWrapper; 239 | const Instance = vk.InstanceProxy; 240 | 241 | const instance_handle = try vkb.createInstance(...); 242 | const vki = try InstanceWrapper.load(instance_handle, vkb.dispatch.vkGetInstanceProcAddr.?); 243 | const instance = Instance.load(instance_handle, &vki); 244 | defer instance.destroyInstance(null); 245 | ``` 246 | 247 | For queue and command buffer proxying wrappers, the `queue` and `cmd` prefix is removed for functions where appropriate. Note that the device proxying wrappers also have the queue and command buffer functions made available for convenience, but there the prefix is not stripped. 248 | 249 | Note that the proxy must be passed a _pointer_ to a wrapper. This is because there was a limitation with LLVM in the past, where a struct with an object pointer and its associated function pointers wouldn't be optimized properly. By using a separate function pointer, LLVM knows that the "vtable" dispatch struct can never be modified and so it can subject each call to vtable optimizations. 250 | 251 | ### Bitflags 252 | 253 | Packed structs of bools are used for bit flags in vulkan-zig, instead of both a `FlagBits` and `Flags` variant. Places where either of these variants are used are both replaced by this packed struct instead. This means that even in places where just one flag would normally be accepted, the packed struct is accepted. The programmer is responsible for only enabling a single bit. 
254 | 255 | Each bit is defaulted to `false`, and the first `bool` is aligned to guarantee the overal alignment 256 | of each Flags type to guarantee ABI compatibility when passing bitfields through structs: 257 | ```zig 258 | pub const QueueFlags = packed struct { 259 | graphics_bit: bool align(@alignOf(Flags)) = false, 260 | compute_bit: bool = false, 261 | transfer_bit: bool = false, 262 | sparse_binding_bit: bool = false, 263 | protected_bit: bool = false, 264 | _reserved_bit_5: bool = false, 265 | _reserved_bit_6: bool = false, 266 | ... 267 | } 268 | ``` 269 | Note that on function call ABI boundaries, this alignment trick is not sufficient. Instead, the flags 270 | are reinterpreted as an integer which is passed instead. Each flags type is augmented by a mixin which provides `IntType`, an integer which represents the flags on function ABI boundaries. This mixin also provides some common set operation on bitflags: 271 | ```zig 272 | pub fn FlagsMixin(comptime FlagsType: type) type { 273 | return struct { 274 | pub const IntType = Flags; 275 | 276 | // Return the integer representation of these flags 277 | pub fn toInt(self: FlagsType) IntType {...} 278 | 279 | // Turn an integer representation back into a flags type 280 | pub fn fromInt(flags: IntType) FlagsType { ... } 281 | 282 | // Return the set-union of `lhs` and `rhs. 283 | pub fn merge(lhs: FlagsType, rhs: FlagsType) FlagsType { ... } 284 | 285 | // Return the set-intersection of `lhs` and `rhs`. 286 | pub fn intersect(lhs: FlagsType, rhs: FlagsType) FlagsType { ... } 287 | 288 | // Return the set-complement of `lhs` and `rhs`. Note: this also inverses reserved bits. 289 | pub fn complement(self: FlagsType) FlagsType { ... } 290 | 291 | // Return the set-subtraction of `lhs` and `rhs`: All fields set in `rhs` are cleared in `lhs`. 292 | pub fn subtract(lhs: FlagsType, rhs: FlagsType) FlagsType { ... } 293 | 294 | // Returns whether all bits set in `rhs` are also set in `lhs`. 
295 | pub fn contains(lhs: FlagsType, rhs: FlagsType) bool { ... } 296 | }; 297 | } 298 | ``` 299 | 300 | ### Handles 301 | 302 | Handles are generated to a non-exhaustive enum, backed by a `u64` for non-dispatchable handles and `usize` for dispatchable ones: 303 | ```zig 304 | const Instance = extern enum(usize) { null_handle = 0, _ }; 305 | ``` 306 | This means that handles are type-safe even when compiling for a 32-bit target. 307 | 308 | ### Struct defaults 309 | 310 | Defaults are generated for certain fields of structs: 311 | * sType is defaulted to the appropriate value. 312 | * pNext is defaulted to `null`. 313 | * No other fields have default values. 314 | ```zig 315 | pub const InstanceCreateInfo = extern struct { 316 | s_type: StructureType = .instance_create_info, 317 | p_next: ?*const anyopaque = null, 318 | flags: InstanceCreateFlags, 319 | ... 320 | }; 321 | ``` 322 | 323 | ### Pointer types 324 | 325 | Pointer types in both commands (wrapped and function pointers) and struct fields are augmented with the following information, where available in the registry: 326 | * Pointer optional-ness. 327 | * Pointer const-ness. 328 | * Pointer size: Either single-item, null-terminated or many-items. 329 | 330 | Note that this information is not everywhere as useful in the registry, leading to places where optional-ness is not correct. Most notably, CreateInfo type structures which take a slice often have the item count marked as optional, but the pointer itself not. As of yet, this is not fixed in vulkan-zig. If drivers properly follow the Vulkan specification, these can be initialized to `undefined`, however, [that is not always the case](https://zeux.io/2019/07/17/serializing-pipeline-cache/). 331 | 332 | ### Platform types 333 | 334 | Defaults with the same ABI layout are generated for most platform-defined types. 
These can either be bitcasted to
If using vulkan-zig via the Zig package manager, the following also works: 372 | ```zig 373 | const vulkan_headers = b.dependency("vulkan_headers"); 374 | const vulkan = b.dependency("vulkan_zig", .{ 375 | .registry = vulkan_headers.path("registry/vk.xml"), 376 | .video = vulkan_headers.path("registery/video.xml"), 377 | }).module("vulkan-zig"); 378 | ``` 379 | 380 | The Vulkan Video bindings are not generated by default. In this case, the relevant definitions must be supplied by the user. See [platform types](#platform-types) for how this is done. 381 | 382 | ## Limitations 383 | 384 | * vulkan-zig has as of yet no functionality for selecting feature levels and extensions when generating bindings. This is because when an extension is promoted to Vulkan core, its fields and commands are renamed to lose the extensions author tag (for example, VkSemaphoreWaitFlagsKHR was renamed to VkSemaphoreWaitFlags when it was promoted from an extension to Vulkan 1.2 core). This leads to inconsistencies when only items from up to a certain feature level is included, as these promoted items then need to re-gain a tag. 385 | 386 | ## See also 387 | 388 | * Implementation of https://vulkan-tutorial.com using `@cImport`'ed bindings: https://github.com/andrewrk/zig-vulkan-triangle. 
389 | * Alternative binding generator: https://github.com/SpexGuy/Zig-Vulkan-Headers 390 | * Zig bindings for GLFW: https://github.com/hexops/mach-glfw 391 | * With vulkan-zig integration example: https://github.com/hexops/mach-glfw-vulkan-example 392 | * Advanced shader compilation: https://github.com/Games-by-Mason/shader_compiler 393 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub fn build(b: *std.Build) void { 4 | const target = b.standardTargetOptions(.{}); 5 | const optimize = b.standardOptimizeOption(.{}); 6 | const maybe_registry = b.option(std.Build.LazyPath, "registry", "Set the path to the Vulkan registry (vk.xml)"); 7 | const maybe_video = b.option(std.Build.LazyPath, "video", "Set the path to the Vulkan Video registry (video.xml)"); 8 | const test_step = b.step("test", "Run all the tests"); 9 | 10 | // Using the package manager, this artifact can be obtained by the user 11 | // through `b.dependency(, .{}).artifact("vulkan-zig-generator")`. 12 | // with that, the user need only `.addArg("path/to/vk.xml")`, and then obtain 13 | // a file source to the generated code with `.addOutputArg("vk.zig")` 14 | const generator_exe = b.addExecutable(.{ 15 | .name = "vulkan-zig-generator", 16 | .root_source_file = b.path("src/main.zig"), 17 | .target = target, 18 | .optimize = optimize, 19 | }); 20 | b.installArtifact(generator_exe); 21 | 22 | // Or they can skip all that, and just make sure to pass `.registry = "path/to/vk.xml"` to `b.dependency`, 23 | // and then obtain the module directly via `.module("vulkan-zig")`. 
24 | if (maybe_registry) |registry| { 25 | const vk_generate_cmd = b.addRunArtifact(generator_exe); 26 | 27 | if (maybe_video) |video| { 28 | vk_generate_cmd.addArg("--video"); 29 | vk_generate_cmd.addFileArg(video); 30 | } 31 | 32 | vk_generate_cmd.addFileArg(registry); 33 | 34 | const vk_zig = vk_generate_cmd.addOutputFileArg("vk.zig"); 35 | const vk_zig_module = b.addModule("vulkan-zig", .{ 36 | .root_source_file = vk_zig, 37 | }); 38 | 39 | // Also install vk.zig, if passed. 40 | 41 | const vk_zig_install_step = b.addInstallFile(vk_zig, "src/vk.zig"); 42 | b.getInstallStep().dependOn(&vk_zig_install_step.step); 43 | 44 | // And run tests on this vk.zig too. 45 | 46 | // This test needs to be an object so that vulkan-zig can import types from the root. 47 | // It does not need to run anyway. 48 | const ref_all_decls_test = b.addObject(.{ 49 | .name = "ref-all-decls-test", 50 | .root_source_file = b.path("test/ref_all_decls.zig"), 51 | .target = target, 52 | .optimize = optimize, 53 | }); 54 | ref_all_decls_test.root_module.addImport("vulkan", vk_zig_module); 55 | test_step.dependOn(&ref_all_decls_test.step); 56 | } 57 | 58 | const test_target = b.addTest(.{ 59 | .root_source_file = b.path("src/main.zig"), 60 | }); 61 | test_step.dependOn(&b.addRunArtifact(test_target).step); 62 | } 63 | -------------------------------------------------------------------------------- /build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = .vulkan, 3 | .fingerprint = 0xbe155a03c72db6af, 4 | .version = "0.0.0", 5 | .minimum_zig_version = "0.14.0-dev.1359+e9a00ba7f", 6 | .paths = .{ 7 | "build.zig", 8 | "LICENSE", 9 | "README.md", 10 | "src", 11 | }, 12 | } 13 | -------------------------------------------------------------------------------- /debug-env.sh: -------------------------------------------------------------------------------- 1 | export VK_INSTANCE_LAYERS="VK_LAYER_LUNARG_monitor:VK_LAYER_KHRONOS_validation" 2 | 
-------------------------------------------------------------------------------- /examples/build.zig: --------------------------------------------------------------------------------
const std = @import("std");

const vkgen = @import("vulkan_zig");

/// Build script for the triangle example.
///
/// Options:
/// - `-Doverride-registry=PATH`: use a local Vulkan registry (vk.xml) instead of
///   the one shipped with the `vulkan_headers` dependency.
/// - `-Dzig-shader`: compile the shaders from Zig source (SPIR-V backend)
///   instead of invoking `glslc` on the GLSL sources.
pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});
    const maybe_override_registry = b.option([]const u8, "override-registry", "Override the path to the Vulkan registry used for the examples");
    const use_zig_shaders = b.option(bool, "zig-shader", "Use Zig shaders instead of GLSL") orelse false;

    // Default registry: vk.xml from the pinned Vulkan-Headers package dependency.
    const registry = b.dependency("vulkan_headers", .{}).path("registry/vk.xml");

    const triangle_exe = b.addExecutable(.{
        .name = "triangle",
        .root_source_file = b.path("triangle.zig"),
        .target = target,
        // libc is required for linking against system GLFW below.
        .link_libc = true,
        .optimize = optimize,
    });
    b.installArtifact(triangle_exe);
    triangle_exe.linkSystemLibrary("glfw");

    // An override passed on the command line is an absolute/cwd-relative path,
    // not a package-relative LazyPath, hence the `.cwd_relative` wrapping.
    const registry_path: std.Build.LazyPath = if (maybe_override_registry) |override_registry|
        .{ .cwd_relative = override_registry }
    else
        registry;

    // The vulkan-zig generator dependency consumes the registry path as a
    // build option and exposes the generated bindings as a module.
    const vulkan = b.dependency("vulkan_zig", .{
        .registry = registry_path,
    }).module("vulkan-zig");

    triangle_exe.root_module.addImport("vulkan", vulkan);

    if (use_zig_shaders) {
        // Compile shaders written in Zig directly to SPIR-V via the
        // self-hosted SPIR-V backend (hence `use_llvm = false`).
        const spirv_target = b.resolveTargetQuery(.{
            .cpu_arch = .spirv64,
            .os_tag = .vulkan,
            .cpu_model = .{ .explicit = &std.Target.spirv.cpu.vulkan_v1_2 },
            .cpu_features_add = std.Target.spirv.featureSet(&.{.int64}),
            .ofmt = .spirv,
        });

        const vert_spv = b.addObject(.{
            .name = "vertex_shader",
            .root_source_file = b.path("shaders/vertex.zig"),
            .target = spirv_target,
            .use_llvm = false,
        });
        // The emitted SPIR-V binary is exposed to the executable as an
        // anonymous import so triangle.zig can @embedFile it.
        triangle_exe.root_module.addAnonymousImport(
            "vertex_shader",
            .{ .root_source_file = vert_spv.getEmittedBin() },
        );

        const frag_spv = b.addObject(.{
            .name = "fragment_shader",
            .root_source_file = b.path("shaders/fragment.zig"),
            .target = spirv_target,
            .use_llvm = false,
        });
        triangle_exe.root_module.addAnonymousImport(
            "fragment_shader",
            .{ .root_source_file = frag_spv.getEmittedBin() },
        );
    } else {
        // GLSL path: shell out to glslc; the "-o" output file is declared as a
        // build-graph output so the compiled .spv is cached and tracked.
        const vert_cmd = b.addSystemCommand(&.{
            "glslc",
            "--target-env=vulkan1.2",
            "-o",
        });
        const vert_spv = vert_cmd.addOutputFileArg("vert.spv");
        vert_cmd.addFileArg(b.path("shaders/triangle.vert"));
        triangle_exe.root_module.addAnonymousImport("vertex_shader", .{
            .root_source_file = vert_spv,
        });

        const frag_cmd = b.addSystemCommand(&.{
            "glslc",
            "--target-env=vulkan1.2",
            "-o",
        });
        const frag_spv = frag_cmd.addOutputFileArg("frag.spv");
        frag_cmd.addFileArg(b.path("shaders/triangle.frag"));
        triangle_exe.root_module.addAnonymousImport("fragment_shader", .{
            .root_source_file = frag_spv,
        });
    }

    // `zig build run-triangle` runs the installed example.
    const triangle_run_cmd = b.addRunArtifact(triangle_exe);
    triangle_run_cmd.step.dependOn(b.getInstallStep());

    const triangle_run_step = b.step("run-triangle", "Run the triangle example");
    triangle_run_step.dependOn(&triangle_run_cmd.step);
}
-------------------------------------------------------------------------------- /examples/build.zig.zon: --------------------------------------------------------------------------------
.{
    .name = .vulkan_zig_examples,
    .fingerprint = 0x60508bcca14cfc6d,
    .version = "0.1.0",
    .dependencies = .{
        // The generator itself, vendored from the parent directory.
        .vulkan_zig = .{
            .path = "..",
        },
        // Pinned Vulkan-Headers release providing registry/vk.xml.
        .vulkan_headers = .{
            .url = "https://github.com/KhronosGroup/Vulkan-Headers/archive/v1.3.283.tar.gz",
            .hash = "N-V-__8AAAkkoQGn5z1yoNVrwqZfnYmZp8AZ5CJgoHRMQI0c",
        },
    },
    .paths = .{""},
}
-------------------------------------------------------------------------------- /examples/c.zig: -------------------------------------------------------------------------------- 1 | const c = @cImport({ 2 | @cDefine("GLFW_INCLUDE_NONE", {}); 3 | @cInclude("GLFW/glfw3.h"); 4 | }); 5 | 6 | const vk = @import("vulkan"); 7 | 8 | // Re-export the GLFW things that we need 9 | pub const GLFW_TRUE = c.GLFW_TRUE; 10 | pub const GLFW_FALSE = c.GLFW_FALSE; 11 | pub const GLFW_CLIENT_API = c.GLFW_CLIENT_API; 12 | pub const GLFW_NO_API = c.GLFW_NO_API; 13 | 14 | pub const GLFWwindow = c.GLFWwindow; 15 | 16 | pub const glfwInit = c.glfwInit; 17 | pub const glfwTerminate = c.glfwTerminate; 18 | pub const glfwVulkanSupported = c.glfwVulkanSupported; 19 | pub const glfwWindowHint = c.glfwWindowHint; 20 | pub const glfwCreateWindow = c.glfwCreateWindow; 21 | pub const glfwDestroyWindow = c.glfwDestroyWindow; 22 | pub const glfwWindowShouldClose = c.glfwWindowShouldClose; 23 | pub const glfwGetRequiredInstanceExtensions = c.glfwGetRequiredInstanceExtensions; 24 | pub const glfwGetFramebufferSize = c.glfwGetFramebufferSize; 25 | pub const glfwPollEvents = c.glfwPollEvents; 26 | 27 | // usually the GLFW vulkan functions are exported if Vulkan is included, 28 | // but since thats not the case here, they are manually imported. 
29 | 30 | pub extern fn glfwGetInstanceProcAddress(instance: vk.Instance, procname: [*:0]const u8) vk.PfnVoidFunction; 31 | pub extern fn glfwGetPhysicalDevicePresentationSupport(instance: vk.Instance, pdev: vk.PhysicalDevice, queuefamily: u32) c_int; 32 | pub extern fn glfwCreateWindowSurface(instance: vk.Instance, window: *GLFWwindow, allocation_callbacks: ?*const vk.AllocationCallbacks, surface: *vk.SurfaceKHR) vk.Result; 33 | -------------------------------------------------------------------------------- /examples/graphics_context.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const vk = @import("vulkan"); 3 | const c = @import("c.zig"); 4 | const Allocator = std.mem.Allocator; 5 | 6 | const required_device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name}; 7 | 8 | /// There are 3 levels of bindings in vulkan-zig: 9 | /// - The Dispatch types (vk.BaseDispatch, vk.InstanceDispatch, vk.DeviceDispatch) 10 | /// are "plain" structs which just contain the function pointers for a particular 11 | /// object. 12 | /// - The Wrapper types (vk.Basewrapper, vk.InstanceWrapper, vk.DeviceWrapper) contains 13 | /// the Dispatch type, as well as Ziggified Vulkan functions - these return Zig errors, 14 | /// etc. 15 | /// - The Proxy types (vk.InstanceProxy, vk.DeviceProxy, vk.CommandBufferProxy, 16 | /// vk.QueueProxy) contain a pointer to a Wrapper and also contain the object's handle. 17 | /// Calling Ziggified functions on these types automatically passes the handle as 18 | /// the first parameter of each function. Note that this type accepts a pointer to 19 | /// a wrapper struct as there is a problem with LLVM where embedding function pointers 20 | /// and object pointer in the same struct leads to missed optimizations. If the wrapper 21 | /// member is a pointer, LLVM will try to optimize it as any other vtable. 
22 | /// The wrappers contain 23 | const BaseWrapper = vk.BaseWrapper; 24 | const InstanceWrapper = vk.InstanceWrapper; 25 | const DeviceWrapper = vk.DeviceWrapper; 26 | 27 | const Instance = vk.InstanceProxy; 28 | const Device = vk.DeviceProxy; 29 | 30 | pub const GraphicsContext = struct { 31 | pub const CommandBuffer = vk.CommandBufferProxy; 32 | 33 | allocator: Allocator, 34 | 35 | vkb: BaseWrapper, 36 | 37 | instance: Instance, 38 | surface: vk.SurfaceKHR, 39 | pdev: vk.PhysicalDevice, 40 | props: vk.PhysicalDeviceProperties, 41 | mem_props: vk.PhysicalDeviceMemoryProperties, 42 | 43 | dev: Device, 44 | graphics_queue: Queue, 45 | present_queue: Queue, 46 | 47 | pub fn init(allocator: Allocator, app_name: [*:0]const u8, window: *c.GLFWwindow) !GraphicsContext { 48 | var self: GraphicsContext = undefined; 49 | self.allocator = allocator; 50 | self.vkb = BaseWrapper.load(c.glfwGetInstanceProcAddress); 51 | 52 | var extension_names = std.ArrayList([*:0]const u8).init(allocator); 53 | defer extension_names.deinit(); 54 | // these extensions are to support vulkan in mac os 55 | // see https://github.com/glfw/glfw/issues/2335 56 | try extension_names.append("VK_KHR_portability_enumeration"); 57 | try extension_names.append("VK_KHR_get_physical_device_properties2"); 58 | 59 | var glfw_exts_count: u32 = 0; 60 | const glfw_exts = c.glfwGetRequiredInstanceExtensions(&glfw_exts_count); 61 | try extension_names.appendSlice(@ptrCast(glfw_exts[0..glfw_exts_count])); 62 | 63 | const instance = try self.vkb.createInstance(&.{ 64 | .p_application_info = &.{ 65 | .p_application_name = app_name, 66 | .application_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), 67 | .p_engine_name = app_name, 68 | .engine_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), 69 | .api_version = @bitCast(vk.API_VERSION_1_2), 70 | }, 71 | .enabled_extension_count = @intCast(extension_names.items.len), 72 | .pp_enabled_extension_names = extension_names.items.ptr, 73 | // 
enumerate_portability_bit_khr to support vulkan in mac os 74 | // see https://github.com/glfw/glfw/issues/2335 75 | .flags = .{ .enumerate_portability_bit_khr = true }, 76 | }, null); 77 | 78 | const vki = try allocator.create(InstanceWrapper); 79 | errdefer allocator.destroy(vki); 80 | vki.* = InstanceWrapper.load(instance, self.vkb.dispatch.vkGetInstanceProcAddr.?); 81 | self.instance = Instance.init(instance, vki); 82 | errdefer self.instance.destroyInstance(null); 83 | 84 | self.surface = try createSurface(self.instance, window); 85 | errdefer self.instance.destroySurfaceKHR(self.surface, null); 86 | 87 | const candidate = try pickPhysicalDevice(self.instance, allocator, self.surface); 88 | self.pdev = candidate.pdev; 89 | self.props = candidate.props; 90 | 91 | const dev = try initializeCandidate(self.instance, candidate); 92 | 93 | const vkd = try allocator.create(DeviceWrapper); 94 | errdefer allocator.destroy(vkd); 95 | vkd.* = DeviceWrapper.load(dev, self.instance.wrapper.dispatch.vkGetDeviceProcAddr.?); 96 | self.dev = Device.init(dev, vkd); 97 | errdefer self.dev.destroyDevice(null); 98 | 99 | self.graphics_queue = Queue.init(self.dev, candidate.queues.graphics_family); 100 | self.present_queue = Queue.init(self.dev, candidate.queues.present_family); 101 | 102 | self.mem_props = self.instance.getPhysicalDeviceMemoryProperties(self.pdev); 103 | 104 | return self; 105 | } 106 | 107 | pub fn deinit(self: GraphicsContext) void { 108 | self.dev.destroyDevice(null); 109 | self.instance.destroySurfaceKHR(self.surface, null); 110 | self.instance.destroyInstance(null); 111 | 112 | // Don't forget to free the tables to prevent a memory leak. 
113 | self.allocator.destroy(self.dev.wrapper); 114 | self.allocator.destroy(self.instance.wrapper); 115 | } 116 | 117 | pub fn deviceName(self: *const GraphicsContext) []const u8 { 118 | return std.mem.sliceTo(&self.props.device_name, 0); 119 | } 120 | 121 | pub fn findMemoryTypeIndex(self: GraphicsContext, memory_type_bits: u32, flags: vk.MemoryPropertyFlags) !u32 { 122 | for (self.mem_props.memory_types[0..self.mem_props.memory_type_count], 0..) |mem_type, i| { 123 | if (memory_type_bits & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(flags)) { 124 | return @truncate(i); 125 | } 126 | } 127 | 128 | return error.NoSuitableMemoryType; 129 | } 130 | 131 | pub fn allocate(self: GraphicsContext, requirements: vk.MemoryRequirements, flags: vk.MemoryPropertyFlags) !vk.DeviceMemory { 132 | return try self.dev.allocateMemory(&.{ 133 | .allocation_size = requirements.size, 134 | .memory_type_index = try self.findMemoryTypeIndex(requirements.memory_type_bits, flags), 135 | }, null); 136 | } 137 | }; 138 | 139 | pub const Queue = struct { 140 | handle: vk.Queue, 141 | family: u32, 142 | 143 | fn init(device: Device, family: u32) Queue { 144 | return .{ 145 | .handle = device.getDeviceQueue(family, 0), 146 | .family = family, 147 | }; 148 | } 149 | }; 150 | 151 | fn createSurface(instance: Instance, window: *c.GLFWwindow) !vk.SurfaceKHR { 152 | var surface: vk.SurfaceKHR = undefined; 153 | if (c.glfwCreateWindowSurface(instance.handle, window, null, &surface) != .success) { 154 | return error.SurfaceInitFailed; 155 | } 156 | 157 | return surface; 158 | } 159 | 160 | fn initializeCandidate(instance: Instance, candidate: DeviceCandidate) !vk.Device { 161 | const priority = [_]f32{1}; 162 | const qci = [_]vk.DeviceQueueCreateInfo{ 163 | .{ 164 | .queue_family_index = candidate.queues.graphics_family, 165 | .queue_count = 1, 166 | .p_queue_priorities = &priority, 167 | }, 168 | .{ 169 | .queue_family_index = candidate.queues.present_family, 170 | 
.queue_count = 1, 171 | .p_queue_priorities = &priority, 172 | }, 173 | }; 174 | 175 | const queue_count: u32 = if (candidate.queues.graphics_family == candidate.queues.present_family) 176 | 1 177 | else 178 | 2; 179 | 180 | return try instance.createDevice(candidate.pdev, &.{ 181 | .queue_create_info_count = queue_count, 182 | .p_queue_create_infos = &qci, 183 | .enabled_extension_count = required_device_extensions.len, 184 | .pp_enabled_extension_names = @ptrCast(&required_device_extensions), 185 | }, null); 186 | } 187 | 188 | const DeviceCandidate = struct { 189 | pdev: vk.PhysicalDevice, 190 | props: vk.PhysicalDeviceProperties, 191 | queues: QueueAllocation, 192 | }; 193 | 194 | const QueueAllocation = struct { 195 | graphics_family: u32, 196 | present_family: u32, 197 | }; 198 | 199 | fn pickPhysicalDevice( 200 | instance: Instance, 201 | allocator: Allocator, 202 | surface: vk.SurfaceKHR, 203 | ) !DeviceCandidate { 204 | const pdevs = try instance.enumeratePhysicalDevicesAlloc(allocator); 205 | defer allocator.free(pdevs); 206 | 207 | for (pdevs) |pdev| { 208 | if (try checkSuitable(instance, pdev, allocator, surface)) |candidate| { 209 | return candidate; 210 | } 211 | } 212 | 213 | return error.NoSuitableDevice; 214 | } 215 | 216 | fn checkSuitable( 217 | instance: Instance, 218 | pdev: vk.PhysicalDevice, 219 | allocator: Allocator, 220 | surface: vk.SurfaceKHR, 221 | ) !?DeviceCandidate { 222 | if (!try checkExtensionSupport(instance, pdev, allocator)) { 223 | return null; 224 | } 225 | 226 | if (!try checkSurfaceSupport(instance, pdev, surface)) { 227 | return null; 228 | } 229 | 230 | if (try allocateQueues(instance, pdev, allocator, surface)) |allocation| { 231 | const props = instance.getPhysicalDeviceProperties(pdev); 232 | return DeviceCandidate{ 233 | .pdev = pdev, 234 | .props = props, 235 | .queues = allocation, 236 | }; 237 | } 238 | 239 | return null; 240 | } 241 | 242 | fn allocateQueues(instance: Instance, pdev: vk.PhysicalDevice, allocator: 
Allocator, surface: vk.SurfaceKHR) !?QueueAllocation { 243 | const families = try instance.getPhysicalDeviceQueueFamilyPropertiesAlloc(pdev, allocator); 244 | defer allocator.free(families); 245 | 246 | var graphics_family: ?u32 = null; 247 | var present_family: ?u32 = null; 248 | 249 | for (families, 0..) |properties, i| { 250 | const family: u32 = @intCast(i); 251 | 252 | if (graphics_family == null and properties.queue_flags.graphics_bit) { 253 | graphics_family = family; 254 | } 255 | 256 | if (present_family == null and (try instance.getPhysicalDeviceSurfaceSupportKHR(pdev, family, surface)) == vk.TRUE) { 257 | present_family = family; 258 | } 259 | } 260 | 261 | if (graphics_family != null and present_family != null) { 262 | return QueueAllocation{ 263 | .graphics_family = graphics_family.?, 264 | .present_family = present_family.?, 265 | }; 266 | } 267 | 268 | return null; 269 | } 270 | 271 | fn checkSurfaceSupport(instance: Instance, pdev: vk.PhysicalDevice, surface: vk.SurfaceKHR) !bool { 272 | var format_count: u32 = undefined; 273 | _ = try instance.getPhysicalDeviceSurfaceFormatsKHR(pdev, surface, &format_count, null); 274 | 275 | var present_mode_count: u32 = undefined; 276 | _ = try instance.getPhysicalDeviceSurfacePresentModesKHR(pdev, surface, &present_mode_count, null); 277 | 278 | return format_count > 0 and present_mode_count > 0; 279 | } 280 | 281 | fn checkExtensionSupport( 282 | instance: Instance, 283 | pdev: vk.PhysicalDevice, 284 | allocator: Allocator, 285 | ) !bool { 286 | const propsv = try instance.enumerateDeviceExtensionPropertiesAlloc(pdev, null, allocator); 287 | defer allocator.free(propsv); 288 | 289 | for (required_device_extensions) |ext| { 290 | for (propsv) |props| { 291 | if (std.mem.eql(u8, std.mem.span(ext), std.mem.sliceTo(&props.extension_name, 0))) { 292 | break; 293 | } 294 | } else { 295 | return false; 296 | } 297 | } 298 | 299 | return true; 300 | } 301 | 
-------------------------------------------------------------------------------- /examples/shaders/fragment.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = std.gpu; 3 | 4 | extern const v_color: @Vector(3, f32) addrspace(.input); 5 | extern var f_color: @Vector(4, f32) addrspace(.output); 6 | 7 | export fn main() callconv(.spirv_fragment) void { 8 | gpu.location(&v_color, 0); 9 | gpu.location(&f_color, 0); 10 | 11 | const temp: @Vector(4, f32) = .{ v_color[0], v_color[1], v_color[2], 1.0 }; 12 | f_color = temp; 13 | } 14 | -------------------------------------------------------------------------------- /examples/shaders/triangle.frag: -------------------------------------------------------------------------------- 1 | #version 450 2 | 3 | layout(location = 0) in vec3 v_color; 4 | 5 | layout(location = 0) out vec4 f_color; 6 | 7 | void main() { 8 | f_color = vec4(v_color, 1.0); 9 | } 10 | -------------------------------------------------------------------------------- /examples/shaders/triangle.vert: -------------------------------------------------------------------------------- 1 | #version 450 2 | 3 | layout(location = 0) in vec2 a_pos; 4 | layout(location = 1) in vec3 a_color; 5 | 6 | layout(location = 0) out vec3 v_color; 7 | 8 | void main() { 9 | gl_Position = vec4(a_pos, 0.0, 1.0); 10 | v_color = a_color; 11 | } 12 | -------------------------------------------------------------------------------- /examples/shaders/vertex.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = std.gpu; 3 | 4 | extern const a_pos: @Vector(2, f32) addrspace(.input); 5 | extern const a_color: @Vector(3, f32) addrspace(.input); 6 | 7 | extern var v_color: @Vector(3, f32) addrspace(.output); 8 | 9 | export fn main() callconv(.spirv_vertex) void { 10 | gpu.location(&a_pos, 0); 11 | gpu.location(&a_color, 1); 12 | 
gpu.location(&v_color, 0); 13 | 14 | gpu.position_out.* = .{ a_pos[0], a_pos[1], 0.0, 1.0 }; 15 | v_color = a_color; 16 | } 17 | -------------------------------------------------------------------------------- /examples/swapchain.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const vk = @import("vulkan"); 3 | const GraphicsContext = @import("graphics_context.zig").GraphicsContext; 4 | const Allocator = std.mem.Allocator; 5 | 6 | pub const Swapchain = struct { 7 | pub const PresentState = enum { 8 | optimal, 9 | suboptimal, 10 | }; 11 | 12 | gc: *const GraphicsContext, 13 | allocator: Allocator, 14 | 15 | surface_format: vk.SurfaceFormatKHR, 16 | present_mode: vk.PresentModeKHR, 17 | extent: vk.Extent2D, 18 | handle: vk.SwapchainKHR, 19 | 20 | swap_images: []SwapImage, 21 | image_index: u32, 22 | next_image_acquired: vk.Semaphore, 23 | 24 | pub fn init(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D) !Swapchain { 25 | return try initRecycle(gc, allocator, extent, .null_handle); 26 | } 27 | 28 | pub fn initRecycle(gc: *const GraphicsContext, allocator: Allocator, extent: vk.Extent2D, old_handle: vk.SwapchainKHR) !Swapchain { 29 | const caps = try gc.instance.getPhysicalDeviceSurfaceCapabilitiesKHR(gc.pdev, gc.surface); 30 | const actual_extent = findActualExtent(caps, extent); 31 | if (actual_extent.width == 0 or actual_extent.height == 0) { 32 | return error.InvalidSurfaceDimensions; 33 | } 34 | 35 | const surface_format = try findSurfaceFormat(gc, allocator); 36 | const present_mode = try findPresentMode(gc, allocator); 37 | 38 | var image_count = caps.min_image_count + 1; 39 | if (caps.max_image_count > 0) { 40 | image_count = @min(image_count, caps.max_image_count); 41 | } 42 | 43 | const qfi = [_]u32{ gc.graphics_queue.family, gc.present_queue.family }; 44 | const sharing_mode: vk.SharingMode = if (gc.graphics_queue.family != gc.present_queue.family) 45 | .concurrent 
46 | else 47 | .exclusive; 48 | 49 | const handle = gc.dev.createSwapchainKHR(&.{ 50 | .surface = gc.surface, 51 | .min_image_count = image_count, 52 | .image_format = surface_format.format, 53 | .image_color_space = surface_format.color_space, 54 | .image_extent = actual_extent, 55 | .image_array_layers = 1, 56 | .image_usage = .{ .color_attachment_bit = true, .transfer_dst_bit = true }, 57 | .image_sharing_mode = sharing_mode, 58 | .queue_family_index_count = qfi.len, 59 | .p_queue_family_indices = &qfi, 60 | .pre_transform = caps.current_transform, 61 | .composite_alpha = .{ .opaque_bit_khr = true }, 62 | .present_mode = present_mode, 63 | .clipped = vk.TRUE, 64 | .old_swapchain = old_handle, 65 | }, null) catch { 66 | return error.SwapchainCreationFailed; 67 | }; 68 | errdefer gc.dev.destroySwapchainKHR(handle, null); 69 | 70 | if (old_handle != .null_handle) { 71 | // Apparently, the old swapchain handle still needs to be destroyed after recreating. 72 | gc.dev.destroySwapchainKHR(old_handle, null); 73 | } 74 | 75 | const swap_images = try initSwapchainImages(gc, handle, surface_format.format, allocator); 76 | errdefer { 77 | for (swap_images) |si| si.deinit(gc); 78 | allocator.free(swap_images); 79 | } 80 | 81 | var next_image_acquired = try gc.dev.createSemaphore(&.{}, null); 82 | errdefer gc.dev.destroySemaphore(next_image_acquired, null); 83 | 84 | const result = try gc.dev.acquireNextImageKHR(handle, std.math.maxInt(u64), next_image_acquired, .null_handle); 85 | // event with a .suboptimal_khr we can still go on to present 86 | // if we error even for .suboptimal_khr the example will crash and segfault 87 | // on resize, since even the recreated swapchain can be suboptimal during a 88 | // resize. 
89 | if (result.result == .not_ready or result.result == .timeout) { 90 | return error.ImageAcquireFailed; 91 | } 92 | 93 | std.mem.swap(vk.Semaphore, &swap_images[result.image_index].image_acquired, &next_image_acquired); 94 | return Swapchain{ 95 | .gc = gc, 96 | .allocator = allocator, 97 | .surface_format = surface_format, 98 | .present_mode = present_mode, 99 | .extent = actual_extent, 100 | .handle = handle, 101 | .swap_images = swap_images, 102 | .image_index = result.image_index, 103 | .next_image_acquired = next_image_acquired, 104 | }; 105 | } 106 | 107 | fn deinitExceptSwapchain(self: Swapchain) void { 108 | for (self.swap_images) |si| si.deinit(self.gc); 109 | self.allocator.free(self.swap_images); 110 | self.gc.dev.destroySemaphore(self.next_image_acquired, null); 111 | } 112 | 113 | pub fn waitForAllFences(self: Swapchain) !void { 114 | for (self.swap_images) |si| si.waitForFence(self.gc) catch {}; 115 | } 116 | 117 | pub fn deinit(self: Swapchain) void { 118 | // if we have no swapchain none of these should exist and we can just return 119 | if (self.handle == .null_handle) return; 120 | self.deinitExceptSwapchain(); 121 | self.gc.dev.destroySwapchainKHR(self.handle, null); 122 | } 123 | 124 | pub fn recreate(self: *Swapchain, new_extent: vk.Extent2D) !void { 125 | const gc = self.gc; 126 | const allocator = self.allocator; 127 | const old_handle = self.handle; 128 | self.deinitExceptSwapchain(); 129 | // set current handle to NULL_HANDLE to signal that the current swapchain does no longer need to be 130 | // de-initialized if we fail to recreate it. 131 | self.handle = .null_handle; 132 | self.* = initRecycle(gc, allocator, new_extent, old_handle) catch |err| switch (err) { 133 | error.SwapchainCreationFailed => { 134 | // we failed while recreating so our current handle still exists, 135 | // but we won't destroy it in the deferred deinit of this object. 
136 | gc.dev.destroySwapchainKHR(old_handle, null); 137 | return err; 138 | }, 139 | else => return err, 140 | }; 141 | } 142 | 143 | pub fn currentImage(self: Swapchain) vk.Image { 144 | return self.swap_images[self.image_index].image; 145 | } 146 | 147 | pub fn currentSwapImage(self: Swapchain) *const SwapImage { 148 | return &self.swap_images[self.image_index]; 149 | } 150 | 151 | pub fn present(self: *Swapchain, cmdbuf: vk.CommandBuffer) !PresentState { 152 | // Simple method: 153 | // 1) Acquire next image 154 | // 2) Wait for and reset fence of the acquired image 155 | // 3) Submit command buffer with fence of acquired image, 156 | // dependendent on the semaphore signalled by the first step. 157 | // 4) Present current frame, dependent on semaphore signalled by previous step 158 | // Problem: This way we can't reference the current image while rendering. 159 | // Better method: Shuffle the steps around such that acquire next image is the last step, 160 | // leaving the swapchain in a state with the current image. 161 | // 1) Wait for and reset fence of current image 162 | // 2) Submit command buffer, signalling fence of current image and dependent on 163 | // the semaphore signalled by step 4. 
164 | // 3) Present current frame, dependent on semaphore signalled by the submit 165 | // 4) Acquire next image, signalling its semaphore 166 | // One problem that arises is that we can't know beforehand which semaphore to signal, 167 | // so we keep an extra auxilery semaphore that is swapped around 168 | 169 | // Step 1: Make sure the current frame has finished rendering 170 | const current = self.currentSwapImage(); 171 | try current.waitForFence(self.gc); 172 | try self.gc.dev.resetFences(1, @ptrCast(¤t.frame_fence)); 173 | 174 | // Step 2: Submit the command buffer 175 | const wait_stage = [_]vk.PipelineStageFlags{.{ .top_of_pipe_bit = true }}; 176 | try self.gc.dev.queueSubmit(self.gc.graphics_queue.handle, 1, &[_]vk.SubmitInfo{.{ 177 | .wait_semaphore_count = 1, 178 | .p_wait_semaphores = @ptrCast(¤t.image_acquired), 179 | .p_wait_dst_stage_mask = &wait_stage, 180 | .command_buffer_count = 1, 181 | .p_command_buffers = @ptrCast(&cmdbuf), 182 | .signal_semaphore_count = 1, 183 | .p_signal_semaphores = @ptrCast(¤t.render_finished), 184 | }}, current.frame_fence); 185 | 186 | // Step 3: Present the current frame 187 | _ = try self.gc.dev.queuePresentKHR(self.gc.present_queue.handle, &.{ 188 | .wait_semaphore_count = 1, 189 | .p_wait_semaphores = @ptrCast(¤t.render_finished), 190 | .swapchain_count = 1, 191 | .p_swapchains = @ptrCast(&self.handle), 192 | .p_image_indices = @ptrCast(&self.image_index), 193 | }); 194 | 195 | // Step 4: Acquire next frame 196 | const result = try self.gc.dev.acquireNextImageKHR( 197 | self.handle, 198 | std.math.maxInt(u64), 199 | self.next_image_acquired, 200 | .null_handle, 201 | ); 202 | 203 | std.mem.swap(vk.Semaphore, &self.swap_images[result.image_index].image_acquired, &self.next_image_acquired); 204 | self.image_index = result.image_index; 205 | 206 | return switch (result.result) { 207 | .success => .optimal, 208 | .suboptimal_khr => .suboptimal, 209 | else => unreachable, 210 | }; 211 | } 212 | }; 213 | 214 | const 
SwapImage = struct { 215 | image: vk.Image, 216 | view: vk.ImageView, 217 | image_acquired: vk.Semaphore, 218 | render_finished: vk.Semaphore, 219 | frame_fence: vk.Fence, 220 | 221 | fn init(gc: *const GraphicsContext, image: vk.Image, format: vk.Format) !SwapImage { 222 | const view = try gc.dev.createImageView(&.{ 223 | .image = image, 224 | .view_type = .@"2d", 225 | .format = format, 226 | .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity }, 227 | .subresource_range = .{ 228 | .aspect_mask = .{ .color_bit = true }, 229 | .base_mip_level = 0, 230 | .level_count = 1, 231 | .base_array_layer = 0, 232 | .layer_count = 1, 233 | }, 234 | }, null); 235 | errdefer gc.dev.destroyImageView(view, null); 236 | 237 | const image_acquired = try gc.dev.createSemaphore(&.{}, null); 238 | errdefer gc.dev.destroySemaphore(image_acquired, null); 239 | 240 | const render_finished = try gc.dev.createSemaphore(&.{}, null); 241 | errdefer gc.dev.destroySemaphore(render_finished, null); 242 | 243 | const frame_fence = try gc.dev.createFence(&.{ .flags = .{ .signaled_bit = true } }, null); 244 | errdefer gc.dev.destroyFence(frame_fence, null); 245 | 246 | return SwapImage{ 247 | .image = image, 248 | .view = view, 249 | .image_acquired = image_acquired, 250 | .render_finished = render_finished, 251 | .frame_fence = frame_fence, 252 | }; 253 | } 254 | 255 | fn deinit(self: SwapImage, gc: *const GraphicsContext) void { 256 | self.waitForFence(gc) catch return; 257 | gc.dev.destroyImageView(self.view, null); 258 | gc.dev.destroySemaphore(self.image_acquired, null); 259 | gc.dev.destroySemaphore(self.render_finished, null); 260 | gc.dev.destroyFence(self.frame_fence, null); 261 | } 262 | 263 | fn waitForFence(self: SwapImage, gc: *const GraphicsContext) !void { 264 | _ = try gc.dev.waitForFences(1, @ptrCast(&self.frame_fence), vk.TRUE, std.math.maxInt(u64)); 265 | } 266 | }; 267 | 268 | fn initSwapchainImages(gc: *const GraphicsContext, swapchain: 
vk.SwapchainKHR, format: vk.Format, allocator: Allocator) ![]SwapImage { 269 | const images = try gc.dev.getSwapchainImagesAllocKHR(swapchain, allocator); 270 | defer allocator.free(images); 271 | 272 | const swap_images = try allocator.alloc(SwapImage, images.len); 273 | errdefer allocator.free(swap_images); 274 | 275 | var i: usize = 0; 276 | errdefer for (swap_images[0..i]) |si| si.deinit(gc); 277 | 278 | for (images) |image| { 279 | swap_images[i] = try SwapImage.init(gc, image, format); 280 | i += 1; 281 | } 282 | 283 | return swap_images; 284 | } 285 | 286 | fn findSurfaceFormat(gc: *const GraphicsContext, allocator: Allocator) !vk.SurfaceFormatKHR { 287 | const preferred = vk.SurfaceFormatKHR{ 288 | .format = .b8g8r8a8_srgb, 289 | .color_space = .srgb_nonlinear_khr, 290 | }; 291 | 292 | const surface_formats = try gc.instance.getPhysicalDeviceSurfaceFormatsAllocKHR(gc.pdev, gc.surface, allocator); 293 | defer allocator.free(surface_formats); 294 | 295 | for (surface_formats) |sfmt| { 296 | if (std.meta.eql(sfmt, preferred)) { 297 | return preferred; 298 | } 299 | } 300 | 301 | return surface_formats[0]; // There must always be at least one supported surface format 302 | } 303 | 304 | fn findPresentMode(gc: *const GraphicsContext, allocator: Allocator) !vk.PresentModeKHR { 305 | const present_modes = try gc.instance.getPhysicalDeviceSurfacePresentModesAllocKHR(gc.pdev, gc.surface, allocator); 306 | defer allocator.free(present_modes); 307 | 308 | const preferred = [_]vk.PresentModeKHR{ 309 | .mailbox_khr, 310 | .immediate_khr, 311 | }; 312 | 313 | for (preferred) |mode| { 314 | if (std.mem.indexOfScalar(vk.PresentModeKHR, present_modes, mode) != null) { 315 | return mode; 316 | } 317 | } 318 | 319 | return .fifo_khr; 320 | } 321 | 322 | fn findActualExtent(caps: vk.SurfaceCapabilitiesKHR, extent: vk.Extent2D) vk.Extent2D { 323 | if (caps.current_extent.width != 0xFFFF_FFFF) { 324 | return caps.current_extent; 325 | } else { 326 | return .{ 327 | .width = 
std.math.clamp(extent.width, caps.min_image_extent.width, caps.max_image_extent.width), 328 | .height = std.math.clamp(extent.height, caps.min_image_extent.height, caps.max_image_extent.height), 329 | }; 330 | } 331 | } 332 | -------------------------------------------------------------------------------- /examples/triangle.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const vk = @import("vulkan"); 3 | const c = @import("c.zig"); 4 | const GraphicsContext = @import("graphics_context.zig").GraphicsContext; 5 | const Swapchain = @import("swapchain.zig").Swapchain; 6 | const Allocator = std.mem.Allocator; 7 | 8 | const vert_spv align(@alignOf(u32)) = @embedFile("vertex_shader").*; 9 | const frag_spv align(@alignOf(u32)) = @embedFile("fragment_shader").*; 10 | 11 | const app_name = "vulkan-zig triangle example"; 12 | 13 | const Vertex = struct { 14 | const binding_description = vk.VertexInputBindingDescription{ 15 | .binding = 0, 16 | .stride = @sizeOf(Vertex), 17 | .input_rate = .vertex, 18 | }; 19 | 20 | const attribute_description = [_]vk.VertexInputAttributeDescription{ 21 | .{ 22 | .binding = 0, 23 | .location = 0, 24 | .format = .r32g32_sfloat, 25 | .offset = @offsetOf(Vertex, "pos"), 26 | }, 27 | .{ 28 | .binding = 0, 29 | .location = 1, 30 | .format = .r32g32b32_sfloat, 31 | .offset = @offsetOf(Vertex, "color"), 32 | }, 33 | }; 34 | 35 | pos: [2]f32, 36 | color: [3]f32, 37 | }; 38 | 39 | const vertices = [_]Vertex{ 40 | .{ .pos = .{ 0, -0.5 }, .color = .{ 1, 0, 0 } }, 41 | .{ .pos = .{ 0.5, 0.5 }, .color = .{ 0, 1, 0 } }, 42 | .{ .pos = .{ -0.5, 0.5 }, .color = .{ 0, 0, 1 } }, 43 | }; 44 | 45 | pub fn main() !void { 46 | if (c.glfwInit() != c.GLFW_TRUE) return error.GlfwInitFailed; 47 | defer c.glfwTerminate(); 48 | 49 | if (c.glfwVulkanSupported() != c.GLFW_TRUE) { 50 | std.log.err("GLFW could not find libvulkan", .{}); 51 | return error.NoVulkan; 52 | } 53 | 54 | var extent = 
vk.Extent2D{ .width = 800, .height = 600 }; 55 | 56 | c.glfwWindowHint(c.GLFW_CLIENT_API, c.GLFW_NO_API); 57 | const window = c.glfwCreateWindow( 58 | @intCast(extent.width), 59 | @intCast(extent.height), 60 | app_name, 61 | null, 62 | null, 63 | ) orelse return error.WindowInitFailed; 64 | defer c.glfwDestroyWindow(window); 65 | 66 | var gpa = std.heap.GeneralPurposeAllocator(.{}){}; 67 | defer _ = gpa.deinit(); 68 | const allocator = gpa.allocator(); 69 | 70 | const gc = try GraphicsContext.init(allocator, app_name, window); 71 | defer gc.deinit(); 72 | 73 | std.log.debug("Using device: {s}", .{gc.deviceName()}); 74 | 75 | var swapchain = try Swapchain.init(&gc, allocator, extent); 76 | defer swapchain.deinit(); 77 | 78 | const pipeline_layout = try gc.dev.createPipelineLayout(&.{ 79 | .flags = .{}, 80 | .set_layout_count = 0, 81 | .p_set_layouts = undefined, 82 | .push_constant_range_count = 0, 83 | .p_push_constant_ranges = undefined, 84 | }, null); 85 | defer gc.dev.destroyPipelineLayout(pipeline_layout, null); 86 | 87 | const render_pass = try createRenderPass(&gc, swapchain); 88 | defer gc.dev.destroyRenderPass(render_pass, null); 89 | 90 | const pipeline = try createPipeline(&gc, pipeline_layout, render_pass); 91 | defer gc.dev.destroyPipeline(pipeline, null); 92 | 93 | var framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); 94 | defer destroyFramebuffers(&gc, allocator, framebuffers); 95 | 96 | const pool = try gc.dev.createCommandPool(&.{ 97 | .queue_family_index = gc.graphics_queue.family, 98 | }, null); 99 | defer gc.dev.destroyCommandPool(pool, null); 100 | 101 | const buffer = try gc.dev.createBuffer(&.{ 102 | .size = @sizeOf(@TypeOf(vertices)), 103 | .usage = .{ .transfer_dst_bit = true, .vertex_buffer_bit = true }, 104 | .sharing_mode = .exclusive, 105 | }, null); 106 | defer gc.dev.destroyBuffer(buffer, null); 107 | const mem_reqs = gc.dev.getBufferMemoryRequirements(buffer); 108 | const memory = try gc.allocate(mem_reqs, 
.{ .device_local_bit = true }); 109 | defer gc.dev.freeMemory(memory, null); 110 | try gc.dev.bindBufferMemory(buffer, memory, 0); 111 | 112 | try uploadVertices(&gc, pool, buffer); 113 | 114 | var cmdbufs = try createCommandBuffers( 115 | &gc, 116 | pool, 117 | allocator, 118 | buffer, 119 | swapchain.extent, 120 | render_pass, 121 | pipeline, 122 | framebuffers, 123 | ); 124 | defer destroyCommandBuffers(&gc, pool, allocator, cmdbufs); 125 | 126 | while (c.glfwWindowShouldClose(window) == c.GLFW_FALSE) { 127 | var w: c_int = undefined; 128 | var h: c_int = undefined; 129 | c.glfwGetFramebufferSize(window, &w, &h); 130 | 131 | // Don't present or resize swapchain while the window is minimized 132 | if (w == 0 or h == 0) { 133 | c.glfwPollEvents(); 134 | continue; 135 | } 136 | 137 | const cmdbuf = cmdbufs[swapchain.image_index]; 138 | 139 | const state = swapchain.present(cmdbuf) catch |err| switch (err) { 140 | error.OutOfDateKHR => Swapchain.PresentState.suboptimal, 141 | else => |narrow| return narrow, 142 | }; 143 | 144 | if (state == .suboptimal or extent.width != @as(u32, @intCast(w)) or extent.height != @as(u32, @intCast(h))) { 145 | extent.width = @intCast(w); 146 | extent.height = @intCast(h); 147 | try swapchain.recreate(extent); 148 | 149 | destroyFramebuffers(&gc, allocator, framebuffers); 150 | framebuffers = try createFramebuffers(&gc, allocator, render_pass, swapchain); 151 | 152 | destroyCommandBuffers(&gc, pool, allocator, cmdbufs); 153 | cmdbufs = try createCommandBuffers( 154 | &gc, 155 | pool, 156 | allocator, 157 | buffer, 158 | swapchain.extent, 159 | render_pass, 160 | pipeline, 161 | framebuffers, 162 | ); 163 | } 164 | 165 | c.glfwPollEvents(); 166 | } 167 | 168 | try swapchain.waitForAllFences(); 169 | try gc.dev.deviceWaitIdle(); 170 | } 171 | 172 | fn uploadVertices(gc: *const GraphicsContext, pool: vk.CommandPool, buffer: vk.Buffer) !void { 173 | const staging_buffer = try gc.dev.createBuffer(&.{ 174 | .size = 
@sizeOf(@TypeOf(vertices)), 175 | .usage = .{ .transfer_src_bit = true }, 176 | .sharing_mode = .exclusive, 177 | }, null); 178 | defer gc.dev.destroyBuffer(staging_buffer, null); 179 | const mem_reqs = gc.dev.getBufferMemoryRequirements(staging_buffer); 180 | const staging_memory = try gc.allocate(mem_reqs, .{ .host_visible_bit = true, .host_coherent_bit = true }); 181 | defer gc.dev.freeMemory(staging_memory, null); 182 | try gc.dev.bindBufferMemory(staging_buffer, staging_memory, 0); 183 | 184 | { 185 | const data = try gc.dev.mapMemory(staging_memory, 0, vk.WHOLE_SIZE, .{}); 186 | defer gc.dev.unmapMemory(staging_memory); 187 | 188 | const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data)); 189 | @memcpy(gpu_vertices, vertices[0..]); 190 | } 191 | 192 | try copyBuffer(gc, pool, buffer, staging_buffer, @sizeOf(@TypeOf(vertices))); 193 | } 194 | 195 | fn copyBuffer(gc: *const GraphicsContext, pool: vk.CommandPool, dst: vk.Buffer, src: vk.Buffer, size: vk.DeviceSize) !void { 196 | var cmdbuf_handle: vk.CommandBuffer = undefined; 197 | try gc.dev.allocateCommandBuffers(&.{ 198 | .command_pool = pool, 199 | .level = .primary, 200 | .command_buffer_count = 1, 201 | }, @ptrCast(&cmdbuf_handle)); 202 | defer gc.dev.freeCommandBuffers(pool, 1, @ptrCast(&cmdbuf_handle)); 203 | 204 | const cmdbuf = GraphicsContext.CommandBuffer.init(cmdbuf_handle, gc.dev.wrapper); 205 | 206 | try cmdbuf.beginCommandBuffer(&.{ 207 | .flags = .{ .one_time_submit_bit = true }, 208 | }); 209 | 210 | const region = vk.BufferCopy{ 211 | .src_offset = 0, 212 | .dst_offset = 0, 213 | .size = size, 214 | }; 215 | cmdbuf.copyBuffer(src, dst, 1, @ptrCast(®ion)); 216 | 217 | try cmdbuf.endCommandBuffer(); 218 | 219 | const si = vk.SubmitInfo{ 220 | .command_buffer_count = 1, 221 | .p_command_buffers = (&cmdbuf.handle)[0..1], 222 | .p_wait_dst_stage_mask = undefined, 223 | }; 224 | try gc.dev.queueSubmit(gc.graphics_queue.handle, 1, @ptrCast(&si), .null_handle); 225 | try 
gc.dev.queueWaitIdle(gc.graphics_queue.handle); 226 | } 227 | 228 | fn createCommandBuffers( 229 | gc: *const GraphicsContext, 230 | pool: vk.CommandPool, 231 | allocator: Allocator, 232 | buffer: vk.Buffer, 233 | extent: vk.Extent2D, 234 | render_pass: vk.RenderPass, 235 | pipeline: vk.Pipeline, 236 | framebuffers: []vk.Framebuffer, 237 | ) ![]vk.CommandBuffer { 238 | const cmdbufs = try allocator.alloc(vk.CommandBuffer, framebuffers.len); 239 | errdefer allocator.free(cmdbufs); 240 | 241 | try gc.dev.allocateCommandBuffers(&.{ 242 | .command_pool = pool, 243 | .level = .primary, 244 | .command_buffer_count = @intCast(cmdbufs.len), 245 | }, cmdbufs.ptr); 246 | errdefer gc.dev.freeCommandBuffers(pool, @intCast(cmdbufs.len), cmdbufs.ptr); 247 | 248 | const clear = vk.ClearValue{ 249 | .color = .{ .float_32 = .{ 0, 0, 0, 1 } }, 250 | }; 251 | 252 | const viewport = vk.Viewport{ 253 | .x = 0, 254 | .y = 0, 255 | .width = @floatFromInt(extent.width), 256 | .height = @floatFromInt(extent.height), 257 | .min_depth = 0, 258 | .max_depth = 1, 259 | }; 260 | 261 | const scissor = vk.Rect2D{ 262 | .offset = .{ .x = 0, .y = 0 }, 263 | .extent = extent, 264 | }; 265 | 266 | for (cmdbufs, framebuffers) |cmdbuf, framebuffer| { 267 | try gc.dev.beginCommandBuffer(cmdbuf, &.{}); 268 | 269 | gc.dev.cmdSetViewport(cmdbuf, 0, 1, @ptrCast(&viewport)); 270 | gc.dev.cmdSetScissor(cmdbuf, 0, 1, @ptrCast(&scissor)); 271 | 272 | // This needs to be a separate definition - see https://github.com/ziglang/zig/issues/7627. 
273 | const render_area = vk.Rect2D{ 274 | .offset = .{ .x = 0, .y = 0 }, 275 | .extent = extent, 276 | }; 277 | 278 | gc.dev.cmdBeginRenderPass(cmdbuf, &.{ 279 | .render_pass = render_pass, 280 | .framebuffer = framebuffer, 281 | .render_area = render_area, 282 | .clear_value_count = 1, 283 | .p_clear_values = @ptrCast(&clear), 284 | }, .@"inline"); 285 | 286 | gc.dev.cmdBindPipeline(cmdbuf, .graphics, pipeline); 287 | const offset = [_]vk.DeviceSize{0}; 288 | gc.dev.cmdBindVertexBuffers(cmdbuf, 0, 1, @ptrCast(&buffer), &offset); 289 | gc.dev.cmdDraw(cmdbuf, vertices.len, 1, 0, 0); 290 | 291 | gc.dev.cmdEndRenderPass(cmdbuf); 292 | try gc.dev.endCommandBuffer(cmdbuf); 293 | } 294 | 295 | return cmdbufs; 296 | } 297 | 298 | fn destroyCommandBuffers(gc: *const GraphicsContext, pool: vk.CommandPool, allocator: Allocator, cmdbufs: []vk.CommandBuffer) void { 299 | gc.dev.freeCommandBuffers(pool, @truncate(cmdbufs.len), cmdbufs.ptr); 300 | allocator.free(cmdbufs); 301 | } 302 | 303 | fn createFramebuffers(gc: *const GraphicsContext, allocator: Allocator, render_pass: vk.RenderPass, swapchain: Swapchain) ![]vk.Framebuffer { 304 | const framebuffers = try allocator.alloc(vk.Framebuffer, swapchain.swap_images.len); 305 | errdefer allocator.free(framebuffers); 306 | 307 | var i: usize = 0; 308 | errdefer for (framebuffers[0..i]) |fb| gc.dev.destroyFramebuffer(fb, null); 309 | 310 | for (framebuffers) |*fb| { 311 | fb.* = try gc.dev.createFramebuffer(&.{ 312 | .render_pass = render_pass, 313 | .attachment_count = 1, 314 | .p_attachments = @ptrCast(&swapchain.swap_images[i].view), 315 | .width = swapchain.extent.width, 316 | .height = swapchain.extent.height, 317 | .layers = 1, 318 | }, null); 319 | i += 1; 320 | } 321 | 322 | return framebuffers; 323 | } 324 | 325 | fn destroyFramebuffers(gc: *const GraphicsContext, allocator: Allocator, framebuffers: []const vk.Framebuffer) void { 326 | for (framebuffers) |fb| gc.dev.destroyFramebuffer(fb, null); 327 | 
allocator.free(framebuffers); 328 | } 329 | 330 | fn createRenderPass(gc: *const GraphicsContext, swapchain: Swapchain) !vk.RenderPass { 331 | const color_attachment = vk.AttachmentDescription{ 332 | .format = swapchain.surface_format.format, 333 | .samples = .{ .@"1_bit" = true }, 334 | .load_op = .clear, 335 | .store_op = .store, 336 | .stencil_load_op = .dont_care, 337 | .stencil_store_op = .dont_care, 338 | .initial_layout = .undefined, 339 | .final_layout = .present_src_khr, 340 | }; 341 | 342 | const color_attachment_ref = vk.AttachmentReference{ 343 | .attachment = 0, 344 | .layout = .color_attachment_optimal, 345 | }; 346 | 347 | const subpass = vk.SubpassDescription{ 348 | .pipeline_bind_point = .graphics, 349 | .color_attachment_count = 1, 350 | .p_color_attachments = @ptrCast(&color_attachment_ref), 351 | }; 352 | 353 | return try gc.dev.createRenderPass(&.{ 354 | .attachment_count = 1, 355 | .p_attachments = @ptrCast(&color_attachment), 356 | .subpass_count = 1, 357 | .p_subpasses = @ptrCast(&subpass), 358 | }, null); 359 | } 360 | 361 | fn createPipeline( 362 | gc: *const GraphicsContext, 363 | layout: vk.PipelineLayout, 364 | render_pass: vk.RenderPass, 365 | ) !vk.Pipeline { 366 | const vert = try gc.dev.createShaderModule(&.{ 367 | .code_size = vert_spv.len, 368 | .p_code = @ptrCast(&vert_spv), 369 | }, null); 370 | defer gc.dev.destroyShaderModule(vert, null); 371 | 372 | const frag = try gc.dev.createShaderModule(&.{ 373 | .code_size = frag_spv.len, 374 | .p_code = @ptrCast(&frag_spv), 375 | }, null); 376 | defer gc.dev.destroyShaderModule(frag, null); 377 | 378 | const pssci = [_]vk.PipelineShaderStageCreateInfo{ 379 | .{ 380 | .stage = .{ .vertex_bit = true }, 381 | .module = vert, 382 | .p_name = "main", 383 | }, 384 | .{ 385 | .stage = .{ .fragment_bit = true }, 386 | .module = frag, 387 | .p_name = "main", 388 | }, 389 | }; 390 | 391 | const pvisci = vk.PipelineVertexInputStateCreateInfo{ 392 | .vertex_binding_description_count = 1, 393 | 
.p_vertex_binding_descriptions = @ptrCast(&Vertex.binding_description), 394 | .vertex_attribute_description_count = Vertex.attribute_description.len, 395 | .p_vertex_attribute_descriptions = &Vertex.attribute_description, 396 | }; 397 | 398 | const piasci = vk.PipelineInputAssemblyStateCreateInfo{ 399 | .topology = .triangle_list, 400 | .primitive_restart_enable = vk.FALSE, 401 | }; 402 | 403 | const pvsci = vk.PipelineViewportStateCreateInfo{ 404 | .viewport_count = 1, 405 | .p_viewports = undefined, // set in createCommandBuffers with cmdSetViewport 406 | .scissor_count = 1, 407 | .p_scissors = undefined, // set in createCommandBuffers with cmdSetScissor 408 | }; 409 | 410 | const prsci = vk.PipelineRasterizationStateCreateInfo{ 411 | .depth_clamp_enable = vk.FALSE, 412 | .rasterizer_discard_enable = vk.FALSE, 413 | .polygon_mode = .fill, 414 | .cull_mode = .{ .back_bit = true }, 415 | .front_face = .clockwise, 416 | .depth_bias_enable = vk.FALSE, 417 | .depth_bias_constant_factor = 0, 418 | .depth_bias_clamp = 0, 419 | .depth_bias_slope_factor = 0, 420 | .line_width = 1, 421 | }; 422 | 423 | const pmsci = vk.PipelineMultisampleStateCreateInfo{ 424 | .rasterization_samples = .{ .@"1_bit" = true }, 425 | .sample_shading_enable = vk.FALSE, 426 | .min_sample_shading = 1, 427 | .alpha_to_coverage_enable = vk.FALSE, 428 | .alpha_to_one_enable = vk.FALSE, 429 | }; 430 | 431 | const pcbas = vk.PipelineColorBlendAttachmentState{ 432 | .blend_enable = vk.FALSE, 433 | .src_color_blend_factor = .one, 434 | .dst_color_blend_factor = .zero, 435 | .color_blend_op = .add, 436 | .src_alpha_blend_factor = .one, 437 | .dst_alpha_blend_factor = .zero, 438 | .alpha_blend_op = .add, 439 | .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, 440 | }; 441 | 442 | const pcbsci = vk.PipelineColorBlendStateCreateInfo{ 443 | .logic_op_enable = vk.FALSE, 444 | .logic_op = .copy, 445 | .attachment_count = 1, 446 | .p_attachments = @ptrCast(&pcbas), 447 | 
.blend_constants = [_]f32{ 0, 0, 0, 0 }, 448 | }; 449 | 450 | const dynstate = [_]vk.DynamicState{ .viewport, .scissor }; 451 | const pdsci = vk.PipelineDynamicStateCreateInfo{ 452 | .flags = .{}, 453 | .dynamic_state_count = dynstate.len, 454 | .p_dynamic_states = &dynstate, 455 | }; 456 | 457 | const gpci = vk.GraphicsPipelineCreateInfo{ 458 | .flags = .{}, 459 | .stage_count = 2, 460 | .p_stages = &pssci, 461 | .p_vertex_input_state = &pvisci, 462 | .p_input_assembly_state = &piasci, 463 | .p_tessellation_state = null, 464 | .p_viewport_state = &pvsci, 465 | .p_rasterization_state = &prsci, 466 | .p_multisample_state = &pmsci, 467 | .p_depth_stencil_state = null, 468 | .p_color_blend_state = &pcbsci, 469 | .p_dynamic_state = &pdsci, 470 | .layout = layout, 471 | .render_pass = render_pass, 472 | .subpass = 0, 473 | .base_pipeline_handle = .null_handle, 474 | .base_pipeline_index = -1, 475 | }; 476 | 477 | var pipeline: vk.Pipeline = undefined; 478 | _ = try gc.dev.createGraphicsPipelines( 479 | .null_handle, 480 | 1, 481 | @ptrCast(&gpci), 482 | null, 483 | @ptrCast(&pipeline), 484 | ); 485 | return pipeline; 486 | } 487 | -------------------------------------------------------------------------------- /src/id_render.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const mem = std.mem; 3 | const Allocator = mem.Allocator; 4 | 5 | pub fn isZigPrimitiveType(name: []const u8) bool { 6 | if (name.len > 1 and (name[0] == 'u' or name[0] == 'i')) { 7 | for (name[1..]) |c| { 8 | switch (c) { 9 | '0'...'9' => {}, 10 | else => break, 11 | } 12 | } else return true; 13 | } 14 | 15 | const primitives = [_][]const u8{ 16 | "void", 17 | "comptime_float", 18 | "comptime_int", 19 | "bool", 20 | "isize", 21 | "usize", 22 | "f16", 23 | "f32", 24 | "f64", 25 | "f128", 26 | "noreturn", 27 | "type", 28 | "anyerror", 29 | "c_short", 30 | "c_ushort", 31 | "c_int", 32 | "c_uint", 33 | "c_long", 34 | "c_ulong", 35 
| "c_longlong", 36 | "c_ulonglong", 37 | "c_longdouble", 38 | // Removed in stage 2 in https://github.com/ziglang/zig/commit/05cf44933d753f7a5a53ab289ea60fd43761de57, 39 | // but these are still invalid identifiers in stage 1. 40 | "undefined", 41 | "true", 42 | "false", 43 | "null", 44 | }; 45 | 46 | for (primitives) |reserved| { 47 | if (mem.eql(u8, reserved, name)) { 48 | return true; 49 | } 50 | } 51 | 52 | return false; 53 | } 54 | 55 | pub fn writeIdentifier(writer: anytype, id: []const u8) !void { 56 | // https://github.com/ziglang/zig/issues/2897 57 | if (isZigPrimitiveType(id)) { 58 | try writer.print("@\"{}\"", .{std.zig.fmtEscapes(id)}); 59 | } else { 60 | try writer.print("{}", .{std.zig.fmtId(id)}); 61 | } 62 | } 63 | 64 | pub const CaseStyle = enum { 65 | snake, 66 | screaming_snake, 67 | title, 68 | camel, 69 | }; 70 | 71 | pub const SegmentIterator = struct { 72 | text: []const u8, 73 | offset: usize, 74 | 75 | pub fn init(text: []const u8) SegmentIterator { 76 | return .{ 77 | .text = text, 78 | .offset = 0, 79 | }; 80 | } 81 | 82 | fn nextBoundary(self: SegmentIterator) usize { 83 | var i = self.offset + 1; 84 | 85 | while (true) { 86 | if (i == self.text.len or self.text[i] == '_') { 87 | return i; 88 | } 89 | 90 | const prev_lower = std.ascii.isLower(self.text[i - 1]); 91 | const next_lower = std.ascii.isLower(self.text[i]); 92 | 93 | if (prev_lower and !next_lower) { 94 | return i; 95 | } else if (i != self.offset + 1 and !prev_lower and next_lower) { 96 | return i - 1; 97 | } 98 | 99 | i += 1; 100 | } 101 | } 102 | 103 | pub fn next(self: *SegmentIterator) ?[]const u8 { 104 | while (self.offset < self.text.len and self.text[self.offset] == '_') { 105 | self.offset += 1; 106 | } 107 | 108 | if (self.offset == self.text.len) { 109 | return null; 110 | } 111 | 112 | const end = self.nextBoundary(); 113 | const word = self.text[self.offset..end]; 114 | self.offset = end; 115 | return word; 116 | } 117 | 118 | pub fn rest(self: SegmentIterator) 
[]const u8 { 119 | if (self.offset >= self.text.len) { 120 | return &[_]u8{}; 121 | } else { 122 | return self.text[self.offset..]; 123 | } 124 | } 125 | }; 126 | 127 | pub const IdRenderer = struct { 128 | tags: []const []const u8, 129 | text_cache: std.ArrayList(u8), 130 | 131 | pub fn init(allocator: Allocator, tags: []const []const u8) IdRenderer { 132 | return .{ 133 | .tags = tags, 134 | .text_cache = std.ArrayList(u8).init(allocator), 135 | }; 136 | } 137 | 138 | pub fn deinit(self: IdRenderer) void { 139 | self.text_cache.deinit(); 140 | } 141 | 142 | fn renderSnake(self: *IdRenderer, screaming: bool, id: []const u8, tag: ?[]const u8) !void { 143 | var it = SegmentIterator.init(id); 144 | var first = true; 145 | 146 | while (it.next()) |segment| { 147 | if (first) { 148 | first = false; 149 | } else { 150 | try self.text_cache.append('_'); 151 | } 152 | 153 | for (segment) |c| { 154 | try self.text_cache.append(if (screaming) std.ascii.toUpper(c) else std.ascii.toLower(c)); 155 | } 156 | } 157 | 158 | if (tag) |name| { 159 | try self.text_cache.append('_'); 160 | 161 | for (name) |c| { 162 | try self.text_cache.append(if (screaming) std.ascii.toUpper(c) else std.ascii.toLower(c)); 163 | } 164 | } 165 | } 166 | 167 | fn renderCamel(self: *IdRenderer, title: bool, id: []const u8, tag: ?[]const u8) !void { 168 | var it = SegmentIterator.init(id); 169 | var lower_first = !title; 170 | 171 | while (it.next()) |segment| { 172 | var i: usize = 0; 173 | while (i < segment.len and std.ascii.isDigit(segment[i])) { 174 | try self.text_cache.append(segment[i]); 175 | i += 1; 176 | } 177 | 178 | if (i == segment.len) { 179 | continue; 180 | } 181 | 182 | if (i == 0 and lower_first) { 183 | try self.text_cache.append(std.ascii.toLower(segment[i])); 184 | } else { 185 | try self.text_cache.append(std.ascii.toUpper(segment[i])); 186 | } 187 | lower_first = false; 188 | 189 | for (segment[i + 1 ..]) |c| { 190 | try self.text_cache.append(std.ascii.toLower(c)); 191 | } 192 | 
} 193 | 194 | if (tag) |name| { 195 | try self.text_cache.appendSlice(name); 196 | } 197 | } 198 | 199 | pub fn renderFmt(self: *IdRenderer, out: anytype, comptime fmt: []const u8, args: anytype) !void { 200 | self.text_cache.items.len = 0; 201 | try std.fmt.format(self.text_cache.writer(), fmt, args); 202 | try writeIdentifier(out, self.text_cache.items); 203 | } 204 | 205 | pub fn renderWithCase(self: *IdRenderer, out: anytype, case_style: CaseStyle, id: []const u8) !void { 206 | const tag = self.getAuthorTag(id); 207 | // The trailing underscore doesn't need to be removed here as its removed by the SegmentIterator. 208 | const adjusted_id = if (tag) |name| id[0 .. id.len - name.len] else id; 209 | 210 | self.text_cache.items.len = 0; 211 | 212 | switch (case_style) { 213 | .snake => try self.renderSnake(false, adjusted_id, tag), 214 | .screaming_snake => try self.renderSnake(true, adjusted_id, tag), 215 | .title => try self.renderCamel(true, adjusted_id, tag), 216 | .camel => try self.renderCamel(false, adjusted_id, tag), 217 | } 218 | 219 | try writeIdentifier(out, self.text_cache.items); 220 | } 221 | 222 | pub fn getAuthorTag(self: IdRenderer, id: []const u8) ?[]const u8 { 223 | for (self.tags) |tag| { 224 | if (mem.endsWith(u8, id, tag)) { 225 | return tag; 226 | } 227 | } 228 | 229 | return null; 230 | } 231 | 232 | pub fn stripAuthorTag(self: IdRenderer, id: []const u8) []const u8 { 233 | if (self.getAuthorTag(id)) |tag| { 234 | return mem.trimRight(u8, id[0 .. 
id.len - tag.len], "_"); 235 | } 236 | 237 | return id; 238 | } 239 | }; 240 | -------------------------------------------------------------------------------- /src/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const generator = @import("vulkan/generator.zig"); 3 | 4 | fn invalidUsage(prog_name: []const u8, comptime fmt: []const u8, args: anytype) noreturn { 5 | std.log.err(fmt, args); 6 | std.log.err("see {s} --help for usage", .{prog_name}); 7 | std.process.exit(1); 8 | } 9 | 10 | fn reportParseErrors(tree: std.zig.Ast) !void { 11 | const stderr = std.io.getStdErr().writer(); 12 | 13 | for (tree.errors) |err| { 14 | const loc = tree.tokenLocation(0, err.token); 15 | try stderr.print("(vulkan-zig error):{}:{}: error: ", .{ loc.line + 1, loc.column + 1 }); 16 | try tree.renderError(err, stderr); 17 | try stderr.print("\n{s}\n", .{tree.source[loc.line_start..loc.line_end]}); 18 | for (0..loc.column) |_| { 19 | try stderr.writeAll(" "); 20 | } 21 | try stderr.writeAll("^\n"); 22 | } 23 | } 24 | 25 | pub fn main() !void { 26 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 27 | defer arena.deinit(); 28 | const allocator = arena.allocator(); 29 | 30 | var args = std.process.argsWithAllocator(allocator) catch |err| switch (err) { 31 | error.OutOfMemory => @panic("OOM"), 32 | }; 33 | const prog_name = args.next() orelse "vulkan-zig-generator"; 34 | 35 | var maybe_xml_path: ?[]const u8 = null; 36 | var maybe_out_path: ?[]const u8 = null; 37 | var maybe_video_xml_path: ?[]const u8 = null; 38 | var debug: bool = false; 39 | var api = generator.Api.vulkan; 40 | 41 | while (args.next()) |arg| { 42 | if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) { 43 | @setEvalBranchQuota(2000); 44 | std.io.getStdOut().writer().print( 45 | \\Utility to generate a Zig binding from the Vulkan XML API registry. 
46 | \\ 47 | \\The most recent Vulkan XML API registry can be obtained from 48 | \\https://github.com/KhronosGroup/Vulkan-Docs/blob/master/xml/vk.xml, 49 | \\and the most recent LunarG Vulkan SDK version can be found at 50 | \\$VULKAN_SDK/x86_64/share/vulkan/registry/vk.xml. 51 | \\ 52 | \\Usage: {s} [options] 53 | \\Options: 54 | \\-h --help show this message and exit. 55 | \\-a --api Generate API for 'vulkan' or 'vulkansc'. Defaults to 'vulkan'. 56 | \\--debug Write out unformatted source if does not parse correctly. 57 | \\--video Also gnerate Vulkan Video API bindings from video.xml 58 | \\ registry at . 59 | \\ 60 | , 61 | .{prog_name}, 62 | ) catch |err| { 63 | std.log.err("failed to write to stdout: {s}", .{@errorName(err)}); 64 | std.process.exit(1); 65 | }; 66 | return; 67 | } else if (std.mem.eql(u8, arg, "-a") or std.mem.eql(u8, arg, "--api")) { 68 | const api_str = args.next() orelse { 69 | invalidUsage(prog_name, "{s} expects argument ", .{arg}); 70 | }; 71 | api = std.meta.stringToEnum(generator.Api, api_str) orelse { 72 | invalidUsage(prog_name, "invalid api '{s}'", .{api_str}); 73 | }; 74 | } else if (std.mem.eql(u8, arg, "--debug")) { 75 | debug = true; 76 | } else if (std.mem.eql(u8, arg, "--video")) { 77 | maybe_video_xml_path = args.next() orelse { 78 | invalidUsage(prog_name, "{s} expects argument ", .{arg}); 79 | }; 80 | } else if (maybe_xml_path == null) { 81 | maybe_xml_path = arg; 82 | } else if (maybe_out_path == null) { 83 | maybe_out_path = arg; 84 | } else { 85 | invalidUsage(prog_name, "superficial argument '{s}'", .{arg}); 86 | } 87 | } 88 | 89 | const xml_path = maybe_xml_path orelse { 90 | invalidUsage(prog_name, "missing required argument ", .{}); 91 | }; 92 | 93 | const out_path = maybe_out_path orelse { 94 | invalidUsage(prog_name, "missing required argument ", .{}); 95 | }; 96 | 97 | const cwd = std.fs.cwd(); 98 | const xml_src = cwd.readFileAlloc(allocator, xml_path, std.math.maxInt(usize)) catch |err| { 99 | 
std.log.err("failed to open input file '{s}' ({s})", .{ xml_path, @errorName(err) }); 100 | std.process.exit(1); 101 | }; 102 | 103 | const maybe_video_xml_src = if (maybe_video_xml_path) |video_xml_path| 104 | cwd.readFileAlloc(allocator, video_xml_path, std.math.maxInt(usize)) catch |err| { 105 | std.log.err("failed to open input file '{s}' ({s})", .{ video_xml_path, @errorName(err) }); 106 | std.process.exit(1); 107 | } 108 | else 109 | null; 110 | 111 | var out_buffer = std.ArrayList(u8).init(allocator); 112 | generator.generate(allocator, api, xml_src, maybe_video_xml_src, out_buffer.writer()) catch |err| switch (err) { 113 | error.InvalidXml => { 114 | std.log.err("invalid vulkan registry - invalid xml", .{}); 115 | std.log.err("please check that the correct vk.xml file is passed", .{}); 116 | std.process.exit(1); 117 | }, 118 | error.InvalidRegistry => { 119 | std.log.err("invalid vulkan registry - registry is valid xml but contents are invalid", .{}); 120 | std.log.err("please check that the correct vk.xml file is passed", .{}); 121 | std.process.exit(1); 122 | }, 123 | error.UnhandledBitfieldStruct => { 124 | std.log.err("unhandled struct with bit fields detected in vk.xml", .{}); 125 | std.log.err("this is a bug in vulkan-zig", .{}); 126 | std.log.err("please make a bug report at https://github.com/Snektron/vulkan-zig/issues/", .{}); 127 | std.process.exit(1); 128 | }, 129 | error.OutOfMemory => @panic("oom"), 130 | }; 131 | 132 | out_buffer.append(0) catch @panic("oom"); 133 | 134 | const src = out_buffer.items[0 .. 
out_buffer.items.len - 1 :0]; 135 | const tree = std.zig.Ast.parse(allocator, src, .zig) catch |err| switch (err) { 136 | error.OutOfMemory => @panic("oom"), 137 | }; 138 | 139 | const formatted = if (tree.errors.len > 0) blk: { 140 | std.log.err("generated invalid zig code", .{}); 141 | std.log.err("this is a bug in vulkan-zig", .{}); 142 | std.log.err("please make a bug report at https://github.com/Snektron/vulkan-zig/issues/", .{}); 143 | std.log.err("or run with --debug to write out unformatted source", .{}); 144 | 145 | reportParseErrors(tree) catch |err| { 146 | std.log.err("failed to dump ast errors: {s}", .{@errorName(err)}); 147 | std.process.exit(1); 148 | }; 149 | 150 | if (debug) { 151 | break :blk src; 152 | } 153 | std.process.exit(1); 154 | } else tree.render(allocator) catch |err| switch (err) { 155 | error.OutOfMemory => @panic("oom"), 156 | }; 157 | 158 | if (std.fs.path.dirname(out_path)) |dir| { 159 | cwd.makePath(dir) catch |err| { 160 | std.log.err("failed to create output directory '{s}' ({s})", .{ dir, @errorName(err) }); 161 | std.process.exit(1); 162 | }; 163 | } 164 | 165 | cwd.writeFile(.{ 166 | .sub_path = out_path, 167 | .data = formatted, 168 | }) catch |err| { 169 | std.log.err("failed to write to output file '{s}' ({s})", .{ out_path, @errorName(err) }); 170 | std.process.exit(1); 171 | }; 172 | } 173 | 174 | test "main" { 175 | _ = @import("xml.zig"); 176 | _ = @import("vulkan/c_parse.zig"); 177 | } 178 | -------------------------------------------------------------------------------- /src/vulkan/c_parse.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const registry = @import("registry.zig"); 3 | const xml = @import("../xml.zig"); 4 | const mem = std.mem; 5 | const Allocator = mem.Allocator; 6 | const testing = std.testing; 7 | const ArraySize = registry.Array.ArraySize; 8 | const TypeInfo = registry.TypeInfo; 9 | 10 | pub const Token = struct { 11 | kind: 
Kind, 12 | text: []const u8, 13 | 14 | const Kind = enum { 15 | id, // Any id thats not a keyword 16 | name, // Vulkan ... 17 | type_name, // Vulkan ... 18 | enum_name, // Vulkan ... 19 | int, 20 | star, 21 | comma, 22 | semicolon, 23 | colon, 24 | minus, 25 | tilde, 26 | dot, 27 | hash, 28 | lparen, 29 | rparen, 30 | lbracket, 31 | rbracket, 32 | kw_typedef, 33 | kw_const, 34 | kw_vkapi_ptr, 35 | kw_struct, 36 | }; 37 | }; 38 | 39 | pub const CTokenizer = struct { 40 | source: []const u8, 41 | offset: usize = 0, 42 | in_comment: bool = false, 43 | 44 | fn peek(self: CTokenizer) ?u8 { 45 | return if (self.offset < self.source.len) self.source[self.offset] else null; 46 | } 47 | 48 | fn consumeNoEof(self: *CTokenizer) u8 { 49 | const c = self.peek().?; 50 | self.offset += 1; 51 | return c; 52 | } 53 | 54 | fn consume(self: *CTokenizer) !u8 { 55 | return if (self.offset < self.source.len) 56 | return self.consumeNoEof() 57 | else 58 | return null; 59 | } 60 | 61 | fn keyword(self: *CTokenizer) Token { 62 | const start = self.offset; 63 | _ = self.consumeNoEof(); 64 | 65 | while (true) { 66 | const c = self.peek() orelse break; 67 | switch (c) { 68 | 'A'...'Z', 'a'...'z', '_', '0'...'9' => _ = self.consumeNoEof(), 69 | else => break, 70 | } 71 | } 72 | 73 | const token_text = self.source[start..self.offset]; 74 | 75 | const kind = if (mem.eql(u8, token_text, "typedef")) 76 | Token.Kind.kw_typedef 77 | else if (mem.eql(u8, token_text, "const")) 78 | Token.Kind.kw_const 79 | else if (mem.eql(u8, token_text, "VKAPI_PTR")) 80 | Token.Kind.kw_vkapi_ptr 81 | else if (mem.eql(u8, token_text, "struct")) 82 | Token.Kind.kw_struct 83 | else 84 | Token.Kind.id; 85 | 86 | return .{ .kind = kind, .text = token_text }; 87 | } 88 | 89 | fn int(self: *CTokenizer) Token { 90 | const start = self.offset; 91 | _ = self.consumeNoEof(); 92 | 93 | const hex = self.peek() == 'x'; 94 | if (hex) { 95 | _ = self.consumeNoEof(); 96 | } 97 | 98 | while (true) { 99 | switch (self.peek() orelse 
break) { 100 | '0'...'9' => _ = self.consumeNoEof(), 101 | 'A'...'F', 'a'...'f' => { 102 | if (!hex) break; 103 | _ = self.consumeNoEof(); 104 | }, 105 | else => break, 106 | } 107 | } 108 | 109 | return .{ 110 | .kind = .int, 111 | .text = self.source[start..self.offset], 112 | }; 113 | } 114 | 115 | fn skipws(self: *CTokenizer) void { 116 | while (true) { 117 | switch (self.peek() orelse break) { 118 | ' ', '\t', '\n', '\r' => _ = self.consumeNoEof(), 119 | else => break, 120 | } 121 | } 122 | } 123 | 124 | pub fn next(self: *CTokenizer) !?Token { 125 | self.skipws(); 126 | if (mem.startsWith(u8, self.source[self.offset..], "//") or self.in_comment) { 127 | const end = mem.indexOfScalarPos(u8, self.source, self.offset, '\n') orelse { 128 | self.offset = self.source.len; 129 | self.in_comment = true; 130 | return null; 131 | }; 132 | self.in_comment = false; 133 | self.offset = end + 1; 134 | } 135 | self.skipws(); 136 | 137 | const c = self.peek() orelse return null; 138 | var kind: Token.Kind = undefined; 139 | switch (c) { 140 | 'A'...'Z', 'a'...'z', '_' => return self.keyword(), 141 | '0'...'9' => return self.int(), 142 | '*' => kind = .star, 143 | ',' => kind = .comma, 144 | ';' => kind = .semicolon, 145 | ':' => kind = .colon, 146 | '-' => kind = .minus, 147 | '~' => kind = .tilde, 148 | '.' 
=> kind = .dot, 149 | '#' => kind = .hash, 150 | '[' => kind = .lbracket, 151 | ']' => kind = .rbracket, 152 | '(' => kind = .lparen, 153 | ')' => kind = .rparen, 154 | else => return error.UnexpectedCharacter, 155 | } 156 | 157 | const start = self.offset; 158 | _ = self.consumeNoEof(); 159 | return Token{ .kind = kind, .text = self.source[start..self.offset] }; 160 | } 161 | }; 162 | 163 | pub const XmlCTokenizer = struct { 164 | it: xml.Element.ChildIterator, 165 | ctok: ?CTokenizer = null, 166 | current: ?Token = null, 167 | 168 | pub fn init(elem: *xml.Element) XmlCTokenizer { 169 | return .{ 170 | .it = elem.iterator(), 171 | }; 172 | } 173 | 174 | fn elemToToken(elem: *xml.Element) !?Token { 175 | // Sometimes we encounter empty comment tags. Filter those out 176 | // by early returning here, otherwise the next check will 177 | // determine that the input is not valid XML. 178 | if (mem.eql(u8, elem.tag, "comment")) { 179 | return null; 180 | } else if (elem.children.len != 1 or elem.children[0] != .char_data) { 181 | return error.InvalidXml; 182 | } 183 | 184 | const text = elem.children[0].char_data; 185 | if (mem.eql(u8, elem.tag, "type")) { 186 | return Token{ .kind = .type_name, .text = text }; 187 | } else if (mem.eql(u8, elem.tag, "enum")) { 188 | return Token{ .kind = .enum_name, .text = text }; 189 | } else if (mem.eql(u8, elem.tag, "name")) { 190 | return Token{ .kind = .name, .text = text }; 191 | } else { 192 | return error.InvalidTag; 193 | } 194 | } 195 | 196 | fn next(self: *XmlCTokenizer) !?Token { 197 | if (self.current) |current| { 198 | const token = current; 199 | self.current = null; 200 | return token; 201 | } 202 | 203 | var in_comment: bool = false; 204 | 205 | while (true) { 206 | if (self.ctok) |*ctok| { 207 | if (try ctok.next()) |tok| { 208 | return tok; 209 | } 210 | in_comment = ctok.in_comment; 211 | } 212 | 213 | self.ctok = null; 214 | 215 | if (self.it.next()) |child| { 216 | switch (child.*) { 217 | .char_data => |cdata| 
self.ctok = CTokenizer{ .source = cdata, .in_comment = in_comment }, 218 | .comment => {}, // xml comment 219 | .element => |elem| if (!in_comment) if (try elemToToken(elem)) |tok| return tok, 220 | } 221 | } else { 222 | return null; 223 | } 224 | } 225 | } 226 | 227 | fn nextNoEof(self: *XmlCTokenizer) !Token { 228 | return (try self.next()) orelse return error.UnexpectedEof; 229 | } 230 | 231 | fn peek(self: *XmlCTokenizer) !?Token { 232 | if (self.current) |current| { 233 | return current; 234 | } 235 | 236 | self.current = try self.next(); 237 | return self.current; 238 | } 239 | 240 | fn peekNoEof(self: *XmlCTokenizer) !Token { 241 | return (try self.peek()) orelse return error.UnexpectedEof; 242 | } 243 | 244 | fn expect(self: *XmlCTokenizer, kind: Token.Kind) !Token { 245 | const tok = (try self.next()) orelse return error.UnexpectedEof; 246 | if (tok.kind != kind) { 247 | return error.UnexpectedToken; 248 | } 249 | 250 | return tok; 251 | } 252 | }; 253 | 254 | // TYPEDEF = kw_typedef DECLARATION ';' 255 | pub fn parseTypedef(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Declaration { 256 | _ = try xctok.expect(.kw_typedef); 257 | const decl = try parseDeclaration(allocator, xctok, ptrs_optional); 258 | _ = try xctok.expect(.semicolon); 259 | if (try xctok.peek()) |_| { 260 | return error.InvalidSyntax; 261 | } 262 | 263 | return registry.Declaration{ 264 | .name = decl.name orelse return error.MissingTypeIdentifier, 265 | .decl_type = .{ .typedef = decl.decl_type }, 266 | }; 267 | } 268 | 269 | // MEMBER = DECLARATION (':' int)? 
pub fn parseMember(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Container.Field {
    const decl = try parseDeclaration(allocator, xctok, ptrs_optional);
    var field = registry.Container.Field{
        .name = decl.name orelse return error.MissingTypeIdentifier,
        .field_type = decl.decl_type,
        .bits = null,
        .is_buffer_len = false,
        .is_optional = false,
    };

    if (try xctok.peek()) |tok| {
        if (tok.kind != .colon) {
            return error.InvalidSyntax;
        }

        _ = try xctok.nextNoEof();
        const bits = try xctok.expect(.int);
        field.bits = try std.fmt.parseInt(usize, bits.text, 10);

        // Assume for now that there won't be any invalid C types like `char char* x : 4`.

        if (try xctok.peek()) |_| {
            return error.InvalidSyntax;
        }
    }

    return field;
}

pub fn parseParamOrProto(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) !registry.Declaration {
    var decl = try parseDeclaration(allocator, xctok, ptrs_optional);
    if (try xctok.peek()) |_| {
        return error.InvalidSyntax;
    }

    // Decay pointers
    switch (decl.decl_type) {
        .array => {
            const child = try allocator.create(TypeInfo);
            child.* = decl.decl_type;

            decl.decl_type = .{
                .pointer = .{
                    .is_const = decl.is_const,
                    .is_optional = false,
                    .size = .one,
                    .child = child,
                },
            };
        },
        else => {},
    }

    return registry.Declaration{
        .name = decl.name orelse return error.MissingTypeIdentifier,
        .decl_type = .{ .typedef = decl.decl_type },
    };
}

pub const Declaration = struct {
    name: ?[]const u8, // Parameter names may be optional, especially in case of func(void)
    decl_type: TypeInfo,
    is_const: bool,
};

pub const ParseError = error{
    OutOfMemory,
    InvalidSyntax,
    InvalidTag,
    InvalidXml,
    Overflow,
    UnexpectedEof,
    UnexpectedCharacter,
    UnexpectedToken,
    MissingTypeIdentifier,
};

// DECLARATION = kw_const? type_name DECLARATOR
// DECLARATOR = POINTERS (id | name)? ('[' ARRAY_DECLARATOR ']')*
//            | POINTERS '(' FNPTRSUFFIX
fn parseDeclaration(allocator: Allocator, xctok: *XmlCTokenizer, ptrs_optional: bool) ParseError!Declaration {
    // Parse declaration constness
    var tok = try xctok.nextNoEof();
    const inner_is_const = tok.kind == .kw_const;
    if (inner_is_const) {
        tok = try xctok.nextNoEof();
    }

    if (tok.kind == .kw_struct) {
        tok = try xctok.nextNoEof();
    }
    // Parse type name
    if (tok.kind != .type_name and tok.kind != .id) return error.InvalidSyntax;
    const type_name = tok.text;

    var type_info = TypeInfo{ .name = type_name };

    // Parse pointers
    type_info = try parsePointers(allocator, xctok, inner_is_const, type_info, ptrs_optional);

    // Parse name / fn ptr

    if (try parseFnPtrSuffix(allocator, xctok, type_info, ptrs_optional)) |decl| {
        return Declaration{
            .name = decl.name,
            .decl_type = decl.decl_type,
            .is_const = inner_is_const,
        };
    }

    const name = blk: {
        const name_tok = (try xctok.peek()) orelse break :blk null;
        if (name_tok.kind == .id or name_tok.kind == .name) {
            _ = try xctok.nextNoEof();
            break :blk name_tok.text;
        } else {
            break :blk null;
        }
    };

    var inner_type = &type_info;
    while (try parseArrayDeclarator(xctok)) |array_size| {
        // Move the current inner type to a new node on the heap
        const child = try allocator.create(TypeInfo);
        child.* = inner_type.*;

        // Re-assign the previous inner type for the array type info node
        inner_type.* = .{
            .array = .{
                .size = array_size,
                .valid_size = .all, // Refined later
                .is_optional = true,
                .child = child,
            },
        };

        // update the inner_type pointer so it points to the proper
        // inner type again
        inner_type = child;
    }

    return Declaration{
        .name = name,
        .decl_type = type_info,
        .is_const = inner_is_const,
    };
}

// FNPTRSUFFIX = kw_vkapi_ptr '*' name' ')' '(' ('void' | (DECLARATION (',' DECLARATION)*)?) ')'
fn parseFnPtrSuffix(allocator: Allocator, xctok: *XmlCTokenizer, return_type: TypeInfo, ptrs_optional: bool) !?Declaration {
    const lparen = try xctok.peek();
    if (lparen == null or lparen.?.kind != .lparen) {
        return null;
    }
    _ = try xctok.nextNoEof();
    _ = try xctok.expect(.kw_vkapi_ptr);
    _ = try xctok.expect(.star);
    const name = try xctok.expect(.name);
    _ = try xctok.expect(.rparen);
    _ = try xctok.expect(.lparen);

    const return_type_heap = try allocator.create(TypeInfo);
    return_type_heap.* = return_type;

    var command_ptr = Declaration{
        .name = name.text,
        .decl_type = .{
            .command_ptr = .{
                .params = &[_]registry.Command.Param{},
                .return_type = return_type_heap,
                .success_codes = &[_][]const u8{},
                .error_codes = &[_][]const u8{},
            },
        },
        .is_const = false,
    };

    const first_param = try parseDeclaration(allocator, xctok, ptrs_optional);
    if (first_param.name == null) {
        if (first_param.decl_type != .name or !mem.eql(u8, first_param.decl_type.name, "void")) {
            return error.InvalidSyntax;
        }

        _ = try xctok.expect(.rparen);
        return command_ptr;
    }

    // There is no good way to estimate the number of parameters beforehand.
458 | // Fortunately, there are usually a relatively low number of parameters to a function pointer, 459 | // so an ArrayList backed by an arena allocator is good enough. 460 | var params = std.ArrayList(registry.Command.Param).init(allocator); 461 | try params.append(.{ 462 | .name = first_param.name.?, 463 | .param_type = first_param.decl_type, 464 | .is_buffer_len = false, 465 | .is_optional = false, 466 | }); 467 | 468 | while (true) { 469 | switch ((try xctok.peekNoEof()).kind) { 470 | .rparen => break, 471 | .comma => _ = try xctok.nextNoEof(), 472 | else => return error.InvalidSyntax, 473 | } 474 | 475 | const decl = try parseDeclaration(allocator, xctok, ptrs_optional); 476 | try params.append(.{ 477 | .name = decl.name orelse return error.MissingTypeIdentifier, 478 | .param_type = decl.decl_type, 479 | .is_buffer_len = false, 480 | .is_optional = false, 481 | }); 482 | } 483 | 484 | _ = try xctok.nextNoEof(); 485 | command_ptr.decl_type.command_ptr.params = try params.toOwnedSlice(); 486 | return command_ptr; 487 | } 488 | 489 | // POINTERS = (kw_const? '*')* 490 | fn parsePointers(allocator: Allocator, xctok: *XmlCTokenizer, inner_const: bool, inner: TypeInfo, ptrs_optional: bool) !TypeInfo { 491 | var type_info = inner; 492 | var first_const = inner_const; 493 | 494 | while (true) { 495 | var tok = (try xctok.peek()) orelse return type_info; 496 | var is_const = first_const; 497 | first_const = false; 498 | 499 | if (tok.kind == .kw_const) { 500 | is_const = true; 501 | _ = try xctok.nextNoEof(); 502 | tok = (try xctok.peek()) orelse return type_info; 503 | } 504 | 505 | if (tok.kind != .star) { 506 | // if `is_const` is true at this point, there was a trailing const, 507 | // and the declaration itself is const. 
508 | return type_info; 509 | } 510 | 511 | _ = try xctok.nextNoEof(); 512 | 513 | const child = try allocator.create(TypeInfo); 514 | child.* = type_info; 515 | 516 | type_info = .{ 517 | .pointer = .{ 518 | .is_const = is_const or first_const, 519 | .is_optional = ptrs_optional, // set elsewhere 520 | .size = .one, // set elsewhere 521 | .child = child, 522 | }, 523 | }; 524 | } 525 | } 526 | 527 | // ARRAY_DECLARATOR = '[' (int | enum_name) ']' 528 | fn parseArrayDeclarator(xctok: *XmlCTokenizer) !?ArraySize { 529 | const lbracket = try xctok.peek(); 530 | if (lbracket == null or lbracket.?.kind != .lbracket) { 531 | return null; 532 | } 533 | 534 | _ = try xctok.nextNoEof(); 535 | 536 | const size_tok = try xctok.nextNoEof(); 537 | const size: ArraySize = switch (size_tok.kind) { 538 | .int => .{ 539 | .int = std.fmt.parseInt(usize, size_tok.text, 10) catch |err| switch (err) { 540 | error.Overflow => return error.Overflow, 541 | error.InvalidCharacter => unreachable, 542 | }, 543 | }, 544 | // Sometimes, arrays are declared as `T aa[SIZE]`, 545 | // and sometimes just as `T aa[SIZE]`, so we have to account 546 | // for both `.enum_name` and `.id` here. 
547 | .enum_name, .id => .{ .alias = size_tok.text }, 548 | else => return error.InvalidSyntax, 549 | }; 550 | 551 | _ = try xctok.expect(.rbracket); 552 | return size; 553 | } 554 | 555 | pub fn parseVersion(xctok: *XmlCTokenizer) !registry.ApiConstant.Value { 556 | _ = try xctok.expect(.hash); 557 | const define = try xctok.expect(.id); 558 | if (!mem.eql(u8, define.text, "define")) { 559 | return error.InvalidVersion; 560 | } 561 | 562 | _ = try xctok.expect(.name); 563 | const vk_make_version = try xctok.expect(.type_name); 564 | if (mem.eql(u8, vk_make_version.text, "VK_MAKE_API_VERSION")) { 565 | return .{ 566 | .version = try parseVersionValues(xctok, 4), 567 | }; 568 | } else if (mem.eql(u8, vk_make_version.text, "VK_MAKE_VIDEO_STD_VERSION")) { 569 | return .{ 570 | .video_std_version = try parseVersionValues(xctok, 3), 571 | }; 572 | } else { 573 | return error.NotVersion; 574 | } 575 | } 576 | 577 | fn parseVersionValues(xctok: *XmlCTokenizer, comptime count: usize) ![count][]const u8 { 578 | _ = try xctok.expect(.lparen); 579 | var version: [count][]const u8 = undefined; 580 | for (&version, 0..) 
|*part, i| { 581 | if (i != 0) { 582 | _ = try xctok.expect(.comma); 583 | } 584 | 585 | const tok = try xctok.nextNoEof(); 586 | switch (tok.kind) { 587 | .id, .int => part.* = tok.text, 588 | else => return error.UnexpectedToken, 589 | } 590 | } 591 | _ = try xctok.expect(.rparen); 592 | return version; 593 | } 594 | 595 | fn testTokenizer(tokenizer: anytype, expected_tokens: []const Token) !void { 596 | for (expected_tokens) |expected| { 597 | const tok = (tokenizer.next() catch unreachable).?; 598 | try testing.expectEqual(expected.kind, tok.kind); 599 | try testing.expectEqualSlices(u8, expected.text, tok.text); 600 | } 601 | 602 | if (tokenizer.next() catch unreachable) |_| unreachable; 603 | } 604 | 605 | test "CTokenizer" { 606 | var ctok = CTokenizer{ .source = "typedef ([const)]** VKAPI_PTR 123,;aaaa" }; 607 | 608 | try testTokenizer(&ctok, &[_]Token{ 609 | .{ .kind = .kw_typedef, .text = "typedef" }, 610 | .{ .kind = .lparen, .text = "(" }, 611 | .{ .kind = .lbracket, .text = "[" }, 612 | .{ .kind = .kw_const, .text = "const" }, 613 | .{ .kind = .rparen, .text = ")" }, 614 | .{ .kind = .rbracket, .text = "]" }, 615 | .{ .kind = .star, .text = "*" }, 616 | .{ .kind = .star, .text = "*" }, 617 | .{ .kind = .kw_vkapi_ptr, .text = "VKAPI_PTR" }, 618 | .{ .kind = .int, .text = "123" }, 619 | .{ .kind = .comma, .text = "," }, 620 | .{ .kind = .semicolon, .text = ";" }, 621 | .{ .kind = .id, .text = "aaaa" }, 622 | }); 623 | } 624 | 625 | test "XmlCTokenizer" { 626 | const document = try xml.parse(testing.allocator, 627 | \\// comment commented name commented type trailing 628 | \\ typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); 629 | \\ 630 | ); 631 | defer document.deinit(); 632 | 633 | var xctok = XmlCTokenizer.init(document.root); 634 | 635 | try testTokenizer(&xctok, &[_]Token{ 636 | .{ .kind = .kw_typedef, .text = "typedef" }, 637 | .{ .kind = .id, .text = "void" }, 638 | .{ .kind = .lparen, .text = "(" }, 639 | .{ .kind = .kw_vkapi_ptr, .text = 
"VKAPI_PTR" },
        .{ .kind = .star, .text = "*" },
        .{ .kind = .name, .text = "PFN_vkVoidFunction" },
        .{ .kind = .rparen, .text = ")" },
        .{ .kind = .lparen, .text = "(" },
        .{ .kind = .id, .text = "void" },
        .{ .kind = .rparen, .text = ")" },
        .{ .kind = .semicolon, .text = ";" },
    });
}

test "parseTypedef" {
    // NOTE(review): the dump stripped angle-bracket content from this string literal;
    // the <root>/<name>/<type> element tags below are reconstructed — required for
    // xml.parse to succeed. Confirm against upstream.
    const document = try xml.parse(testing.allocator,
        \\<root> // comment <name>commented name</name> trailing
        \\    typedef const struct <type>Python</type>* <name>pythons</name>[4];
        \\ // more comments
        \\
        \\</root>
    );
    defer document.deinit();

    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();

    var xctok = XmlCTokenizer.init(document.root);
    const decl = try parseTypedef(arena.allocator(), &xctok, false);

    try testing.expectEqualSlices(u8, "pythons", decl.name);
    const array = decl.decl_type.typedef.array;
    try testing.expectEqual(ArraySize{ .int = 4 }, array.size);
    const ptr = array.child.pointer;
    try testing.expectEqual(true, ptr.is_const);
    try testing.expectEqualSlices(u8, "Python", ptr.child.name);
}
--------------------------------------------------------------------------------
/src/vulkan/generator.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const reg = @import("registry.zig");
const xml = @import("../xml.zig");
const renderRegistry = @import("render.zig").render;
const parseXml = @import("parse.zig").parseXml;
const IdRenderer = @import("../id_render.zig").IdRenderer;
const mem = std.mem;
const Allocator = mem.Allocator;
const FeatureLevel = reg.FeatureLevel;

// Merges enum fields contributed by features/extensions into the base enum declarations.
const EnumFieldMerger = struct {
    const EnumExtensionMap = std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(reg.Enum.Field));
    const ApiConstantMap = std.StringArrayHashMapUnmanaged(reg.ApiConstant);
const FieldSet = std.StringArrayHashMapUnmanaged(void); 15 | 16 | arena: Allocator, 17 | registry: *reg.Registry, 18 | enum_extensions: EnumExtensionMap, 19 | api_constants: ApiConstantMap, 20 | field_set: FieldSet, 21 | 22 | fn init(arena: Allocator, registry: *reg.Registry) EnumFieldMerger { 23 | return .{ 24 | .arena = arena, 25 | .registry = registry, 26 | .enum_extensions = .{}, 27 | .api_constants = .{}, 28 | .field_set = .{}, 29 | }; 30 | } 31 | 32 | fn putEnumExtension(self: *EnumFieldMerger, enum_name: []const u8, field: reg.Enum.Field) !void { 33 | const res = try self.enum_extensions.getOrPut(self.arena, enum_name); 34 | if (!res.found_existing) { 35 | res.value_ptr.* = std.ArrayListUnmanaged(reg.Enum.Field){}; 36 | } 37 | 38 | try res.value_ptr.append(self.arena, field); 39 | } 40 | 41 | fn addRequires(self: *EnumFieldMerger, reqs: []const reg.Require) !void { 42 | for (reqs) |req| { 43 | for (req.extends) |enum_ext| { 44 | switch (enum_ext.value) { 45 | .field => try self.putEnumExtension(enum_ext.extends, enum_ext.value.field), 46 | .new_api_constant_expr => |expr| try self.api_constants.put( 47 | self.arena, 48 | enum_ext.extends, 49 | .{ 50 | .name = enum_ext.extends, 51 | .value = .{ .expr = expr }, 52 | }, 53 | ), 54 | } 55 | } 56 | } 57 | } 58 | 59 | fn mergeEnumFields(self: *EnumFieldMerger, name: []const u8, base_enum: *reg.Enum) !void { 60 | // If there are no extensions for this enum, assume its valid. 
61 | const extensions = self.enum_extensions.get(name) orelse return; 62 | 63 | self.field_set.clearRetainingCapacity(); 64 | 65 | const n_fields_upper_bound = base_enum.fields.len + extensions.items.len; 66 | const new_fields = try self.arena.alloc(reg.Enum.Field, n_fields_upper_bound); 67 | var i: usize = 0; 68 | 69 | for (base_enum.fields) |field| { 70 | const res = try self.field_set.getOrPut(self.arena, field.name); 71 | if (!res.found_existing) { 72 | new_fields[i] = field; 73 | i += 1; 74 | } 75 | } 76 | 77 | // Assume that if a field name clobbers, the value is the same 78 | for (extensions.items) |field| { 79 | const res = try self.field_set.getOrPut(self.arena, field.name); 80 | if (!res.found_existing) { 81 | new_fields[i] = field; 82 | i += 1; 83 | } 84 | } 85 | 86 | // Existing base_enum.fields was allocated by `self.arena`, so 87 | // it gets cleaned up whenever that is deinited. 88 | base_enum.fields = new_fields[0..i]; 89 | } 90 | 91 | fn merge(self: *EnumFieldMerger) !void { 92 | for (self.registry.api_constants) |api_constant| { 93 | try self.api_constants.put(self.arena, api_constant.name, api_constant); 94 | } 95 | 96 | for (self.registry.features) |feature| { 97 | try self.addRequires(feature.requires); 98 | } 99 | 100 | for (self.registry.extensions) |ext| { 101 | try self.addRequires(ext.requires); 102 | } 103 | 104 | // Merge all the enum fields. 
105 | // Assume that all keys of enum_extensions appear in `self.registry.decls` 106 | for (self.registry.decls) |*decl| { 107 | if (decl.decl_type == .enumeration) { 108 | try self.mergeEnumFields(decl.name, &decl.decl_type.enumeration); 109 | } 110 | } 111 | 112 | self.registry.api_constants = self.api_constants.values(); 113 | } 114 | }; 115 | 116 | pub const Generator = struct { 117 | arena: std.heap.ArenaAllocator, 118 | registry: reg.Registry, 119 | id_renderer: IdRenderer, 120 | have_video: bool, 121 | 122 | fn init(allocator: Allocator, spec: *xml.Element, maybe_video_spec: ?*xml.Element, api: reg.Api) !Generator { 123 | const result = try parseXml(allocator, spec, maybe_video_spec, api); 124 | 125 | const tags = try allocator.alloc([]const u8, result.registry.tags.len); 126 | for (tags, result.registry.tags) |*tag, registry_tag| tag.* = registry_tag.name; 127 | 128 | return Generator{ 129 | .arena = result.arena, 130 | .registry = result.registry, 131 | .id_renderer = IdRenderer.init(allocator, tags), 132 | .have_video = maybe_video_spec != null, 133 | }; 134 | } 135 | 136 | fn deinit(self: Generator) void { 137 | self.arena.deinit(); 138 | } 139 | 140 | fn stripFlagBits(self: Generator, name: []const u8) []const u8 { 141 | const tagless = self.id_renderer.stripAuthorTag(name); 142 | return tagless[0 .. tagless.len - "FlagBits".len]; 143 | } 144 | 145 | fn stripFlags(self: Generator, name: []const u8) []const u8 { 146 | const tagless = self.id_renderer.stripAuthorTag(name); 147 | return tagless[0 .. tagless.len - "Flags".len]; 148 | } 149 | 150 | // Solve `registry.declarations` according to `registry.extensions` and `registry.features`. 
151 | fn mergeEnumFields(self: *Generator) !void { 152 | var merger = EnumFieldMerger.init(self.arena.allocator(), &self.registry); 153 | try merger.merge(); 154 | } 155 | 156 | // https://github.com/KhronosGroup/Vulkan-Docs/pull/1556 157 | fn fixupBitFlags(self: *Generator) !void { 158 | var seen_bits = std.StringArrayHashMap(void).init(self.arena.allocator()); 159 | defer seen_bits.deinit(); 160 | 161 | for (self.registry.decls) |decl| { 162 | const bitmask = switch (decl.decl_type) { 163 | .bitmask => |bm| bm, 164 | else => continue, 165 | }; 166 | 167 | if (bitmask.bits_enum) |bits_enum| { 168 | try seen_bits.put(bits_enum, {}); 169 | } 170 | } 171 | 172 | var i: usize = 0; 173 | 174 | for (self.registry.decls) |decl| { 175 | switch (decl.decl_type) { 176 | .enumeration => |e| { 177 | if (e.is_bitmask and seen_bits.get(decl.name) == null) 178 | continue; 179 | }, 180 | else => {}, 181 | } 182 | self.registry.decls[i] = decl; 183 | i += 1; 184 | } 185 | 186 | self.registry.decls.len = i; 187 | } 188 | 189 | fn render(self: *Generator, writer: anytype) !void { 190 | try renderRegistry(writer, self.arena.allocator(), &self.registry, &self.id_renderer, self.have_video); 191 | } 192 | }; 193 | 194 | /// The vulkan registry contains the specification for multiple APIs: Vulkan and VulkanSC. This enum 195 | /// describes applicable APIs. 196 | pub const Api = reg.Api; 197 | 198 | /// Main function for generating the Vulkan bindings. vk.xml is to be provided via `spec_xml`, 199 | /// and the resulting binding is written to `writer`. `allocator` will be used to allocate temporary 200 | /// internal datastructures - mostly via an ArenaAllocator, but sometimes a hashmap uses this allocator 201 | /// directly. `api` is the API to generate the bindings for, usually `.vulkan`. 
pub fn generate(
    allocator: Allocator,
    api: Api,
    spec_xml: []const u8,
    maybe_video_spec_xml: ?[]const u8,
    writer: anytype,
) !void {
    // Collapse the many xml-level failures into a single error for callers.
    const spec = xml.parse(allocator, spec_xml) catch |err| switch (err) {
        error.InvalidDocument,
        error.UnexpectedEof,
        error.UnexpectedCharacter,
        error.IllegalCharacter,
        error.InvalidEntity,
        error.InvalidName,
        error.InvalidStandaloneValue,
        error.NonMatchingClosingTag,
        error.UnclosedComment,
        error.UnclosedValue,
        => return error.InvalidXml,
        error.OutOfMemory => return error.OutOfMemory,
    };
    defer spec.deinit();

    const maybe_video_spec_root = if (maybe_video_spec_xml) |video_spec_xml| blk: {
        const video_spec = xml.parse(allocator, video_spec_xml) catch |err| switch (err) {
            error.InvalidDocument,
            error.UnexpectedEof,
            error.UnexpectedCharacter,
            error.IllegalCharacter,
            error.InvalidEntity,
            error.InvalidName,
            error.InvalidStandaloneValue,
            error.NonMatchingClosingTag,
            error.UnclosedComment,
            error.UnclosedValue,
            => return error.InvalidXml,
            error.OutOfMemory => return error.OutOfMemory,
        };

        break :blk video_spec.root;
    } else null;

    var gen = Generator.init(allocator, spec.root, maybe_video_spec_root, api) catch |err| switch (err) {
        error.InvalidXml,
        error.InvalidCharacter,
        error.Overflow,
        error.InvalidFeatureLevel,
        error.InvalidSyntax,
        error.InvalidTag,
        error.MissingTypeIdentifier,
        error.UnexpectedCharacter,
        error.UnexpectedEof,
        error.UnexpectedToken,
        error.InvalidRegistry,
        => return error.InvalidRegistry,
        error.OutOfMemory => return error.OutOfMemory,
    };
    defer gen.deinit();

    try gen.mergeEnumFields();
    try gen.fixupBitFlags();
    gen.render(writer) catch |err| switch (err) {
        error.InvalidApiConstant,
        error.InvalidConstantExpr,
        error.InvalidRegistry,
        error.UnexpectedCharacter,
        error.InvalidCharacter,
        error.Overflow,
        => return error.InvalidRegistry,
        else => |others| return others,
    };
}
--------------------------------------------------------------------------------
/src/vulkan/parse.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const registry = @import("registry.zig");
const xml = @import("../xml.zig");
const cparse = @import("c_parse.zig");
const mem = std.mem;
const Allocator = mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;

const api_constants_name = "API Constants";

pub const ParseResult = struct {
    arena: ArenaAllocator,
    registry: registry.Registry,

    pub fn deinit(self: ParseResult) void {
        self.arena.deinit();
    }
};

pub fn parseXml(
    backing_allocator: Allocator,
    root: *xml.Element,
    maybe_video_root: ?*xml.Element,
    api: registry.Api,
) !ParseResult {
    var arena = ArenaAllocator.init(backing_allocator);
    errdefer arena.deinit();

    const allocator = arena.allocator();

    var decls: std.ArrayListUnmanaged(registry.Declaration) = .{};
    var api_constants: std.ArrayListUnmanaged(registry.ApiConstant) = .{};
    var tags: std.ArrayListUnmanaged(registry.Tag) = .{};
    var features: std.ArrayListUnmanaged(registry.Feature) = .{};
    var extensions: std.ArrayListUnmanaged(registry.Extension) = .{};

    try parseDeclarations(allocator, root, api, &decls);
    try parseApiConstants(allocator, root, api, &api_constants);
    try parseTags(allocator, root, &tags);
    try parseFeatures(allocator, root, api, &features);
    try parseExtensions(allocator, root, api, &extensions);

    if (maybe_video_root) |video_root| {
        try
parseDeclarations(allocator, video_root, api, &decls);
        try parseApiConstants(allocator, video_root, api, &api_constants);
        try parseTags(allocator, video_root, &tags);
        try parseFeatures(allocator, video_root, api, &features);
        try parseExtensions(allocator, video_root, api, &extensions);
    }

    const reg = registry.Registry{
        .decls = decls.items,
        .api_constants = api_constants.items,
        .tags = tags.items,
        .features = features.items,
        .extensions = extensions.items,
    };

    return ParseResult{
        .arena = arena,
        .registry = reg,
    };
}

fn parseDeclarations(
    allocator: Allocator,
    root: *xml.Element,
    api: registry.Api,
    decls: *std.ArrayListUnmanaged(registry.Declaration),
) !void {
    const types_elem = root.findChildByTag("types") orelse return error.InvalidRegistry;
    try decls.ensureUnusedCapacity(allocator, types_elem.children.len);

    try parseTypes(allocator, types_elem, api, decls);
    try parseEnums(allocator, root, api, decls);

    if (root.findChildByTag("commands")) |commands_elem| {
        try decls.ensureUnusedCapacity(allocator, commands_elem.children.len);
        try parseCommands(allocator, commands_elem, api, decls);
    }
}

fn parseTypes(
    allocator: Allocator,
    types_elem: *xml.Element,
    api: registry.Api,
    decls: *std.ArrayListUnmanaged(registry.Declaration),
) !void {
    var it = types_elem.findChildrenByTag("type");
    while (it.next()) |ty| {
        try decls.append(allocator, blk: {
            if (!requiredByApi(ty, api))
                continue;

            const category = ty.getAttribute("category") orelse {
                break :blk try parseForeigntype(ty);
            };

            if (mem.eql(u8, category, "bitmask")) {
                break :blk try parseBitmaskType(ty);
            } else if (mem.eql(u8, category, "handle")) {
                break :blk try parseHandleType(ty);
            } else if (mem.eql(u8, category, "basetype")) {
                break :blk try parseBaseType(allocator, ty);
            } else if (mem.eql(u8, category, "struct")) {
                break :blk try parseContainer(allocator, ty, false, api);
            } else if (mem.eql(u8, category, "union")) {
                break :blk try parseContainer(allocator, ty, true, api);
            } else if (mem.eql(u8, category, "funcpointer")) {
                break :blk try parseFuncPointer(allocator, ty);
            } else if (mem.eql(u8, category, "enum")) {
                break :blk (try parseEnumAlias(ty)) orelse continue;
            }

            continue;
        });
    }
}

fn parseForeigntype(ty: *xml.Element) !registry.Declaration {
    const name = ty.getAttribute("name") orelse return error.InvalidRegistry;
    const depends = ty.getAttribute("requires") orelse if (mem.eql(u8, name, "int"))
        "vk_platform" // for some reason, int doesn't depend on vk_platform (but the other c types do)
    else
        return error.InvalidRegistry;

    return registry.Declaration{
        .name = name,
        .decl_type = .{ .foreign = .{ .depends = depends } },
    };
}

fn parseBitmaskType(ty: *xml.Element) !registry.Declaration {
    if (ty.getAttribute("name")) |name| {
        const alias = ty.getAttribute("alias") orelse return error.InvalidRegistry;
        return registry.Declaration{
            .name = name,
            .decl_type = .{ .alias = .{ .name = alias, .target = .other_type } },
        };
    } else {
        const flags_type = ty.getCharData("type") orelse return error.InvalidRegistry;

        const bitwidth: u8 = if (mem.eql(u8, flags_type, "VkFlags"))
            32
        else if (mem.eql(u8, flags_type, "VkFlags64"))
            64
        else
            return error.InvalidRegistry;

        return registry.Declaration{
            .name = ty.getCharData("name") orelse return error.InvalidRegistry,
            .decl_type = .{
                .bitmask = .{
                    // Who knows why these are different fields
                    .bits_enum = ty.getAttribute("requires") orelse
ty.getAttribute("bitvalues"),
                    .bitwidth = bitwidth,
                },
            },
        };
    }
}

fn parseHandleType(ty: *xml.Element) !registry.Declaration {
    // Parent is not handled in case of an alias
    if (ty.getAttribute("name")) |name| {
        const alias = ty.getAttribute("alias") orelse return error.InvalidRegistry;
        return registry.Declaration{
            .name = name,
            .decl_type = .{
                .alias = .{ .name = alias, .target = .other_type },
            },
        };
    } else {
        const name = ty.getCharData("name") orelse return error.InvalidRegistry;
        const handle_type = ty.getCharData("type") orelse return error.InvalidRegistry;
        const dispatchable = mem.eql(u8, handle_type, "VK_DEFINE_HANDLE");
        if (!dispatchable and !mem.eql(u8, handle_type, "VK_DEFINE_NON_DISPATCHABLE_HANDLE")) {
            return error.InvalidRegistry;
        }

        return registry.Declaration{
            .name = name,
            .decl_type = .{
                .handle = .{
                    .parent = ty.getAttribute("parent"),
                    .is_dispatchable = dispatchable,
                },
            },
        };
    }
}

fn parseBaseType(allocator: Allocator, ty: *xml.Element) !registry.Declaration {
    const name = ty.getCharData("name") orelse return error.InvalidRegistry;
    if (ty.getCharData("type")) |_| {
        var tok = cparse.XmlCTokenizer.init(ty);
        return try cparse.parseTypedef(allocator, &tok, false);
    } else {
        // Either ANativeWindow, AHardwareBuffer or CAMetalLayer. The latter has a lot of
        // macros, which is why this part is not built into the xml/c parser.
        return registry.Declaration{
            .name = name,
            .decl_type = .{ .foreign = .{ .depends = &.{} } },
        };
    }
}

fn parseContainer(allocator: Allocator, ty: *xml.Element, is_union: bool, api: registry.Api) !registry.Declaration {
    const name = ty.getAttribute("name") orelse return error.InvalidRegistry;

    if (ty.getAttribute("alias")) |alias| {
        return registry.Declaration{
            .name = name,
            .decl_type = .{
                .alias = .{ .name = alias, .target = .other_type },
            },
        };
    }

    var members = try allocator.alloc(registry.Container.Field, ty.children.len);

    var i: usize = 0;
    var it = ty.findChildrenByTag("member");
    var maybe_stype: ?[]const u8 = null;
    while (it.next()) |member| {
        if (!requiredByApi(member, api))
            continue;

        var xctok = cparse.XmlCTokenizer.init(member);
        members[i] = try cparse.parseMember(allocator, &xctok, false);
        if (mem.eql(u8, members[i].name, "sType")) {
            if (member.getAttribute("values")) |stype| {
                maybe_stype = stype;
            }
        }

        if (member.getAttribute("optional")) |optionals| {
            var optional_it = mem.splitScalar(u8, optionals, ',');
            if (optional_it.next()) |first_optional| {
                members[i].is_optional = mem.eql(u8, first_optional, "true");
            } else {
                // Optional is empty, probably incorrect.
                return error.InvalidRegistry;
            }
        }
        i += 1;
    }

    members = members[0..i];

    var maybe_extends: ?[][]const u8 = null;
    if (ty.getAttribute("structextends")) |extends| {
        const n_structs = std.mem.count(u8, extends, ",") + 1;
        maybe_extends = try allocator.alloc([]const u8, n_structs);
        var struct_extends = std.mem.splitScalar(u8, extends, ',');
        var j: usize = 0;
        while (struct_extends.next()) |struct_extend| {
            maybe_extends.?[j] = struct_extend;
            j += 1;
        }
    }

    it = ty.findChildrenByTag("member");
    for (members) |*member| {
        const member_elem = while (it.next()) |elem| {
            if (requiredByApi(elem, api)) break elem;
        } else unreachable;

        try parsePointerMeta(.{ .container = members }, &member.field_type, member_elem);

        // pNext isn't always properly marked as optional, so just manually override it,
        if (mem.eql(u8, member.name, "pNext")) {
            member.field_type.pointer.is_optional = true;
        }
    }

    return registry.Declaration{
        .name = name,
        .decl_type = .{
            .container = .{
                .stype = maybe_stype,
                .fields = members,
                .is_union = is_union,
                .extends = maybe_extends,
            },
        },
    };
}

fn parseFuncPointer(allocator: Allocator, ty: *xml.Element) !registry.Declaration {
    var xctok = cparse.XmlCTokenizer.init(ty);
    return try cparse.parseTypedef(allocator, &xctok, true);
}

// For some reason, the DeclarationType cannot be passed to lenToPointer, as
// that causes the Zig compiler to generate invalid code for the function. Using a
// dedicated enum fixes the issue...
const Fields = union(enum) {
    command: []registry.Command.Param,
    container: []registry.Container.Field,
};

// returns .{ size, nullable }
fn lenToPointer(fields: Fields, len: []const u8) std.meta.Tuple(&.{ registry.Pointer.PointerSize, bool }) {
    switch (fields) {
        .command => |params| {
            for (params) |*param| {
                if (mem.eql(u8, param.name, len)) {
                    param.is_buffer_len = true;
                    return .{ .{ .other_field = param.name }, param.is_optional };
                }
            }
        },
        .container => |members| {
            for (members) |*member| {
                if (mem.eql(u8, member.name, len)) {
                    member.is_buffer_len = true;
                    return .{ .{ .other_field = member.name }, member.is_optional };
                }
            }
        },
    }

    if (mem.eql(u8, len, "null-terminated")) {
        return .{ .zero_terminated, false };
    } else {
        return .{ .many, false };
    }
}

fn parsePointerMeta(fields: Fields, type_info: *registry.TypeInfo, elem: *xml.Element) !void {
    var len_attribute_depth: usize = 0;

    if (elem.getAttribute("len")) |lens| {
        var it = mem.splitScalar(u8, lens, ',');
        var current_type_info = type_info;

        while (true) switch (current_type_info.*) {
            .pointer => |*ptr| {
                if (it.next()) |len_str| {
                    ptr.size, ptr.is_optional = lenToPointer(fields, len_str);
                } else {
                    ptr.size = .many;
                }

                current_type_info = ptr.child;
                len_attribute_depth += 1;
            },
            .array => |*arr| {
                if (it.next()) |len_str| {
                    const size, _ = lenToPointer(fields, len_str);
                    arr.valid_size = switch (size) {
                        .one => .all,
                        .many => .many,
                        .other_field => |field| .{ .other_field = field },
                        .zero_terminated => .zero_terminated,
                    };
                } else {
                    arr.valid_size = .all;
                }

                current_type_info = arr.child;
                len_attribute_depth += 1;
            },
            else => break,
        };

        if (it.next()) |_| {
            // There are more elements in the `len` attribute than there are pointers
            // Something probably went wrong
            std.log.err("len: {s}", .{lens});
            return error.InvalidRegistry;
        }
    }

    var current_depth: usize = 0;

    if (elem.getAttribute("optional")) |optionals| {
        var it = mem.splitScalar(u8, optionals, ',');
        var current_type_info = type_info;
        while (true) switch (current_type_info.*) {
            inline .pointer, .array => |*info| {
                if (it.next()) |optional_str| {
                    // The pointer may have already been marked as optional due to its `len` attribute.
                    const is_already_optional = current_depth < len_attribute_depth and info.is_optional;
                    info.is_optional = is_already_optional or mem.eql(u8, optional_str, "true");
                } else {
                    // There is no information for this pointer, probably incorrect.
                    // Currently there is one definition where this is the case, VkCudaLaunchInfoNV.
                    // We work around these by assuming that they are optional, so that in the case
                    // that they are, we can assign null to them.
                    // See https://github.com/Snektron/vulkan-zig/issues/109
                    info.is_optional = true;
                }

                current_type_info = info.child;
                current_depth += 1;
            },
            else => break,
        };
    }
}

fn parseEnumAlias(elem: *xml.Element) !?registry.Declaration {
    if (elem.getAttribute("alias")) |alias| {
        const name = elem.getAttribute("name") orelse return error.InvalidRegistry;
        return registry.Declaration{
            .name = name,
            .decl_type = .{
                .alias = .{ .name = alias, .target = .other_type },
            },
        };
    }

    return null;
}

fn parseEnums(
    allocator: Allocator,
    root: *xml.Element,
    api: registry.Api,
    decls: *std.ArrayListUnmanaged(registry.Declaration),
) !void {
    var it = root.findChildrenByTag("enums");
    while (it.next()) |enums| {
        const name = enums.getAttribute("name") orelse return error.InvalidRegistry;
        if (mem.eql(u8, name, api_constants_name) or !requiredByApi(enums, api)) {
            continue;
        }

        try decls.append(allocator, .{
            .name = name,
            .decl_type = .{ .enumeration = try parseEnumFields(allocator, enums, api) },
        });
    }
}

fn parseEnumFields(allocator: Allocator, elem: *xml.Element, api: registry.Api) !registry.Enum {
    // TODO: `type` was added recently, fall back to checking endswith FlagBits for older versions?
    const enum_type = elem.getAttribute("type") orelse return error.InvalidRegistry;
    const is_bitmask = mem.eql(u8, enum_type, "bitmask");
    if (!is_bitmask and !mem.eql(u8, enum_type, "enum")) {
        return error.InvalidRegistry;
    }

    // Bit width of the backing integer; defaults to 32 when unspecified.
    const bitwidth = if (elem.getAttribute("bitwidth")) |bitwidth|
        try std.fmt.parseInt(u8, bitwidth, 10)
    else
        32;

    // Over-allocate to the child count and shrink via `fields[0..i]` below,
    // since api filtering may drop some variants.
    const fields = try allocator.alloc(registry.Enum.Field, elem.children.len);

    var i: usize = 0;
    var it = elem.findChildrenByTag("enum");
    while (it.next()) |field| {
        if (!requiredByApi(field, api))
            continue;

        fields[i] = try parseEnumField(field);
        i += 1;
    }

    return registry.Enum{
        .fields = fields[0..i],
        .bitwidth = bitwidth,
        .is_bitmask = is_bitmask,
    };
}

/// Parses a single `<enum>` variant into a name/value pair.
fn parseEnumField(field: *xml.Element) !registry.Enum.Field {
    // The registry marks typo/deprecation aliases via a well-known comment text;
    // the generator uses this flag to de-prioritize such aliases.
    const is_compat_alias = if (field.getAttribute("comment")) |comment|
        mem.eql(u8, comment, "Backwards-compatible alias containing a typo") or
        mem.eql(u8, comment, "Deprecated name for backwards compatibility")
    else
        false;

    const name = field.getAttribute("name") orelse return error.InvalidRegistry;
    const value: registry.Enum.Value = blk: {
        // An enum variant's value could be defined by any of the following attributes:
        // - value: Straight up value of the enum variant, in either base 10 or 16 (prefixed with 0x).
        // - bitpos: Used for bitmasks, and can also be set in extensions.
        // - alias: The field is an alias of another variant within the same enum.
        // - offset: Used with features and extensions, where a non-bitpos value is added to an enum.
        //   The value is given by `1e9 + (ext_nr - 1) * 1e3 + offset`, where `ext_nr` is either
        //   given by the `extnumber` attribute (in the case of a feature), or by the parent
        //   <extension> tag. The `offset` case is not handled here; it is handled by
        //   parseEnumExtension, which computes the value before delegating other cases to
        //   this function.
        if (field.getAttribute("value")) |value| {
            if (mem.startsWith(u8, value, "0x")) {
                break :blk .{ .bit_vector = try std.fmt.parseInt(i32, value[2..], 16) };
            } else {
                break :blk .{ .int = try std.fmt.parseInt(i32, value, 10) };
            }
        } else if (field.getAttribute("bitpos")) |bitpos| {
            break :blk .{ .bitpos = try std.fmt.parseInt(u6, bitpos, 10) };
        } else if (field.getAttribute("alias")) |alias| {
            break :blk .{ .alias = .{ .name = alias, .is_compat_alias = is_compat_alias } };
        } else {
            return error.InvalidRegistry;
        }
    };

    return registry.Enum.Field{
        .name = name,
        .value = value,
    };
}

/// Collects every `<command>` required by `api` into `decls`.
fn parseCommands(
    allocator: Allocator,
    commands_elem: *xml.Element,
    api: registry.Api,
    decls: *std.ArrayListUnmanaged(registry.Declaration),
) !void {
    var it = commands_elem.findChildrenByTag("command");
    while (it.next()) |elem| {
        if (!requiredByApi(elem, api))
            continue;

        try decls.append(allocator, try parseCommand(allocator, elem, api));
    }
}

/// Splits `text` on commas into allocator-owned slice of sub-slices.
/// The sub-slices view into `text` (no copies); an empty input yields one
/// empty element, matching `mem.splitScalar` semantics.
fn splitCommaAlloc(allocator: Allocator, text: []const u8) ![][]const u8 {
    // Count separators first so a single exact allocation suffices.
    var n_codes: usize = 1;
    for (text) |c| {
        if (c == ',') n_codes += 1;
    }

    const codes = try allocator.alloc([]const u8, n_codes);
    var it = mem.splitScalar(u8, text, ',');
    for (codes) |*code| {
        code.* = it.next().?;
    }

    return codes;
}

/// Parses one `<command>` element into a command declaration (or an alias
/// declaration when the `alias` attribute is present).
fn parseCommand(allocator: Allocator, elem: *xml.Element, api: registry.Api) !registry.Declaration {
    if (elem.getAttribute("alias")) |alias| {
        const name = elem.getAttribute("name") orelse return error.InvalidRegistry;
        return registry.Declaration{
            .name = name,
            .decl_type = .{
                .alias = .{ .name = alias, .target = .other_command },
            },
        };
    }

    // The <proto> child holds the return type and command name as C source.
    const proto = elem.findChildByTag("proto") orelse return error.InvalidRegistry;
    var proto_xctok = cparse.XmlCTokenizer.init(proto);
    const command_decl = try cparse.parseParamOrProto(allocator, &proto_xctok, false);

    // Over-allocated to the child count; trimmed to `params[0..i]` below.
    var params = try allocator.alloc(registry.Command.Param, elem.children.len);

    var i: usize = 0;
    var it = elem.findChildrenByTag("param");
    while (it.next()) |param| {
        if (!requiredByApi(param, api))
            continue;

        var xctok = cparse.XmlCTokenizer.init(param);
        const decl = try cparse.parseParamOrProto(allocator, &xctok, false);
        params[i] = .{
            .name = decl.name,
            .param_type = decl.decl_type.typedef,
            .is_buffer_len = false,
            .is_optional = false,
        };

        // Only the first element of the `optional` list applies to the
        // parameter itself; deeper levels are handled by parsePointerMeta.
        if (param.getAttribute("optional")) |optionals| {
            var optional_it = mem.splitScalar(u8, optionals, ',');
            if (optional_it.next()) |first_optional| {
                params[i].is_optional = mem.eql(u8, first_optional, "true");
            } else {
                // Optional is empty, probably incorrect.
                return error.InvalidRegistry;
            }
        }
        i += 1;
    }

    const return_type = try allocator.create(registry.TypeInfo);
    return_type.* = command_decl.decl_type.typedef;

    const success_codes = if (elem.getAttribute("successcodes")) |codes|
        try splitCommaAlloc(allocator, codes)
    else
        &[_][]const u8{};

    const error_codes = if (elem.getAttribute("errorcodes")) |codes|
        try splitCommaAlloc(allocator, codes)
    else
        &[_][]const u8{};

    params = params[0..i];

    // Second pass: apply len/optional metadata. This must run after the full
    // parameter list is known, because a `len` attribute may refer to any
    // sibling parameter. The iteration applies the same `requiredByApi` filter
    // as the first pass, so `param_elem` stays aligned with `params`.
    it = elem.findChildrenByTag("param");
    for (params) |*param| {
        const param_elem = while (it.next()) |param_elem| {
            if (requiredByApi(param_elem, api)) break param_elem;
        } else unreachable;

        try parsePointerMeta(.{ .command = params }, &param.param_type, param_elem);
    }

    return registry.Declaration{
        .name = command_decl.name,
        .decl_type = .{
            .command = .{
                .params = params,
                .return_type = return_type,
                .success_codes = success_codes,
                .error_codes = error_codes,
            },
        },
    };
}

/// Collects the registry's API constants: first the members of the special
/// "API Constants" `<enums>` block, then constant-like `#define`s from the
/// `<types>` section (via parseDefines).
fn parseApiConstants(
    allocator: Allocator,
    root: *xml.Element,
    api: registry.Api,
    api_constants: *std.ArrayListUnmanaged(registry.ApiConstant),
) !void {
    const maybe_enums = blk: {
        var it = root.findChildrenByTag("enums");
        while (it.next()) |child| {
            const name = child.getAttribute("name") orelse continue;
            if (mem.eql(u8, name, api_constants_name)) {
                break :blk child;
            }
        }

        break :blk null;
    };

    if (maybe_enums) |enums| {
        var it = enums.findChildrenByTag("enum");
        while (it.next()) |constant| {
            if (!requiredByApi(constant, api))
                continue;

            // A constant is either a direct value or an alias of another constant.
            const expr = if (constant.getAttribute("value")) |expr|
                expr
            else if (constant.getAttribute("alias")) |alias|
                alias
            else
                return error.InvalidRegistry;

            try api_constants.append(allocator, .{
                .name = constant.getAttribute("name") orelse return error.InvalidRegistry,
                .value = .{ .expr = expr },
            });
        }
    }

    const types = root.findChildByTag("types") orelse return error.InvalidRegistry;
    try parseDefines(allocator, types, api, api_constants);
}

/// Scans `<type category="define">` entries for version-like constants and
/// appends them to `api_constants`. Defines that fail to parse are skipped.
fn parseDefines(
    allocator: Allocator,
    types: *xml.Element,
    api: registry.Api,
    api_constants: *std.ArrayListUnmanaged(registry.ApiConstant),
) !void {
    var it = types.findChildrenByTag("type");
    while (it.next()) |ty| {
        if (!requiredByApi(ty, api))
            continue;

        const category = ty.getAttribute("category") orelse continue;
        if (!mem.eql(u8, category, "define")) {
            continue;
        }

        const name = ty.getCharData("name") orelse continue;
        if (mem.eql(u8, name, "VK_HEADER_VERSION") or mem.eql(u8, name, "VKSC_API_VARIANT")) {
            // NOTE(review): assumes the define's value is the element's third
            // child (text after the <name> tag) — confirm against vk.xml layout.
            try api_constants.append(allocator, .{
                .name = name,
                .value = .{ .expr = mem.trim(u8, ty.children[2].char_data, " ") },
            });
        } else {
            var xctok = cparse.XmlCTokenizer.init(ty);
            try api_constants.append(allocator, .{
                .name = name,
                // Non-version defines (unparseable ones) are intentionally skipped.
                .value = cparse.parseVersion(&xctok) catch continue,
            });
        }
    }
}

/// Collects the registry's author `<tag>` entries (name + author) into `tags`.
fn parseTags(
    allocator: Allocator,
    root: *xml.Element,
    tags: *std.ArrayListUnmanaged(registry.Tag),
) !void {
    var tags_elem = root.findChildByTag("tags") orelse return;
    try tags.ensureUnusedCapacity(allocator, tags_elem.children.len);

    var it = tags_elem.findChildrenByTag("tag");
    while (it.next()) |tag| {
        tags.appendAssumeCapacity(.{
            .name = tag.getAttribute("name") orelse return error.InvalidRegistry,
            .author = tag.getAttribute("author") orelse return error.InvalidRegistry,
        });
    }
}

/// Collects every `<feature>` (core API version) required by `api` into `features`.
fn parseFeatures(allocator: Allocator, root: *xml.Element, api: registry.Api, features: *std.ArrayListUnmanaged(registry.Feature)) !void {
    var it = root.findChildrenByTag("feature");
    while (it.next()) |feature| {
        if (!requiredByApi(feature, api))
            continue;

        try features.append(allocator, try parseFeature(allocator, feature, api));
    }
}

/// Parses a `<feature>` element: its name, version number and the set of
/// `<require>` blocks it introduces.
fn parseFeature(allocator: Allocator, feature: *xml.Element, api: registry.Api) !registry.Feature {
    const name = feature.getAttribute("name") orelse return error.InvalidRegistry;
    const feature_level = blk: {
        const number = feature.getAttribute("number") orelse return error.InvalidRegistry;
        break :blk try splitFeatureLevel(number, ".");
    };

    // Over-allocated to the child count; trimmed to `requires[0..i]` below.
    var requires = try allocator.alloc(registry.Require, feature.children.len);
    var i: usize = 0;
    var it = feature.findChildrenByTag("require");
    while (it.next()) |require| {
        if (!requiredByApi(require, api))
            continue;

        // Features have no extension number, hence `null`.
        requires[i] = try parseRequire(allocator, require, null, api);
        i += 1;
    }

    return registry.Feature{
        .name = name,
        .level = feature_level,
        .requires = requires[0..i],
    };
}

/// Parses an `<enum>` element inside a `<require>` block: either an extension
/// of an existing enum, a new API constant, or (for _SPEC_VERSION /
/// _EXTENSION_NAME markers) nothing.
fn parseEnumExtension(elem: *xml.Element, parent_extnumber: ?u31) !?registry.Require.EnumExtension {
    // check for either _SPEC_VERSION or _EXTENSION_NAME
    const name = elem.getAttribute("name") orelse return error.InvalidRegistry;
    if (std.mem.endsWith(u8, name, "_SPEC_VERSION") or std.mem.endsWith(u8, name, "_EXTENSION_NAME")) {
        return null;
    }

    const extends = elem.getAttribute("extends") orelse {
        const expr = elem.getAttribute("value") orelse return null;
        // This adds a value to the 'API constants' set

        return registry.Require.EnumExtension{
            .extends = name,
            .extnumber = null,
            .value = .{ .new_api_constant_expr = expr },
        };
    };

    // The `offset` attribute encodes the variant's value relative to the owning
    // extension's number; see enumExtOffsetToValue.
    if (elem.getAttribute("offset")) |offset_str| {
        const offset = try std.fmt.parseInt(u31, offset_str, 10);
        // A local `extnumber` attribute overrides the parent extension's number.
        const extnumber = if (elem.getAttribute("extnumber")) |num|
            try std.fmt.parseInt(u31, num, 10)
        else
            null;

        const actual_extnumber = extnumber orelse parent_extnumber orelse return error.InvalidRegistry;
        const value = blk: {
            const abs_value = enumExtOffsetToValue(actual_extnumber, offset);
            // `dir="-"` is the only supported direction attribute; it negates the value.
            if (elem.getAttribute("dir")) |dir| {
                if (mem.eql(u8, dir, "-")) {
                    break :blk -@as(i32, abs_value);
                } else {
                    return error.InvalidRegistry;
                }
            }

            break :blk @as(i32, abs_value);
        };

        return registry.Require.EnumExtension{
            .extends = extends,
            .extnumber = actual_extnumber,
            .value = .{
                .field = .{
                    .name = name,
                    .value = .{ .int = value },
                },
            },
        };
    }

    // No offset: the variant carries a plain value/bitpos/alias, which
    // parseEnumField knows how to decode.
    return registry.Require.EnumExtension{
        .extends = extends,
        .extnumber = parent_extnumber,
        .value = .{ .field = try parseEnumField(elem) },
    };
}

/// Computes the absolute value of an extension enum variant from the extension
/// number and the variant's offset: 1e9 + (extnumber - 1) * 1e3 + offset.
/// NOTE(review): `extnumber - 1` underflows u31 when extnumber is 0 (video
/// extensions get number 0 in parseExtension) — presumably video registries
/// never use `offset`; confirm.
fn enumExtOffsetToValue(extnumber: u31, offset: u31) u31 {
    const extension_value_base = 1000000000;
    const extension_block = 1000;
    return extension_value_base + (extnumber - 1) * extension_block + offset;
}

/// Parses a `<require>` block into its enum extensions, required types and
/// required commands. `extnumber` is the owning extension's number (null for
/// features).
fn parseRequire(allocator: Allocator, require: *xml.Element, extnumber: ?u31, api: registry.Api) !registry.Require {
    // First pass: count children per kind so each list gets one exact allocation.
    var n_extends: usize = 0;
    var n_types: usize = 0;
    var n_commands: usize = 0;

    var it = require.elements();
    while (it.next()) |elem| {
        if (mem.eql(u8, elem.tag, "enum")) {
            n_extends += 1;
        } else if (mem.eql(u8, elem.tag, "type")) {
            n_types += 1;
        } else if (mem.eql(u8, elem.tag, "command")) {
            n_commands += 1;
        }
    }

    const extends = try allocator.alloc(registry.Require.EnumExtension, n_extends);
    const types = try allocator.alloc([]const u8, n_types);
    const commands = try allocator.alloc([]const u8, n_commands);

    var i_extends: usize = 0;
    var i_types: usize = 0;
    var i_commands: usize = 0;

    // Second pass: fill the lists. Counts may come in below the first pass's
    // totals because of api filtering; the slices are trimmed on return.
    it = require.elements();
    while (it.next()) |elem| {
        if (!requiredByApi(elem, api))
            continue;

        if (mem.eql(u8, elem.tag, "enum")) {
            if (try parseEnumExtension(elem, extnumber)) |ext| {
                extends[i_extends] = ext;
                i_extends += 1;
            }
        } else if (mem.eql(u8, elem.tag, "type")) {
            types[i_types] = elem.getAttribute("name") orelse return error.InvalidRegistry;
            i_types += 1;
        } else if (mem.eql(u8, elem.tag, "command")) {
            commands[i_commands] = elem.getAttribute("name") orelse return error.InvalidRegistry;
            i_commands += 1;
        }
    }

    const required_feature_level = blk: {
        const feature_level = require.getAttribute("feature") orelse break :blk null;
        if (!mem.startsWith(u8, feature_level, "VK_VERSION_")) {
            return error.InvalidRegistry;
        }

        break :blk try splitFeatureLevel(feature_level["VK_VERSION_".len..], "_");
    };

    return registry.Require{
        .extends = extends[0..i_extends],
        .types = types[0..i_types],
        .commands = commands[0..i_commands],
        .required_feature_level = required_feature_level,
        .required_extension = require.getAttribute("extension"),
    };
}

/// Collects every enabled `<extension>` required by `api` into `extensions`.
fn parseExtensions(
    allocator: Allocator,
    root: *xml.Element,
    api: registry.Api,
    extensions: *std.ArrayListUnmanaged(registry.Extension),
) !void {
    const extensions_elem = root.findChildByTag("extensions") orelse return error.InvalidRegistry;
    try extensions.ensureUnusedCapacity(allocator, extensions_elem.children.len);

    var it = extensions_elem.findChildrenByTag("extension");
    while (it.next()) |extension| {
        if (!requiredByApi(extension, api))
            continue;
        // Some extensions (in particular 94) are disabled, so just skip them
        if (extension.getAttribute("supported")) |supported| {
            if (mem.eql(u8, supported, "disabled")) {
                continue;
            }
        }

        extensions.appendAssumeCapacity(try parseExtension(allocator, extension, api));
    }
}

/// Finds an extension's version by scanning its `<require>` blocks for the
/// `*_SPEC_VERSION` constant. Returns `.unknown` when none is found.
fn findExtVersion(extension: *xml.Element) !registry.Extension.Version {
    var req_it = extension.findChildrenByTag("require");
    while (req_it.next()) |req| {
        var enum_it = req.findChildrenByTag("enum");
        while (enum_it.next()) |e| {
            const name = e.getAttribute("name") orelse continue;
            const value = e.getAttribute("value") orelse continue;
            if (mem.endsWith(u8, name, "_SPEC_VERSION")) {
                // Vulkan Video extensions are sometimes aliases.
                // If we fail to parse it as integer, just assume that its an alias and return that.
                const version = std.fmt.parseInt(u32, value, 10) catch return .{ .alias = value };
                return .{ .int = version };
            }
        }
    }

    return .unknown;
}

/// Parses one `<extension>` element: metadata (number, version, type, platform,
/// promotion, dependencies) plus its `<require>` blocks.
fn parseExtension(allocator: Allocator, extension: *xml.Element, api: registry.Api) !registry.Extension {
    const name = extension.getAttribute("name") orelse return error.InvalidRegistry;
    const platform = extension.getAttribute("platform");

    const is_video = std.mem.startsWith(u8, name, "vulkan_video_");

    const version = try findExtVersion(extension);

    // For some reason there are two ways for an extension to state its required
    // feature level: separately in each <require> tag, or using
    // the requiresCore attribute.
    const requires_core = if (extension.getAttribute("requiresCore")) |feature_level|
        try splitFeatureLevel(feature_level, ".")
    else
        null;

    const promoted_to: registry.Extension.Promotion = blk: {
        const promotedto = extension.getAttribute("promotedto") orelse break :blk .none;
        // Promotion target is either a core version ("VK_VERSION_x_y") or
        // another extension's name.
        if (mem.startsWith(u8, promotedto, "VK_VERSION_")) {
            const feature_level = try splitFeatureLevel(promotedto["VK_VERSION_".len..], "_");
            break :blk .{ .feature = feature_level };
        }

        break :blk .{ .extension = promotedto };
    };

    const number = blk: {
        // Vulkan Video extensions do not have numbers.
        if (is_video) break :blk 0;
        const number_str = extension.getAttribute("number") orelse return error.InvalidRegistry;
        break :blk try std.fmt.parseInt(u31, number_str, 10);
    };

    const ext_type: ?registry.Extension.ExtensionType = blk: {
        if (is_video) break :blk .video;
        const ext_type_str = extension.getAttribute("type") orelse break :blk null;
        if (mem.eql(u8, ext_type_str, "instance")) {
            break :blk .instance;
        } else if (mem.eql(u8, ext_type_str, "device")) {
            break :blk .device;
        } else {
            return error.InvalidRegistry;
        }
    };

    const depends = blk: {
        const requires_str = extension.getAttribute("requires") orelse break :blk &[_][]const u8{};
        break :blk try splitCommaAlloc(allocator, requires_str);
    };

    // Over-allocated to the child count; trimmed to `requires[0..i]` below.
    var requires = try allocator.alloc(registry.Require, extension.children.len);
    var i: usize = 0;
    var it = extension.findChildrenByTag("require");
    while (it.next()) |require| {
        if (!requiredByApi(require, api))
            continue;
        requires[i] = try parseRequire(allocator, require, number, api);
        i += 1;
    }

    return registry.Extension{
        .name = name,
        .number = number,
        .version = version,
        .extension_type = ext_type,
.depends = depends, 981 | .promoted_to = promoted_to, 982 | .platform = platform, 983 | .required_feature_level = requires_core, 984 | .requires = requires[0..i], 985 | }; 986 | } 987 | 988 | fn splitFeatureLevel(ver: []const u8, split: []const u8) !registry.FeatureLevel { 989 | var it = mem.splitSequence(u8, ver, split); 990 | 991 | const major = it.next() orelse return error.InvalidFeatureLevel; 992 | const minor = it.next() orelse return error.InvalidFeatureLevel; 993 | if (it.next() != null) { 994 | return error.InvalidFeatureLevel; 995 | } 996 | 997 | return registry.FeatureLevel{ 998 | .major = try std.fmt.parseInt(u32, major, 10), 999 | .minor = try std.fmt.parseInt(u32, minor, 10), 1000 | }; 1001 | } 1002 | 1003 | fn requiredByApi(elem: *xml.Element, api: registry.Api) bool { 1004 | const apis = elem.getAttribute("api") orelse return true; // If the 'api' element is not present, assume required. 1005 | 1006 | var it = mem.splitScalar(u8, apis, ','); 1007 | while (it.next()) |required_by_api| { 1008 | if (std.mem.eql(u8, @tagName(api), required_by_api)) return true; 1009 | } 1010 | 1011 | return false; 1012 | } 1013 | -------------------------------------------------------------------------------- /src/vulkan/registry.zig: -------------------------------------------------------------------------------- 1 | pub const Api = enum { 2 | vulkan, 3 | vulkansc, 4 | }; 5 | 6 | pub const Registry = struct { 7 | decls: []Declaration, 8 | api_constants: []ApiConstant, 9 | tags: []Tag, 10 | features: []Feature, 11 | extensions: []Extension, 12 | }; 13 | 14 | pub const Declaration = struct { 15 | name: []const u8, 16 | decl_type: DeclarationType, 17 | }; 18 | 19 | pub const DeclarationType = union(enum) { 20 | container: Container, 21 | enumeration: Enum, 22 | bitmask: Bitmask, 23 | handle: Handle, 24 | command: Command, 25 | alias: Alias, 26 | foreign: Foreign, 27 | typedef: TypeInfo, 28 | external, 29 | }; 30 | 31 | pub const Alias = struct { 32 | pub const Target = 
enum { 33 | other_command, 34 | other_type, 35 | }; 36 | 37 | name: []const u8, 38 | target: Target, 39 | }; 40 | 41 | pub const ApiConstant = struct { 42 | pub const Value = union(enum) { 43 | expr: []const u8, 44 | version: [4][]const u8, 45 | video_std_version: [3][]const u8, 46 | }; 47 | 48 | name: []const u8, 49 | value: Value, 50 | }; 51 | 52 | pub const Tag = struct { 53 | name: []const u8, 54 | author: []const u8, 55 | }; 56 | 57 | pub const TypeInfo = union(enum) { 58 | name: []const u8, 59 | command_ptr: Command, 60 | pointer: Pointer, 61 | array: Array, 62 | }; 63 | 64 | pub const Container = struct { 65 | pub const Field = struct { 66 | name: []const u8, 67 | field_type: TypeInfo, 68 | bits: ?usize, 69 | is_buffer_len: bool, 70 | is_optional: bool, 71 | }; 72 | 73 | stype: ?[]const u8, 74 | extends: ?[]const []const u8, 75 | fields: []Field, 76 | is_union: bool, 77 | }; 78 | 79 | pub const Enum = struct { 80 | pub const Value = union(enum) { 81 | bitpos: u6, // 1 << bitpos 82 | bit_vector: i32, // Combined flags & some vendor IDs 83 | int: i32, 84 | alias: struct { 85 | name: []const u8, 86 | is_compat_alias: bool, 87 | }, 88 | }; 89 | 90 | pub const Field = struct { 91 | name: []const u8, 92 | value: Value, 93 | }; 94 | 95 | fields: []Field, 96 | bitwidth: u8, 97 | is_bitmask: bool, 98 | }; 99 | 100 | pub const Bitmask = struct { 101 | bits_enum: ?[]const u8, 102 | bitwidth: u8, 103 | }; 104 | 105 | pub const Handle = struct { 106 | parent: ?[]const u8, // VkInstance has no parent 107 | is_dispatchable: bool, 108 | }; 109 | 110 | pub const Command = struct { 111 | pub const Param = struct { 112 | name: []const u8, 113 | param_type: TypeInfo, 114 | is_buffer_len: bool, 115 | is_optional: bool, 116 | }; 117 | 118 | params: []Param, 119 | return_type: *TypeInfo, 120 | success_codes: []const []const u8, 121 | error_codes: []const []const u8, 122 | }; 123 | 124 | pub const Pointer = struct { 125 | pub const PointerSize = union(enum) { 126 | one, 127 | /// 
The length is given by some complex expression, possibly involving another field 128 | many, 129 | /// The length is given by some other field or parameter 130 | other_field: []const u8, 131 | zero_terminated, 132 | }; 133 | 134 | is_const: bool, 135 | is_optional: bool, 136 | size: PointerSize, 137 | child: *TypeInfo, 138 | }; 139 | 140 | pub const Array = struct { 141 | pub const ArraySize = union(enum) { 142 | int: usize, 143 | alias: []const u8, // Field size is given by an api constant 144 | }; 145 | 146 | pub const ArrayValidSize = union(enum) { 147 | /// All elements are valid. 148 | all, 149 | /// The length is given by some complex expression, possibly involving another field 150 | many, 151 | /// The length is given by some complex expression, possibly involving another field 152 | other_field: []const u8, 153 | /// The valid elements are terminated by a 0, or by the bounds of the array. 154 | zero_terminated, 155 | }; 156 | 157 | /// This is the total size of the array 158 | size: ArraySize, 159 | /// The number of items that are actually filled with valid values 160 | valid_size: ArrayValidSize, 161 | /// Some members may indicate than an array is optional. This happens with 162 | /// VkPhysicalDeviceHostImageCopyPropertiesEXT::optimalTilingLayoutUUID for example. 163 | /// The spec is not entirely clear about what this means, but presumably it should 164 | /// be filled with all zeroes. 
165 | is_optional: bool, 166 | child: *TypeInfo, 167 | }; 168 | 169 | pub const Foreign = struct { 170 | depends: []const u8, // Either a header or vk_platform 171 | }; 172 | 173 | pub const Feature = struct { 174 | name: []const u8, 175 | level: FeatureLevel, // from 'number' 176 | requires: []Require, 177 | }; 178 | 179 | pub const Extension = struct { 180 | pub const ExtensionType = enum { 181 | instance, 182 | device, 183 | video, 184 | }; 185 | 186 | pub const Promotion = union(enum) { 187 | none, 188 | feature: FeatureLevel, 189 | extension: []const u8, 190 | }; 191 | 192 | pub const Version = union(enum) { 193 | int: u32, 194 | alias: []const u8, 195 | unknown, 196 | }; 197 | 198 | name: []const u8, 199 | number: u31, 200 | version: Version, 201 | extension_type: ?ExtensionType, 202 | depends: []const []const u8, // Other extensions 203 | promoted_to: Promotion, 204 | platform: ?[]const u8, 205 | required_feature_level: ?FeatureLevel, 206 | requires: []Require, 207 | }; 208 | 209 | pub const Require = struct { 210 | pub const EnumExtension = struct { 211 | pub const Value = union(enum) { 212 | field: Enum.Field, 213 | new_api_constant_expr: []const u8, 214 | }; 215 | extends: []const u8, 216 | extnumber: ?u31, 217 | value: Value, 218 | }; 219 | 220 | extends: []EnumExtension, 221 | types: []const []const u8, 222 | commands: []const []const u8, 223 | required_feature_level: ?FeatureLevel, 224 | required_extension: ?[]const u8, 225 | }; 226 | 227 | pub const FeatureLevel = struct { 228 | major: u32, 229 | minor: u32, 230 | }; 231 | -------------------------------------------------------------------------------- /src/xml.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const mem = std.mem; 3 | const testing = std.testing; 4 | const Allocator = mem.Allocator; 5 | const ArenaAllocator = std.heap.ArenaAllocator; 6 | 7 | pub const Attribute = struct { 8 | name: []const u8, 9 | value: []const 
u8, 10 | }; 11 | 12 | pub const Content = union(enum) { 13 | char_data: []const u8, 14 | comment: []const u8, 15 | element: *Element, 16 | }; 17 | 18 | pub const Element = struct { 19 | tag: []const u8, 20 | attributes: []Attribute = &.{}, 21 | children: []Content = &.{}, 22 | 23 | pub fn getAttribute(self: Element, attrib_name: []const u8) ?[]const u8 { 24 | for (self.attributes) |child| { 25 | if (mem.eql(u8, child.name, attrib_name)) { 26 | return child.value; 27 | } 28 | } 29 | 30 | return null; 31 | } 32 | 33 | pub fn getCharData(self: Element, child_tag: []const u8) ?[]const u8 { 34 | const child = self.findChildByTag(child_tag) orelse return null; 35 | if (child.children.len != 1) { 36 | return null; 37 | } 38 | 39 | return switch (child.children[0]) { 40 | .char_data => |char_data| char_data, 41 | else => null, 42 | }; 43 | } 44 | 45 | pub fn iterator(self: Element) ChildIterator { 46 | return .{ 47 | .items = self.children, 48 | .i = 0, 49 | }; 50 | } 51 | 52 | pub fn elements(self: Element) ChildElementIterator { 53 | return .{ 54 | .inner = self.iterator(), 55 | }; 56 | } 57 | 58 | pub fn findChildByTag(self: Element, tag: []const u8) ?*Element { 59 | var it = self.findChildrenByTag(tag); 60 | return it.next(); 61 | } 62 | 63 | pub fn findChildrenByTag(self: Element, tag: []const u8) FindChildrenByTagIterator { 64 | return .{ 65 | .inner = self.elements(), 66 | .tag = tag, 67 | }; 68 | } 69 | 70 | pub const ChildIterator = struct { 71 | items: []Content, 72 | i: usize, 73 | 74 | pub fn next(self: *ChildIterator) ?*Content { 75 | if (self.i < self.items.len) { 76 | self.i += 1; 77 | return &self.items[self.i - 1]; 78 | } 79 | 80 | return null; 81 | } 82 | }; 83 | 84 | pub const ChildElementIterator = struct { 85 | inner: ChildIterator, 86 | 87 | pub fn next(self: *ChildElementIterator) ?*Element { 88 | while (self.inner.next()) |child| { 89 | if (child.* != .element) { 90 | continue; 91 | } 92 | 93 | return child.*.element; 94 | } 95 | 96 | return null; 
97 | } 98 | }; 99 | 100 | pub const FindChildrenByTagIterator = struct { 101 | inner: ChildElementIterator, 102 | tag: []const u8, 103 | 104 | pub fn next(self: *FindChildrenByTagIterator) ?*Element { 105 | while (self.inner.next()) |child| { 106 | if (!mem.eql(u8, child.tag, self.tag)) { 107 | continue; 108 | } 109 | 110 | return child; 111 | } 112 | 113 | return null; 114 | } 115 | }; 116 | }; 117 | 118 | pub const Document = struct { 119 | arena: ArenaAllocator, 120 | xml_decl: ?*Element, 121 | root: *Element, 122 | 123 | pub fn deinit(self: Document) void { 124 | var arena = self.arena; // Copy to stack so self can be taken by value. 125 | arena.deinit(); 126 | } 127 | }; 128 | 129 | const Parser = struct { 130 | source: []const u8, 131 | offset: usize, 132 | line: usize, 133 | column: usize, 134 | 135 | fn init(source: []const u8) Parser { 136 | return .{ 137 | .source = source, 138 | .offset = 0, 139 | .line = 0, 140 | .column = 0, 141 | }; 142 | } 143 | 144 | fn peek(self: *Parser) ?u8 { 145 | return if (self.offset < self.source.len) self.source[self.offset] else null; 146 | } 147 | 148 | fn consume(self: *Parser) !u8 { 149 | if (self.offset < self.source.len) { 150 | return self.consumeNoEof(); 151 | } 152 | 153 | return error.UnexpectedEof; 154 | } 155 | 156 | fn consumeNoEof(self: *Parser) u8 { 157 | std.debug.assert(self.offset < self.source.len); 158 | const c = self.source[self.offset]; 159 | self.offset += 1; 160 | 161 | if (c == '\n') { 162 | self.line += 1; 163 | self.column = 0; 164 | } else { 165 | self.column += 1; 166 | } 167 | 168 | return c; 169 | } 170 | 171 | fn eat(self: *Parser, char: u8) bool { 172 | self.expect(char) catch return false; 173 | return true; 174 | } 175 | 176 | fn expect(self: *Parser, expected: u8) !void { 177 | if (self.peek()) |actual| { 178 | if (expected != actual) { 179 | return error.UnexpectedCharacter; 180 | } 181 | 182 | _ = self.consumeNoEof(); 183 | return; 184 | } 185 | 186 | return error.UnexpectedEof; 187 | 
} 188 | 189 | fn eatStr(self: *Parser, text: []const u8) bool { 190 | self.expectStr(text) catch return false; 191 | return true; 192 | } 193 | 194 | fn expectStr(self: *Parser, text: []const u8) !void { 195 | if (self.source.len < self.offset + text.len) { 196 | return error.UnexpectedEof; 197 | } else if (mem.startsWith(u8, self.source[self.offset..], text)) { 198 | var i: usize = 0; 199 | while (i < text.len) : (i += 1) { 200 | _ = self.consumeNoEof(); 201 | } 202 | 203 | return; 204 | } 205 | 206 | return error.UnexpectedCharacter; 207 | } 208 | 209 | fn eatWs(self: *Parser) bool { 210 | var ws = false; 211 | 212 | while (self.peek()) |ch| { 213 | switch (ch) { 214 | ' ', '\t', '\n', '\r' => { 215 | ws = true; 216 | _ = self.consumeNoEof(); 217 | }, 218 | else => break, 219 | } 220 | } 221 | 222 | return ws; 223 | } 224 | 225 | fn expectWs(self: *Parser) !void { 226 | if (!self.eatWs()) return error.UnexpectedCharacter; 227 | } 228 | 229 | fn currentLine(self: Parser) []const u8 { 230 | var begin: usize = 0; 231 | if (mem.lastIndexOfScalar(u8, self.source[0..self.offset], '\n')) |prev_nl| { 232 | begin = prev_nl + 1; 233 | } 234 | 235 | const end = mem.indexOfScalarPos(u8, self.source, self.offset, '\n') orelse self.source.len; 236 | return self.source[begin..end]; 237 | } 238 | }; 239 | 240 | test "xml: Parser" { 241 | { 242 | var parser = Parser.init("I like pythons"); 243 | try testing.expectEqual(@as(?u8, 'I'), parser.peek()); 244 | try testing.expectEqual(@as(u8, 'I'), parser.consumeNoEof()); 245 | try testing.expectEqual(@as(?u8, ' '), parser.peek()); 246 | try testing.expectEqual(@as(u8, ' '), try parser.consume()); 247 | 248 | try testing.expect(parser.eat('l')); 249 | try testing.expectEqual(@as(?u8, 'i'), parser.peek()); 250 | try testing.expectEqual(false, parser.eat('a')); 251 | try testing.expectEqual(@as(?u8, 'i'), parser.peek()); 252 | 253 | try parser.expect('i'); 254 | try testing.expectEqual(@as(?u8, 'k'), parser.peek()); 255 | try 
testing.expectError(error.UnexpectedCharacter, parser.expect('a')); 256 | try testing.expectEqual(@as(?u8, 'k'), parser.peek()); 257 | 258 | try testing.expect(parser.eatStr("ke")); 259 | try testing.expectEqual(@as(?u8, ' '), parser.peek()); 260 | 261 | try testing.expect(parser.eatWs()); 262 | try testing.expectEqual(@as(?u8, 'p'), parser.peek()); 263 | try testing.expectEqual(false, parser.eatWs()); 264 | try testing.expectEqual(@as(?u8, 'p'), parser.peek()); 265 | 266 | try testing.expectEqual(false, parser.eatStr("aaaaaaaaa")); 267 | try testing.expectEqual(@as(?u8, 'p'), parser.peek()); 268 | 269 | try testing.expectError(error.UnexpectedEof, parser.expectStr("aaaaaaaaa")); 270 | try testing.expectEqual(@as(?u8, 'p'), parser.peek()); 271 | try testing.expectError(error.UnexpectedCharacter, parser.expectStr("pytn")); 272 | try testing.expectEqual(@as(?u8, 'p'), parser.peek()); 273 | try parser.expectStr("python"); 274 | try testing.expectEqual(@as(?u8, 's'), parser.peek()); 275 | } 276 | 277 | { 278 | var parser = Parser.init(""); 279 | try testing.expectEqual(parser.peek(), null); 280 | try testing.expectError(error.UnexpectedEof, parser.consume()); 281 | try testing.expectEqual(parser.eat('p'), false); 282 | try testing.expectError(error.UnexpectedEof, parser.expect('p')); 283 | } 284 | } 285 | 286 | pub const ParseError = error{ 287 | IllegalCharacter, 288 | UnexpectedEof, 289 | UnexpectedCharacter, 290 | UnclosedValue, 291 | UnclosedComment, 292 | InvalidName, 293 | InvalidEntity, 294 | InvalidStandaloneValue, 295 | NonMatchingClosingTag, 296 | InvalidDocument, 297 | OutOfMemory, 298 | }; 299 | 300 | pub fn parse(backing_allocator: Allocator, source: []const u8) !Document { 301 | var parser = Parser.init(source); 302 | return try parseDocument(&parser, backing_allocator); 303 | } 304 | 305 | fn parseDocument(parser: *Parser, backing_allocator: Allocator) !Document { 306 | var doc = Document{ 307 | .arena = ArenaAllocator.init(backing_allocator), 308 | 
.xml_decl = null, 309 | .root = undefined, 310 | }; 311 | 312 | errdefer doc.deinit(); 313 | 314 | const allocator = doc.arena.allocator(); 315 | 316 | try skipComments(parser, allocator); 317 | 318 | doc.xml_decl = try parseElement(parser, allocator, .xml_decl); 319 | _ = parser.eatWs(); 320 | try skipComments(parser, allocator); 321 | 322 | doc.root = (try parseElement(parser, allocator, .element)) orelse return error.InvalidDocument; 323 | _ = parser.eatWs(); 324 | try skipComments(parser, allocator); 325 | 326 | if (parser.peek() != null) return error.InvalidDocument; 327 | 328 | return doc; 329 | } 330 | 331 | fn parseAttrValue(parser: *Parser, alloc: Allocator) ![]const u8 { 332 | const quote = try parser.consume(); 333 | if (quote != '"' and quote != '\'') return error.UnexpectedCharacter; 334 | 335 | const begin = parser.offset; 336 | 337 | while (true) { 338 | const c = parser.consume() catch return error.UnclosedValue; 339 | if (c == quote) break; 340 | } 341 | 342 | const end = parser.offset - 1; 343 | 344 | return try unescape(alloc, parser.source[begin..end]); 345 | } 346 | 347 | fn parseEqAttrValue(parser: *Parser, alloc: Allocator) ![]const u8 { 348 | _ = parser.eatWs(); 349 | try parser.expect('='); 350 | _ = parser.eatWs(); 351 | 352 | return try parseAttrValue(parser, alloc); 353 | } 354 | 355 | fn parseNameNoDupe(parser: *Parser) ![]const u8 { 356 | // XML's spec on names is very long, so to make this easier 357 | // we just take any character that is not special and not whitespace 358 | const begin = parser.offset; 359 | 360 | while (parser.peek()) |ch| { 361 | switch (ch) { 362 | ' ', '\t', '\n', '\r' => break, 363 | '&', '"', '\'', '<', '>', '?', '=', '/' => break, 364 | else => _ = parser.consumeNoEof(), 365 | } 366 | } 367 | 368 | const end = parser.offset; 369 | if (begin == end) return error.InvalidName; 370 | 371 | return parser.source[begin..end]; 372 | } 373 | 374 | fn parseCharData(parser: *Parser, alloc: Allocator) !?[]const u8 { 375 
| const begin = parser.offset; 376 | 377 | while (parser.peek()) |ch| { 378 | switch (ch) { 379 | '<' => break, 380 | else => _ = parser.consumeNoEof(), 381 | } 382 | } 383 | 384 | const end = parser.offset; 385 | if (begin == end) return null; 386 | 387 | return try unescape(alloc, parser.source[begin..end]); 388 | } 389 | 390 | fn parseContent(parser: *Parser, alloc: Allocator) ParseError!Content { 391 | if (try parseCharData(parser, alloc)) |cd| { 392 | return Content{ .char_data = cd }; 393 | } else if (try parseComment(parser, alloc)) |comment| { 394 | return Content{ .comment = comment }; 395 | } else if (try parseElement(parser, alloc, .element)) |elem| { 396 | return Content{ .element = elem }; 397 | } else { 398 | return error.UnexpectedCharacter; 399 | } 400 | } 401 | 402 | fn parseAttr(parser: *Parser, alloc: Allocator) !?Attribute { 403 | const name = parseNameNoDupe(parser) catch return null; 404 | _ = parser.eatWs(); 405 | try parser.expect('='); 406 | _ = parser.eatWs(); 407 | const value = try parseAttrValue(parser, alloc); 408 | 409 | const attr = Attribute{ 410 | .name = try alloc.dupe(u8, name), 411 | .value = value, 412 | }; 413 | return attr; 414 | } 415 | 416 | const ElementKind = enum { 417 | xml_decl, 418 | element, 419 | }; 420 | 421 | fn parseElement(parser: *Parser, alloc: Allocator, comptime kind: ElementKind) !?*Element { 422 | const start = parser.offset; 423 | 424 | const tag = switch (kind) { 425 | .xml_decl => blk: { 426 | if (!parser.eatStr(" blk: { 433 | if (!parser.eat('<')) return null; 434 | const tag = parseNameNoDupe(parser) catch { 435 | parser.offset = start; 436 | return null; 437 | }; 438 | break :blk tag; 439 | }, 440 | }; 441 | 442 | var attributes = std.ArrayList(Attribute).init(alloc); 443 | defer attributes.deinit(); 444 | 445 | var children = std.ArrayList(Content).init(alloc); 446 | defer children.deinit(); 447 | 448 | while (parser.eatWs()) { 449 | const attr = (try parseAttr(parser, alloc)) orelse break; 450 | 
try attributes.append(attr); 451 | } 452 | 453 | switch (kind) { 454 | .xml_decl => try parser.expectStr("?>"), 455 | .element => { 456 | if (!parser.eatStr("/>")) { 457 | try parser.expect('>'); 458 | 459 | while (true) { 460 | if (parser.peek() == null) { 461 | return error.UnexpectedEof; 462 | } else if (parser.eatStr("'); 477 | } 478 | }, 479 | } 480 | 481 | const element = try alloc.create(Element); 482 | element.* = .{ 483 | .tag = try alloc.dupe(u8, tag), 484 | .attributes = try attributes.toOwnedSlice(), 485 | .children = try children.toOwnedSlice(), 486 | }; 487 | return element; 488 | } 489 | 490 | test "xml: parseElement" { 491 | var arena = ArenaAllocator.init(testing.allocator); 492 | defer arena.deinit(); 493 | const alloc = arena.allocator(); 494 | 495 | { 496 | var parser = Parser.init("<= a='b'/>"); 497 | try testing.expectEqual(@as(?*Element, null), try parseElement(&parser, alloc, .element)); 498 | try testing.expectEqual(@as(?u8, '<'), parser.peek()); 499 | } 500 | 501 | { 502 | var parser = Parser.init(""); 503 | const elem = try parseElement(&parser, alloc, .element); 504 | try testing.expectEqualSlices(u8, elem.?.tag, "python"); 505 | 506 | const size_attr = elem.?.attributes[0]; 507 | try testing.expectEqualSlices(u8, size_attr.name, "size"); 508 | try testing.expectEqualSlices(u8, size_attr.value, "15"); 509 | 510 | const color_attr = elem.?.attributes[1]; 511 | try testing.expectEqualSlices(u8, color_attr.name, "color"); 512 | try testing.expectEqualSlices(u8, color_attr.value, "green"); 513 | } 514 | 515 | { 516 | var parser = Parser.init("test"); 517 | const elem = try parseElement(&parser, alloc, .element); 518 | try testing.expectEqualSlices(u8, elem.?.tag, "python"); 519 | try testing.expectEqualSlices(u8, elem.?.children[0].char_data, "test"); 520 | } 521 | 522 | { 523 | var parser = Parser.init("bdf"); 524 | const elem = try parseElement(&parser, alloc, .element); 525 | try testing.expectEqualSlices(u8, elem.?.tag, "a"); 526 | try 
testing.expectEqualSlices(u8, elem.?.children[0].char_data, "b"); 527 | try testing.expectEqualSlices(u8, elem.?.children[1].element.tag, "c"); 528 | try testing.expectEqualSlices(u8, elem.?.children[2].char_data, "d"); 529 | try testing.expectEqualSlices(u8, elem.?.children[3].element.tag, "e"); 530 | try testing.expectEqualSlices(u8, elem.?.children[4].char_data, "f"); 531 | try testing.expectEqualSlices(u8, elem.?.children[5].comment, "g"); 532 | } 533 | } 534 | 535 | test "xml: parse prolog" { 536 | var arena = ArenaAllocator.init(testing.allocator); 537 | defer arena.deinit(); 538 | const a = arena.allocator(); 539 | 540 | { 541 | var parser = Parser.init(""); 542 | try testing.expectEqual(@as(?*Element, null), try parseElement(&parser, a, .xml_decl)); 543 | try testing.expectEqual(@as(?u8, '<'), parser.peek()); 544 | } 545 | 546 | { 547 | var parser = Parser.init(""); 548 | const decl = try parseElement(&parser, a, .xml_decl); 549 | try testing.expectEqualSlices(u8, "aa", decl.?.getAttribute("version").?); 550 | try testing.expectEqual(@as(?[]const u8, null), decl.?.getAttribute("encoding")); 551 | try testing.expectEqual(@as(?[]const u8, null), decl.?.getAttribute("standalone")); 552 | } 553 | 554 | { 555 | var parser = Parser.init(""); 556 | const decl = try parseElement(&parser, a, .xml_decl); 557 | try testing.expectEqualSlices(u8, "ccc", decl.?.getAttribute("version").?); 558 | try testing.expectEqualSlices(u8, "bbb", decl.?.getAttribute("encoding").?); 559 | try testing.expectEqualSlices(u8, "yes", decl.?.getAttribute("standalone").?); 560 | } 561 | } 562 | 563 | fn skipComments(parser: *Parser, alloc: Allocator) !void { 564 | while ((try parseComment(parser, alloc)) != null) { 565 | _ = parser.eatWs(); 566 | } 567 | } 568 | 569 | fn parseComment(parser: *Parser, alloc: Allocator) !?[]const u8 { 570 | if (!parser.eatStr("")) { 574 | _ = parser.consume() catch return error.UnclosedComment; 575 | } 576 | 577 | const end = parser.offset - "-->".len; 578 | 
return try alloc.dupe(u8, parser.source[begin..end]); 579 | } 580 | 581 | fn unescapeEntity(text: []const u8) !u8 { 582 | const EntitySubstition = struct { text: []const u8, replacement: u8 }; 583 | 584 | const entities = [_]EntitySubstition{ 585 | .{ .text = "<", .replacement = '<' }, 586 | .{ .text = ">", .replacement = '>' }, 587 | .{ .text = "&", .replacement = '&' }, 588 | .{ .text = "'", .replacement = '\'' }, 589 | .{ .text = """, .replacement = '"' }, 590 | }; 591 | 592 | for (entities) |entity| { 593 | if (mem.eql(u8, text, entity.text)) return entity.replacement; 594 | } 595 | 596 | return error.InvalidEntity; 597 | } 598 | 599 | fn unescape(arena: Allocator, text: []const u8) ![]const u8 { 600 | const unescaped = try arena.alloc(u8, text.len); 601 | 602 | var j: usize = 0; 603 | var i: usize = 0; 604 | while (i < text.len) : (j += 1) { 605 | if (text[i] == '&') { 606 | const entity_end = 1 + (mem.indexOfScalarPos(u8, text, i, ';') orelse return error.InvalidEntity); 607 | unescaped[j] = try unescapeEntity(text[i..entity_end]); 608 | i = entity_end; 609 | } else { 610 | unescaped[j] = text[i]; 611 | i += 1; 612 | } 613 | } 614 | 615 | return unescaped[0..j]; 616 | } 617 | 618 | test "xml: unescape" { 619 | var arena = ArenaAllocator.init(testing.allocator); 620 | defer arena.deinit(); 621 | const a = arena.allocator(); 622 | 623 | try testing.expectEqualSlices(u8, "test", try unescape(a, "test")); 624 | try testing.expectEqualSlices(u8, "ad\"e'f<", try unescape(a, "a<b&c>d"e'f<")); 625 | try testing.expectError(error.InvalidEntity, unescape(a, "python&")); 626 | try testing.expectError(error.InvalidEntity, unescape(a, "python&&")); 627 | try testing.expectError(error.InvalidEntity, unescape(a, "python&test;")); 628 | try testing.expectError(error.InvalidEntity, unescape(a, "python&boa")); 629 | } 630 | 631 | test "xml: top level comments" { 632 | var arena = ArenaAllocator.init(testing.allocator); 633 | defer arena.deinit(); 634 | const a = 
arena.allocator(); 635 | 636 | const doc = try parse(a, ""); 637 | try testing.expectEqualSlices(u8, "python", doc.root.tag); 638 | } 639 | -------------------------------------------------------------------------------- /test/ref_all_decls.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const vk = @import("vulkan"); 3 | 4 | // Provide bogus defaults for unknown platform types 5 | // The actual type does not really matter here... 6 | pub const GgpStreamDescriptor = u32; 7 | pub const GgpFrameToken = u32; 8 | pub const _screen_buffer = u32; 9 | pub const NvSciSyncAttrList = u32; 10 | pub const NvSciSyncObj = u32; 11 | pub const NvSciSyncFence = u32; 12 | pub const NvSciBufAttrList = u32; 13 | pub const NvSciBufObj = u32; 14 | pub const ANativeWindow = u32; 15 | pub const AHardwareBuffer = u32; 16 | pub const CAMetalLayer = u32; 17 | pub const MTLDevice_id = u32; 18 | pub const MTLCommandQueue_id = u32; 19 | pub const MTLBuffer_id = u32; 20 | pub const MTLTexture_id = u32; 21 | pub const MTLSharedEvent_id = u32; 22 | pub const IOSurfaceRef = u32; 23 | 24 | // For some reason these types are exported in a different header, and not described in vk.xml. 25 | // If we are not also generating these, the user will have to manually specify them. 
// Stand-ins for the Vulkan video std-header types when video support was not
// generated; with video enabled, vk provides the real declarations.
pub usingnamespace if (!vk.have_vulkan_video) struct {
    pub const StdVideoH264ProfileIdc = u32;
    pub const StdVideoH264LevelIdc = u32;
    pub const StdVideoH264ChromaFormatIdc = u32;
    pub const StdVideoH264PocType = u32;
    pub const StdVideoH264SpsFlags = u32;
    pub const StdVideoH264ScalingLists = u32;
    pub const StdVideoH264SequenceParameterSetVui = u32;
    pub const StdVideoH264AspectRatioIdc = u32;
    pub const StdVideoH264HrdParameters = u32;
    pub const StdVideoH264SpsVuiFlags = u32;
    pub const StdVideoH264WeightedBipredIdc = u32;
    pub const StdVideoH264PpsFlags = u32;
    pub const StdVideoH264SliceType = u32;
    pub const StdVideoH264CabacInitIdc = u32;
    pub const StdVideoH264DisableDeblockingFilterIdc = u32;
    pub const StdVideoH264PictureType = u32;
    pub const StdVideoH264ModificationOfPicNumsIdc = u32;
    pub const StdVideoH264MemMgmtControlOp = u32;
    pub const StdVideoDecodeH264PictureInfo = u32;
    pub const StdVideoDecodeH264ReferenceInfo = u32;
    pub const StdVideoDecodeH264PictureInfoFlags = u32;
    pub const StdVideoDecodeH264ReferenceInfoFlags = u32;
    pub const StdVideoH264SequenceParameterSet = u32;
    pub const StdVideoH264PictureParameterSet = u32;
    pub const StdVideoH265ProfileIdc = u32;
    pub const StdVideoH265VideoParameterSet = u32;
    pub const StdVideoH265SequenceParameterSet = u32;
    pub const StdVideoH265PictureParameterSet = u32;
    pub const StdVideoH265DecPicBufMgr = u32;
    pub const StdVideoH265HrdParameters = u32;
    pub const StdVideoH265VpsFlags = u32;
    pub const StdVideoH265LevelIdc = u32;
    pub const StdVideoH265SpsFlags = u32;
    pub const StdVideoH265ScalingLists = u32;
    pub const StdVideoH265SequenceParameterSetVui = u32;
    pub const StdVideoH265PredictorPaletteEntries = u32;
    pub const StdVideoH265PpsFlags = u32;
    pub const StdVideoH265SubLayerHrdParameters = u32;
    pub const StdVideoH265HrdFlags = u32;
    pub const StdVideoH265SpsVuiFlags = u32;
    pub const StdVideoH265SliceType = u32;
    pub const StdVideoH265PictureType = u32;
    pub const StdVideoDecodeH265PictureInfo = u32;
    pub const StdVideoDecodeH265ReferenceInfo = u32;
    pub const StdVideoDecodeH265PictureInfoFlags = u32;
    pub const StdVideoDecodeH265ReferenceInfoFlags = u32;
    pub const StdVideoAV1Profile = u32;
    pub const StdVideoAV1Level = u32;
    pub const StdVideoAV1SequenceHeader = u32;
    pub const StdVideoDecodeAV1PictureInfo = u32;
    pub const StdVideoDecodeAV1ReferenceInfo = u32;
    pub const StdVideoEncodeH264SliceHeader = u32;
    pub const StdVideoEncodeH264PictureInfo = u32;
    pub const StdVideoEncodeH264ReferenceInfo = u32;
    pub const StdVideoEncodeH264SliceHeaderFlags = u32;
    pub const StdVideoEncodeH264ReferenceListsInfo = u32;
    pub const StdVideoEncodeH264PictureInfoFlags = u32;
    pub const StdVideoEncodeH264ReferenceInfoFlags = u32;
    pub const StdVideoEncodeH264RefMgmtFlags = u32;
    pub const StdVideoEncodeH264RefListModEntry = u32;
    pub const StdVideoEncodeH264RefPicMarkingEntry = u32;
    pub const StdVideoEncodeH265PictureInfoFlags = u32;
    pub const StdVideoEncodeH265PictureInfo = u32;
    pub const StdVideoEncodeH265SliceSegmentHeader = u32;
    pub const StdVideoEncodeH265ReferenceInfo = u32;
    pub const StdVideoEncodeH265ReferenceListsInfo = u32;
    pub const StdVideoEncodeH265SliceSegmentHeaderFlags = u32;
    pub const StdVideoEncodeH265ReferenceInfoFlags = u32;
    pub const StdVideoEncodeH265ReferenceModificationFlags = u32;
    pub const StdVideoEncodeAV1OperatingPointInfo = u32;
} else struct {};

comptime {
    @setEvalBranchQuota(1000000);
    reallyRefAllDecls(vk);
}

/// Recursively references every declaration reachable from `T`, forcing the
/// compiler to semantically analyze all generated code.
fn reallyRefAllDecls(comptime T: type) void {
    switch (@typeInfo(T)) {
        .@"struct", .@"union" => {
            reallyRefAllContainerDecls(T);
            // Also descend into field types, which a shallow ref would skip.
            inline for (std.meta.fields(T)) |field| reallyRefAllDecls(field.type);
        },
        .@"enum", .@"opaque" => reallyRefAllContainerDecls(T),
        else => {},
    }
}

/// References each declaration of container `T` and recurses into those
/// declarations that are themselves types.
fn reallyRefAllContainerDecls(comptime T: type) void {
    inline for (comptime std.meta.declarations(T)) |decl| {
        const value = @field(T, decl.name);
        if (@TypeOf(value) == type) {
            reallyRefAllDecls(value);
        }
    }
}
--------------------------------------------------------------------------------