├── src ├── BindGroup.zig ├── Buffer.zig ├── QuerySet.zig ├── Sampler.zig ├── ComputePipeline.zig ├── ExternalTexture.zig ├── RenderBundle.zig ├── ComputePassEncoder.zig ├── RenderBundleEncoder.zig ├── main.zig ├── BindGroupLayout.zig ├── Texture.zig ├── ShaderModule.zig ├── internal.zig ├── PipelineLayout.zig ├── CommandBuffer.zig ├── CommandEncoder.zig ├── Surface.zig ├── TextureView.zig ├── Queue.zig ├── Instance.zig ├── vk.zig ├── SwapChain.zig ├── Device.zig ├── Adapter.zig ├── RenderPassEncoder.zig ├── helper.zig ├── RenderPipeline.zig └── Interface.zig ├── .gitignore ├── LICENSE └── README.md /src/BindGroup.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/Buffer.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/QuerySet.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/Sampler.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vk.xml 2 | -------------------------------------------------------------------------------- /src/ComputePipeline.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/ExternalTexture.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/RenderBundle.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/ComputePassEncoder.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/RenderBundleEncoder.zig: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/main.zig: -------------------------------------------------------------------------------- 1 | pub const Interface = @import("Interface.zig"); 2 | -------------------------------------------------------------------------------- /src/BindGroupLayout.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const BindGroupLayout = @This(); 9 | 10 | manager: helper.Manager(BindGroupLayout) = .{}, 11 | layout: vk.DescriptorSetLayout, 12 | device: *internal.Device, 13 | -------------------------------------------------------------------------------- /src/Texture.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = 
@import("vk.zig"); 7 | 8 | const Texture = @This(); 9 | 10 | manager: helper.Manager(Texture) = .{}, 11 | image: vk.Image, 12 | extent: vk.Extent2D, 13 | samples: u32, 14 | device: *internal.Device, 15 | 16 | pub fn deinit(self: *Texture) void { 17 | self.device.dispatch.destroyImage(self.device.device, self.image, null); 18 | self.device.allocator().destroy(self); 19 | } 20 | 21 | pub fn createView(self: *Texture, descriptor: ?*const gpu.TextureView.Descriptor) !*internal.TextureView { 22 | const view = try self.device.allocator().create(internal.TextureView); 23 | errdefer self.device.allocator().destroy(view); 24 | view.* = try internal.TextureView.init(self, descriptor orelse &gpu.TextureView.Descriptor{}); 25 | return view; 26 | } 27 | -------------------------------------------------------------------------------- /src/ShaderModule.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const ShaderModule = @This(); 9 | 10 | manager: helper.Manager(ShaderModule) = .{}, 11 | shader: vk.ShaderModule, 12 | device: *internal.Device, 13 | 14 | pub fn init(device: *internal.Device, descriptor: *const gpu.ShaderModule.Descriptor) !ShaderModule { 15 | const spirv = helper.findChained(gpu.ShaderModule.SPIRVDescriptor, descriptor.next_in_chain.generic) orelse return error.InvalidDescriptor; 16 | const shader = try device.dispatch.createShaderModule(device.device, &.{ 17 | .flags = .{}, 18 | .code_size = spirv.code_size, 19 | .p_code = spirv.code, 20 | }, null); 21 | return .{ 22 | .shader = shader, 23 | .device = device, 24 | }; 25 | } 26 | 27 | pub fn deinit(self: *ShaderModule) void { 28 | self.device.dispatch.destroyShaderModule(self.device.device, self.shader, null); 29 | self.device.allocator().destroy(self); 30 | } 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2022 silversquirl 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Crescent 2 | 3 | Crescent is a simple WebGPU implementation written in Zig. 
4 | It is designed to be used with [`mach/gpu`][gpu], a WebGPU API wrapper for Zig. 5 | 6 | For simplicity, Crescent has only one backend: Vulkan. 7 | This is unusual, as most WebGPU implementations support other backends, such as DirectX, Metal and sometimes OpenGL. 8 | Crescent prefers to focus on optimizations for one backend, improving speed and keeping the implementation smaller and simpler. 9 | 10 | [gpu]: https://github.com/hexops/mach-gpu 11 | 12 | ## Status: under construction 13 | 14 | Crescent doesn't yet implement the full WebGPU API, however it can already run a [simple triangle example][example]! 15 | 16 | [example]: https://github.com/silversquirl/crescent-example 17 | 18 | ## Security 19 | 20 | Crescent does **not** provide any security guarantees and is **not** suitable for sandboxed environments, such as web browsers. 21 | While most WebGPU implementations are designed for browsers and hence have a heavy focus on security, Crescent optimizes for speed and code size instead, resulting in a much smaller implementation suitable for use by trusted code in native applications. 22 | -------------------------------------------------------------------------------- /src/internal.zig: -------------------------------------------------------------------------------- 1 | pub const Adapter = @import("Adapter.zig"); 2 | pub const BindGroup = @import("BindGroup.zig"); 3 | pub const BindGroupLayout = @import("BindGroupLayout.zig"); 4 | pub const Buffer = @import("Buffer.zig"); 5 | pub const CommandBuffer = @import("CommandBuffer.zig"); 6 | pub const CommandEncoder = @import("CommandEncoder.zig"); 7 | pub const ComputePassEncoder = @import("ComputePassEncoder.zig"); 8 | pub const ComputePipeline = @import("ComputePipeline.zig"); 9 | pub const Device = @import("Device.zig"); 10 | pub const ExternalTexture = @import("ExternalTexture.zig"); 11 | pub const Instance = @import("Instance.zig"); 12 | pub const PipelineLayout = @import("PipelineLayout.zig"); 13 | pub const QuerySet = @import("QuerySet.zig"); 14 | pub const Queue = @import("Queue.zig"); 15 | pub const RenderBundle = @import("RenderBundle.zig"); 16 | pub const RenderBundleEncoder = @import("RenderBundleEncoder.zig"); 17 | pub const RenderPassEncoder = @import("RenderPassEncoder.zig"); 18 | pub const RenderPipeline = @import("RenderPipeline.zig"); 19 | pub const Sampler = @import("Sampler.zig"); 20 | pub const ShaderModule = @import("ShaderModule.zig"); 21 | pub const Surface = @import("Surface.zig"); 22 | pub const SwapChain = @import("SwapChain.zig"); 23 | pub const Texture = @import("Texture.zig"); 24 | pub const TextureView = @import("TextureView.zig"); 25 | -------------------------------------------------------------------------------- /src/PipelineLayout.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const PipelineLayout = @This(); 9 | 10 | manager: helper.Manager(PipelineLayout) = .{}, 11 | layout: vk.PipelineLayout, 12 | device: *internal.Device, 13 | 14 | pub fn init(device: *internal.Device, descriptor: *const gpu.PipelineLayout.Descriptor) !PipelineLayout { 15 | const groups = try device.allocator().alloc(vk.DescriptorSetLayout, descriptor.bind_group_layout_count); 16 | defer device.allocator().free(groups); 17 | for (groups) |*l, i| { 18 | l.* = helper.castOpaque(*internal.BindGroupLayout, 
descriptor.bind_group_layouts.?[i]).layout; 19 | } 20 | 21 | const layout = try device.dispatch.createPipelineLayout(device.device, &.{ 22 | .flags = .{}, 23 | .set_layout_count = @intCast(u32, groups.len), 24 | .p_set_layouts = groups.ptr, 25 | .push_constant_range_count = 0, 26 | .p_push_constant_ranges = undefined, 27 | }, null); 28 | 29 | return .{ 30 | .layout = layout, 31 | .device = device, 32 | }; 33 | } 34 | 35 | pub fn deinit(self: *PipelineLayout) void { 36 | self.device.dispatch.destroyPipelineLayout(self.device.device, self.layout, null); 37 | self.device.allocator().destroy(self); 38 | } 39 | -------------------------------------------------------------------------------- /src/CommandBuffer.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const CommandBuffer = @This(); 9 | 10 | manager: helper.Manager(CommandBuffer) = .{}, 11 | buffer: vk.CommandBuffer, 12 | render_passes: std.ArrayListUnmanaged(*internal.RenderPassEncoder) = .{}, // TODO: compute passes 13 | device: *const internal.Device, 14 | 15 | pub fn init(device: *const internal.Device) !CommandBuffer { 16 | var buffer: vk.CommandBuffer = undefined; 17 | try device.dispatch.allocateCommandBuffers(device.device, &.{ 18 | .command_pool = device.pool, 19 | .level = .primary, 20 | .command_buffer_count = 1, 21 | }, @as(*[1]vk.CommandBuffer, &buffer)); 22 | try device.dispatch.beginCommandBuffer(buffer, &.{ 23 | .flags = .{}, 24 | .p_inheritance_info = undefined, 25 | }); 26 | 27 | return .{ 28 | .buffer = buffer, 29 | .device = device, 30 | }; 31 | } 32 | 33 | pub fn deinit(self: *CommandBuffer) void { 34 | self.device.dispatch.freeCommandBuffers( 35 | self.device.device, 36 | self.device.pool, 37 | 1, 38 | @as(*[1]vk.CommandBuffer, &self.buffer), 39 | ); 40 | 41 | for (self.render_passes.items) |pass| { 42 | pass.manager.release(); 43 | } 44 | 45 | const allocator = self.device.allocator(); 46 | self.render_passes.deinit(allocator); 47 | allocator.destroy(self); 48 | } 49 | -------------------------------------------------------------------------------- /src/CommandEncoder.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const CommandEncoder = @This(); 9 | 10 | manager: helper.Manager(CommandEncoder) = .{}, 11 | buffer: *internal.CommandBuffer, 12 | 13 | pub fn init(device: *const internal.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) !CommandEncoder { 14 | _ = descriptor; 15 | const buffer = try device.allocator().create(internal.CommandBuffer); 16 | errdefer device.allocator().destroy(buffer); 17 | buffer.* = try internal.CommandBuffer.init(device); 18 | return .{ .buffer = buffer }; 19 | } 20 | 21 | pub fn deinit(self: *CommandEncoder) void { 22 | const buffer = self.buffer; 23 | buffer.device.allocator().destroy(self); 24 | buffer.manager.release(); 25 | } 26 | 27 | pub fn beginRenderPass(self: *CommandEncoder, descriptor: ?*const gpu.RenderPassDescriptor) !*internal.RenderPassEncoder { 28 | const allocator = self.buffer.device.allocator(); 29 | const encoder = try allocator.create(internal.RenderPassEncoder); 30 | errdefer 
allocator.destroy(encoder); 31 | encoder.* = try internal.RenderPassEncoder.init(self, descriptor.?); 32 | return encoder; 33 | } 34 | 35 | pub fn finish(self: *CommandEncoder, descriptor: ?*const gpu.CommandBuffer.Descriptor) !*internal.CommandBuffer { 36 | _ = descriptor; 37 | try self.buffer.device.dispatch.endCommandBuffer(self.buffer.buffer); 38 | self.buffer.manager.reference(); 39 | return self.buffer; 40 | } 41 | -------------------------------------------------------------------------------- /src/Surface.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const Surface = @This(); 9 | 10 | manager: helper.Manager(Surface) = .{}, 11 | surface: vk.SurfaceKHR, 12 | instance: *internal.Instance, 13 | 14 | pub fn init(instance: *internal.Instance, descriptor: *const gpu.Surface.Descriptor) !Surface { 15 | const surface = switch (vk.windowing_system) { 16 | .win32 => try initWin32(instance, descriptor), 17 | .xlib => try initXlib(instance, descriptor), 18 | }; 19 | 20 | return .{ 21 | .surface = surface, 22 | .instance = instance, 23 | }; 24 | } 25 | 26 | fn initWin32(instance: *internal.Instance, descriptor: *const gpu.Surface.Descriptor) !vk.SurfaceKHR { 27 | const desc = helper.findChained( 28 | gpu.Surface.DescriptorFromWindowsHWND, 29 | descriptor.next_in_chain.generic, 30 | ) orelse { 31 | return error.InvalidDescriptor; 32 | }; 33 | return instance.dispatch.createWin32SurfaceKHR(instance.instance, &.{ 34 | .hinstance = @ptrCast(vk.HINSTANCE, desc.hinstance), 35 | .hwnd = @ptrCast(vk.HWND, desc.hwnd), 36 | .flags = .{}, 37 | }, null); 38 | } 39 | 40 | fn initXlib(instance: *internal.Instance, descriptor: *const gpu.Surface.Descriptor) !vk.SurfaceKHR { 41 | const desc = helper.findChained( 42 | gpu.Surface.DescriptorFromXlibWindow, 43 | descriptor.next_in_chain.generic, 44 | ) orelse { 45 | return error.InvalidDescriptor; 46 | }; 47 | return instance.dispatch.createXlibSurfaceKHR(instance.instance, &.{ 48 | .dpy = @ptrCast(*vk.Display, desc.display), 49 | .window = desc.window, 50 | .flags = .{}, 51 | }, null); 52 | } 53 | 54 | pub fn deinit(self: *Surface) void { 55 | self.instance.dispatch.destroySurfaceKHR(self.instance.instance, self.surface, null); 56 | self.instance.allocator().destroy(self); 57 | } 58 | -------------------------------------------------------------------------------- /src/TextureView.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const TextureView = @This(); 9 | 10 | manager: helper.Manager(TextureView) = .{}, 11 | view: vk.ImageView, 12 | format: vk.Format, 13 | texture: *internal.Texture, 14 | 15 | pub fn init(texture: *internal.Texture, descriptor: *const gpu.TextureView.Descriptor) !TextureView { 16 | const aspect: vk.ImageAspectFlags = if (descriptor.aspect != .all) .{ 17 | .stencil_bit = descriptor.aspect == .stencil_only, 18 | .depth_bit = descriptor.aspect == .depth_only, 19 | .plane_0_bit = descriptor.aspect == .plane0_only, 20 | .plane_1_bit = descriptor.aspect == .plane1_only, 21 | } else switch (descriptor.format) { 22 | .stencil8 => .{ .stencil_bit = true }, 23 | .depth16_unorm, 
.depth24_plus, .depth32_float => .{ .depth_bit = true }, 24 | .depth24_plus_stencil8, .depth32_float_stencil8 => .{ 25 | .depth_bit = true, 26 | .stencil_bit = true, 27 | }, 28 | .r8_bg8_biplanar420_unorm => .{ 29 | .plane_0_bit = true, 30 | .plane_1_bit = true, 31 | }, 32 | else => .{ .color_bit = true }, 33 | }; 34 | const format = helper.vulkanTextureFormat(descriptor.format); 35 | 36 | const view = try texture.device.dispatch.createImageView(texture.device.device, &.{ 37 | .flags = .{}, 38 | .image = texture.image, 39 | .view_type = switch (descriptor.dimension) { 40 | .dimension_undef => unreachable, 41 | .dimension_1d => vk.ImageViewType.@"1d", 42 | .dimension_2d => .@"2d", 43 | .dimension_2d_array => .@"2d_array", 44 | .dimension_cube => .cube, 45 | .dimension_cube_array => .cube_array, 46 | .dimension_3d => .@"3d", 47 | }, 48 | .format = format, 49 | .components = .{ 50 | .r = .identity, 51 | .g = .identity, 52 | .b = .identity, 53 | .a = .identity, 54 | }, 55 | .subresource_range = .{ 56 | .aspect_mask = aspect, 57 | .base_mip_level = descriptor.base_mip_level, 58 | .level_count = descriptor.mip_level_count, 59 | .base_array_layer = descriptor.base_array_layer, 60 | .layer_count = descriptor.array_layer_count, 61 | }, 62 | }, null); 63 | 64 | texture.manager.reference(); 65 | return .{ 66 | .view = view, 67 | .format = format, 68 | .texture = texture, 69 | }; 70 | } 71 | 72 | pub fn deinit(self: *TextureView) void { 73 | self.texture.device.dispatch.destroyImageView(self.texture.device.device, self.view, null); 74 | self.texture.manager.release(); 75 | self.texture.device.allocator().destroy(self); 76 | } 77 | -------------------------------------------------------------------------------- /src/Queue.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const vk = @import("vk.zig"); 6 | 7 | const Queue = @This(); 8 | 9 | graphics: vk.Queue, 10 | compute: vk.Queue, 11 | fence: vk.Fence, 12 | buffers: std.ArrayListUnmanaged(*internal.CommandBuffer) = .{}, 13 | 14 | pub fn init(dispatch: vk.DeviceDispatch, device: vk.Device, info: internal.Adapter.DeviceInfo) !internal.Queue { 15 | const fence = try dispatch.createFence(device, &.{ 16 | .flags = .{ .signaled_bit = true }, // So waiting pre-submit doesn't block 17 | }, null); 18 | return .{ 19 | .graphics = dispatch.getDeviceQueue(device, info.graphics_family, 0), 20 | .compute = dispatch.getDeviceQueue(device, info.compute_family, 0), 21 | .fence = fence, 22 | }; 23 | } 24 | pub fn deinit(self: *Queue) void { 25 | const device = @fieldParentPtr(internal.Device, "queue", self); 26 | for (self.buffers.items) |buf| { 27 | buf.manager.release(); 28 | } 29 | self.buffers.deinit(device.allocator()); 30 | device.dispatch.destroyFence(device.device, self.fence, null); 31 | } 32 | 33 | pub fn submit(self: *Queue, commands: []const *internal.CommandBuffer) !void { 34 | const device = @fieldParentPtr(internal.Device, "queue", self); 35 | 36 | // Release previous buffers 37 | // TODO: free earlier if we run out of memory 38 | // TODO: finer-grained freeing based on a few fences instead of just one? 
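// Note: the queue currently relies on a single fence for everything. It is created
// already signaled (see init), so the waitUncapped() below returns immediately on the
// first submit; on later submits it blocks until the previous submission has finished,
// after which the command buffers from that submission are released and the fence is
// reset and handed to queueSubmit() again.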
39 | try self.waitUncapped(); 40 | for (self.buffers.items) |buf| { 41 | buf.manager.release(); 42 | } 43 | errdefer self.buffers.clearRetainingCapacity(); 44 | 45 | // Collect new buffers 46 | try self.buffers.resize(device.allocator(), commands.len); 47 | std.mem.copy(*internal.CommandBuffer, self.buffers.items, commands); 48 | 49 | // TODO: compute 50 | const submits = try device.allocator().alloc(vk.SubmitInfo, commands.len); 51 | defer device.allocator().free(submits); 52 | for (commands) |buf, i| { 53 | buf.manager.reference(); 54 | // TODO: sequencing/synchronization 55 | submits[i] = .{ 56 | .wait_semaphore_count = 0, 57 | .p_wait_semaphores = undefined, 58 | .p_wait_dst_stage_mask = undefined, 59 | 60 | .command_buffer_count = 1, 61 | .p_command_buffers = @as(*const [1]vk.CommandBuffer, &buf.buffer), 62 | 63 | .signal_semaphore_count = 0, 64 | .p_signal_semaphores = undefined, 65 | }; 66 | } 67 | 68 | try device.dispatch.resetFences(device.device, 1, &[1]vk.Fence{self.fence}); 69 | try device.dispatch.queueSubmit(self.graphics, @intCast(u32, submits.len), submits.ptr, self.fence); 70 | } 71 | 72 | pub fn waitUncapped(self: *Queue) !void { 73 | while (!try self.waitTimeout(std.math.maxInt(u64))) {} 74 | } 75 | pub fn waitTimeout(self: *Queue, timeout: u64) !bool { 76 | const device = @fieldParentPtr(internal.Device, "queue", self); 77 | 78 | const res = try device.dispatch.waitForFences( 79 | device.device, 80 | 1, 81 | &[_]vk.Fence{self.fence}, 82 | vk.TRUE, 83 | timeout, 84 | ); 85 | return res == .success; 86 | } 87 | -------------------------------------------------------------------------------- /src/Instance.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const Instance = @This(); 9 | 10 | manager: helper.Manager(Instance) = .{}, 11 | gpa: std.heap.GeneralPurposeAllocator(.{}), 12 | 13 | dispatch: vk.InstanceDispatch, 14 | instance: vk.Instance, 15 | 16 | pub fn create(descriptor: ?*const gpu.Instance.Descriptor) !*Instance { 17 | _ = descriptor; 18 | 19 | const khr_validation = "VK_LAYER_KHRONOS_validation"; 20 | const layers: []const [*:0]const u8 = if (vk.enable_validation and vk.hasLayer(khr_validation)) 21 | &.{khr_validation} 22 | else 23 | &.{}; 24 | 25 | const system_ext = switch (vk.windowing_system) { 26 | .win32 => vk.extension_info.khr_win_32_surface.name, 27 | .xlib => vk.extension_info.khr_xlib_surface.name, 28 | }; 29 | var exts: []const [*:0]const u8 = &.{ 30 | vk.extension_info.khr_surface.name, 31 | system_ext, 32 | }; 33 | 34 | const desc: vk.InstanceCreateInfo = .{ 35 | .flags = .{}, 36 | .p_application_info = null, 37 | .enabled_layer_count = @intCast(u32, layers.len), 38 | .pp_enabled_layer_names = layers.ptr, 39 | .enabled_extension_count = @intCast(u32, exts.len), 40 | .pp_enabled_extension_names = exts.ptr, 41 | }; 42 | 43 | const instance = try vk.bd.createInstance(&desc, null); 44 | 45 | var gpa: std.heap.GeneralPurposeAllocator(.{}) = .{}; 46 | errdefer std.debug.assert(!gpa.deinit()); 47 | 48 | const self = try gpa.allocator().create(Instance); 49 | errdefer gpa.allocator().destroy(self); 50 | 51 | self.* = .{ 52 | .gpa = gpa, 53 | .dispatch = try vk.InstanceDispatch.load(instance, vk.bd.dispatch.vkGetInstanceProcAddr), 54 | .instance = instance, 55 | }; 56 | 57 | return self; 58 | } 59 | 60 | pub fn deinit(self: 
*Instance) void { 61 | self.dispatch.destroyInstance(self.instance, null); 62 | var gpa = self.gpa; 63 | gpa.allocator().destroy(self); 64 | _ = gpa.deinit(); 65 | } 66 | 67 | pub inline fn allocator(self: *Instance) std.mem.Allocator { 68 | return self.gpa.allocator(); 69 | } 70 | 71 | pub fn createSurface(self: *Instance, descriptor: *const gpu.Surface.Descriptor) !*internal.Surface { 72 | const surface = try self.allocator().create(internal.Surface); 73 | errdefer self.allocator().destroy(surface); 74 | surface.* = try internal.Surface.init(self, descriptor); 75 | return surface; 76 | } 77 | 78 | pub fn requestAdapter(self: *Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void { 79 | if (self.createAdapter(options)) |adapter| { 80 | callback(.success, @ptrCast(*gpu.Adapter, adapter), null, userdata); 81 | } else |err| switch (err) { 82 | error.NoAdapterFound => callback(.unavailable, undefined, null, userdata), 83 | else => |e| callback(.err, undefined, @errorName(e), userdata), 84 | } 85 | } 86 | fn createAdapter(self: *Instance, options: ?*const gpu.RequestAdapterOptions) !*internal.Adapter { 87 | const adapter = try self.allocator().create(internal.Adapter); 88 | errdefer self.allocator().destroy(adapter); 89 | adapter.* = try internal.Adapter.init(self, options orelse &gpu.RequestAdapterOptions{}); 90 | return adapter; 91 | } 92 | -------------------------------------------------------------------------------- /src/vk.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const builtin = @import("builtin"); 3 | const vk = @import("gen/vk.zig"); 4 | 5 | pub usingnamespace vk; 6 | 7 | pub const BaseDispatch = vk.BaseWrapper(.{ 8 | .createInstance = true, 9 | .getInstanceProcAddr = true, 10 | .enumerateInstanceLayerProperties = true, 11 | }); 12 | pub const InstanceDispatch = vk.InstanceWrapper(.{ 13 | .destroyInstance = true, 14 | 15 | .createDevice = true, 16 | .getDeviceProcAddr = true, 17 | 18 | .createXlibSurfaceKHR = windowing_system == .xlib, 19 | .createWin32SurfaceKHR = windowing_system == .win32, 20 | .destroySurfaceKHR = true, 21 | 22 | .enumeratePhysicalDevices = true, 23 | .getPhysicalDeviceProperties = true, 24 | .getPhysicalDeviceQueueFamilyProperties = true, 25 | .getPhysicalDeviceSurfaceFormatsKHR = true, 26 | .getPhysicalDeviceSurfaceCapabilitiesKHR = true, 27 | }); 28 | pub const DeviceDispatch = vk.DeviceWrapper(.{ 29 | .destroyDevice = true, 30 | .getDeviceQueue = true, 31 | .queuePresentKHR = true, 32 | .queueSubmit = true, 33 | 34 | .createFence = true, 35 | .destroyFence = true, 36 | .resetFences = true, 37 | .waitForFences = true, 38 | 39 | .createShaderModule = true, 40 | .destroyShaderModule = true, 41 | 42 | .createRenderPass = true, 43 | .destroyRenderPass = true, 44 | .createGraphicsPipelines = true, 45 | .destroyPipeline = true, 46 | .createPipelineLayout = true, 47 | .destroyPipelineLayout = true, 48 | 49 | .createFramebuffer = true, 50 | .destroyFramebuffer = true, 51 | 52 | .createSwapchainKHR = true, 53 | .destroySwapchainKHR = true, 54 | .getSwapchainImagesKHR = true, 55 | .acquireNextImageKHR = true, 56 | 57 | .createSemaphore = true, 58 | .destroySemaphore = true, 59 | 60 | .destroyImage = true, 61 | 62 | .createImageView = true, 63 | .destroyImageView = true, 64 | 65 | .createCommandPool = true, 66 | .destroyCommandPool = true, 67 | .allocateCommandBuffers = true, 68 | .freeCommandBuffers = true, 69 | 70 | 
.beginCommandBuffer = true, 71 | .endCommandBuffer = true, 72 | 73 | .cmdBeginRenderPass = true, 74 | .cmdBindPipeline = true, 75 | .cmdDraw = true, 76 | .cmdEndRenderPass = true, 77 | .cmdSetScissor = true, 78 | .cmdSetViewport = true, 79 | }); 80 | 81 | pub var bd: BaseDispatch = undefined; 82 | var lib: ?std.DynLib = null; 83 | 84 | pub fn init() !void { 85 | if (lib == null) { 86 | const lib_name = switch (@import("builtin").target.os.tag) { 87 | .windows => "vulkan-1.dll", 88 | .macos => "libvulkan.1.dylib", 89 | .openbsd, .netbsd => "libvulkan.so", 90 | else => "libvulkan.so.1", 91 | }; 92 | 93 | lib = try std.DynLib.open(lib_name); 94 | errdefer { 95 | lib.?.close(); 96 | lib = null; 97 | } 98 | 99 | bd = try BaseDispatch.load(getBootstrapProcAddress); 100 | } 101 | } 102 | 103 | fn getBootstrapProcAddress(_: vk.Instance, name_ptr: [*:0]const u8) vk.PfnVoidFunction { 104 | var name = std.mem.span(name_ptr); 105 | return lib.?.lookup(vk.PfnVoidFunction, name) orelse null; 106 | } 107 | 108 | pub fn hasLayer(name: []const u8) bool { 109 | var layers: [128]vk.LayerProperties = undefined; 110 | var layer_count: u32 = layers.len; 111 | _ = bd.enumerateInstanceLayerProperties(&layer_count, &layers) catch return false; 112 | 113 | for (layers[0..layer_count]) |supported| { 114 | if (std.mem.eql(u8, name, std.mem.sliceTo(&supported.layer_name, 0))) { 115 | return true; 116 | } 117 | } 118 | return false; 119 | } 120 | 121 | pub const WindowingSystem = enum { 122 | win32, 123 | xlib, 124 | // TODO: wayland 125 | // TODO: macos 126 | }; 127 | // TODO: support multiple windowing systems simultaneously (eg. xlib and wayland) 128 | pub const windowing_system: WindowingSystem = switch (builtin.target.os.tag) { 129 | .windows => .win32, 130 | .macos => @compileError("TODO: macos"), 131 | else => .xlib, 132 | }; 133 | 134 | pub const enable_validation = builtin.mode == .Debug; 135 | -------------------------------------------------------------------------------- /src/SwapChain.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const SwapChain = @This(); 9 | 10 | manager: helper.Manager(SwapChain) = .{}, 11 | swapchain: vk.SwapchainKHR, 12 | current_tex: u32 = undefined, 13 | textures: []internal.Texture, 14 | extent: vk.Extent2D, 15 | format: gpu.Texture.Format, 16 | device: *internal.Device, 17 | 18 | pub fn init(device: *internal.Device, surface: *internal.Surface, descriptor: *const gpu.SwapChain.Descriptor) !SwapChain { 19 | const capabilities = try device.adapter.instance.dispatch.getPhysicalDeviceSurfaceCapabilitiesKHR( 20 | device.adapter.info.phys_dev, 21 | surface.surface, 22 | ); 23 | 24 | const extent = vk.Extent2D{ 25 | .width = descriptor.width, 26 | .height = descriptor.height, 27 | }; 28 | 29 | const swapchain = try device.dispatch.createSwapchainKHR(device.device, &.{ 30 | .flags = .{}, 31 | .surface = surface.surface, 32 | .min_image_count = @max(2, capabilities.min_image_count), 33 | .image_format = helper.vulkanTextureFormat(descriptor.format), 34 | .image_color_space = .srgb_nonlinear_khr, 35 | .image_extent = extent, 36 | .image_array_layers = 1, 37 | .image_usage = .{ 38 | .transfer_src_bit = descriptor.usage.copy_src, 39 | .transfer_dst_bit = descriptor.usage.copy_dst, 40 | .sampled_bit = descriptor.usage.texture_binding, 41 | .storage_bit = 
descriptor.usage.storage_binding, 42 | .color_attachment_bit = descriptor.usage.render_attachment, 43 | }, 44 | .image_sharing_mode = .exclusive, // FIXME: this is probably wrong but I'm lazy 45 | .queue_family_index_count = 0, 46 | .p_queue_family_indices = undefined, 47 | .pre_transform = .{ .identity_bit_khr = true }, 48 | .composite_alpha = .{ .inherit_bit_khr = true }, // TODO: extension to control this 49 | .present_mode = switch (descriptor.present_mode) { 50 | .immediate => vk.PresentModeKHR.immediate_khr, 51 | .mailbox => .mailbox_khr, 52 | .fifo => .fifo_khr, 53 | }, 54 | .clipped = vk.FALSE, // TODO: check if WebGPU actually requires this 55 | .old_swapchain = .null_handle, 56 | }, null); 57 | errdefer device.dispatch.destroySwapchainKHR(device.device, swapchain, null); 58 | 59 | // Get swapchain images 60 | var image_count: u32 = undefined; 61 | _ = try device.dispatch.getSwapchainImagesKHR(device.device, swapchain, &image_count, null); 62 | 63 | const textures = try device.allocator().alloc(internal.Texture, image_count); 64 | errdefer device.allocator().free(textures); 65 | 66 | const images = try device.allocator().alloc(vk.Image, image_count); 67 | defer device.allocator().free(images); 68 | _ = try device.dispatch.getSwapchainImagesKHR(device.device, swapchain, &image_count, images.ptr); 69 | 70 | for (images) |image, i| { 71 | textures[i] = .{ 72 | .image = image, 73 | .extent = extent, 74 | .samples = 1, 75 | .device = device, 76 | }; 77 | } 78 | 79 | return .{ 80 | .swapchain = swapchain, 81 | .textures = textures, 82 | .format = descriptor.format, 83 | .extent = extent, 84 | .device = device, 85 | }; 86 | } 87 | 88 | pub fn deinit(self: *SwapChain) void { 89 | self.device.dispatch.destroySwapchainKHR(self.device.device, self.swapchain, null); 90 | self.device.allocator().free(self.textures); 91 | self.device.allocator().destroy(self); 92 | } 93 | 94 | pub fn getCurrentTextureView(self: *SwapChain) !*internal.TextureView { 95 | // TODO: reuse semaphore? 
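// Note: a fresh semaphore is created for every acquire and destroyed as soon as
// acquireNextImageKHR returns. Nothing waits on it yet (Queue.submit and present both
// pass zero wait semaphores), so image acquisition is not ordered against later
// rendering; see the sequencing TODOs in Queue.submit.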
96 | const semaphore = try self.device.dispatch.createSemaphore(self.device.device, &.{ .flags = .{} }, null); 97 | defer self.device.dispatch.destroySemaphore(self.device.device, semaphore, null); 98 | 99 | const result = try self.device.dispatch.acquireNextImageKHR( 100 | self.device.device, 101 | self.swapchain, 102 | std.math.maxInt(u64), 103 | semaphore, 104 | .null_handle, 105 | ); 106 | switch (result.result) { 107 | .success => {}, 108 | .suboptimal_khr => {}, 109 | .not_ready => return error.NotReady, 110 | .timeout => unreachable, // infinite timeout 111 | else => unreachable, 112 | } 113 | self.current_tex = result.image_index; 114 | 115 | return self.textures[self.current_tex].createView(&.{ 116 | .format = self.format, 117 | .dimension = .dimension_2d, 118 | }); 119 | } 120 | 121 | pub fn present(self: *SwapChain) !void { 122 | try self.device.queue.waitUncapped(); 123 | 124 | _ = try self.device.dispatch.queuePresentKHR(self.device.queue.graphics, &.{ 125 | .wait_semaphore_count = 0, 126 | .p_wait_semaphores = undefined, 127 | .swapchain_count = 1, 128 | .p_swapchains = &[_]vk.SwapchainKHR{self.swapchain}, 129 | .p_image_indices = &[_]u32{self.current_tex}, 130 | .p_results = null, 131 | }); 132 | self.current_tex = undefined; 133 | } 134 | -------------------------------------------------------------------------------- /src/Device.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const Device = @This(); 9 | 10 | manager: helper.Manager(Device) = .{}, 11 | 12 | dispatch: vk.DeviceDispatch, 13 | device: vk.Device, 14 | 15 | queue: internal.Queue, 16 | pool: vk.CommandPool, 17 | 18 | adapter: *internal.Adapter, 19 | 20 | pub fn init(adapter: *internal.Adapter, descriptor: *const gpu.Device.Descriptor) !Device { 21 | const queue_infos: []const vk.DeviceQueueCreateInfo = if (adapter.info.graphics_family == adapter.info.compute_family) 22 | &.{.{ 23 | .flags = .{}, 24 | .queue_family_index = adapter.info.graphics_family, 25 | .queue_count = 1, 26 | .p_queue_priorities = &[1]f32{1.0}, 27 | }} 28 | else 29 | &.{ .{ 30 | .flags = .{}, 31 | .queue_family_index = adapter.info.graphics_family, 32 | .queue_count = 1, 33 | .p_queue_priorities = &[1]f32{1.0}, 34 | }, .{ 35 | .flags = .{}, 36 | .queue_family_index = adapter.info.compute_family, 37 | .queue_count = 1, 38 | .p_queue_priorities = &[1]f32{1.0}, 39 | } }; 40 | 41 | const exts: []const [*:0]const u8 = &.{ 42 | vk.extension_info.khr_swapchain.name, 43 | }; 44 | 45 | // TODO: debug label 46 | _ = descriptor; 47 | const device = try adapter.instance.dispatch.createDevice(adapter.info.phys_dev, &.{ 48 | .flags = .{}, 49 | .queue_create_info_count = @intCast(u32, queue_infos.len), 50 | .p_queue_create_infos = queue_infos.ptr, 51 | .enabled_layer_count = 0, 52 | .pp_enabled_layer_names = undefined, 53 | .enabled_extension_count = @intCast(u32, exts.len), 54 | .pp_enabled_extension_names = exts.ptr, 55 | .p_enabled_features = null, // TODO 56 | }, null); 57 | const dispatch = vk.DeviceDispatch.load( 58 | device, 59 | adapter.instance.dispatch.dispatch.vkGetDeviceProcAddr, 60 | ) catch |err| { 61 | // If the DeviceDispatch fails to load, we can't destroy the device 62 | std.log.warn("leaked vulkan device due to error", .{}); 63 | return err; 64 | }; 65 | errdefer dispatch.destroyDevice(device, null); 66 | 67 
| const pool = try dispatch.createCommandPool(device, &.{ 68 | .flags = .{}, 69 | .queue_family_index = adapter.info.graphics_family, 70 | }, null); 71 | errdefer dispatch.destroyCommandPool(device, pool, null); 72 | 73 | return .{ 74 | .dispatch = dispatch, 75 | .device = device, 76 | .queue = try internal.Queue.init(dispatch, device, adapter.info), 77 | .pool = pool, 78 | .adapter = adapter, 79 | }; 80 | } 81 | 82 | pub fn deinit(self: *Device) void { 83 | self.queue.deinit(); 84 | self.dispatch.destroyCommandPool(self.device, self.pool, null); 85 | self.dispatch.destroyDevice(self.device, null); 86 | self.allocator().destroy(self); 87 | } 88 | 89 | pub inline fn allocator(self: Device) std.mem.Allocator { 90 | return self.adapter.instance.allocator(); 91 | } 92 | 93 | pub fn getQueue(self: *Device) *internal.Queue { 94 | return &self.queue; 95 | } 96 | 97 | pub fn setUncapturedErrorCallback(self: *Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) void { 98 | _ = self; 99 | _ = callback; 100 | _ = userdata; 101 | std.log.warn("setUncapturedErrorCallback is not yet implemented", .{}); 102 | } 103 | 104 | pub fn createShaderModule(self: *Device, descriptor: *const gpu.ShaderModule.Descriptor) !*internal.ShaderModule { 105 | const shader = try self.allocator().create(internal.ShaderModule); 106 | errdefer self.allocator().destroy(shader); 107 | shader.* = try internal.ShaderModule.init(self, descriptor); 108 | return shader; 109 | } 110 | 111 | pub fn createRenderPipeline(self: *Device, descriptor: *const gpu.RenderPipeline.Descriptor) !*internal.RenderPipeline { 112 | const pipeline = try self.allocator().create(internal.RenderPipeline); 113 | errdefer self.allocator().destroy(pipeline); 114 | pipeline.* = try internal.RenderPipeline.init(self, descriptor); 115 | return pipeline; 116 | } 117 | 118 | pub fn createPipelineLayout(self: *Device, descriptor: *const gpu.PipelineLayout.Descriptor) !*internal.PipelineLayout { 119 | const layout = try self.allocator().create(internal.PipelineLayout); 120 | errdefer self.allocator().destroy(layout); 121 | layout.* = try internal.PipelineLayout.init(self, descriptor); 122 | return layout; 123 | } 124 | 125 | pub fn createSwapChain(self: *Device, surface: ?*internal.Surface, descriptor: *const gpu.SwapChain.Descriptor) !*internal.SwapChain { 126 | const swapchain = try self.allocator().create(internal.SwapChain); 127 | errdefer self.allocator().destroy(swapchain); 128 | swapchain.* = try internal.SwapChain.init(self, surface.?, descriptor); 129 | return swapchain; 130 | } 131 | 132 | pub fn createCommandEncoder(self: *const Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) !*internal.CommandEncoder { 133 | const encoder = try self.allocator().create(internal.CommandEncoder); 134 | errdefer self.allocator().destroy(encoder); 135 | encoder.* = try internal.CommandEncoder.init(self, descriptor); 136 | return encoder; 137 | } 138 | -------------------------------------------------------------------------------- /src/Adapter.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const Adapter = @This(); 9 | 10 | manager: helper.Manager(Adapter) = .{}, 11 | info: DeviceInfo, 12 | instance: *internal.Instance, 13 | 14 | pub const DeviceInfo = struct { 15 | phys_dev: vk.PhysicalDevice, 16 | props: 
vk.PhysicalDeviceProperties, 17 | graphics_family: u32, 18 | compute_family: u32, 19 | }; 20 | 21 | pub fn init(instance: *internal.Instance, options: *const gpu.RequestAdapterOptions) !Adapter { 22 | var devices: [128]vk.PhysicalDevice = undefined; 23 | var n_devices: u32 = devices.len; 24 | _ = try instance.dispatch.enumeratePhysicalDevices( 25 | instance.instance, 26 | &n_devices, 27 | &devices, 28 | ); 29 | 30 | var result: ?struct { 31 | rating: DeviceRating, 32 | info: DeviceInfo, 33 | } = null; 34 | for (devices[0..n_devices]) |dev| { 35 | // Check device type ranking 36 | const props = instance.dispatch.getPhysicalDeviceProperties(dev); 37 | const rating = rateDevice(options.power_preference, props); 38 | if (result != null and !rating.betterThan(result.?.rating)) { 39 | // Already found a better device 40 | continue; 41 | } 42 | 43 | // Check force fallback 44 | if (options.force_fallback_adapter and rating != .fallback) { 45 | continue; 46 | } 47 | 48 | // TODO: check compatible surface 49 | 50 | // Check queue family support 51 | var families: [128]vk.QueueFamilyProperties = undefined; 52 | var n_families: u32 = families.len; 53 | instance.dispatch.getPhysicalDeviceQueueFamilyProperties(dev, &n_families, &families); 54 | 55 | var graphics_family: ?u32 = null; 56 | var compute_family: ?u32 = null; 57 | for (families[0..n_families]) |family, i| { 58 | const idx = @intCast(u32, i); 59 | // Prefer the same family for both 60 | if (family.queue_flags.graphics_bit and family.queue_flags.compute_bit) { 61 | graphics_family = idx; 62 | compute_family = idx; 63 | break; 64 | } 65 | // Otherwise, look for individual families 66 | if (family.queue_flags.graphics_bit and graphics_family == null) { 67 | graphics_family = idx; 68 | } 69 | if (family.queue_flags.compute_bit and compute_family == null) { 70 | compute_family = idx; 71 | } 72 | // Check if we've found all the families we need 73 | if (graphics_family != null and compute_family != null) { 74 | break; 75 | } 76 | } else { 77 | continue; 78 | } 79 | 80 | // Set current best device 81 | result = .{ 82 | .rating = rating, 83 | .info = .{ 84 | .phys_dev = dev, 85 | .props = props, 86 | .graphics_family = graphics_family.?, 87 | .compute_family = compute_family.?, 88 | }, 89 | }; 90 | } 91 | if (result == null) { 92 | return error.NoAdapterFound; 93 | } 94 | 95 | return .{ 96 | .info = result.?.info, 97 | .instance = instance, 98 | }; 99 | } 100 | 101 | /// Classify devices, with reference to power preference 102 | fn rateDevice(power: gpu.PowerPreference, props: vk.PhysicalDeviceProperties) DeviceRating { 103 | return switch (props.device_type) { 104 | .integrated_gpu => switch (power) { 105 | .undefined => .good, 106 | .low_power => .good, 107 | .high_performance => .bad, 108 | }, 109 | .discrete_gpu => switch (power) { 110 | .undefined => .good, 111 | .low_power => .bad, 112 | .high_performance => .good, 113 | }, 114 | else => .fallback, 115 | }; 116 | } 117 | 118 | const DeviceRating = enum { 119 | good, 120 | bad, 121 | fallback, 122 | 123 | pub fn betterThan(a: DeviceRating, b: DeviceRating) bool { 124 | return @enumToInt(a) < @enumToInt(b); 125 | } 126 | }; 127 | 128 | pub fn deinit(self: *Adapter) void { 129 | self.instance.allocator().destroy(self); 130 | } 131 | 132 | pub fn getProperties(self: *Adapter, properties: *gpu.Adapter.Properties) void { 133 | properties.* = .{ 134 | .vendor_id = self.info.props.vendor_id, 135 | .vendor_name = "", 136 | .architecture = "", 137 | .device_id = self.info.props.device_id, 138 | .name = 
@ptrCast([*:0]const u8, &self.info.props.device_name), 139 | .driver_description = "", 140 | .adapter_type = switch (self.info.props.device_type) { 141 | .discrete_gpu => .discrete_gpu, 142 | .integrated_gpu => .integrated_gpu, 143 | .cpu => .cpu, 144 | else => .unknown, 145 | }, 146 | .backend_type = .vulkan, 147 | }; 148 | } 149 | 150 | pub fn createDevice(self: *Adapter, descriptor: ?*const gpu.Device.Descriptor) !*internal.Device { 151 | const device = try self.instance.allocator().create(internal.Device); 152 | errdefer self.instance.allocator().destroy(device); 153 | device.* = try internal.Device.init(self, descriptor orelse &gpu.Device.Descriptor{}); 154 | return device; 155 | } 156 | -------------------------------------------------------------------------------- /src/RenderPassEncoder.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const RenderPassEncoder = @This(); 9 | 10 | manager: helper.Manager(RenderPassEncoder) = .{}, 11 | 12 | fb: vk.Framebuffer = .null_handle, 13 | attachments: []const vk.ImageView, 14 | extent: vk.Extent2D, 15 | clear_values: []const vk.ClearValue, 16 | 17 | buffer: *internal.CommandBuffer, 18 | 19 | pub fn init(encoder: *internal.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) !RenderPassEncoder { 20 | const allocator = encoder.buffer.device.allocator(); 21 | const attachments = try allocator.alloc(vk.ImageView, descriptor.color_attachment_count + @boolToInt(descriptor.depth_stencil_attachment != null)); 22 | errdefer allocator.free(attachments); 23 | 24 | var extent: ?vk.Extent2D = null; 25 | var clear_value_count: usize = 0; 26 | 27 | { 28 | var i: usize = 0; 29 | while (i < descriptor.color_attachment_count) : (i += 1) { 30 | const attach = descriptor.color_attachments.?[i]; 31 | const view = helper.castOpaque(*internal.TextureView, attach.view); 32 | attachments[i] = view.view; 33 | 34 | if (attach.load_op == .clear) { 35 | clear_value_count += 1; 36 | } 37 | 38 | if (extent) |e| { 39 | std.debug.assert(std.meta.eql(e, view.texture.extent)); 40 | } else { 41 | extent = view.texture.extent; 42 | } 43 | } 44 | 45 | if (descriptor.depth_stencil_attachment) |attach| { 46 | const view = helper.castOpaque(*internal.TextureView, attach.view); 47 | attachments[i] = view.view; 48 | i += 1; 49 | 50 | if (attach.depth_load_op == .clear or attach.stencil_load_op == .clear) { 51 | clear_value_count += 1; 52 | } 53 | 54 | if (extent) |e| { 55 | std.debug.assert(std.meta.eql(e, view.texture.extent)); 56 | } else { 57 | extent = view.texture.extent; 58 | } 59 | } 60 | 61 | std.debug.assert(i == attachments.len); 62 | } 63 | 64 | const clear_values = try allocator.alloc(vk.ClearValue, clear_value_count); 65 | errdefer allocator.free(clear_values); 66 | { 67 | var i: usize = 0; 68 | var j: usize = 0; 69 | while (i < descriptor.color_attachment_count) : (i += 1) { 70 | const attach = descriptor.color_attachments.?[i]; 71 | if (attach.load_op == .clear) { 72 | const v = attach.clear_value; 73 | clear_values[j] = .{ 74 | .color = .{ 75 | // TODO: adjust for format 76 | .float_32 = .{ 77 | @floatCast(f32, v.r), 78 | @floatCast(f32, v.g), 79 | @floatCast(f32, v.b), 80 | @floatCast(f32, v.a), 81 | }, 82 | }, 83 | }; 84 | j += 1; 85 | } 86 | } 87 | 88 | if (descriptor.depth_stencil_attachment) |attach| { 89 | i += 1; 90 | 91 | if 
(attach.depth_load_op == .clear or attach.stencil_load_op == .clear) { 92 | clear_values[j] = .{ 93 | .depth_stencil = .{ 94 | .depth = attach.depth_clear_value, 95 | .stencil = attach.stencil_clear_value, 96 | }, 97 | }; 98 | j += 1; 99 | } 100 | } 101 | 102 | std.debug.assert(i == attachments.len); 103 | std.debug.assert(j == clear_values.len); 104 | } 105 | 106 | // TODO: occlusion queries 107 | // TODO: timestamp writes 108 | 109 | // TODO: catch release of encoder before pass is finished (weak ref of some kind?) 110 | 111 | return .{ 112 | .attachments = attachments, 113 | .extent = extent.?, 114 | .clear_values = clear_values, 115 | .buffer = encoder.buffer, 116 | }; 117 | } 118 | 119 | pub fn deinit(self: *RenderPassEncoder) void { 120 | const device = self.buffer.device; 121 | device.dispatch.destroyFramebuffer(device.device, self.fb, null); // TODO: cache framebuffers 122 | 123 | const allocator = device.allocator(); 124 | allocator.free(self.attachments); 125 | allocator.free(self.clear_values); 126 | allocator.destroy(self); 127 | } 128 | 129 | pub fn setPipeline(self: *RenderPassEncoder, pipeline: *internal.RenderPipeline) !void { 130 | const device = self.buffer.device; 131 | 132 | self.fb = try device.dispatch.createFramebuffer(device.device, &.{ 133 | .flags = .{}, 134 | .render_pass = pipeline.pass, 135 | .attachment_count = @intCast(u32, self.attachments.len), 136 | .p_attachments = self.attachments.ptr, 137 | .width = self.extent.width, 138 | .height = self.extent.height, 139 | .layers = 1, // TODO 140 | }, null); 141 | errdefer device.dispatch.destroyFramebuffer(device.device, self.fb, null); 142 | 143 | const buf = self.buffer.buffer; 144 | const rect = vk.Rect2D{ 145 | .offset = .{ .x = 0, .y = 0 }, 146 | .extent = self.extent, 147 | }; 148 | device.dispatch.cmdBeginRenderPass(buf, &.{ 149 | .render_pass = pipeline.pass, 150 | .framebuffer = self.fb, 151 | .render_area = rect, 152 | .clear_value_count = @intCast(u32, self.clear_values.len), 153 | .p_clear_values = self.clear_values.ptr, 154 | }, .@"inline"); 155 | 156 | try self.buffer.render_passes.append(device.allocator(), self); 157 | self.manager.reference(); // Command buffer now references this pass 158 | 159 | device.dispatch.cmdBindPipeline(buf, .graphics, pipeline.pipeline); 160 | 161 | // Set default dynamic state 162 | // TODO: may need to flip viewport (requires vk1.1?) 163 | device.dispatch.cmdSetViewport(buf, 0, 1, @as(*const [1]vk.Viewport, &vk.Viewport{ 164 | .x = 0, 165 | .y = 0, 166 | .width = @intToFloat(f32, self.extent.width), 167 | .height = @intToFloat(f32, self.extent.height), 168 | .min_depth = 0, 169 | .max_depth = 1, 170 | })); 171 | 172 | device.dispatch.cmdSetScissor(buf, 0, 1, @as(*const [1]vk.Rect2D, &rect)); 173 | } 174 | 175 | pub fn draw(self: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { 176 | const buf = self.buffer; 177 | buf.device.dispatch.cmdDraw(buf.buffer, vertex_count, instance_count, first_vertex, first_instance); 178 | } 179 | 180 | pub fn end(self: *RenderPassEncoder) void { 181 | const buf = self.buffer; 182 | buf.device.dispatch.cmdEndRenderPass(buf.buffer); 183 | } 184 | -------------------------------------------------------------------------------- /src/helper.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | const vk = @import("vk.zig"); 4 | 5 | // TODO: thread safety? 
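// Note: the counter starts at zero, which stands for the single implicit reference held
// by whoever created the object. reference() adds an extra reference and release()
// drops one; calling release() while the count is already zero is what finally runs
// deinit(). For example, Texture.createView() calls texture.manager.reference() so the
// view keeps its parent texture alive until TextureView.deinit() releases it.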
6 | pub fn Manager(comptime T: type) type { 7 | return struct { 8 | refs: u32 = 0, 9 | 10 | const Self = @This(); 11 | 12 | pub fn reference(self: *Self) void { 13 | self.refs += 1; 14 | } 15 | pub fn release(self: *Self) void { 16 | if (self.refs == 0) { 17 | const parent = @fieldParentPtr(T, "manager", self); 18 | parent.deinit(); 19 | } else { 20 | self.refs -= 1; 21 | } 22 | } 23 | }; 24 | } 25 | 26 | pub fn findChained(comptime T: type, next_in_chain: ?*const gpu.ChainedStruct) ?*const T { 27 | const search = @ptrCast(*align(1) const gpu.ChainedStruct, std.meta.fieldInfo(T, .chain).default_value.?); 28 | var chain = next_in_chain; 29 | while (chain) |c| { 30 | if (c.s_type == search.s_type) { 31 | return @ptrCast(*const T, c); 32 | } 33 | chain = c.next; 34 | } 35 | return null; 36 | } 37 | 38 | pub fn castOpaque(comptime T: type, ptr: anytype) T { 39 | comptime { 40 | var From = @TypeOf(ptr); 41 | if (std.meta.trait.is(.Optional)(From)) { 42 | From = std.meta.Child(From); 43 | } 44 | if (std.meta.trait.is(.Pointer)(From)) { 45 | From = std.meta.Child(From); 46 | } else { 47 | @compileError(@typeName(From) ++ " is not a pointer type"); 48 | } 49 | if (!std.meta.trait.is(.Opaque)(From)) { 50 | @compileError(@typeName(From) ++ " is not an opaque type"); 51 | } 52 | } 53 | const alignment = @alignOf(std.meta.Child(T)); 54 | const aligned = @alignCast(alignment, ptr); 55 | return @ptrCast(T, aligned); 56 | } 57 | 58 | pub fn vulkanTextureFormat(fmt: gpu.Texture.Format) vk.Format { 59 | return switch (fmt) { 60 | .undefined => .undefined, 61 | .r8_unorm => .r8_unorm, 62 | .r8_snorm => .r8_snorm, 63 | .r8_uint => .r8_uint, 64 | .r8_sint => .r8_sint, 65 | .r16_uint => .r16_uint, 66 | .r16_sint => .r16_sint, 67 | .r16_float => .r16_sfloat, 68 | .rg8_unorm => .r8g8_unorm, 69 | .rg8_snorm => .r8g8_snorm, 70 | .rg8_uint => .r8g8_uint, 71 | .rg8_sint => .r8g8_sint, 72 | .r32_float => .r32_sfloat, 73 | .r32_uint => .r32_uint, 74 | .r32_sint => .r32_sint, 75 | .rg16_uint => .r16g16_uint, 76 | .rg16_sint => .r16g16_sint, 77 | .rg16_float => .r16g16_sfloat, 78 | .rgba8_unorm => .r8g8b8a8_unorm, 79 | .rgba8_unorm_srgb => .r8g8b8a8_srgb, 80 | .rgba8_snorm => .r8g8b8a8_snorm, 81 | .rgba8_uint => .r8g8b8a8_uint, 82 | .rgba8_sint => .r8g8b8a8_sint, 83 | .bgra8_unorm => .b8g8r8a8_unorm, 84 | .bgra8_unorm_srgb => .b8g8r8a8_srgb, 85 | .rgb10_a2_unorm => .a2r10g10b10_unorm_pack32, 86 | .rg11_b10_ufloat => .b10g11r11_ufloat_pack32, 87 | .rgb9_e5_ufloat => .e5b9g9r9_ufloat_pack32, 88 | .rg32_float => .r32g32_sfloat, 89 | .rg32_uint => .r32g32_uint, 90 | .rg32_sint => .r32g32_sint, 91 | .rgba16_uint => .r16g16b16a16_uint, 92 | .rgba16_sint => .r16g16b16a16_sint, 93 | .rgba16_float => .r16g16b16a16_sfloat, 94 | .rgba32_float => .r32g32b32a32_sfloat, 95 | .rgba32_uint => .r32g32b32a32_uint, 96 | .rgba32_sint => .r32g32b32a32_sint, 97 | .stencil8 => .s8_uint, 98 | .depth16_unorm => .d16_unorm, 99 | .depth24_plus => .x8_d24_unorm_pack32, 100 | .depth24_plus_stencil8 => .d24_unorm_s8_uint, 101 | .depth32_float => .d32_sfloat, 102 | .depth32_float_stencil8 => .d32_sfloat_s8_uint, 103 | .bc1_rgba_unorm => .bc1_rgba_unorm_block, 104 | .bc1_rgba_unorm_srgb => .bc1_rgba_srgb_block, 105 | .bc2_rgba_unorm => .bc2_unorm_block, 106 | .bc2_rgba_unorm_srgb => .bc2_srgb_block, 107 | .bc3_rgba_unorm => .bc3_unorm_block, 108 | .bc3_rgba_unorm_srgb => .bc3_srgb_block, 109 | .bc4_runorm => .bc4_unorm_block, 110 | .bc4_rsnorm => .bc4_snorm_block, 111 | .bc5_rg_unorm => .bc5_unorm_block, 112 | .bc5_rg_snorm => .bc5_snorm_block, 113 | 
.bc6_hrgb_ufloat => .bc6h_ufloat_block, 114 | .bc6_hrgb_float => .bc6h_sfloat_block, 115 | .bc7_rgba_unorm => .bc7_unorm_block, 116 | .bc7_rgba_unorm_srgb => .bc7_srgb_block, 117 | .etc2_rgb8_unorm => .etc2_r8g8b8_unorm_block, 118 | .etc2_rgb8_unorm_srgb => .etc2_r8g8b8_srgb_block, 119 | .etc2_rgb8_a1_unorm => .etc2_r8g8b8a1_unorm_block, 120 | .etc2_rgb8_a1_unorm_srgb => .etc2_r8g8b8a1_srgb_block, 121 | .etc2_rgba8_unorm => .etc2_r8g8b8a8_unorm_block, 122 | .etc2_rgba8_unorm_srgb => .etc2_r8g8b8a8_srgb_block, 123 | .eacr11_unorm => .eac_r11_unorm_block, 124 | .eacr11_snorm => .eac_r11_snorm_block, 125 | .eacrg11_unorm => .eac_r11g11_unorm_block, 126 | .eacrg11_snorm => .eac_r11g11_snorm_block, 127 | .astc4x4_unorm => .astc_4x_4_unorm_block, 128 | .astc4x4_unorm_srgb => .astc_4x_4_srgb_block, 129 | .astc5x4_unorm => .astc_5x_4_unorm_block, 130 | .astc5x4_unorm_srgb => .astc_5x_4_srgb_block, 131 | .astc5x5_unorm => .astc_5x_5_unorm_block, 132 | .astc5x5_unorm_srgb => .astc_5x_5_srgb_block, 133 | .astc6x5_unorm => .astc_6x_5_unorm_block, 134 | .astc6x5_unorm_srgb => .astc_6x_5_srgb_block, 135 | .astc6x6_unorm => .astc_6x_6_unorm_block, 136 | .astc6x6_unorm_srgb => .astc_6x_6_srgb_block, 137 | .astc8x5_unorm => .astc_8x_5_unorm_block, 138 | .astc8x5_unorm_srgb => .astc_8x_5_srgb_block, 139 | .astc8x6_unorm => .astc_8x_6_unorm_block, 140 | .astc8x6_unorm_srgb => .astc_8x_6_srgb_block, 141 | .astc8x8_unorm => .astc_8x_8_unorm_block, 142 | .astc8x8_unorm_srgb => .astc_8x_8_srgb_block, 143 | .astc10x5_unorm => .astc_1_0x_5_unorm_block, 144 | .astc10x5_unorm_srgb => .astc_1_0x_5_srgb_block, 145 | .astc10x6_unorm => .astc_1_0x_6_unorm_block, 146 | .astc10x6_unorm_srgb => .astc_1_0x_6_srgb_block, 147 | .astc10x8_unorm => .astc_1_0x_8_unorm_block, 148 | .astc10x8_unorm_srgb => .astc_1_0x_8_srgb_block, 149 | .astc10x10_unorm => .astc_1_0x_10_unorm_block, 150 | .astc10x10_unorm_srgb => .astc_1_0x_10_srgb_block, 151 | .astc12x10_unorm => .astc_1_2x_10_unorm_block, 152 | .astc12x10_unorm_srgb => .astc_1_2x_10_srgb_block, 153 | .astc12x12_unorm => .astc_1_2x_12_unorm_block, 154 | .astc12x12_unorm_srgb => .astc_1_2x_12_srgb_block, 155 | .r8_bg8_biplanar420_unorm => .g8_b8r8_2plane_420_unorm, 156 | }; 157 | } 158 | 159 | pub fn vulkanVertexFormat(format: gpu.VertexFormat) vk.Format { 160 | return switch (format) { 161 | .undefined => .undefined, 162 | 163 | .uint8x2 => .r8g8_uint, 164 | .uint8x4 => .r8g8b8a8_uint, 165 | .sint8x2 => .r8g8_sint, 166 | .sint8x4 => .r8g8b8a8_sint, 167 | .unorm8x2 => .r8g8_unorm, 168 | .unorm8x4 => .r8g8b8a8_unorm, 169 | .snorm8x2 => .r8g8_snorm, 170 | .snorm8x4 => .r8g8b8a8_snorm, 171 | 172 | .uint16x2 => .r16g16_uint, 173 | .uint16x4 => .r16g16b16a16_uint, 174 | .sint16x2 => .r16g16_sint, 175 | .sint16x4 => .r16g16b16a16_sint, 176 | .unorm16x2 => .r16g16_unorm, 177 | .unorm16x4 => .r16g16b16a16_unorm, 178 | .snorm16x2 => .r16g16_snorm, 179 | .snorm16x4 => .r16g16b16a16_snorm, 180 | 181 | .float16x2 => .r16g16_sfloat, 182 | .float16x4 => .r16g16b16a16_sfloat, 183 | 184 | .float32 => .r32_sfloat, 185 | .float32x2 => .r32g32_sfloat, 186 | .float32x3 => .r32g32b32_sfloat, 187 | .float32x4 => .r32g32b32a32_sfloat, 188 | 189 | .uint32 => .r32_uint, 190 | .uint32x2 => .r32g32_uint, 191 | .uint32x3 => .r32g32b32_uint, 192 | .uint32x4 => .r32g32b32a32_uint, 193 | .sint32 => .r32_sint, 194 | .sint32x2 => .r32g32_sint, 195 | .sint32x3 => .r32g32b32_sint, 196 | .sint32x4 => .r32g32b32a32_sint, 197 | }; 198 | } 199 | 200 | pub fn vulkanCompareOp(op: gpu.CompareFunction) vk.CompareOp { 201 |
return switch (op) { 202 | .undefined => unreachable, 203 | .never => vk.CompareOp.never, 204 | .less => .less, 205 | .less_equal => .less_or_equal, 206 | .greater => .greater, 207 | .greater_equal => .greater_or_equal, 208 | .equal => .equal, 209 | .not_equal => .not_equal, 210 | .always => .always, 211 | }; 212 | } 213 | 214 | pub fn vulkanStencilOp(op: gpu.StencilOperation) vk.StencilOp { 215 | return switch (op) { 216 | .keep => .keep, 217 | .zero => .zero, 218 | .replace => .replace, 219 | .invert => .invert, 220 | .increment_clamp => .increment_and_clamp, 221 | .decrement_clamp => .decrement_and_clamp, 222 | .increment_wrap => .increment_and_wrap, 223 | .decrement_wrap => .decrement_and_wrap, 224 | }; 225 | } 226 | 227 | pub fn vulkanBlendFactor(fac: gpu.BlendFactor) vk.BlendFactor { 228 | return switch (fac) { 229 | .zero => .zero, 230 | .one => .one, 231 | .src => .src_color, 232 | .one_minus_src => .one_minus_src_color, 233 | .src_alpha => .src_alpha, 234 | .one_minus_src_alpha => .one_minus_src_alpha, 235 | .dst => .dst_color, 236 | .one_minus_dst => .one_minus_dst_color, 237 | .dst_alpha => .dst_alpha, 238 | .one_minus_dst_alpha => .one_minus_dst_alpha, 239 | .src_alpha_saturated => .src_alpha_saturate, 240 | .constant => .constant_color, 241 | .one_minus_constant => .one_minus_constant_color, 242 | }; 243 | } 244 | pub fn vulkanBlendOp(op: gpu.BlendOperation) vk.BlendOp { 245 | return switch (op) { 246 | .add => .add, 247 | .subtract => .subtract, 248 | .reverse_subtract => .reverse_subtract, 249 | .min => .min, 250 | .max => .max, 251 | }; 252 | } 253 | 254 | pub fn vulkanSampleCountFlags(samples: u32) vk.SampleCountFlags { 255 | // TODO: Snektron/vulkan-zig#27 256 | return switch (samples) { 257 | 1 => .{ .@"1_bit" = true }, 258 | 2 => .{ .@"2_bit" = true }, 259 | 4 => .{ .@"4_bit" = true }, 260 | 8 => .{ .@"8_bit" = true }, 261 | 16 => .{ .@"16_bit" = true }, 262 | 32 => .{ .@"32_bit" = true }, 263 | else => unreachable, 264 | }; 265 | } 266 | -------------------------------------------------------------------------------- /src/RenderPipeline.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const gpu = @import("gpu"); 3 | 4 | const internal = @import("internal.zig"); 5 | const helper = @import("helper.zig"); 6 | const vk = @import("vk.zig"); 7 | 8 | const RenderPipeline = @This(); 9 | 10 | manager: helper.Manager(RenderPipeline) = .{}, 11 | pass: vk.RenderPass, 12 | pipeline: vk.Pipeline, 13 | device: *internal.Device, 14 | 15 | pub fn init(device: *internal.Device, descriptor: *const gpu.RenderPipeline.Descriptor) !RenderPipeline { 16 | const allocator = device.allocator(); 17 | 18 | const multisample_count_vk = helper.vulkanSampleCountFlags(descriptor.multisample.count); 19 | 20 | // Create render pass 21 | // TODO: may be best to defer vulkan render pass (and hence pipeline) creation until it's 22 | // used with a render pass? Would require recreating the pipeline if it's used again 23 | // with an incompatible render pass. 
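// One Vulkan attachment description is allocated per color target in the fragment
// state, with a matching attachment reference feeding the single subpass created below.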
24 | var attachments: []vk.AttachmentDescription = &.{}; 25 | defer allocator.free(attachments); 26 | var color_attachments: []vk.AttachmentReference = &.{}; 27 | defer allocator.free(color_attachments); 28 | 29 | if (descriptor.depth_stencil != null) { 30 | @panic("TODO: depth_stencil"); 31 | } 32 | if (descriptor.fragment) |frag| { 33 | attachments = try allocator.alloc(vk.AttachmentDescription, frag.target_count); 34 | for (attachments) |*a, i| { 35 | const target = frag.targets.?[i]; 36 | a.* = .{ 37 | .flags = .{ .may_alias_bit = true }, // TODO: do we actually need this? Check spec 38 | .format = helper.vulkanTextureFormat(target.format), 39 | .samples = multisample_count_vk, 40 | .load_op = .clear, // TODO 41 | .store_op = .store, // TODO 42 | .stencil_load_op = .dont_care, 43 | .stencil_store_op = .dont_care, 44 | .initial_layout = .undefined, // TODO 45 | .final_layout = .present_src_khr, // TODO 46 | }; 47 | } 48 | 49 | color_attachments = try allocator.alloc(vk.AttachmentReference, frag.target_count); 50 | for (color_attachments) |*a, i| { 51 | a.* = .{ 52 | .attachment = @intCast(u32, i), 53 | .layout = .general, 54 | }; 55 | } 56 | } 57 | 58 | const pass = try device.dispatch.createRenderPass(device.device, &.{ 59 | .flags = .{}, 60 | .attachment_count = @intCast(u32, attachments.len), 61 | .p_attachments = attachments.ptr, 62 | .subpass_count = 1, 63 | .p_subpasses = &[1]vk.SubpassDescription{.{ 64 | .flags = .{}, 65 | .pipeline_bind_point = .graphics, 66 | .input_attachment_count = 0, 67 | .p_input_attachments = undefined, 68 | .color_attachment_count = @intCast(u32, color_attachments.len), 69 | .p_color_attachments = color_attachments.ptr, 70 | .p_resolve_attachments = null, // TODO 71 | .p_depth_stencil_attachment = null, // TODO 72 | .preserve_attachment_count = 0, // TODO 73 | .p_preserve_attachments = undefined, 74 | }}, 75 | .dependency_count = 0, 76 | .p_dependencies = undefined, 77 | }, null); 78 | 79 | // Configure blend attachments 80 | var blend_attachments = try allocator.alloc(vk.PipelineColorBlendAttachmentState, attachments.len); 81 | defer allocator.free(blend_attachments); 82 | for (blend_attachments) |*vk_blend, i| { 83 | const target = descriptor.fragment.?.targets.?[i]; 84 | if (target.blend) |blend| { 85 | vk_blend.* = .{ 86 | .blend_enable = vk.TRUE, 87 | .src_color_blend_factor = helper.vulkanBlendFactor(blend.color.src_factor), 88 | .dst_color_blend_factor = helper.vulkanBlendFactor(blend.color.dst_factor), 89 | .color_blend_op = helper.vulkanBlendOp(blend.color.operation), 90 | .src_alpha_blend_factor = helper.vulkanBlendFactor(blend.alpha.src_factor), 91 | .dst_alpha_blend_factor = helper.vulkanBlendFactor(blend.alpha.dst_factor), 92 | .alpha_blend_op = helper.vulkanBlendOp(blend.alpha.operation), 93 | .color_write_mask = .{ 94 | .r_bit = target.write_mask.red, 95 | .g_bit = target.write_mask.green, 96 | .b_bit = target.write_mask.blue, 97 | .a_bit = target.write_mask.alpha, 98 | }, 99 | }; 100 | } else { 101 | vk_blend.blend_enable = vk.FALSE; 102 | } 103 | } 104 | 105 | // Create shader stages 106 | var stages: [2]vk.PipelineShaderStageCreateInfo = undefined; 107 | var stage_count: u32 = 1; 108 | 109 | // TODO: specialization constants 110 | const vertex_shader = helper.castOpaque(*internal.ShaderModule, descriptor.vertex.module); 111 | stages[0] = .{ 112 | .flags = .{}, 113 | .stage = .{ .vertex_bit = true }, 114 | .module = vertex_shader.shader, 115 | .p_name = descriptor.vertex.entry_point, 116 | .p_specialization_info = null, 117 | }; 118 | 
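// stages[0] always holds the vertex stage; a fragment stage is appended only when
// the descriptor provides one, which is what stage_count tracks.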
119 | if (descriptor.fragment) |frag| { 120 | stage_count += 1; 121 | const frag_shader = helper.castOpaque(*internal.ShaderModule, frag.module); 122 | stages[1] = .{ 123 | .flags = .{}, 124 | .stage = .{ .fragment_bit = true }, 125 | .module = frag_shader.shader, 126 | .p_name = frag.entry_point, 127 | .p_specialization_info = null, 128 | }; 129 | } 130 | 131 | // Configure vertex stage 132 | const vbinds = try allocator.alloc(vk.VertexInputBindingDescription, descriptor.vertex.buffer_count); 133 | defer allocator.free(vbinds); 134 | var vattrs = std.ArrayList(vk.VertexInputAttributeDescription).init(allocator); 135 | defer vattrs.deinit(); 136 | for (vbinds) |*bind, i| { 137 | const bind_idx = @intCast(u32, i); 138 | const buf = descriptor.vertex.buffers.?[i]; 139 | bind.* = .{ 140 | .binding = bind_idx, 141 | .stride = @intCast(u32, buf.array_stride), 142 | .input_rate = switch (buf.step_mode) { 143 | .vertex => .vertex, 144 | .instance => .instance, 145 | .vertex_buffer_not_used => undefined, 146 | }, 147 | }; 148 | 149 | var j: usize = 0; 150 | while (j < buf.attribute_count) : (j += 1) { 151 | const attr = buf.attributes.?[j]; 152 | try vattrs.append(.{ 153 | .location = attr.shader_location, 154 | .binding = bind_idx, 155 | .format = helper.vulkanVertexFormat(attr.format), 156 | .offset = @intCast(u32, attr.offset), 157 | }); 158 | } 159 | } 160 | 161 | var pipelines: [1]vk.Pipeline = undefined; 162 | _ = try device.dispatch.createGraphicsPipelines(device.device, .null_handle, 1, &[_]vk.GraphicsPipelineCreateInfo{.{ 163 | .flags = .{}, 164 | .stage_count = stage_count, 165 | .p_stages = &stages, 166 | 167 | .p_vertex_input_state = &.{ 168 | .flags = .{}, 169 | .vertex_binding_description_count = @intCast(u32, vbinds.len), 170 | .p_vertex_binding_descriptions = vbinds.ptr, 171 | .vertex_attribute_description_count = @intCast(u32, vattrs.items.len), 172 | .p_vertex_attribute_descriptions = vattrs.items.ptr, 173 | }, 174 | 175 | .p_input_assembly_state = &.{ 176 | .flags = .{}, 177 | .topology = switch (descriptor.primitive.topology) { 178 | .point_list => vk.PrimitiveTopology.point_list, 179 | .line_list => .line_list, 180 | .line_strip => .line_strip, 181 | .triangle_list => .triangle_list, 182 | .triangle_strip => .triangle_strip, 183 | }, 184 | .primitive_restart_enable = vk.FALSE, // TODO 185 | }, 186 | 187 | .p_tessellation_state = null, 188 | 189 | .p_viewport_state = &.{ 190 | .flags = .{}, 191 | .viewport_count = 1, 192 | .p_viewports = undefined, // Dynamic 193 | .scissor_count = 1, 194 | .p_scissors = undefined, // Dynamic 195 | }, 196 | 197 | .p_rasterization_state = &.{ 198 | .flags = .{}, 199 | .depth_clamp_enable = vk.FALSE, 200 | .rasterizer_discard_enable = vk.FALSE, 201 | .polygon_mode = .fill, // TODO: Having a "wireframe" extension might be nice for debugging 202 | .cull_mode = .{ 203 | .front_bit = descriptor.primitive.cull_mode == .front, 204 | .back_bit = descriptor.primitive.cull_mode == .back, 205 | }, 206 | .front_face = switch (descriptor.primitive.front_face) { 207 | .ccw => vk.FrontFace.counter_clockwise, 208 | .cw => .clockwise, 209 | }, 210 | 211 | // TODO: I'm mostly guessing on how this works. 
The WebGPU spec doesn't really cover it 212 | // TODO: this code is ugly as fuck, think about how to clean it up 213 | .depth_bias_enable = if (descriptor.depth_stencil) |ds| 214 | @boolToInt(ds.depth_bias != 0 or ds.depth_bias_slope_scale != 0) 215 | else 216 | vk.FALSE, 217 | .depth_bias_constant_factor = if (descriptor.depth_stencil) |ds| 218 | @intToFloat(f32, ds.depth_bias) 219 | else 220 | undefined, 221 | .depth_bias_clamp = if (descriptor.depth_stencil) |ds| 222 | ds.depth_bias_clamp 223 | else 224 | 0.0, 225 | .depth_bias_slope_factor = if (descriptor.depth_stencil) |ds| 226 | ds.depth_bias_slope_scale 227 | else 228 | undefined, 229 | 230 | .line_width = 1, 231 | }, 232 | 233 | .p_multisample_state = &.{ 234 | .flags = .{}, 235 | .rasterization_samples = multisample_count_vk, 236 | .sample_shading_enable = vk.FALSE, 237 | .min_sample_shading = undefined, 238 | .p_sample_mask = &[1]u32{descriptor.multisample.mask}, 239 | .alpha_to_coverage_enable = @boolToInt(descriptor.multisample.alpha_to_coverage_enabled), 240 | .alpha_to_one_enable = vk.FALSE, 241 | }, 242 | 243 | .p_depth_stencil_state = if (descriptor.depth_stencil) |ds| &.{ 244 | .flags = .{}, 245 | .depth_test_enable = vk.TRUE, 246 | .depth_write_enable = @boolToInt(ds.depth_write_enabled), 247 | .depth_compare_op = helper.vulkanCompareOp(ds.depth_compare), 248 | .depth_bounds_test_enable = vk.FALSE, 249 | .stencil_test_enable = @boolToInt(ds.stencil_read_mask != 0 or ds.stencil_write_mask != 0), 250 | .front = .{ 251 | .fail_op = helper.vulkanStencilOp(ds.stencil_front.fail_op), 252 | .depth_fail_op = helper.vulkanStencilOp(ds.stencil_front.depth_fail_op), 253 | .pass_op = helper.vulkanStencilOp(ds.stencil_front.pass_op), 254 | .compare_op = helper.vulkanCompareOp(ds.stencil_front.compare), 255 | .compare_mask = ds.stencil_read_mask, 256 | .write_mask = ds.stencil_write_mask, 257 | .reference = 0, 258 | }, 259 | .back = .{ 260 | .fail_op = helper.vulkanStencilOp(ds.stencil_back.fail_op), 261 | .depth_fail_op = helper.vulkanStencilOp(ds.stencil_back.depth_fail_op), 262 | .pass_op = helper.vulkanStencilOp(ds.stencil_back.pass_op), 263 | .compare_op = helper.vulkanCompareOp(ds.stencil_back.compare), 264 | .compare_mask = ds.stencil_read_mask, 265 | .write_mask = ds.stencil_write_mask, 266 | .reference = 0, 267 | }, 268 | .min_depth_bounds = undefined, 269 | .max_depth_bounds = undefined, 270 | } else &.{ 271 | .flags = .{}, 272 | .depth_test_enable = vk.FALSE, 273 | .depth_write_enable = undefined, 274 | .depth_compare_op = undefined, 275 | .depth_bounds_test_enable = vk.FALSE, 276 | .stencil_test_enable = vk.FALSE, 277 | .front = undefined, 278 | .back = undefined, 279 | .min_depth_bounds = undefined, 280 | .max_depth_bounds = undefined, 281 | }, 282 | 283 | .p_color_blend_state = &.{ 284 | .flags = .{}, 285 | .logic_op_enable = vk.FALSE, 286 | .logic_op = undefined, 287 | .attachment_count = @intCast(u32, blend_attachments.len), 288 | .p_attachments = blend_attachments.ptr, 289 | .blend_constants = .{ 0, 0, 0, 0 }, 290 | }, 291 | 292 | .p_dynamic_state = &.{ 293 | .flags = .{}, 294 | .dynamic_state_count = vk_dynamic_states.len, 295 | .p_dynamic_states = &vk_dynamic_states, 296 | }, 297 | 298 | // TODO: auto pipeline layout 299 | .layout = helper.castOpaque(*internal.PipelineLayout, descriptor.layout.?).layout, 300 | .render_pass = pass, 301 | .subpass = 0, 302 | 303 | .base_pipeline_handle = .null_handle, 304 | .base_pipeline_index = -1, 305 | }}, null, &pipelines); 306 | 307 | return .{ 308 | .pass = pass, 309 | 
.pipeline = pipelines[0], 310 | .device = device, 311 | }; 312 | } 313 | const vk_dynamic_states = [_]vk.DynamicState{ 314 | .viewport, .scissor, .stencil_reference, .blend_constants, 315 | // TODO: lots more 316 | }; 317 | 318 | pub fn deinit(self: *RenderPipeline) void { 319 | self.device.dispatch.destroyRenderPass(self.device.device, self.pass, null); 320 | self.device.dispatch.destroyPipeline(self.device.device, self.pipeline, null); 321 | self.device.allocator().destroy(self); 322 | } 323 | -------------------------------------------------------------------------------- /src/Interface.zig: -------------------------------------------------------------------------------- 1 | //! Implementation of mach-gpu GPUInterface 2 | 3 | const std = @import("std"); 4 | const gpu = @import("gpu"); 5 | const vk = @import("vk.zig"); 6 | const internal = @import("internal.zig"); 7 | const castOpaque = @import("helper.zig").castOpaque; 8 | 9 | pub fn init() void { 10 | vk.init() catch |err| { 11 | std.debug.panic("Failed to initialize Vulkan: {s}", .{@errorName(err)}); 12 | }; 13 | } 14 | 15 | pub inline fn createInstance(descriptor: ?*const gpu.Instance.Descriptor) ?*gpu.Instance { 16 | return @ptrCast( 17 | ?*gpu.Instance, 18 | internal.Instance.create(descriptor) catch return null, 19 | ); 20 | } 21 | 22 | pub inline fn getProcAddress(device: *gpu.Device, proc_name: [*:0]const u8) ?gpu.Proc { 23 | _ = device; 24 | _ = proc_name; 25 | @panic("TODO: implement getProcAddress"); 26 | } 27 | 28 | pub inline fn adapterCreateDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor) ?*gpu.Device { 29 | return @ptrCast( 30 | ?*gpu.Device, 31 | castOpaque(*internal.Adapter, adapter).createDevice(descriptor) catch return null, 32 | ); 33 | } 34 | 35 | pub inline fn adapterEnumerateFeatures(adapter: *gpu.Adapter, features: ?[*]gpu.FeatureName) usize { 36 | return castOpaque(*internal.Adapter, adapter).enumerateFeatures(features); 37 | } 38 | 39 | pub inline fn adapterGetLimits(adapter: *gpu.Adapter, limits: *gpu.SupportedLimits) bool { 40 | return castOpaque(*internal.Adapter, adapter).getLimits(limits); 41 | } 42 | 43 | pub inline fn adapterGetProperties(adapter: *gpu.Adapter, properties: *gpu.Adapter.Properties) void { 44 | return castOpaque(*internal.Adapter, adapter).getProperties(properties); 45 | } 46 | 47 | pub inline fn adapterHasFeature(adapter: *gpu.Adapter, feature: gpu.FeatureName) bool { 48 | return castOpaque(*internal.Adapter, adapter).hasFeature(feature); 49 | } 50 | 51 | pub inline fn adapterRequestDevice(adapter: *gpu.Adapter, descriptor: ?*const gpu.Device.Descriptor, callback: gpu.RequestDeviceCallback, userdata: ?*anyopaque) void { 52 | return castOpaque(*internal.Adapter, adapter).requestDevice(descriptor, callback, userdata); 53 | } 54 | 55 | pub inline fn adapterReference(adapter: *gpu.Adapter) void { 56 | castOpaque(*internal.Adapter, adapter).manager.reference(); 57 | } 58 | 59 | pub inline fn adapterRelease(adapter: *gpu.Adapter) void { 60 | castOpaque(*internal.Adapter, adapter).manager.release(); 61 | } 62 | 63 | pub inline fn bindGroupSetLabel(bind_group: *gpu.BindGroup, label: [*:0]const u8) void { 64 | castOpaque(*internal.BindGroup, bind_group).setLabel(label); 65 | } 66 | 67 | pub inline fn bindGroupReference(bind_group: *gpu.BindGroup) void { 68 | castOpaque(*internal.BindGroup, bind_group).manager.reference(); 69 | } 70 | 71 | pub inline fn bindGroupRelease(bind_group: *gpu.BindGroup) void { 72 | castOpaque(*internal.BindGroup, bind_group).manager.release(); 73 
| } 74 | 75 | pub inline fn bindGroupLayoutSetLabel(bind_group_layout: *gpu.BindGroupLayout, label: [*:0]const u8) void { 76 | castOpaque(*internal.BindGroupLayout, bind_group_layout).layoutSetLabel(label); 77 | } 78 | 79 | pub inline fn bindGroupLayoutReference(bind_group_layout: *gpu.BindGroupLayout) void { 80 | castOpaque(*internal.BindGroupLayout, bind_group_layout).layoutReference(); 81 | } 82 | 83 | pub inline fn bindGroupLayoutRelease(bind_group_layout: *gpu.BindGroupLayout) void { 84 | castOpaque(*internal.BindGroupLayout, bind_group_layout).layoutRelease(); 85 | } 86 | 87 | pub inline fn bufferDestroy(buffer: *gpu.Buffer) void { 88 | castOpaque(*internal.Buffer, buffer).destroy(); 89 | } 90 | 91 | // TODO: dawn: return value not marked as nullable in dawn.json but in fact is. 92 | pub inline fn bufferGetConstMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*const anyopaque { 93 | return castOpaque(*internal.Buffer, buffer).getConstMappedRange(offset, size); 94 | } 95 | 96 | // TODO: dawn: return value not marked as nullable in dawn.json but in fact is. 97 | pub inline fn bufferGetMappedRange(buffer: *gpu.Buffer, offset: usize, size: usize) ?*anyopaque { 98 | return castOpaque(*internal.Buffer, buffer).getMappedRange(offset, size); 99 | } 100 | 101 | pub inline fn bufferGetSize(buffer: *gpu.Buffer) u64 { 102 | return castOpaque(*internal.Buffer, buffer).getSize(); 103 | } 104 | 105 | pub inline fn bufferGetUsage(buffer: *gpu.Buffer) gpu.Buffer.UsageFlags { 106 | return @bitCast(gpu.Buffer.UsageFlags, castOpaque(*internal.Buffer, buffer).getUsage()); 107 | } 108 | 109 | pub inline fn bufferMapAsync(buffer: *gpu.Buffer, mode: gpu.MapModeFlags, offset: usize, size: usize, callback: gpu.Buffer.MapCallback, userdata: ?*anyopaque) void { 110 | castOpaque(*internal.Buffer, buffer).mapAsync(mode, offset, size, callback, userdata); 111 | } 112 | 113 | pub inline fn bufferSetLabel(buffer: *gpu.Buffer, label: [*:0]const u8) void { 114 | castOpaque(*internal.Buffer, buffer).setLabel(label); 115 | } 116 | 117 | pub inline fn bufferUnmap(buffer: *gpu.Buffer) void { 118 | castOpaque(*internal.Buffer, buffer).unmap(); 119 | } 120 | 121 | pub inline fn bufferReference(buffer: *gpu.Buffer) void { 122 | castOpaque(*internal.Buffer, buffer).manager.reference(); 123 | } 124 | 125 | pub inline fn bufferRelease(buffer: *gpu.Buffer) void { 126 | castOpaque(*internal.Buffer, buffer).manager.release(); 127 | } 128 | 129 | pub inline fn commandBufferSetLabel(command_buffer: *gpu.CommandBuffer, label: [*:0]const u8) void { 130 | castOpaque(*internal.CommandBuffer, command_buffer).setLabel(label); 131 | } 132 | 133 | pub inline fn commandBufferReference(command_buffer: *gpu.CommandBuffer) void { 134 | castOpaque(*internal.CommandBuffer, command_buffer).manager.reference(); 135 | } 136 | 137 | pub inline fn commandBufferRelease(command_buffer: *gpu.CommandBuffer) void { 138 | castOpaque(*internal.CommandBuffer, command_buffer).manager.release(); 139 | } 140 | 141 | pub inline fn commandEncoderBeginComputePass(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.ComputePassDescriptor) *gpu.ComputePassEncoder { 142 | return @ptrCast(*gpu.ComputePassEncoder, castOpaque(*internal.CommandEncoder, command_encoder).beginComputePass(descriptor)); 143 | } 144 | 145 | pub inline fn commandEncoderBeginRenderPass(command_encoder: *gpu.CommandEncoder, descriptor: *const gpu.RenderPassDescriptor) *gpu.RenderPassEncoder { 146 | return @ptrCast( 147 | *gpu.RenderPassEncoder, 148 | 
castOpaque(*internal.CommandEncoder, command_encoder).beginRenderPass(descriptor) catch |err| { 149 | std.debug.panic("Error creating render pass encoder: {s}\n", .{@errorName(err)}); 150 | }, 151 | ); 152 | } 153 | 154 | pub inline fn commandEncoderClearBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, offset: u64, size: u64) void { 155 | castOpaque(*internal.CommandEncoder, command_encoder).clearBuffer(castOpaque(*internal.Buffer, buffer), offset, size); 156 | } 157 | 158 | pub inline fn commandEncoderCopyBufferToBuffer(command_encoder: *gpu.CommandEncoder, source: *gpu.Buffer, source_offset: u64, destination: *gpu.Buffer, destination_offset: u64, size: u64) void { 159 | castOpaque(*internal.CommandEncoder, command_encoder).copyBufferToBuffer( 160 | castOpaque(*internal.Buffer, source), 161 | source_offset, 162 | castOpaque(*internal.Buffer, destination), 163 | destination_offset, 164 | size, 165 | ); 166 | } 167 | 168 | pub inline fn commandEncoderCopyBufferToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyBuffer, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { 169 | castOpaque(*internal.CommandEncoder, command_encoder).copyBufferToTexture(source, destination, copy_size); 170 | } 171 | 172 | pub inline fn commandEncoderCopyTextureToBuffer(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyBuffer, copy_size: *const gpu.Extent3D) void { 173 | castOpaque(*internal.CommandEncoder, command_encoder).copyTextureToBuffer(source, destination, copy_size); 174 | } 175 | 176 | pub inline fn commandEncoderCopyTextureToTexture(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { 177 | castOpaque(*internal.CommandEncoder, command_encoder).copyTextureToTexture(source, destination, copy_size); 178 | } 179 | 180 | pub inline fn commandEncoderCopyTextureToTextureInternal(command_encoder: *gpu.CommandEncoder, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D) void { 181 | castOpaque(*internal.CommandEncoder, command_encoder).copyTextureToTextureInternal(source, destination, copy_size); 182 | } 183 | 184 | pub inline fn commandEncoderFinish(command_encoder: *gpu.CommandEncoder, descriptor: ?*const gpu.CommandBuffer.Descriptor) *gpu.CommandBuffer { 185 | return @ptrCast( 186 | *gpu.CommandBuffer, 187 | castOpaque(*internal.CommandEncoder, command_encoder).finish(descriptor) catch |err| { 188 | std.debug.panic("Error encoding commands: {s}\n", .{@errorName(err)}); 189 | }, 190 | ); 191 | } 192 | 193 | pub inline fn commandEncoderInjectValidationError(command_encoder: *gpu.CommandEncoder, message: [*:0]const u8) void { 194 | castOpaque(*internal.CommandEncoder, command_encoder).injectValidationError(message); 195 | } 196 | 197 | pub inline fn commandEncoderInsertDebugMarker(command_encoder: *gpu.CommandEncoder, marker_label: [*:0]const u8) void { 198 | castOpaque(*internal.CommandEncoder, command_encoder).insertDebugMarker(marker_label); 199 | } 200 | 201 | pub inline fn commandEncoderPopDebugGroup(command_encoder: *gpu.CommandEncoder) void { 202 | castOpaque(*internal.CommandEncoder, command_encoder).popDebugGroup(); 203 | } 204 | 205 | pub inline fn commandEncoderPushDebugGroup(command_encoder: *gpu.CommandEncoder, group_label: [*:0]const u8) void { 206 | castOpaque(*internal.CommandEncoder, 
command_encoder).pushDebugGroup(group_label); 207 | } 208 | 209 | pub inline fn commandEncoderResolveQuerySet(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, first_query: u32, query_count: u32, destination: *gpu.Buffer, destination_offset: u64) void { 210 | castOpaque(*internal.CommandEncoder, command_encoder).resolveQuerySet( 211 | castOpaque(*internal.QuerySet, query_set), 212 | first_query, 213 | query_count, 214 | castOpaque(*internal.Buffer, destination), 215 | destination_offset, 216 | ); 217 | } 218 | 219 | pub inline fn commandEncoderSetLabel(command_encoder: *gpu.CommandEncoder, label: [*:0]const u8) void { 220 | castOpaque(*internal.CommandEncoder, command_encoder).setLabel(label); 221 | } 222 | 223 | pub inline fn commandEncoderWriteBuffer(command_encoder: *gpu.CommandEncoder, buffer: *gpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { 224 | castOpaque(*internal.CommandEncoder, command_encoder).writeBuffer(castOpaque(*internal.Buffer, buffer), buffer_offset, data, size); 225 | } 226 | 227 | pub inline fn commandEncoderWriteTimestamp(command_encoder: *gpu.CommandEncoder, query_set: *gpu.QuerySet, query_index: u32) void { 228 | castOpaque(*internal.CommandEncoder, command_encoder).writeTimestamp(castOpaque(*internal.QuerySet, query_set), query_index); 229 | } 230 | 231 | pub inline fn commandEncoderReference(command_encoder: *gpu.CommandEncoder) void { 232 | castOpaque(*internal.CommandEncoder, command_encoder).manager.reference(); 233 | } 234 | 235 | pub inline fn commandEncoderRelease(command_encoder: *gpu.CommandEncoder) void { 236 | castOpaque(*internal.CommandEncoder, command_encoder).manager.release(); 237 | } 238 | 239 | pub inline fn computePassEncoderDispatchWorkgroups(compute_pass_encoder: *gpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { 240 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).dispatchWorkgroups( 241 | workgroup_count_x, 242 | workgroup_count_y, 243 | workgroup_count_z, 244 | ); 245 | } 246 | 247 | pub inline fn computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *gpu.ComputePassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { 248 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).dispatchWorkgroupsIndirect(castOpaque(*internal.Buffer, indirect_buffer), indirect_offset); 249 | } 250 | 251 | pub inline fn computePassEncoderEnd(compute_pass_encoder: *gpu.ComputePassEncoder) void { 252 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).end(); 253 | } 254 | 255 | pub inline fn computePassEncoderInsertDebugMarker(compute_pass_encoder: *gpu.ComputePassEncoder, marker_label: [*:0]const u8) void { 256 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).insertDebugMarker(marker_label); 257 | } 258 | 259 | pub inline fn computePassEncoderPopDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder) void { 260 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).popDebugGroup(); 261 | } 262 | 263 | pub inline fn computePassEncoderPushDebugGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_label: [*:0]const u8) void { 264 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).pushDebugGroup(group_label); 265 | } 266 | 267 | pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *gpu.ComputePassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void { 268 | castOpaque(*internal.ComputePassEncoder, 
compute_pass_encoder).setBindGroup( 269 | group_index, 270 | castOpaque(*internal.BindGroup, group), 271 | dynamic_offset_count, 272 | dynamic_offsets, 273 | ); 274 | } 275 | 276 | pub inline fn computePassEncoderSetLabel(compute_pass_encoder: *gpu.ComputePassEncoder, label: [*:0]const u8) void { 277 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).setLabel(label); 278 | } 279 | 280 | pub inline fn computePassEncoderSetPipeline(compute_pass_encoder: *gpu.ComputePassEncoder, pipeline: *gpu.ComputePipeline) void { 281 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).setPipeline(castOpaque(*internal.ComputePipeline, pipeline)); 282 | } 283 | 284 | pub inline fn computePassEncoderWriteTimestamp(compute_pass_encoder: *gpu.ComputePassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { 285 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).writeTimestamp(castOpaque(*internal.QuerySet, query_set), query_index); 286 | } 287 | 288 | pub inline fn computePassEncoderReference(compute_pass_encoder: *gpu.ComputePassEncoder) void { 289 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).manager.reference(); 290 | } 291 | 292 | pub inline fn computePassEncoderRelease(compute_pass_encoder: *gpu.ComputePassEncoder) void { 293 | castOpaque(*internal.ComputePassEncoder, compute_pass_encoder).manager.release(); 294 | } 295 | 296 | pub inline fn computePipelineGetBindGroupLayout(compute_pipeline: *gpu.ComputePipeline, group_index: u32) *gpu.BindGroupLayout { 297 | return @ptrCast(*gpu.BindGroupLayout, castOpaque(*internal.ComputePipeline, compute_pipeline).getBindGroupLayout(group_index)); 298 | } 299 | 300 | pub inline fn computePipelineSetLabel(compute_pipeline: *gpu.ComputePipeline, label: [*:0]const u8) void { 301 | castOpaque(*internal.ComputePipeline, compute_pipeline).setLabel(label); 302 | } 303 | 304 | pub inline fn computePipelineReference(compute_pipeline: *gpu.ComputePipeline) void { 305 | castOpaque(*internal.ComputePipeline, compute_pipeline).manager.reference(); 306 | } 307 | 308 | pub inline fn computePipelineRelease(compute_pipeline: *gpu.ComputePipeline) void { 309 | castOpaque(*internal.ComputePipeline, compute_pipeline).manager.release(); 310 | } 311 | 312 | pub inline fn deviceCreateBindGroup(device: *gpu.Device, descriptor: *const gpu.BindGroup.Descriptor) *gpu.BindGroup { 313 | return @ptrCast(*gpu.BindGroup, castOpaque(*internal.Device, device).createBindGroup(descriptor)); 314 | } 315 | 316 | pub inline fn deviceCreateBindGroupLayout(device: *gpu.Device, descriptor: *const gpu.BindGroupLayout.Descriptor) *gpu.BindGroupLayout { 317 | return @ptrCast(*gpu.BindGroupLayout, castOpaque(*internal.Device, device).createBindGroupLayout(descriptor)); 318 | } 319 | 320 | pub inline fn deviceCreateBuffer(device: *gpu.Device, descriptor: *const gpu.Buffer.Descriptor) *gpu.Buffer { 321 | return @ptrCast(*gpu.Buffer, castOpaque(*internal.Device, device).createBuffer(descriptor)); 322 | } 323 | 324 | pub inline fn deviceCreateCommandEncoder(device: *gpu.Device, descriptor: ?*const gpu.CommandEncoder.Descriptor) *gpu.CommandEncoder { 325 | return @ptrCast( 326 | *gpu.CommandEncoder, 327 | castOpaque(*internal.Device, device).createCommandEncoder(descriptor) catch |err| { 328 | std.debug.panic("Error creating command encoder: {s}\n", .{@errorName(err)}); 329 | }, 330 | ); 331 | } 332 | 333 | pub inline fn deviceCreateComputePipeline(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor) *gpu.ComputePipeline { 334 | 
return @ptrCast(*gpu.ComputePipeline, castOpaque(*internal.Device, device).createComputePipeline(descriptor)); 335 | } 336 | 337 | pub inline fn deviceCreateComputePipelineAsync(device: *gpu.Device, descriptor: *const gpu.ComputePipeline.Descriptor, callback: gpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { 338 | castOpaque(*internal.Device, device).createComputePipelineAsync(descriptor, callback, userdata); 339 | } 340 | 341 | pub inline fn deviceCreateErrorBuffer(device: *gpu.Device) *gpu.Buffer { 342 | return @ptrCast(*gpu.Buffer, castOpaque(*internal.Device, device).createErrorBuffer()); 343 | } 344 | 345 | pub inline fn deviceCreateErrorExternalTexture(device: *gpu.Device) *gpu.ExternalTexture { 346 | return @ptrCast(*gpu.ExternalTexture, castOpaque(*internal.Device, device).createErrorExternalTexture()); 347 | } 348 | 349 | pub inline fn deviceCreateErrorTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { 350 | return @ptrCast(*gpu.Texture, castOpaque(*internal.Device, device).createErrorTexture(descriptor)); 351 | } 352 | 353 | pub inline fn deviceCreateExternalTexture(device: *gpu.Device, external_texture_descriptor: *const gpu.ExternalTexture.Descriptor) *gpu.ExternalTexture { 354 | return @ptrCast(*gpu.ExternalTexture, castOpaque(*internal.Device, device).createExternalTexture(external_texture_descriptor)); 355 | } 356 | 357 | pub inline fn deviceCreatePipelineLayout(device: *gpu.Device, pipeline_layout_descriptor: *const gpu.PipelineLayout.Descriptor) *gpu.PipelineLayout { 358 | return @ptrCast( 359 | *gpu.PipelineLayout, 360 | castOpaque(*internal.Device, device).createPipelineLayout(pipeline_layout_descriptor) catch |err| { 361 | std.debug.panic("Error creating pipeline layout: {s}\n", .{@errorName(err)}); 362 | }, 363 | ); 364 | } 365 | 366 | pub inline fn deviceCreateQuerySet(device: *gpu.Device, descriptor: *const gpu.QuerySet.Descriptor) *gpu.QuerySet { 367 | return @ptrCast(*gpu.QuerySet, castOpaque(*internal.Device, device).createQuerySet(descriptor)); 368 | } 369 | 370 | pub inline fn deviceCreateRenderBundleEncoder(device: *gpu.Device, descriptor: *const gpu.RenderBundleEncoder.Descriptor) *gpu.RenderBundleEncoder { 371 | return @ptrCast(*gpu.RenderBundleEncoder, castOpaque(*internal.Device, device).createRenderBundleEncoder(descriptor)); 372 | } 373 | 374 | pub inline fn deviceCreateRenderPipeline(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor) *gpu.RenderPipeline { 375 | return @ptrCast( 376 | *gpu.RenderPipeline, 377 | castOpaque(*internal.Device, device).createRenderPipeline(descriptor) catch |err| { 378 | std.debug.panic("Error creating render pipeline: {s}\n", .{@errorName(err)}); 379 | }, 380 | ); 381 | } 382 | 383 | pub inline fn deviceCreateRenderPipelineAsync(device: *gpu.Device, descriptor: *const gpu.RenderPipeline.Descriptor, callback: gpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { 384 | castOpaque(*internal.Device, device).createRenderPipelineAsync(descriptor, callback, userdata); 385 | } 386 | 387 | // TODO(self-hosted): this cannot be marked as inline for some reason. 
388 | // https://github.com/ziglang/zig/issues/12545 389 | pub fn deviceCreateSampler(device: *gpu.Device, descriptor: ?*const gpu.Sampler.Descriptor) *gpu.Sampler { 390 | return @ptrCast(*gpu.Sampler, castOpaque(*internal.Device, device).createSampler(descriptor)); 391 | } 392 | 393 | pub inline fn deviceCreateShaderModule(device: *gpu.Device, descriptor: *const gpu.ShaderModule.Descriptor) *gpu.ShaderModule { 394 | return @ptrCast( 395 | *gpu.ShaderModule, 396 | castOpaque(*internal.Device, device).createShaderModule(descriptor) catch |err| { 397 | std.debug.panic("Error creating shader: {s}\n", .{@errorName(err)}); 398 | }, 399 | ); 400 | } 401 | 402 | pub inline fn deviceCreateSwapChain(device: *gpu.Device, surface: ?*gpu.Surface, descriptor: *const gpu.SwapChain.Descriptor) *gpu.SwapChain { 403 | return @ptrCast( 404 | *gpu.SwapChain, 405 | castOpaque(*internal.Device, device).createSwapChain( 406 | castOpaque(?*internal.Surface, surface), 407 | descriptor, 408 | ) catch |err| { 409 | std.debug.panic("Error creating swapchain: {s}\n", .{@errorName(err)}); 410 | }, 411 | ); 412 | } 413 | 414 | pub inline fn deviceCreateTexture(device: *gpu.Device, descriptor: *const gpu.Texture.Descriptor) *gpu.Texture { 415 | return @ptrCast(*gpu.Texture, castOpaque(*internal.Device, device).createTexture(descriptor)); 416 | } 417 | 418 | pub inline fn deviceDestroy(device: *gpu.Device) void { 419 | castOpaque(*internal.Device, device).destroy(); 420 | } 421 | 422 | pub inline fn deviceEnumerateFeatures(device: *gpu.Device, features: ?[*]gpu.FeatureName) usize { 423 | return castOpaque(*internal.Device, device).enumerateFeatures(features); 424 | } 425 | 426 | pub inline fn deviceGetLimits(device: *gpu.Device, limits: *gpu.SupportedLimits) bool { 427 | return castOpaque(*internal.Device, device).getLimits(limits); 428 | } 429 | 430 | pub inline fn deviceGetQueue(device: *gpu.Device) *gpu.Queue { 431 | return @ptrCast(*gpu.Queue, castOpaque(*internal.Device, device).getQueue()); 432 | } 433 | 434 | pub inline fn deviceHasFeature(device: *gpu.Device, feature: gpu.FeatureName) bool { 435 | return castOpaque(*internal.Device, device).hasFeature(feature); 436 | } 437 | 438 | pub inline fn deviceInjectError(device: *gpu.Device, typ: gpu.ErrorType, message: [*:0]const u8) void { 439 | castOpaque(*internal.Device, device).injectError(typ, message); 440 | } 441 | 442 | pub inline fn deviceLoseForTesting(device: *gpu.Device) void { 443 | castOpaque(*internal.Device, device).loseForTesting(); 444 | } 445 | 446 | pub inline fn devicePopErrorScope(device: *gpu.Device, callback: gpu.ErrorCallback, userdata: ?*anyopaque) bool { 447 | return castOpaque(*internal.Device, device).popErrorScope(callback, userdata); 448 | } 449 | 450 | pub inline fn devicePushErrorScope(device: *gpu.Device, filter: gpu.ErrorFilter) void { 451 | castOpaque(*internal.Device, device).pushErrorScope(filter); 452 | } 453 | 454 | pub inline fn deviceSetDeviceLostCallback(device: *gpu.Device, callback: ?gpu.Device.LostCallback, userdata: ?*anyopaque) void { 455 | castOpaque(*internal.Device, device).setDeviceLostCallback(callback, userdata); 456 | } 457 | 458 | pub inline fn deviceSetLabel(device: *gpu.Device, label: [*:0]const u8) void { 459 | castOpaque(*internal.Device, device).setLabel(label); 460 | } 461 | 462 | pub inline fn deviceSetLoggingCallback(device: *gpu.Device, callback: ?gpu.LoggingCallback, userdata: ?*anyopaque) void { 463 | castOpaque(*internal.Device, device).setLoggingCallback(callback, userdata); 464 | } 465 | 466 | pub 
inline fn deviceSetUncapturedErrorCallback(device: *gpu.Device, callback: ?gpu.ErrorCallback, userdata: ?*anyopaque) void { 467 | castOpaque(*internal.Device, device).setUncapturedErrorCallback(callback, userdata); 468 | } 469 | 470 | pub inline fn deviceTick(device: *gpu.Device) void { 471 | castOpaque(*internal.Device, device).tick(); 472 | } 473 | 474 | pub inline fn deviceReference(device: *gpu.Device) void { 475 | castOpaque(*internal.Device, device).manager.reference(); 476 | } 477 | 478 | pub inline fn deviceRelease(device: *gpu.Device) void { 479 | castOpaque(*internal.Device, device).manager.release(); 480 | } 481 | 482 | pub inline fn externalTextureDestroy(external_texture: *gpu.ExternalTexture) void { 483 | castOpaque(*internal.ExternalTexture, external_texture).destroy(); 484 | } 485 | 486 | pub inline fn externalTextureSetLabel(external_texture: *gpu.ExternalTexture, label: [*:0]const u8) void { 487 | castOpaque(*internal.ExternalTexture, external_texture).setLabel(label); 488 | } 489 | 490 | pub inline fn externalTextureReference(external_texture: *gpu.ExternalTexture) void { 491 | castOpaque(*internal.ExternalTexture, external_texture).manager.reference(); 492 | } 493 | 494 | pub inline fn externalTextureRelease(external_texture: *gpu.ExternalTexture) void { 495 | castOpaque(*internal.ExternalTexture, external_texture).manager.release(); 496 | } 497 | 498 | pub inline fn instanceCreateSurface(instance: *gpu.Instance, descriptor: *const gpu.Surface.Descriptor) *gpu.Surface { 499 | return @ptrCast( 500 | *gpu.Surface, 501 | castOpaque(*internal.Instance, instance).createSurface(descriptor) catch |err| { 502 | std.debug.panic("Failed to create surface: {s}\n", .{@errorName(err)}); 503 | }, 504 | ); 505 | } 506 | 507 | pub inline fn instanceRequestAdapter(instance: *gpu.Instance, options: ?*const gpu.RequestAdapterOptions, callback: gpu.RequestAdapterCallback, userdata: ?*anyopaque) void { 508 | castOpaque(*internal.Instance, instance).requestAdapter(options, callback, userdata); 509 | } 510 | 511 | pub inline fn instanceReference(instance: *gpu.Instance) void { 512 | castOpaque(*internal.Instance, instance).manager.reference(); 513 | } 514 | 515 | pub inline fn instanceRelease(instance: *gpu.Instance) void { 516 | castOpaque(*internal.Instance, instance).manager.release(); 517 | } 518 | 519 | pub inline fn pipelineLayoutSetLabel(pipeline_layout: *gpu.PipelineLayout, label: [*:0]const u8) void { 520 | castOpaque(*internal.PipelineLayout, pipeline_layout).setLabel(label); 521 | } 522 | 523 | pub inline fn pipelineLayoutReference(pipeline_layout: *gpu.PipelineLayout) void { 524 | castOpaque(*internal.PipelineLayout, pipeline_layout).manager.reference(); 525 | } 526 | 527 | pub inline fn pipelineLayoutRelease(pipeline_layout: *gpu.PipelineLayout) void { 528 | castOpaque(*internal.PipelineLayout, pipeline_layout).manager.release(); 529 | } 530 | 531 | pub inline fn querySetDestroy(query_set: *gpu.QuerySet) void { 532 | castOpaque(*internal.QuerySet, query_set).destroy(); 533 | } 534 | 535 | pub inline fn querySetGetCount(query_set: *gpu.QuerySet) u32 { 536 | return castOpaque(*internal.QuerySet, query_set).getCount(); 537 | } 538 | 539 | pub inline fn querySetGetType(query_set: *gpu.QuerySet) gpu.QueryType { 540 | return @intToEnum(gpu.QueryType, castOpaque(*internal.QuerySet, query_set).getType()); 541 | } 542 | 543 | pub inline fn querySetSetLabel(query_set: *gpu.QuerySet, label: [*:0]const u8) void { 544 | castOpaque(*internal.QuerySet, query_set).setLabel(label); 545 | } 546 | 
547 | pub inline fn querySetReference(query_set: *gpu.QuerySet) void { 548 | castOpaque(*internal.QuerySet, query_set).manager.reference(); 549 | } 550 | 551 | pub inline fn querySetRelease(query_set: *gpu.QuerySet) void { 552 | castOpaque(*internal.QuerySet, query_set).manager.release(); 553 | } 554 | 555 | pub inline fn queueCopyTextureForBrowser(queue: *gpu.Queue, source: *const gpu.ImageCopyTexture, destination: *const gpu.ImageCopyTexture, copy_size: *const gpu.Extent3D, options: *const gpu.CopyTextureForBrowserOptions) void { 556 | castOpaque(*internal.Queue, queue).copyTextureForBrowser(source, destination, copy_size, options); 557 | } 558 | 559 | pub inline fn queueOnSubmittedWorkDone(queue: *gpu.Queue, signal_value: u64, callback: gpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { 560 | castOpaque(*internal.Queue, queue).onSubmittedWorkDone(signal_value, callback, userdata); 561 | } 562 | 563 | pub inline fn queueSetLabel(queue: *gpu.Queue, label: [*:0]const u8) void { 564 | castOpaque(*internal.Queue, queue).setLabel(label); 565 | } 566 | 567 | pub inline fn queueSubmit(queue: *gpu.Queue, command_count: u32, commands: [*]*const gpu.CommandBuffer) void { 568 | castOpaque(*internal.Queue, queue).submit( 569 | @ptrCast([]const *internal.CommandBuffer, commands[0..command_count]), 570 | ) catch |err| { 571 | std.debug.panic("Error in queue submission: {s}\n", .{@errorName(err)}); 572 | }; 573 | } 574 | 575 | pub inline fn queueWriteBuffer(queue: *gpu.Queue, buffer: *gpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { 576 | castOpaque(*internal.Queue, queue).writeBuffer(castOpaque(*internal.Buffer, buffer), buffer_offset, data, size); 577 | } 578 | 579 | pub inline fn queueWriteTexture(queue: *gpu.Queue, destination: *const gpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const gpu.Texture.DataLayout, write_size: *const gpu.Extent3D) void { 580 | castOpaque(*internal.Queue, queue).writeTexture(destination, data, data_size, data_layout, write_size); 581 | } 582 | 583 | pub inline fn queueReference(queue: *gpu.Queue) void { 584 | castOpaque(*internal.Queue, queue).manager.reference(); 585 | } 586 | 587 | pub inline fn queueRelease(queue: *gpu.Queue) void { 588 | castOpaque(*internal.Queue, queue).manager.release(); 589 | } 590 | 591 | pub inline fn renderBundleReference(render_bundle: *gpu.RenderBundle) void { 592 | castOpaque(*internal.RenderBundle, render_bundle).manager.reference(); 593 | } 594 | 595 | pub inline fn renderBundleRelease(render_bundle: *gpu.RenderBundle) void { 596 | castOpaque(*internal.RenderBundle, render_bundle).manager.release(); 597 | } 598 | 599 | pub inline fn renderBundleEncoderDraw(render_bundle_encoder: *gpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { 600 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderDraw(vertex_count, instance_count, first_vertex, first_instance); 601 | } 602 | 603 | pub inline fn renderBundleEncoderDrawIndexed(render_bundle_encoder: *gpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { 604 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderDrawIndexed( 605 | index_count, 606 | instance_count, 607 | first_index, 608 | base_vertex, 609 | first_instance, 610 | ); 611 | } 612 | 613 | pub inline fn renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, 
indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { 614 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderDrawIndexedIndirect(castOpaque(*internal.Buffer, indirect_buffer), indirect_offset); 615 | } 616 | 617 | pub inline fn renderBundleEncoderDrawIndirect(render_bundle_encoder: *gpu.RenderBundleEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { 618 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderDrawIndirect(castOpaque(*internal.Buffer, indirect_buffer), indirect_offset); 619 | } 620 | 621 | pub inline fn renderBundleEncoderFinish(render_bundle_encoder: *gpu.RenderBundleEncoder, descriptor: ?*const gpu.RenderBundle.Descriptor) *gpu.RenderBundle { 622 | return @ptrCast(*gpu.RenderBundle, castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderFinish(descriptor)); 623 | } 624 | 625 | pub inline fn renderBundleEncoderInsertDebugMarker(render_bundle_encoder: *gpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { 626 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderInsertDebugMarker(marker_label); 627 | } 628 | 629 | pub inline fn renderBundleEncoderPopDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder) void { 630 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderPopDebugGroup(); 631 | } 632 | 633 | pub inline fn renderBundleEncoderPushDebugGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_label: [*:0]const u8) void { 634 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderPushDebugGroup(group_label); 635 | } 636 | 637 | pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *gpu.RenderBundleEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void { 638 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderSetBindGroup( 639 | group_index, 640 | castOpaque(*internal.BindGroup, group), 641 | dynamic_offset_count, 642 | dynamic_offsets, 643 | ); 644 | } 645 | 646 | pub inline fn renderBundleEncoderSetIndexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { 647 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderSetIndexBuffer(castOpaque(*internal.Buffer, buffer), format, offset, size); 648 | } 649 | 650 | pub inline fn renderBundleEncoderSetLabel(render_bundle_encoder: *gpu.RenderBundleEncoder, label: [*:0]const u8) void { 651 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderSetLabel(label); 652 | } 653 | 654 | pub inline fn renderBundleEncoderSetPipeline(render_bundle_encoder: *gpu.RenderBundleEncoder, pipeline: *gpu.RenderPipeline) void { 655 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderSetPipeline(castOpaque(*internal.RenderPipeline, pipeline)); 656 | } 657 | 658 | pub inline fn renderBundleEncoderSetVertexBuffer(render_bundle_encoder: *gpu.RenderBundleEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { 659 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderSetVertexBuffer(slot, castOpaque(*internal.Buffer, buffer), offset, size); 660 | } 661 | 662 | pub inline fn renderBundleEncoderReference(render_bundle_encoder: *gpu.RenderBundleEncoder) void { 663 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderReference(); 664 | } 665 | 666 | pub inline fn renderBundleEncoderRelease(render_bundle_encoder: 
*gpu.RenderBundleEncoder) void { 667 | castOpaque(*internal.RenderBundleEncoder, render_bundle_encoder).encoderRelease(); 668 | } 669 | 670 | pub inline fn renderPassEncoderBeginOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder, query_index: u32) void { 671 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).beginOcclusionQuery(query_index); 672 | } 673 | 674 | pub inline fn renderPassEncoderDraw(render_pass_encoder: *gpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { 675 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).draw( 676 | vertex_count, 677 | instance_count, 678 | first_vertex, 679 | first_instance, 680 | ); 681 | } 682 | 683 | pub inline fn renderPassEncoderDrawIndexed(render_pass_encoder: *gpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { 684 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).drawIndexed( 685 | index_count, 686 | instance_count, 687 | first_index, 688 | base_vertex, 689 | first_instance, 690 | ); 691 | } 692 | 693 | pub inline fn renderPassEncoderDrawIndexedIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { 694 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).drawIndexedIndirect(castOpaque(*internal.Buffer, indirect_buffer), indirect_offset); 695 | } 696 | 697 | pub inline fn renderPassEncoderDrawIndirect(render_pass_encoder: *gpu.RenderPassEncoder, indirect_buffer: *gpu.Buffer, indirect_offset: u64) void { 698 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).drawIndirect(castOpaque(*internal.Buffer, indirect_buffer), indirect_offset); 699 | } 700 | 701 | pub inline fn renderPassEncoderEnd(render_pass_encoder: *gpu.RenderPassEncoder) void { 702 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).end(); 703 | } 704 | 705 | pub inline fn renderPassEncoderEndOcclusionQuery(render_pass_encoder: *gpu.RenderPassEncoder) void { 706 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).endOcclusionQuery(); 707 | } 708 | 709 | pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *gpu.RenderPassEncoder, bundles_count: u32, bundles: [*]const *const gpu.RenderBundle) void { 710 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).executeBundles(bundles_count, bundles); 711 | } 712 | 713 | pub inline fn renderPassEncoderInsertDebugMarker(render_pass_encoder: *gpu.RenderPassEncoder, marker_label: [*:0]const u8) void { 714 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).insertDebugMarker(marker_label); 715 | } 716 | 717 | pub inline fn renderPassEncoderPopDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder) void { 718 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).popDebugGroup(); 719 | } 720 | 721 | pub inline fn renderPassEncoderPushDebugGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_label: [*:0]const u8) void { 722 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).pushDebugGroup(group_label); 723 | } 724 | 725 | pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *gpu.RenderPassEncoder, group_index: u32, group: *gpu.BindGroup, dynamic_offset_count: u32, dynamic_offsets: ?[*]const u32) void { 726 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setBindGroup( 727 | group_index, 728 | castOpaque(*internal.BindGroup, group), 729 | dynamic_offset_count, 730 | dynamic_offsets, 731 | ); 
732 | } 733 | 734 | pub inline fn renderPassEncoderSetBlendConstant(render_pass_encoder: *gpu.RenderPassEncoder, color: *const gpu.Color) void { 735 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setBlendConstant(color); 736 | } 737 | 738 | pub inline fn renderPassEncoderSetIndexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, buffer: *gpu.Buffer, format: gpu.IndexFormat, offset: u64, size: u64) void { 739 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setIndexBuffer(castOpaque(*internal.Buffer, buffer), format, offset, size); 740 | } 741 | 742 | pub inline fn renderPassEncoderSetLabel(render_pass_encoder: *gpu.RenderPassEncoder, label: [*:0]const u8) void { 743 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setLabel(label); 744 | } 745 | 746 | pub inline fn renderPassEncoderSetPipeline(render_pass_encoder: *gpu.RenderPassEncoder, pipeline: *gpu.RenderPipeline) void { 747 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setPipeline(castOpaque(*internal.RenderPipeline, pipeline)) catch |err| { 748 | std.debug.panic("Error in deferred render pass init: {s}\n", .{@errorName(err)}); 749 | }; 750 | } 751 | 752 | pub inline fn renderPassEncoderSetScissorRect(render_pass_encoder: *gpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { 753 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setScissorRect(x, y, width, height); 754 | } 755 | 756 | pub inline fn renderPassEncoderSetStencilReference(render_pass_encoder: *gpu.RenderPassEncoder, reference: u32) void { 757 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setStencilReference(reference); 758 | } 759 | 760 | pub inline fn renderPassEncoderSetVertexBuffer(render_pass_encoder: *gpu.RenderPassEncoder, slot: u32, buffer: *gpu.Buffer, offset: u64, size: u64) void { 761 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setVertexBuffer(slot, castOpaque(*internal.Buffer, buffer), offset, size); 762 | } 763 | 764 | pub inline fn renderPassEncoderSetViewport(render_pass_encoder: *gpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { 765 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).setViewport(x, y, width, height, min_depth, max_depth); 766 | } 767 | 768 | pub inline fn renderPassEncoderWriteTimestamp(render_pass_encoder: *gpu.RenderPassEncoder, query_set: *gpu.QuerySet, query_index: u32) void { 769 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).writeTimestamp(castOpaque(*internal.QuerySet, query_set), query_index); 770 | } 771 | 772 | pub inline fn renderPassEncoderReference(render_pass_encoder: *gpu.RenderPassEncoder) void { 773 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).manager.reference(); 774 | } 775 | 776 | pub inline fn renderPassEncoderRelease(render_pass_encoder: *gpu.RenderPassEncoder) void { 777 | castOpaque(*internal.RenderPassEncoder, render_pass_encoder).manager.release(); 778 | } 779 | 780 | pub inline fn renderPipelineGetBindGroupLayout(render_pipeline: *gpu.RenderPipeline, group_index: u32) *gpu.BindGroupLayout { 781 | return @ptrCast(*gpu.BindGroupLayout, castOpaque(*internal.RenderPipeline, render_pipeline).getBindGroupLayout(group_index)); 782 | } 783 | 784 | pub inline fn renderPipelineSetLabel(render_pipeline: *gpu.RenderPipeline, label: [*:0]const u8) void { 785 | castOpaque(*internal.RenderPipeline, render_pipeline).setLabel(label); 786 | } 787 | 788 | pub inline fn renderPipelineReference(render_pipeline: 
*gpu.RenderPipeline) void { 789 | castOpaque(*internal.RenderPipeline, render_pipeline).manager.reference(); 790 | } 791 | 792 | pub inline fn renderPipelineRelease(render_pipeline: *gpu.RenderPipeline) void { 793 | castOpaque(*internal.RenderPipeline, render_pipeline).manager.release(); 794 | } 795 | 796 | pub inline fn samplerSetLabel(sampler: *gpu.Sampler, label: [*:0]const u8) void { 797 | castOpaque(*internal.Sampler, sampler).setLabel(label); 798 | } 799 | 800 | pub inline fn samplerReference(sampler: *gpu.Sampler) void { 801 | castOpaque(*internal.Sampler, sampler).manager.reference(); 802 | } 803 | 804 | pub inline fn samplerRelease(sampler: *gpu.Sampler) void { 805 | castOpaque(*internal.Sampler, sampler).manager.release(); 806 | } 807 | 808 | pub inline fn shaderModuleGetCompilationInfo(shader_module: *gpu.ShaderModule, callback: gpu.CompilationInfoCallback, userdata: ?*anyopaque) void { 809 | castOpaque(*internal.ShaderModule, shader_module).getCompilationInfo(callback, userdata); 810 | } 811 | 812 | pub inline fn shaderModuleSetLabel(shader_module: *gpu.ShaderModule, label: [*:0]const u8) void { 813 | castOpaque(*internal.ShaderModule, shader_module).setLabel(label); 814 | } 815 | 816 | pub inline fn shaderModuleReference(shader_module: *gpu.ShaderModule) void { 817 | castOpaque(*internal.ShaderModule, shader_module).manager.reference(); 818 | } 819 | 820 | pub inline fn shaderModuleRelease(shader_module: *gpu.ShaderModule) void { 821 | castOpaque(*internal.ShaderModule, shader_module).manager.release(); 822 | } 823 | 824 | pub inline fn surfaceReference(surface: *gpu.Surface) void { 825 | castOpaque(*internal.Surface, surface).manager.reference(); 826 | } 827 | 828 | pub inline fn surfaceRelease(surface: *gpu.Surface) void { 829 | castOpaque(*internal.Surface, surface).manager.release(); 830 | } 831 | 832 | pub inline fn swapChainConfigure(swap_chain: *gpu.SwapChain, format: gpu.Texture.Format, allowed_usage: gpu.Texture.UsageFlags, width: u32, height: u32) void { 833 | castOpaque(*internal.SwapChain, swap_chain).configure(format, allowed_usage, width, height); 834 | } 835 | 836 | pub inline fn swapChainGetCurrentTextureView(swap_chain: *gpu.SwapChain) *gpu.TextureView { 837 | return @ptrCast( 838 | *gpu.TextureView, 839 | castOpaque(*internal.SwapChain, swap_chain).getCurrentTextureView() catch |err| { 840 | std.debug.panic("Error creating texture view: {s}\n", .{@errorName(err)}); 841 | }, 842 | ); 843 | } 844 | 845 | pub inline fn swapChainPresent(swap_chain: *gpu.SwapChain) void { 846 | castOpaque(*internal.SwapChain, swap_chain).present() catch |err| { 847 | std.debug.panic("Error presenting swap chain: {s}\n", .{@errorName(err)}); 848 | }; 849 | } 850 | 851 | pub inline fn swapChainReference(swap_chain: *gpu.SwapChain) void { 852 | castOpaque(*internal.SwapChain, swap_chain).manager.reference(); 853 | } 854 | 855 | pub inline fn swapChainRelease(swap_chain: *gpu.SwapChain) void { 856 | castOpaque(*internal.SwapChain, swap_chain).manager.release(); 857 | } 858 | 859 | pub inline fn textureCreateView(texture: *gpu.Texture, descriptor: ?*const gpu.TextureView.Descriptor) *gpu.TextureView { 860 | return @ptrCast(*gpu.TextureView, castOpaque(*internal.Texture, texture).createView(descriptor)); 861 | } 862 | 863 | pub inline fn textureDestroy(texture: *gpu.Texture) void { 864 | castOpaque(*internal.Texture, texture).destroy(); 865 | } 866 | 867 | pub inline fn textureGetDepthOrArrayLayers(texture: *gpu.Texture) u32 { 868 | return castOpaque(*internal.Texture, 
texture).getDepthOrArrayLayers(); 869 | } 870 | 871 | pub inline fn textureGetDimension(texture: *gpu.Texture) gpu.Texture.Dimension { 872 | return @intToEnum(gpu.Texture.Dimension, castOpaque(*internal.Texture, texture).getDimension()); 873 | } 874 | 875 | pub inline fn textureGetFormat(texture: *gpu.Texture) gpu.Texture.Format { 876 | return @intToEnum(gpu.Texture.Format, castOpaque(*internal.Texture, texture).getFormat()); 877 | } 878 | 879 | pub inline fn textureGetHeight(texture: *gpu.Texture) u32 { 880 | return castOpaque(*internal.Texture, texture).getHeight(); 881 | } 882 | 883 | pub inline fn textureGetMipLevelCount(texture: *gpu.Texture) u32 { 884 | return castOpaque(*internal.Texture, texture).getMipLevelCount(); 885 | } 886 | 887 | pub inline fn textureGetSampleCount(texture: *gpu.Texture) u32 { 888 | return castOpaque(*internal.Texture, texture).getSampleCount(); 889 | } 890 | 891 | pub inline fn textureGetUsage(texture: *gpu.Texture) gpu.Texture.UsageFlags { 892 | return @bitCast(gpu.Texture.UsageFlags, castOpaque(*internal.Texture, texture).getUsage()); 893 | } 894 | 895 | pub inline fn textureGetWidth(texture: *gpu.Texture) u32 { 896 | return castOpaque(*internal.Texture, texture).getWidth(); 897 | } 898 | 899 | pub inline fn textureSetLabel(texture: *gpu.Texture, label: [*:0]const u8) void { 900 | castOpaque(*internal.Texture, texture).setLabel(label); 901 | } 902 | 903 | pub inline fn textureReference(texture: *gpu.Texture) void { 904 | castOpaque(*internal.Texture, texture).manager.reference(); 905 | } 906 | 907 | pub inline fn textureRelease(texture: *gpu.Texture) void { 908 | castOpaque(*internal.Texture, texture).manager.release(); 909 | } 910 | 911 | pub inline fn textureViewSetLabel(texture_view: *gpu.TextureView, label: [*:0]const u8) void { 912 | castOpaque(*internal.TextureView, texture_view).setLabel(label); 913 | } 914 | 915 | pub inline fn textureViewReference(texture_view: *gpu.TextureView) void { 916 | castOpaque(*internal.TextureView, texture_view).manager.reference(); 917 | } 918 | 919 | pub inline fn textureViewRelease(texture_view: *gpu.TextureView) void { 920 | castOpaque(*internal.TextureView, texture_view).manager.release(); 921 | } 922 | --------------------------------------------------------------------------------