├── .gitignore ├── src ├── docs.zig ├── root.zig ├── bindings.cpp ├── main.zig ├── Texture.zig ├── Ktx2.zig └── Image.zig ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | .zig-cache 2 | zig-out 3 | test-data 4 | -------------------------------------------------------------------------------- /src/docs.zig: -------------------------------------------------------------------------------- 1 | /// See build.zig. 2 | pub fn main() void {} 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [Moved to Codeberg](https://codeberg.org/Games-by-Mason/mr_texture) 2 | 3 | This repository is not mirrored. 4 | -------------------------------------------------------------------------------- /src/root.zig: -------------------------------------------------------------------------------- 1 | //! A texture utility for Zig. For more information, see README.md, or 2 | //! [Stop Shipping PNGs In Your Games](https://gamesbymason.com/blog/2025/stop-shipping-pngs/). 
3 | 4 | const std = @import("std"); 5 | const tracy = @import("tracy"); 6 | 7 | const Allocator = std.mem.Allocator; 8 | const Zone = tracy.Zone; 9 | 10 | pub const Image = @import("Image.zig"); 11 | pub const Texture = @import("Texture.zig"); 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /src/bindings.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #define STB_IMAGE_IMPLEMENTATION 6 | #define STBI_NO_FAILURE_STRINGS 7 | #include "stb_image.h" 8 | 9 | #define STB_IMAGE_RESIZE_IMPLEMENTATION 10 | #define STBIR_USE_FMA 11 | #include "stb_image_resize2.h" 12 | 13 | extern "C" { 14 | rdo_bc::rdo_bc_encoder * bc7enc_init() { 15 | return new rdo_bc::rdo_bc_encoder(); 16 | } 17 | 18 | void bc7enc_deinit(rdo_bc::rdo_bc_encoder * encoder) { 19 | delete encoder; 20 | } 21 | 22 | bool bc7enc_encode( 23 | rdo_bc::rdo_bc_encoder * encoder, 24 | rdo_bc::rdo_bc_params * const params, 25 | uint32_t width, 26 | uint32_t height, 27 | float * const pixels 28 | ) { 29 | // Encode the image as u8s. We need to do this before initializing the encoder, since doing 30 | // so may change the perceptual param that we reference here. It's unfortunate that the C++ 31 | // API requires us to do a copy here, but the time it takes to copy an image while not 32 | // insignificant is dwarfed by the time it takes to encode as BC7. 33 | utils::image_u8 img; 34 | img.init(width, height); 35 | auto &ldr = img.get_pixels(); 36 | for (size_t i = 0; i < (size_t)width * (size_t)height; ++i) { 37 | utils::color_quad_u8 pixel; 38 | for (uint8_t channel = 0; channel < 4; ++channel) { 39 | float sample = pixels[i * 4 + channel]; 40 | if (params->m_perceptual && channel != 3) sample = pow(sample, 1.0f / 2.2f); 41 | sample = sample * 255.0f + 0.5f; 42 | if (sample < 0.0f) { 43 | sample = 0.0f; 44 | } else if (sample > 255.0f) { 45 | sample = 255.0f; 46 | } 47 | pixel.m_c[channel] = sample; 48 | } 49 | ldr[i] = pixel; 50 | } 51 | 52 | // Encode the data as BC7. This may modify params which is referenced above. 
53 | if (!encoder->init(img, *params)) { 54 | return false; 55 | } 56 | if (!encoder->encode()) return false; 57 | return true; 58 | } 59 | 60 | uint8_t * bc7enc_getBlocks(rdo_bc::rdo_bc_encoder * encoder) { 61 | return (uint8_t *)encoder->get_blocks(); 62 | } 63 | 64 | uint32_t bc7enc_getTotalBlocksSizeInBytes(rdo_bc::rdo_bc_encoder * encoder) { 65 | return encoder->get_total_blocks_size_in_bytes(); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const log = std.log; 3 | const assert = std.debug.assert; 4 | const structopt = @import("structopt"); 5 | const zex = @import("zex"); 6 | const zon = @import("zon.zig"); 7 | 8 | pub const tracy = @import("tracy"); 9 | 10 | pub const tracy_impl = @import("tracy_impl"); 11 | 12 | const Io = std.Io; 13 | const Command = structopt.Command; 14 | const Zone = tracy.Zone; 15 | 16 | const command: Command = .{ 17 | .name = "zex", 18 | .description = "Converts images to KTX2 using config specified as a ZON file.", 19 | .named_args = &.{ 20 | .init([]const u8, .{ 21 | .long = "input", 22 | }), 23 | .initAccum([]const u8, .{ 24 | .long = "config", 25 | }), 26 | .init([]const u8, .{ 27 | .long = "output", 28 | }), 29 | }, 30 | }; 31 | 32 | pub fn main() !void { 33 | const zone = Zone.begin(.{ .src = @src() }); 34 | defer zone.end(); 35 | tracy.frameMarkStart("main"); 36 | tracy.appInfo("Zex"); 37 | defer tracy.cleanExit(); 38 | 39 | var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = false }){}; 40 | defer std.debug.assert(gpa.deinit() == .ok); 41 | const allocator = gpa.allocator(); 42 | 43 | var threaded_io: Io.Threaded = .init_single_threaded; 44 | const io = threaded_io.io(); 45 | 46 | var arg_iter = std.process.argsWithAllocator(allocator) catch @panic("OOM"); 47 | defer arg_iter.deinit(); 48 | const args = command.parseOrExit(allocator, 
&arg_iter); 49 | defer command.parseFree(args); 50 | 51 | const cwd = std.fs.cwd(); 52 | 53 | var input_file = cwd.openFile(args.named.input, .{}) catch |err| { 54 | log.err("{s}: {s}", .{ args.named.input, @errorName(err) }); 55 | std.process.exit(1); 56 | }; 57 | defer input_file.close(); 58 | var input_buf: [4096]u8 = undefined; 59 | var input = input_file.readerStreaming(io, &input_buf); 60 | 61 | // Read the config file(s) 62 | var config: zex.Texture.Options = .{}; 63 | for (args.named.config.items) |path| { 64 | // Get the config source file 65 | const src = cwd.readFileAllocOptions( 66 | path, 67 | allocator, 68 | .unlimited, 69 | .@"1", 70 | 0, 71 | ) catch |err| { 72 | log.err("{s}: {s}", .{ path, @errorName(err) }); 73 | std.process.exit(1); 74 | }; 75 | defer allocator.free(src); 76 | 77 | // Parse the ZON and update the config 78 | var diag: zon.Diagnostics = .{}; 79 | defer diag.deinit(allocator); 80 | config = zon.fromSliceDefaults( 81 | zex.Texture.Options, 82 | allocator, 83 | src, 84 | &diag, 85 | &config, 86 | .{}, 87 | ) catch |err| switch (err) { 88 | error.OutOfMemory => return error.OutOfMemory, 89 | error.ParseZon => { 90 | log.err("{s}: {f}", .{ path, diag }); 91 | std.process.exit(1); 92 | }, 93 | }; 94 | } 95 | 96 | // Create the texture 97 | var texture = zex.Texture.init( 98 | allocator, 99 | &input.interface, 100 | config, 101 | ) catch |err| switch (err) { 102 | error.OutOfMemory => return error.OutOfMemory, 103 | else => std.process.exit(1), 104 | }; 105 | defer texture.deinit(); 106 | 107 | var output_file = cwd.createFile(args.named.output, .{}) catch |err| { 108 | log.err("{s}: {s}", .{ args.named.output, @errorName(err) }); 109 | std.process.exit(1); 110 | }; 111 | defer output_file.close(); 112 | 113 | var output_buf: [4096]u8 = undefined; 114 | var output = output_file.writerStreaming(&output_buf); 115 | 116 | // Write the texture 117 | try texture.writeKtx2(&output.interface); 118 | try output.interface.flush(); 119 | } 120 | 
121 | pub const std_options: std.Options = .{ 122 | .logFn = logFn, 123 | .log_level = .info, 124 | }; 125 | 126 | fn logFn( 127 | comptime message_level: std.log.Level, 128 | comptime _: @TypeOf(.enum_literal), 129 | comptime format: []const u8, 130 | args: anytype, 131 | ) void { 132 | const level_txt = comptime message_level.asText(); 133 | 134 | var buffer: [64]u8 = undefined; 135 | var stderr, const tty_config = std.debug.lockStderrWriter(&buffer); 136 | defer std.debug.unlockStderrWriter(); 137 | nosuspend { 138 | var wrote_prefix = false; 139 | if (message_level != .info) { 140 | tty_config.setColor(stderr, .bold) catch {}; 141 | tty_config.setColor(stderr, switch (message_level) { 142 | .err => .red, 143 | .warn => .yellow, 144 | .info => .green, 145 | .debug => .blue, 146 | }) catch {}; 147 | stderr.writeAll(level_txt) catch {}; 148 | tty_config.setColor(stderr, .reset) catch {}; 149 | wrote_prefix = true; 150 | } 151 | if (message_level == .err) tty_config.setColor(stderr, .bold) catch {}; 152 | if (wrote_prefix) { 153 | stderr.writeAll(": ") catch return; 154 | } 155 | stderr.print(format ++ "\n", args) catch return; 156 | tty_config.setColor(stderr, .reset) catch {}; 157 | } 158 | } 159 | 160 | test { 161 | _ = @import("zon.zig"); 162 | } 163 | -------------------------------------------------------------------------------- /src/Texture.zig: -------------------------------------------------------------------------------- 1 | const builtin = @import("builtin"); 2 | const std = @import("std"); 3 | const assert = std.debug.assert; 4 | const tracy = @import("tracy"); 5 | const Zone = tracy.Zone; 6 | const Ktx2 = @import("Ktx2"); 7 | const Image = @import("Image.zig"); 8 | const Texture = @This(); 9 | const Allocator = std.mem.Allocator; 10 | 11 | pub const capacity = Ktx2.max_levels; 12 | 13 | buf: [capacity]Image = undefined, 14 | len: u8 = 0, 15 | 16 | comptime { 17 | assert(std.math.maxInt(@FieldType(@This(), "len")) >= capacity); 18 | } 19 | 20 | pub 
const Options = struct { 21 | encoding: Image.EncodeOptions = .r8g8b8a8_srgb, 22 | preserve_alpha_coverage: ?struct { 23 | alpha_test: f32 = 0.5, 24 | max_steps: u8 = 10, 25 | } = null, 26 | max: struct { 27 | size: u32 = std.math.maxInt(u32), 28 | width: u32 = std.math.maxInt(u32), 29 | height: u32 = std.math.maxInt(u32), 30 | } = .{}, 31 | filter: struct { 32 | u: Image.Filter = .default, 33 | v: Image.Filter = .default, 34 | } = .{}, 35 | address_mode: struct { 36 | u: Image.AddressMode = .clamp, 37 | v: Image.AddressMode = .clamp, 38 | } = .{}, 39 | zlib: ?Image.CompressZlibOptions = .{ .level = .@"9" }, 40 | generate_mipmaps: bool = true, 41 | premultiply: bool = true, 42 | }; 43 | 44 | /// High level helper for texture creation. Feel free to fork this function into your codebase if 45 | /// you need to customize it further, it only calls into the public API. 46 | pub fn init(gpa: Allocator, input: *std.Io.Reader, options: Options) !@This() { 47 | const zone = Zone.begin(.{ .src = @src() }); 48 | defer zone.end(); 49 | 50 | // Create the texture 51 | var texture: Texture = .{}; 52 | errdefer texture.deinit(); 53 | 54 | // Append the first level 55 | const encoding: Image.Encoding = options.encoding; 56 | texture.appendLevel(try Image.rgbaF32InitFromReader( 57 | gpa, 58 | input, 59 | .{ 60 | .color_space = encoding.colorSpace(), 61 | .alpha = if (options.preserve_alpha_coverage) |pac| 62 | .{ .alpha_test = .{ .threshold = pac.alpha_test } } 63 | else 64 | .opacity, 65 | .premultiply = options.premultiply, 66 | }, 67 | )) catch @panic("OOB"); 68 | 69 | // Resize the first level 70 | try texture.levels()[0].rgbaF32ResizeToFit(.{ 71 | .max_size = options.max.size, 72 | .max_width = options.max.width, 73 | .max_height = options.max.height, 74 | .address_mode_u = options.address_mode.u, 75 | .address_mode_v = options.address_mode.v, 76 | .filter_u = options.filter.u, 77 | .filter_v = options.filter.v, 78 | }); 79 | 80 | // Generate mipmaps if requested 81 | if 
(options.generate_mipmaps) { 82 | try texture.rgbaF32GenerateMipmaps(.{ 83 | .filter_u = options.filter.u, 84 | .filter_v = options.filter.v, 85 | .address_mode_u = options.address_mode.u, 86 | .address_mode_v = options.address_mode.v, 87 | .block_size = encoding.blockSize(), 88 | }); 89 | } 90 | 91 | // Encode the texture 92 | try texture.rgbaF32Encode(gpa, null, options.encoding); 93 | 94 | // Compress the texture if requested 95 | if (options.zlib) |zlib_options| { 96 | try texture.compressZlib(gpa, zlib_options); 97 | } 98 | 99 | return texture; 100 | } 101 | 102 | pub fn deinit(self: *@This()) void { 103 | for (self.levels()) |*compressed_level| { 104 | compressed_level.deinit(); 105 | } 106 | self.* = undefined; 107 | } 108 | 109 | pub fn appendLevel(self: *@This(), level: Image) error{OutOfBounds}!void { 110 | if (capacity - self.len == 0) return error.OutOfBounds; 111 | self.buf[self.len] = level; 112 | self.len += 1; 113 | } 114 | 115 | pub fn levels(self: *@This()) []Image { 116 | return @constCast(self.levelsConst()); 117 | } 118 | 119 | pub fn levelsConst(self: *const @This()) []const Image { 120 | return self.buf[0..self.len]; 121 | } 122 | 123 | pub const GenerateMipMapsOptions = struct { 124 | address_mode_u: Image.AddressMode, 125 | address_mode_v: Image.AddressMode, 126 | filter: Image.Filter = .mitchell, 127 | filter_u: ?Image.Filter = null, 128 | filter_v: ?Image.Filter = null, 129 | block_size: u8, 130 | 131 | fn filterU(self: @This()) Image.Filter { 132 | return self.filter_u orelse self.filter; 133 | } 134 | 135 | fn filterV(self: @This()) Image.Filter { 136 | return self.filter_v orelse self.filter; 137 | } 138 | }; 139 | 140 | pub fn rgbaF32GenerateMipmaps(self: *@This(), options: GenerateMipMapsOptions) Image.ResizeError!void { 141 | const zone = Zone.begin(.{ .src = @src() }); 142 | defer zone.end(); 143 | 144 | if (self.len != 1) @panic("generate mipmaps requires exactly one level"); 145 | const source = self.levels()[0]; 146 | 
source.assertIsUncompressedRgbaF32(); 147 | 148 | var generate_mipmaps = source.rgbaF32GenerateMipmaps(.{ 149 | .address_mode_u = options.address_mode_u, 150 | .address_mode_v = options.address_mode_v, 151 | .filter_u = options.filterU(), 152 | .filter_v = options.filterV(), 153 | .block_size = options.block_size, 154 | }); 155 | 156 | while (try generate_mipmaps.next()) |mipmap| { 157 | self.appendLevel(mipmap) catch @panic("OOB"); 158 | } 159 | } 160 | 161 | pub fn rgbaF32Encode( 162 | self: *@This(), 163 | gpa: std.mem.Allocator, 164 | max_threads: ?u16, 165 | options: Image.EncodeOptions, 166 | ) Image.EncodeError!void { 167 | const zone = Zone.begin(.{ .src = @src() }); 168 | defer zone.end(); 169 | for (self.levels()) |*slice| { 170 | try slice.rgbaF32Encode(gpa, max_threads, options); 171 | } 172 | } 173 | 174 | pub fn compressZlib( 175 | self: *@This(), 176 | allocator: std.mem.Allocator, 177 | options: Image.CompressZlibOptions, 178 | ) Image.CompressZlibError!void { 179 | for (self.levels()) |*level| { 180 | try level.compressZlib(allocator, options); 181 | } 182 | } 183 | 184 | pub fn writeKtx2(self: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void { 185 | const zone = Zone.begin(.{ .src = @src() }); 186 | defer zone.end(); 187 | 188 | assert(self.len > 0); 189 | const first_level = self.levelsConst()[0]; 190 | const encoding = first_level.encoding; 191 | const supercompression = first_level.supercompression; 192 | const premultiplied = first_level.premultiplied; 193 | { 194 | var level_width = first_level.width; 195 | var level_height = first_level.height; 196 | for (self.levelsConst()) |level| { 197 | assert(level.encoding == encoding); 198 | assert(level.supercompression == supercompression); 199 | assert(level.premultiplied == premultiplied); 200 | const block_size = level.encoding.blockSize(); 201 | assert(level.width >= block_size and level.height >= block_size); 202 | assert(level.width == level_width); 203 | assert(level.height == 
level_height); 204 | 205 | level_width = @max(1, level_width / 2); 206 | level_height = @max(1, level_height / 2); 207 | } 208 | } 209 | 210 | // Serialization assumes little endian 211 | comptime assert(builtin.cpu.arch.endian() == .little); 212 | 213 | // Write the header 214 | const samples = encoding.samples(); 215 | const index = Ktx2.Header.Index.init(.{ 216 | .levels = self.len, 217 | .samples = samples, 218 | }); 219 | { 220 | const header_zone = Zone.begin(.{ .name = "header", .src = @src() }); 221 | defer header_zone.end(); 222 | try writer.writeStruct(Ktx2.Header{ 223 | .format = encoding.vkFormat(), 224 | .type_size = encoding.typeSize(), 225 | .pixel_width = first_level.width, 226 | .pixel_height = first_level.height, 227 | .pixel_depth = 0, 228 | .layer_count = 0, 229 | .face_count = 1, 230 | .level_count = .fromInt(self.len), 231 | .supercompression_scheme = supercompression, 232 | .index = index, 233 | }, .little); 234 | } 235 | 236 | // Write the level index 237 | const level_alignment: u8 = if (supercompression != .none) 1 else switch (encoding) { 238 | .r8g8b8a8_unorm, .r8g8b8a8_srgb => 4, 239 | .r32g32b32_sfloat => 16, 240 | .bc7_unorm_block, .bc7_srgb_block => 16, 241 | }; 242 | { 243 | const level_index_zone = Zone.begin(.{ .name = "level index", .src = @src() }); 244 | defer level_index_zone.end(); 245 | 246 | // Calculate the byte offsets, taking into account that KTX2 requires mipmaps be stored from 247 | // smallest to largest for streaming purpose, but the index is in the expected order. 
248 | var byte_offsets_reverse_buf: [Ktx2.max_levels]usize = undefined; 249 | var byte_offsets_reverse: std.ArrayList(usize) = .initBuffer(&byte_offsets_reverse_buf); 250 | { 251 | var byte_offset: usize = index.dfd_byte_offset + index.dfd_byte_length; 252 | for (0..self.len) |i| { 253 | byte_offset = std.mem.alignForward(usize, byte_offset, level_alignment); 254 | const compressed_level = self.levelsConst()[self.len - i - 1]; 255 | byte_offsets_reverse.appendBounded(byte_offset) catch @panic("OOB"); 256 | byte_offset += compressed_level.buf.len; 257 | } 258 | } 259 | 260 | // Write the level index data, this is done from largest to smallest, only the actual data 261 | // is stored in reverse order. 262 | for (self.levelsConst(), 0..) |level, i| { 263 | try writer.writeStruct(Ktx2.Level{ 264 | .byte_offset = byte_offsets_reverse.items[self.len - i - 1], 265 | .byte_length = level.buf.len, 266 | .uncompressed_byte_length = level.uncompressed_byte_length, 267 | }, .little); 268 | } 269 | } 270 | 271 | // Write the data descriptor 272 | { 273 | const dfd_zone = Zone.begin(.{ .name = "dfd", .src = @src() }); 274 | defer dfd_zone.end(); 275 | 276 | try writer.writeInt(u32, index.dfd_byte_length, .little); 277 | try writer.writeAll(std.mem.asBytes(&Ktx2.BasicDescriptorBlock{ 278 | .descriptor_block_size = Ktx2.BasicDescriptorBlock.descriptorBlockSize(samples), 279 | .model = switch (encoding) { 280 | .r8g8b8a8_unorm, .r8g8b8a8_srgb, .r32g32b32_sfloat => .rgbsda, 281 | .bc7_unorm_block, .bc7_srgb_block => .bc7, 282 | }, 283 | .primaries = .bt709, 284 | .transfer = switch (encoding.colorSpace()) { 285 | .linear, .hdr => .linear, 286 | .srgb => .srgb, 287 | }, 288 | .flags = .{ 289 | .alpha_premultiplied = premultiplied, 290 | }, 291 | .texel_block_dimension_0 = .fromInt(encoding.blockSize()), 292 | .texel_block_dimension_1 = .fromInt(encoding.blockSize()), 293 | .texel_block_dimension_2 = .fromInt(1), 294 | .texel_block_dimension_3 = .fromInt(1), 295 | .bytes_plane_0 = if 
(supercompression != .none) 0 else switch (encoding) { 296 | .r8g8b8a8_unorm, .r8g8b8a8_srgb => 4, 297 | .r32g32b32_sfloat => 16, 298 | .bc7_unorm_block, .bc7_srgb_block => 16, 299 | }, 300 | .bytes_plane_1 = 0, 301 | .bytes_plane_2 = 0, 302 | .bytes_plane_3 = 0, 303 | .bytes_plane_4 = 0, 304 | .bytes_plane_5 = 0, 305 | .bytes_plane_6 = 0, 306 | .bytes_plane_7 = 0, 307 | })[0 .. @bitSizeOf(Ktx2.BasicDescriptorBlock) / 8]); 308 | switch (encoding) { 309 | .r8g8b8a8_unorm, .r8g8b8a8_srgb => for (0..4) |i| { 310 | const ChannelType = Ktx2.BasicDescriptorBlock.Sample.ChannelType(.rgbsda); 311 | const channel_type: ChannelType = if (i == 3) .alpha else @enumFromInt(i); 312 | try writer.writeAll(std.mem.asBytes(&Ktx2.BasicDescriptorBlock.Sample{ 313 | .bit_offset = .fromInt(8 * @as(u16, @intCast(i))), 314 | .bit_length = .fromInt(8), 315 | .channel_type = @enumFromInt(@intFromEnum(channel_type)), 316 | .linear = switch (encoding.colorSpace()) { 317 | .linear, .hdr => false, 318 | .srgb => i == 3, 319 | }, 320 | .exponent = false, 321 | .signed = false, 322 | .float = false, 323 | .sample_position_0 = 0, 324 | .sample_position_1 = 0, 325 | .sample_position_2 = 0, 326 | .sample_position_3 = 0, 327 | .lower = 0, 328 | .upper = switch (encoding.colorSpace()) { 329 | .hdr => 1, 330 | .srgb, .linear => 255, 331 | }, 332 | })); 333 | }, 334 | .r32g32b32_sfloat => for (0..4) |i| { 335 | const ChannelType = Ktx2.BasicDescriptorBlock.Sample.ChannelType(.rgbsda); 336 | const channel_type: ChannelType = if (i == 3) .alpha else @enumFromInt(i); 337 | try writer.writeAll(std.mem.asBytes(&Ktx2.BasicDescriptorBlock.Sample{ 338 | .bit_offset = .fromInt(32 * @as(u16, @intCast(i))), 339 | .bit_length = .fromInt(32), 340 | .channel_type = @enumFromInt(@intFromEnum(channel_type)), 341 | .linear = false, 342 | .exponent = false, 343 | .signed = true, 344 | .float = true, 345 | .sample_position_0 = 0, 346 | .sample_position_1 = 0, 347 | .sample_position_2 = 0, 348 | .sample_position_3 = 0, 349 
| .lower = @bitCast(@as(f32, -1.0)), 350 | .upper = @bitCast(@as(f32, 1.0)), 351 | })); 352 | }, 353 | .bc7_unorm_block, .bc7_srgb_block => { 354 | const ChannelType = Ktx2.BasicDescriptorBlock.Sample.ChannelType(.bc7); 355 | const channel_type: ChannelType = .data; 356 | try writer.writeAll(std.mem.asBytes(&Ktx2.BasicDescriptorBlock.Sample{ 357 | .bit_offset = .fromInt(0), 358 | .bit_length = .fromInt(128), 359 | .channel_type = @enumFromInt(@intFromEnum(channel_type)), 360 | .linear = false, 361 | .exponent = false, 362 | .signed = false, 363 | .float = false, 364 | .sample_position_0 = 0, 365 | .sample_position_1 = 0, 366 | .sample_position_2 = 0, 367 | .sample_position_3 = 0, 368 | .lower = 0, 369 | .upper = std.math.maxInt(u32), 370 | })); 371 | }, 372 | } 373 | } 374 | 375 | // Write the compressed level data. Note that KTX2 requires mips be stored form smallest to 376 | // largest for streaming purposes. 377 | { 378 | const level_data = Zone.begin(.{ .name = "level data", .src = @src() }); 379 | defer level_data.end(); 380 | 381 | var byte_offset: usize = index.dfd_byte_offset + index.dfd_byte_length; 382 | for (0..self.len) |i| { 383 | // Write padding 384 | const padded = std.mem.alignForward(usize, byte_offset, level_alignment); 385 | try writer.splatByteAll(0, padded - byte_offset); 386 | byte_offset = padded; 387 | 388 | // Write the level 389 | const compressed_level = self.levelsConst()[self.len - i - 1]; 390 | try writer.writeAll(compressed_level.buf); 391 | byte_offset += compressed_level.buf.len; 392 | } 393 | } 394 | 395 | try writer.flush(); 396 | } 397 | -------------------------------------------------------------------------------- /src/Ktx2.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const builtin = @import("builtin"); 4 | 5 | header: *align(1) const Header, 6 | levels: []align(1) const Level, 7 | basic_descriptor_block: 
*align(1) const BasicDescriptorBlock, 8 | samples: []align(1) const BasicDescriptorBlock.Sample, 9 | key_value_data: []const u8, 10 | supercompression_global_data: []const u8, 11 | 12 | const Error = error{ 13 | /// The KTX2 file is malformed. 14 | InvalidKtx2, 15 | /// The KTX2 file may be correct, but violates a Khronos recommendation in such a way that makes 16 | /// it unnecessarily difficult to parse. 17 | UnsupportedKtx2, 18 | }; 19 | 20 | pub const Header = extern struct { 21 | identifier: [12]u8 = identifier, 22 | format: VkFormat, 23 | type_size: u32, 24 | pixel_width: u32, 25 | pixel_height: u32, 26 | pixel_depth: u32, 27 | layer_count: u32, 28 | face_count: u32, 29 | level_count: LevelCount, 30 | supercompression_scheme: SupercompressionScheme, 31 | index: Index, 32 | 33 | comptime { 34 | // Since we're serializing this type to disk, we don't want any implicit padding. Implicit 35 | // padding may vary target to target, and additionally, can result in otherwise identically 36 | // files not being bitwise identical. Any necessary padding should be made explicit. 
37 | assertNoPadding(Header); 38 | } 39 | 40 | pub const VkFormat = enum(u32) { 41 | undefined = 0, 42 | r4g4_unorm_pack8 = 1, 43 | r4g4b4a4_unorm_pack16 = 2, 44 | b4g4r4a4_unorm_pack16 = 3, 45 | r5g6b5_unorm_pack16 = 4, 46 | b5g6r5_unorm_pack16 = 5, 47 | r5g5b5a1_unorm_pack16 = 6, 48 | b5g5r5a1_unorm_pack16 = 7, 49 | a1r5g5b5_unorm_pack16 = 8, 50 | r8_unorm = 9, 51 | r8_snorm = 10, 52 | r8_uint = 13, 53 | r8_sint = 14, 54 | r8_srgb = 15, 55 | r8g8_unorm = 16, 56 | r8g8_snorm = 17, 57 | r8g8_uint = 20, 58 | r8g8_sint = 21, 59 | r8g8_srgb = 22, 60 | r8g8b8_unorm = 23, 61 | r8g8b8_snorm = 24, 62 | r8g8b8_uint = 27, 63 | r8g8b8_sint = 28, 64 | r8g8b8_srgb = 29, 65 | b8g8r8_unorm = 30, 66 | b8g8r8_snorm = 31, 67 | b8g8r8_uint = 34, 68 | b8g8r8_sint = 35, 69 | b8g8r8_srgb = 36, 70 | r8g8b8a8_unorm = 37, 71 | r8g8b8a8_snorm = 38, 72 | r8g8b8a8_uint = 41, 73 | r8g8b8a8_sint = 42, 74 | r8g8b8a8_srgb = 43, 75 | b8g8r8a8_unorm = 44, 76 | b8g8r8a8_snorm = 45, 77 | b8g8r8a8_uint = 48, 78 | b8g8r8a8_sint = 49, 79 | b8g8r8a8_srgb = 50, 80 | a8b8g8r8_unorm_pack32 = 51, 81 | a8b8g8r8_snorm_pack32 = 52, 82 | a8b8g8r8_uint_pack32 = 55, 83 | a8b8g8r8_sint_pack32 = 56, 84 | a8b8g8r8_srgb_pack32 = 57, 85 | a2r10g10b10_unorm_pack32 = 58, 86 | a2r10g10b10_snorm_pack32 = 59, 87 | a2r10g10b10_uint_pack32 = 62, 88 | a2r10g10b10_sint_pack32 = 63, 89 | a2b10g10r10_unorm_pack32 = 64, 90 | a2b10g10r10_snorm_pack32 = 65, 91 | a2b10g10r10_uint_pack32 = 68, 92 | a2b10g10r10_sint_pack32 = 69, 93 | r16_unorm = 70, 94 | r16_snorm = 71, 95 | r16_uint = 74, 96 | r16_sint = 75, 97 | r16_sfloat = 76, 98 | r16g16_unorm = 77, 99 | r16g16_snorm = 78, 100 | r16g16_uint = 81, 101 | r16g16_sint = 82, 102 | r16g16_sfloat = 83, 103 | r16g16b16_unorm = 84, 104 | r16g16b16_snorm = 85, 105 | r16g16b16_uint = 88, 106 | r16g16b16_sint = 89, 107 | r16g16b16_sfloat = 90, 108 | r16g16b16a16_unorm = 91, 109 | r16g16b16a16_snorm = 92, 110 | r16g16b16a16_uint = 95, 111 | r16g16b16a16_sint = 96, 112 | r16g16b16a16_sfloat = 
97, 113 | r32_uint = 98, 114 | r32_sint = 99, 115 | r32_sfloat = 100, 116 | r32g32_uint = 101, 117 | r32g32_sint = 102, 118 | r32g32_sfloat = 103, 119 | r32g32b32_uint = 104, 120 | r32g32b32_sint = 105, 121 | r32g32b32_sfloat = 106, 122 | r32g32b32a32_uint = 107, 123 | r32g32b32a32_sint = 108, 124 | r32g32b32a32_sfloat = 109, 125 | r64_uint = 110, 126 | r64_sint = 111, 127 | r64_sfloat = 112, 128 | r64g64_uint = 113, 129 | r64g64_sint = 114, 130 | r64g64_sfloat = 115, 131 | r64g64b64_uint = 116, 132 | r64g64b64_sint = 117, 133 | r64g64b64_sfloat = 118, 134 | r64g64b64a64_uint = 119, 135 | r64g64b64a64_sint = 120, 136 | r64g64b64a64_sfloat = 121, 137 | b10g11r11_ufloat_pack32 = 122, 138 | e5b9g9r9_ufloat_pack32 = 123, 139 | d16_unorm = 124, 140 | x8_d24_unorm_pack32 = 125, 141 | d32_sfloat = 126, 142 | s8_uint = 127, 143 | d16_unorm_s8_uint = 128, 144 | d24_unorm_s8_uint = 129, 145 | d32_sfloat_s8_uint = 130, 146 | bc1_rgb_unorm_block = 131, 147 | bc1_rgb_srgb_block = 132, 148 | bc1_rgba_unorm_block = 133, 149 | bc1_rgba_srgb_block = 134, 150 | bc2_unorm_block = 135, 151 | bc2_srgb_block = 136, 152 | bc3_unorm_block = 137, 153 | bc3_srgb_block = 138, 154 | bc4_unorm_block = 139, 155 | bc4_snorm_block = 140, 156 | bc5_unorm_block = 141, 157 | bc5_snorm_block = 142, 158 | bc6h_ufloat_block = 143, 159 | bc6h_sfloat_block = 144, 160 | bc7_unorm_block = 145, 161 | bc7_srgb_block = 146, 162 | etc2_r8g8b8_unorm_block = 147, 163 | etc2_r8g8b8_srgb_block = 148, 164 | etc2_r8g8b8a1_unorm_block = 149, 165 | etc2_r8g8b8a1_srgb_block = 150, 166 | etc2_r8g8b8a8_unorm_block = 151, 167 | etc2_r8g8b8a8_srgb_block = 152, 168 | eac_r11_unorm_block = 153, 169 | eac_r11_snorm_block = 154, 170 | eac_r11g11_unorm_block = 155, 171 | eac_r11g11_snorm_block = 156, 172 | astc_4x4_unorm_block = 157, 173 | astc_4x4_srgb_block = 158, 174 | astc_5x4_unorm_block = 159, 175 | astc_5x4_srgb_block = 160, 176 | astc_5x5_unorm_block = 161, 177 | astc_5x5_srgb_block = 162, 178 | astc_6x5_unorm_block = 
163, 179 | astc_6x5_srgb_block = 164, 180 | astc_6x6_unorm_block = 165, 181 | astc_6x6_srgb_block = 166, 182 | astc_8x5_unorm_block = 167, 183 | astc_8x5_srgb_block = 168, 184 | astc_8x6_unorm_block = 169, 185 | astc_8x6_srgb_block = 170, 186 | astc_8x8_unorm_block = 171, 187 | astc_8x8_srgb_block = 172, 188 | astc_10x5_unorm_block = 173, 189 | astc_10x5_srgb_block = 174, 190 | astc_10x6_unorm_block = 175, 191 | astc_10x6_srgb_block = 176, 192 | astc_10x8_unorm_block = 177, 193 | astc_10x8_srgb_block = 178, 194 | astc_10x10_unorm_block = 179, 195 | astc_10x10_srgb_block = 180, 196 | astc_12x10_unorm_block = 181, 197 | astc_12x10_srgb_block = 182, 198 | astc_12x12_unorm_block = 183, 199 | astc_12x12_srgb_block = 184, 200 | g8b8g8r8_422_unorm = 1000156000, 201 | b8g8r8g8_422_unorm = 1000156001, 202 | r10x6_unorm_pack16 = 1000156007, 203 | r10x6g10x6_unorm_2pack16 = 1000156008, 204 | r10x6g10x6b10x6a10x6_unorm_4pack16 = 1000156009, 205 | g10x6b10x6g10x6r10x6_422_unorm_4pack16 = 1000156010, 206 | b10x6g10x6r10x6g10x6_422_unorm_4pack16 = 1000156011, 207 | r12x4_unorm_pack16 = 1000156017, 208 | r12x4g12x4_unorm_2pack16 = 1000156018, 209 | r12x4g12x4b12x4a12x4_unorm_4pack16 = 1000156019, 210 | g12x4b12x4g12x4r12x4_422_unorm_4pack16 = 1000156020, 211 | b12x4g12x4r12x4g12x4_422_unorm_4pack16 = 1000156021, 212 | g16b16g16r16_422_unorm = 1000156027, 213 | b16g16r16g16_422_unorm = 1000156028, 214 | a4r4g4b4_unorm_pack16 = 1000340000, 215 | a4b4g4r4_unorm_pack16 = 1000340001, 216 | astc_4x4_sfloat_block = 1000066000, 217 | astc_5x4_sfloat_block = 1000066001, 218 | astc_5x5_sfloat_block = 1000066002, 219 | astc_6x5_sfloat_block = 1000066003, 220 | astc_6x6_sfloat_block = 1000066004, 221 | astc_8x5_sfloat_block = 1000066005, 222 | astc_8x6_sfloat_block = 1000066006, 223 | astc_8x8_sfloat_block = 1000066007, 224 | astc_10x5_sfloat_block = 1000066008, 225 | astc_10x6_sfloat_block = 1000066009, 226 | astc_10x8_sfloat_block = 1000066010, 227 | astc_10x10_sfloat_block = 1000066011, 
228 | astc_12x10_sfloat_block = 1000066012, 229 | astc_12x12_sfloat_block = 1000066013, 230 | pvrtc1_2bpp_unorm_block_img = 1000054000, 231 | pvrtc1_4bpp_unorm_block_img = 1000054001, 232 | pvrtc2_2bpp_unorm_block_img = 1000054002, 233 | pvrtc2_4bpp_unorm_block_img = 1000054003, 234 | pvrtc1_2bpp_srgb_block_img = 1000054004, 235 | pvrtc1_4bpp_srgb_block_img = 1000054005, 236 | pvrtc2_2bpp_srgb_block_img = 1000054006, 237 | pvrtc2_4bpp_srgb_block_img = 1000054007, 238 | astc_3x3x3_unorm_block_ext = 1000288000, 239 | astc_3x3x3_srgb_block_ext = 1000288001, 240 | astc_3x3x3_sfloat_block_ext = 1000288002, 241 | astc_4x3x3_unorm_block_ext = 1000288003, 242 | astc_4x3x3_srgb_block_ext = 1000288004, 243 | astc_4x3x3_sfloat_block_ext = 1000288005, 244 | astc_4x4x3_unorm_block_ext = 1000288006, 245 | astc_4x4x3_srgb_block_ext = 1000288007, 246 | astc_4x4x3_sfloat_block_ext = 1000288008, 247 | astc_4x4x4_unorm_block_ext = 1000288009, 248 | astc_4x4x4_srgb_block_ext = 1000288010, 249 | astc_4x4x4_sfloat_block_ext = 1000288011, 250 | astc_5x4x4_unorm_block_ext = 1000288012, 251 | astc_5x4x4_srgb_block_ext = 1000288013, 252 | astc_5x4x4_sfloat_block_ext = 1000288014, 253 | astc_5x5x4_unorm_block_ext = 1000288015, 254 | astc_5x5x4_srgb_block_ext = 1000288016, 255 | astc_5x5x4_sfloat_block_ext = 1000288017, 256 | astc_5x5x5_unorm_block_ext = 1000288018, 257 | astc_5x5x5_srgb_block_ext = 1000288019, 258 | astc_5x5x5_sfloat_block_ext = 1000288020, 259 | astc_6x5x5_unorm_block_ext = 1000288021, 260 | astc_6x5x5_srgb_block_ext = 1000288022, 261 | astc_6x5x5_sfloat_block_ext = 1000288023, 262 | astc_6x6x5_unorm_block_ext = 1000288024, 263 | astc_6x6x5_srgb_block_ext = 1000288025, 264 | astc_6x6x5_sfloat_block_ext = 1000288026, 265 | astc_6x6x6_unorm_block_ext = 1000288027, 266 | astc_6x6x6_srgb_block_ext = 1000288028, 267 | astc_6x6x6_sfloat_block_ext = 1000288029, 268 | r16g16_sfixed5_nv = 1000464000, 269 | a1b5g5r5_unorm_pack16_khr = 1000470000, 270 | a8_unorm_khr = 1000470001, 271 
| _, 272 | }; 273 | 274 | pub const LevelCount = enum(u32) { 275 | generate = 0, 276 | _, 277 | 278 | pub fn fromInt(count: u32) @This() { 279 | assert(count != 0); 280 | return @enumFromInt(count); 281 | } 282 | 283 | pub fn toInt(self: @This()) u32 { 284 | switch (self) { 285 | .generate => return 1, 286 | else => return @intFromEnum(self), 287 | } 288 | } 289 | }; 290 | 291 | pub const SupercompressionScheme = enum(u32) { 292 | none = 0, 293 | basis_lz = 1, 294 | zstandard = 2, 295 | zlib = 3, 296 | _, 297 | }; 298 | 299 | pub const Index = extern struct { 300 | pub const InitOptions = struct { 301 | levels: u8, 302 | samples: u8, 303 | }; 304 | 305 | pub fn init(options: InitOptions) @This() { 306 | return .{ 307 | .dfd_byte_offset = @sizeOf(Header) + @sizeOf(Level) * @as(u32, options.levels), 308 | .dfd_byte_length = @sizeOf(u32) + 309 | @bitSizeOf(BasicDescriptorBlock) / 8 + 310 | @as(u32, options.samples) * @sizeOf(BasicDescriptorBlock.Sample), 311 | .kvd_byte_offset = 0, 312 | .kvd_byte_length = 0, 313 | .sgd_byte_offset = 0, 314 | .sgd_byte_length = 0, 315 | }; 316 | } 317 | 318 | dfd_byte_offset: u32, 319 | dfd_byte_length: u32, 320 | kvd_byte_offset: u32, 321 | kvd_byte_length: u32, 322 | sgd_byte_offset: u64, 323 | sgd_byte_length: u64, 324 | }; 325 | }; 326 | 327 | pub const identifier = .{ 328 | '«', 329 | 'K', 330 | 'T', 331 | 'X', 332 | ' ', 333 | '2', 334 | '0', 335 | '»', 336 | '\r', 337 | '\n', 338 | '\x1A', 339 | '\n', 340 | }; 341 | 342 | /// The max levels possible in a valid KTX file. 
343 | pub const max_levels = 31; 344 | 345 | pub const Level = extern struct { 346 | byte_offset: u64, 347 | byte_length: u64, 348 | uncompressed_byte_length: u64, 349 | }; 350 | 351 | pub const BasicDescriptorBlock = packed struct(u192) { 352 | vendor_id: VendorId = .khronos, 353 | descriptor_type: DescriptorType = .basic_format, 354 | version_number: VersionNumber = .@"1.3", 355 | descriptor_block_size: u16, 356 | model: ColorModel, 357 | primaries: ColorPrimaries, 358 | transfer: TransferFunction, 359 | flags: Flags, 360 | texel_block_dimension_0: TexelBlockDimension, 361 | texel_block_dimension_1: TexelBlockDimension, 362 | texel_block_dimension_2: TexelBlockDimension, 363 | texel_block_dimension_3: TexelBlockDimension, 364 | bytes_plane_0: u8, 365 | bytes_plane_1: u8, 366 | bytes_plane_2: u8, 367 | bytes_plane_3: u8, 368 | bytes_plane_4: u8, 369 | bytes_plane_5: u8, 370 | bytes_plane_6: u8, 371 | bytes_plane_7: u8, 372 | 373 | const VendorId = enum(u17) { 374 | khronos = 0, 375 | _, 376 | }; 377 | 378 | const DescriptorType = enum(u15) { 379 | basic_format = 0, 380 | additional_planes = 0x6001, 381 | additional_dimensions = 0x6002, 382 | needed_for_write_bit = 0x2000, 383 | needed_for_decode_bit = 0x4000, 384 | _, 385 | }; 386 | 387 | const VersionNumber = enum(u16) { 388 | pub const @"1.1": @This() = .@"1.0"; 389 | 390 | @"1.0" = 0, 391 | @"1.2" = 1, 392 | @"1.3" = 2, 393 | _, 394 | }; 395 | 396 | pub const ColorModel = enum(u8) { 397 | pub const dxt1a: @This() = .bc1a; 398 | pub const dxt2: @This() = .bc2; 399 | pub const dxt3: @This() = .bc2; 400 | pub const dxt4: @This() = .bc3; 401 | pub const dxt5: @This() = .bc3; 402 | 403 | unspecified = 0, 404 | rgbsda = 1, 405 | yuvsda = 2, 406 | yiqsda = 3, 407 | labsda = 4, 408 | cmyka = 5, 409 | xyzw = 6, 410 | hsva_ang = 7, 411 | hsla_ang = 8, 412 | hsva_hex = 9, 413 | hsla_hex = 10, 414 | ycgcoa = 11, 415 | yccbccrc = 12, 416 | ictcp = 13, 417 | ciexyz = 14, 418 | ciexyy = 15, 419 | bc1a = 128, 420 | bc2 = 129, 
421 | bc3 = 130, 422 | bc4 = 131, 423 | bc5 = 132, 424 | bc6h = 133, 425 | bc7 = 134, 426 | etc1 = 160, 427 | etc2 = 161, 428 | astc = 162, 429 | etc1s = 163, 430 | pvrtc = 164, 431 | pvrtc2 = 165, 432 | uastc = 166, 433 | _, 434 | }; 435 | 436 | pub const ColorPrimaries = enum(u8) { 437 | pub const srgb: @This() = .bt709; 438 | unspecified = 0, 439 | bt709 = 1, 440 | bt601_ebu = 2, 441 | bt601_smpte = 3, 442 | bt2020 = 4, 443 | ciexyz = 5, 444 | aces = 6, 445 | acescc = 7, 446 | ntsc1953 = 8, 447 | pal525 = 9, 448 | displayp3 = 10, 449 | adobergb = 11, 450 | _, 451 | }; 452 | 453 | pub const TransferFunction = enum(u8) { 454 | pub const smtpe170m: @This() = .itu; 455 | unspecified = 0, 456 | linear = 1, 457 | srgb = 2, 458 | itu = 3, 459 | ntsc = 4, 460 | slog = 5, 461 | slog2 = 6, 462 | bt1886 = 7, 463 | hlg_oetf = 8, 464 | hlg_eotf = 9, 465 | pq_eotf = 10, 466 | pq_oetf = 11, 467 | dcip3 = 12, 468 | pal_oetf = 13, 469 | pal625_eotf = 14, 470 | st240 = 15, 471 | acescc = 16, 472 | acescct = 17, 473 | adobergb = 18, 474 | _, 475 | }; 476 | 477 | pub const Flags = packed struct(u8) { 478 | alpha_premultiplied: bool, 479 | _padding0: u7 = 0, 480 | }; 481 | 482 | pub const TexelBlockDimension = enum(u8) { 483 | _, 484 | 485 | pub fn fromInt(i: u8) @This() { 486 | return @enumFromInt(i - 1); 487 | } 488 | 489 | pub fn toInt(self: @This()) u8 { 490 | return @intFromEnum(self) + 1; 491 | } 492 | }; 493 | 494 | pub const Sample = packed struct(u128) { 495 | bit_offset: BitOffset, 496 | bit_length: BitLength, 497 | channel_type: ChannelType(.unspecified), 498 | linear: bool, 499 | exponent: bool, 500 | signed: bool, 501 | float: bool, 502 | sample_position_0: u8, 503 | sample_position_1: u8, 504 | sample_position_2: u8, 505 | sample_position_3: u8, 506 | lower: u32, 507 | upper: u32, 508 | 509 | pub const BitOffset = enum(u16) { 510 | constant_sampler_lower = std.math.maxInt(u16), 511 | _, 512 | 513 | pub fn fromInt(i: u16) @This() { 514 | const result: @This() = 
@enumFromInt(i); 515 | assert(result != .constant_sampler_lower); 516 | return result; 517 | } 518 | 519 | pub fn toInt(self: @This()) u16 { 520 | assert(self != .constant_sampler_lower); 521 | return @intFromEnum(self); 522 | } 523 | }; 524 | 525 | pub const BitLength = enum(u8) { 526 | _, 527 | 528 | pub fn fromInt(i: u8) @This() { 529 | return @enumFromInt(i - 1); 530 | } 531 | 532 | pub fn toInt(self: @This()) u8 { 533 | return @intFromEnum(self) + 1; 534 | } 535 | }; 536 | 537 | pub fn ChannelType(model: ColorModel) type { 538 | return switch (model) { 539 | .rgbsda => enum(u4) { 540 | red = 0, 541 | green = 1, 542 | blue = 2, 543 | stencil = 13, 544 | depth = 14, 545 | alpha = 15, 546 | _, 547 | }, 548 | .yuvsda => enum(u4) { 549 | pub const cb: @This() = .u; 550 | pub const cr: @This() = .v; 551 | 552 | y = 0, 553 | u = 1, 554 | v = 2, 555 | stencil = 13, 556 | depth = 14, 557 | alpha = 15, 558 | _, 559 | }, 560 | .yiqsda => enum(u4) { 561 | y = 0, 562 | i = 1, 563 | q = 2, 564 | stencil = 13, 565 | depth = 14, 566 | alpha = 15, 567 | _, 568 | }, 569 | .labsda => enum(u4) { 570 | l = 0, 571 | a = 1, 572 | b = 2, 573 | stencil = 13, 574 | depth = 14, 575 | alpha = 15, 576 | _, 577 | }, 578 | .cmyka => enum(u4) { 579 | pub const black: @This() = .key; 580 | cyan = 0, 581 | magenta = 1, 582 | yellow = 2, 583 | key = 3, 584 | alpha = 15, 585 | _, 586 | }, 587 | .xyzw => enum(u4) { 588 | x = 0, 589 | y = 1, 590 | z = 2, 591 | w = 3, 592 | _, 593 | }, 594 | .hsva_ang => enum(u4) { 595 | value = 0, 596 | saturation = 1, 597 | hue = 2, 598 | alpha = 15, 599 | _, 600 | }, 601 | .hsla_ang => enum(u4) { 602 | lightness = 0, 603 | saturation = 1, 604 | hue = 2, 605 | alpha = 15, 606 | _, 607 | }, 608 | .hsva_hex => enum(u4) { 609 | value = 0, 610 | saturation = 1, 611 | hue = 2, 612 | alpha = 15, 613 | _, 614 | }, 615 | .hsla_hex => enum(u4) { 616 | lightness = 0, 617 | saturation = 1, 618 | hue = 2, 619 | alpha = 15, 620 | _, 621 | }, 622 | .ycgcoa => enum(u4) { 623 | 
y = 0, 624 | cg = 1, 625 | co = 2, 626 | alpha = 15, 627 | _, 628 | }, 629 | .ciexyz => enum(u4) { 630 | x = 0, 631 | y = 1, 632 | z = 2, 633 | _, 634 | }, 635 | .ciexyy => enum(u4) { 636 | x = 0, 637 | ychroma = 1, 638 | yluma = 2, 639 | _, 640 | }, 641 | .bc1a => enum(u4) { 642 | color = 0, 643 | alpha_present = 1, 644 | _, 645 | }, 646 | .bc2 => enum(u4) { 647 | color = 0, 648 | alpha = 15, 649 | _, 650 | }, 651 | .bc3 => enum(u4) { 652 | color = 0, 653 | alpha = 15, 654 | _, 655 | }, 656 | .bc4 => enum(u4) { 657 | data = 0, 658 | _, 659 | }, 660 | .bc5 => enum(u4) { 661 | red = 0, 662 | green = 1, 663 | _, 664 | }, 665 | .bc6h => enum(u4) { 666 | data = 0, 667 | _, 668 | }, 669 | .bc7 => enum(u4) { 670 | data = 0, 671 | _, 672 | }, 673 | .etc1 => enum(u4) { 674 | data = 0, 675 | _, 676 | }, 677 | .etc2 => enum(u4) { 678 | red = 0, 679 | green = 1, 680 | color = 2, 681 | alpha = 15, 682 | _, 683 | }, 684 | .astc => enum(u4) { 685 | data = 0, 686 | _, 687 | }, 688 | .etc1s => enum(u4) { 689 | rgb = 0, 690 | rrr = 3, 691 | ggg = 4, 692 | aaa = 15, 693 | _, 694 | }, 695 | .pvrtc => enum(u4) { 696 | data = 0, 697 | _, 698 | }, 699 | .pvrtc2 => enum(u4) { 700 | data = 0, 701 | _, 702 | }, 703 | .uastc => enum(u4) { 704 | rgb = 0, 705 | rgba = 3, 706 | rrr = 4, 707 | rrrg = 5, 708 | rg = 6, 709 | _, 710 | }, 711 | else => enum(u4) { _ }, 712 | }; 713 | } 714 | }; 715 | 716 | pub fn descriptorBlockSize(samples: u8) u16 { 717 | return @bitSizeOf(BasicDescriptorBlock) / 8 + @sizeOf(BasicDescriptorBlock.Sample) * samples; 718 | } 719 | }; 720 | 721 | pub fn init(bytes: []align(@alignOf(u64)) const u8) Error!@This() { 722 | comptime assert(builtin.cpu.arch.endian() == .little); 723 | 724 | // Parse the header 725 | if (bytes.len < @sizeOf(Header)) return error.InvalidKtx2; 726 | const header: *const Header = @ptrCast(bytes.ptr); 727 | if (!std.mem.eql(u8, &header.identifier, &identifier)) { 728 | return error.InvalidKtx2; 729 | } 730 | 731 | // Check that we don't have too 
many levels 732 | const max_levels_for_size = std.math.log2_int(u32, @max( 733 | header.pixel_width, 734 | header.pixel_height, 735 | header.pixel_depth, 736 | )); 737 | if (header.level_count.toInt() > max_levels_for_size) { 738 | return error.InvalidKtx2; 739 | } 740 | 741 | // Parse the level index. 742 | // 743 | // The multiplication here can't overflow because we already verified level count (it can 744 | // never exceed 32.) 745 | const levels_bytes = @sizeOf(Level) * header.level_count.toInt(); 746 | if (bytes.len < @sizeOf(Header) + levels_bytes) { 747 | return error.InvalidKtx2; 748 | } 749 | const levels_unsized: [*]align(1) const Level = @ptrCast(&bytes[@sizeOf(Header)]); 750 | const levels: []align(1) const Level = levels_unsized[0..header.level_count.toInt()]; 751 | for (levels) |level| { 752 | if (level.byte_offset + level.byte_length > bytes.len) { 753 | return error.InvalidKtx2; 754 | } 755 | } 756 | 757 | // Parse the DFD 758 | if (bytes.len < header.index.dfd_byte_offset + header.index.dfd_byte_length) { 759 | return error.InvalidKtx2; 760 | } 761 | if (header.index.dfd_byte_length < @sizeOf(u32) + @bitSizeOf(BasicDescriptorBlock) / 8) { 762 | return error.UnsupportedKtx2; 763 | } 764 | const dfd_total_size: *align(1) const u32 = @ptrCast(&bytes[header.index.dfd_byte_offset]); 765 | if (header.index.dfd_byte_length != dfd_total_size.*) { 766 | return error.InvalidKtx2; 767 | } 768 | const basic_df: *align(1) const BasicDescriptorBlock = @ptrCast( 769 | &bytes[header.index.dfd_byte_offset + @sizeOf(u32)], 770 | ); 771 | if (basic_df.vendor_id != .khronos) return error.UnsupportedKtx2; 772 | if (basic_df.descriptor_type != .basic_format) return error.UnsupportedKtx2; 773 | if (basic_df.version_number != .@"1.3") return error.UnsupportedKtx2; 774 | const basic_descriptor_block_remaining_bytes = std.math.sub( 775 | u32, 776 | basic_df.descriptor_block_size, 777 | @bitSizeOf(BasicDescriptorBlock) / 8, 778 | ) catch { 779 | return error.InvalidKtx2; 
780 | }; 781 | const sample_count = std.math.divExact( 782 | u32, 783 | basic_descriptor_block_remaining_bytes, 784 | @sizeOf(BasicDescriptorBlock.Sample), 785 | ) catch { 786 | return error.InvalidKtx2; 787 | }; 788 | const samples_unsized: [*]align(1) const BasicDescriptorBlock.Sample = @ptrCast(&bytes[ 789 | header.index.dfd_byte_offset + 790 | header.index.dfd_byte_length - 791 | sample_count * @sizeOf(BasicDescriptorBlock.Sample) 792 | ]); 793 | const samples = samples_unsized[0..sample_count]; 794 | 795 | // Parse key and value data 796 | if (header.index.kvd_byte_offset + header.index.kvd_byte_length > bytes.len) { 797 | return error.InvalidKtx2; 798 | } 799 | const kv_data = bytes[header.index.kvd_byte_offset..][0..header.index.kvd_byte_length]; 800 | 801 | // Parse global supercompression data 802 | if (header.index.sgd_byte_offset + header.index.sgd_byte_length > bytes.len) { 803 | return error.InvalidKtx2; 804 | } 805 | const sg_data = bytes[header.index.sgd_byte_offset..][0..header.index.sgd_byte_length]; 806 | 807 | return .{ 808 | .header = header, 809 | .levels = levels, 810 | .basic_descriptor_block = basic_df, 811 | .samples = samples, 812 | .key_value_data = kv_data, 813 | .supercompression_global_data = sg_data, 814 | }; 815 | } 816 | 817 | pub const KeyValueIter = struct { 818 | pub const Item = struct { 819 | key: [:0]const u8, 820 | value: []const u8, 821 | }; 822 | 823 | data: []const u8, 824 | 825 | pub fn next(self: *@This()) error{InvalidKtx2}!?Item { 826 | // Stop if we're out of data 827 | if (self.data.len == 0) return null; 828 | 829 | // Get the length of this key value pair 830 | if (self.data.len < @sizeOf(u32)) return error.InvalidKtx2; 831 | const length: *align(1) const u32 = @ptrCast(self.data.ptr); 832 | if (length.* < 2) return error.InvalidKtx2; 833 | 834 | // Get the keyrgbsda value pair 835 | if (@as(usize, @sizeOf(u32)) + length.* > self.data.len) return error.InvalidKtx2; 836 | const kv = 
self.data[@sizeOf(u32)..][0..length.*]; 837 | 838 | // Advance the iterator 839 | const offset = std.mem.alignForward(u32, @sizeOf(u32) + length.*, 4); 840 | if (offset > self.data.len) return error.InvalidKtx2; 841 | self.data = self.data[offset..]; 842 | 843 | // Split the key and value out 844 | const null_index = std.mem.indexOfScalar(u8, kv, 0) orelse return error.InvalidKtx2; 845 | return .{ 846 | .key = @ptrCast(kv[0..null_index]), 847 | .value = kv[null_index + 1 ..], 848 | }; 849 | } 850 | }; 851 | 852 | pub fn keyValueIter(self: *const @This()) KeyValueIter { 853 | return .{ .data = self.key_value_data }; 854 | } 855 | 856 | pub fn levelBytes(self: *const @This(), index: u8) ?[]const u8 { 857 | // Check that the level exists. If it does, the offsets inside of it were already bounds checked 858 | // on init. Otherwise fail. 859 | if (index >= self.levels.len) return null; 860 | const level = self.levels[index]; 861 | 862 | const all_bytes: [*]const u8 = @ptrCast(self.header); 863 | return all_bytes[level.byte_offset..][0..level.byte_length]; 864 | } 865 | 866 | fn assertNoPadding(T: type) void { 867 | switch (@typeInfo(T)) { 868 | .int, .float, .bool => {}, 869 | .array => |a| assertNoPadding(a.child), 870 | .@"struct" => |s| { 871 | // Check that all our field types don't have padding 872 | for (s.fields) |field| { 873 | assertNoPadding(field.type); 874 | } 875 | 876 | // Check that we have no padding 877 | comptime var size: usize = 0; 878 | for (@typeInfo(T).@"struct".fields) |field| { 879 | size += @sizeOf(field.type); 880 | } 881 | assert(size == @sizeOf(T)); 882 | }, 883 | .@"enum" => {}, 884 | else => unreachable, 885 | } 886 | } 887 | -------------------------------------------------------------------------------- /src/Image.zig: -------------------------------------------------------------------------------- 1 | //! An image for texture processing. 2 | //! 3 | //! Many manipulations are only supported on uncompressed rgbaf32 encoded images. 
This is indicated 4 | //! by prefixing the function names with `rgbaf32`. This allows processing LDR and HDR images in the 5 | //! same code paths, and increases precision of intermediate operations. Technically this does result 6 | //! in extra work being done when loading LDR images and exporting them with no processing, but most 7 | //! images require processing (if only to generate mipmaps). 8 | //! 9 | //! The allocator field may be replaced depending on operations done on the image. For example, 10 | //! since resizing is done with STB, resize operations will free the original allocation and replace 11 | //! it with an STB managed allocation, updating the allocator as needed. We can't simply pass a 12 | //! user supplied Zig allocator to STB since it doesn't provide a length when freeing memory. 13 | 14 | const std = @import("std"); 15 | const Allocator = std.mem.Allocator; 16 | const assert = std.debug.assert; 17 | const log = std.log; 18 | const tracy = @import("tracy"); 19 | const Zone = tracy.Zone; 20 | const Ktx2 = @import("Ktx2"); 21 | 22 | const c = @cImport({ 23 | @cDefine("STBI_NO_STDIO", "1"); 24 | @cInclude("stb_image.h"); 25 | @cInclude("stb_image_resize2.h"); 26 | @cInclude("zlib.h"); 27 | }); 28 | 29 | const Image = @This(); 30 | 31 | /// The width of the image in pixels. 32 | width: u32, 33 | /// The height of the image in pixels. 34 | height: u32, 35 | /// The bytes requires to store the image without supercompression. 36 | uncompressed_byte_length: u64, 37 | /// The encoded image data. See `encoding` and `supercompression` for a description of how to 38 | /// interpret these bytes. 39 | buf: []u8, 40 | /// Whether or not the source data was HDR, regardless of the current encoding. 41 | hdr: bool, 42 | /// The current encoding. 43 | encoding: Encoding, 44 | /// The current supercompression scheme. 45 | supercompression: Ktx2.Header.SupercompressionScheme, 46 | /// True if the alpha channel is premultiplied by the color channels. 
47 | premultiplied: bool, 48 | /// The meaning of the alpha channel. 49 | alpha: Alpha, 50 | /// The allocator used to freeing buf on deinit. 51 | allocator: Allocator, 52 | 53 | pub const Alpha = union(enum) { 54 | /// The alpha channel represents opacity. 55 | opacity: void, 56 | /// The alpha channel represents opacity and is used for alpha testing. 57 | alpha_test: struct { 58 | /// Values less than or equal to threshold are expected to be considered transparent by the 59 | /// renderer, values larger are expected to be considered opaque. 60 | threshold: f32, 61 | /// The ratio of pixels expected to pass the alpha test. See `rgbaF32PreserveAlphaCoverage`. 62 | target_coverage: f32, 63 | }, 64 | /// The alpha channel is used for something other than transparency. 65 | other: void, 66 | }; 67 | 68 | pub const Encoding = enum(u32) { 69 | r8g8b8a8_unorm = @intFromEnum(Ktx2.Header.VkFormat.r8g8b8a8_unorm), 70 | r8g8b8a8_srgb = @intFromEnum(Ktx2.Header.VkFormat.r8g8b8a8_srgb), 71 | r32g32b32_sfloat = @intFromEnum(Ktx2.Header.VkFormat.r32g32b32_sfloat), 72 | bc7_unorm_block = @intFromEnum(Ktx2.Header.VkFormat.bc7_unorm_block), 73 | bc7_srgb_block = @intFromEnum(Ktx2.Header.VkFormat.bc7_srgb_block), 74 | 75 | /// Returns the encoding as a Vulkan format. 76 | pub fn vkFormat(self: @This()) Ktx2.Header.VkFormat { 77 | return @enumFromInt(@intFromEnum(self)); 78 | } 79 | 80 | /// Returns the number of samples per pixel. 81 | pub fn samples(self: @This()) u8 { 82 | return switch (self) { 83 | .r8g8b8a8_unorm, .r8g8b8a8_srgb, .r32g32b32_sfloat => 4, 84 | .bc7_unorm_block, .bc7_srgb_block => 1, 85 | }; 86 | } 87 | 88 | /// Returns the size of a block in pixels. 89 | pub fn blockSize(self: @This()) u8 { 90 | return switch (self) { 91 | .r8g8b8a8_unorm, .r8g8b8a8_srgb, .r32g32b32_sfloat => 1, 92 | .bc7_unorm_block, .bc7_srgb_block => 4, 93 | }; 94 | } 95 | 96 | /// Returns the color space. 
97 | pub fn colorSpace(self: @This()) Image.ColorSpace { 98 | return switch (self) { 99 | .bc7_unorm_block, .r8g8b8a8_unorm => .linear, 100 | .bc7_srgb_block, .r8g8b8a8_srgb => .srgb, 101 | .r32g32b32_sfloat => .hdr, 102 | }; 103 | } 104 | 105 | /// Returns the element size. 106 | pub fn typeSize(self: @This()) u8 { 107 | return switch (self) { 108 | .r8g8b8a8_unorm, .r8g8b8a8_srgb, .bc7_unorm_block, .bc7_srgb_block => 1, 109 | .r32g32b32_sfloat => 4, 110 | }; 111 | } 112 | }; 113 | 114 | pub const ColorSpace = enum(c_uint) { 115 | /// Linear LDR data. 116 | linear, 117 | /// SRGB LDR data. 118 | srgb, 119 | /// Linear HDR data. 120 | hdr, 121 | }; 122 | 123 | pub const InitFromReaderOptions = struct { 124 | /// See `Image.Alpha`. 125 | pub const Alpha = union(enum) { 126 | opacity: void, 127 | alpha_test: struct { threshold: f32 = 0.5 }, 128 | other, 129 | }; 130 | color_space: ColorSpace, 131 | alpha: @This().Alpha, 132 | premultiply: bool = true, 133 | }; 134 | 135 | pub const InitFromReaderError = error{ 136 | StbImageFailure, 137 | WrongColorSpace, 138 | StreamTooLong, 139 | OutOfMemory, 140 | } || std.Io.Reader.Error; 141 | 142 | /// Read an image using `stb_image.h`. Allocation done by STB. 143 | pub fn rgbaF32InitFromReader( 144 | gpa: std.mem.Allocator, 145 | reader: *std.Io.Reader, 146 | options: InitFromReaderOptions, 147 | ) InitFromReaderError!Image { 148 | const zone = Zone.begin(.{ .src = @src() }); 149 | defer zone.end(); 150 | 151 | // We could pass the reader into STB for additional pipelining and reduced allocations. For 152 | // simplicity's sake we don't do this yet since it isn't a particularly large performance win, 153 | // but we keep our options open by taking a reader. 
154 | const input_bytes = b: { 155 | const read_zone = Zone.begin(.{ .name = "read", .src = @src() }); 156 | defer read_zone.end(); 157 | break :b try reader.allocRemaining(gpa, .unlimited); 158 | }; 159 | defer gpa.free(input_bytes); 160 | 161 | // Check if the input is HDR 162 | const hdr = c.stbi_is_hdr_from_memory( 163 | input_bytes.ptr, 164 | @intCast(input_bytes.len), 165 | ) == 1; 166 | switch (options.color_space) { 167 | .linear, .srgb => if (hdr) return error.WrongColorSpace, 168 | .hdr => if (!hdr) return error.WrongColorSpace, 169 | } 170 | 171 | // We're gonna do our own premul, and STB doesn't expose whether it was already done or not. 172 | // Typically it is not unless we're dealing with an iPhone PNG. Always get canonical format. 173 | c.stbi_set_unpremultiply_on_load(1); 174 | c.stbi_convert_iphone_png_to_rgb(1); 175 | 176 | // All images are loaded as linear floats regardless of the source and dest formats. 177 | c.stbi_ldr_to_hdr_gamma(switch (options.color_space) { 178 | .srgb => 2.2, 179 | .linear, .hdr => 1.0, 180 | }); 181 | 182 | // Load the image using STB 183 | var width: c_int = 0; 184 | var height: c_int = 0; 185 | var input_channels: c_int = 0; 186 | const buf = b: { 187 | const read_zone = Zone.begin(.{ .name = "stbi_loadf_from_memory", .src = @src() }); 188 | defer read_zone.end(); 189 | const ptr = c.stbi_loadf_from_memory( 190 | input_bytes.ptr, 191 | @intCast(input_bytes.len), 192 | &width, 193 | &height, 194 | &input_channels, 195 | 4, 196 | ) orelse return error.StbImageFailure; 197 | const len = @as(usize, @intCast(width)) * @as(usize, @intCast(height)) * 4; 198 | break :b std.mem.sliceAsBytes(ptr[0..len]); 199 | }; 200 | 201 | // Create the image 202 | var result: @This() = .{ 203 | .width = @intCast(width), 204 | .height = @intCast(height), 205 | .uncompressed_byte_length = buf.len, 206 | .buf = buf, 207 | .hdr = hdr, 208 | .encoding = .r32g32b32_sfloat, 209 | .alpha = switch (options.alpha) { 210 | .opacity => .opacity, 211 
| .alpha_test => |at| .{ .alpha_test = .{ 212 | .threshold = at.threshold, 213 | .target_coverage = 0.0, 214 | } }, 215 | .other => .other, 216 | }, 217 | .premultiplied = options.premultiply, 218 | .supercompression = .none, 219 | .allocator = stb_allocator, 220 | }; 221 | 222 | // Premultiply the alpha if requested 223 | if (options.premultiply) { 224 | const premultiply_zone = Zone.begin(.{ .name = "premultiply", .src = @src() }); 225 | defer premultiply_zone.end(); 226 | var px: usize = 0; 227 | const f32s = result.rgbaF32Samples(); 228 | while (px < @as(usize, result.width) * @as(usize, result.height) * 4) : (px += 4) { 229 | const a = f32s[px + 3]; 230 | f32s[px + 0] = f32s[px + 0] * a; 231 | f32s[px + 1] = f32s[px + 1] * a; 232 | f32s[px + 2] = f32s[px + 2] * a; 233 | } 234 | } 235 | 236 | // Calculate coverage so that we can preserve it on resize. We could do this lazily since it may 237 | // not be used, but it's not that expensive and typically wanted for alpha tests so caching it 238 | // up front is a bit simpler. 
239 | switch (result.alpha) { 240 | .alpha_test => |*at| at.target_coverage = result.rgbaF32AlphaCoverage(at.threshold, 1.0), 241 | else => {}, 242 | } 243 | 244 | return result; 245 | } 246 | 247 | pub fn deinit(self: *Image) void { 248 | const zone = Zone.begin(.{ .src = @src() }); 249 | defer zone.end(); 250 | if (self.isUncompressedRgbaF32()) { 251 | self.allocator.free(self.rgbaF32Samples()); 252 | } else { 253 | self.allocator.free(self.buf); 254 | } 255 | _ = self.toOwned(); 256 | } 257 | 258 | pub fn isUncompressedRgbaF32(self: Image) bool { 259 | return self.encoding == .r32g32b32_sfloat and self.supercompression == .none; 260 | } 261 | 262 | pub fn assertIsUncompressedRgbaF32(self: Image) void { 263 | if (!isUncompressedRgbaF32(self)) @panic("expected uncompressed rgba-f32"); 264 | } 265 | 266 | pub fn rgbaF32Samples(self: Image) []f32 { 267 | self.assertIsUncompressedRgbaF32(); 268 | var f32s: []f32 = undefined; 269 | f32s.ptr = @ptrCast(@alignCast(self.buf.ptr)); 270 | f32s.len = self.buf.len / @sizeOf(f32); 271 | return f32s; 272 | } 273 | 274 | pub const AddressMode = enum(c_uint) { 275 | clamp = c.STBIR_EDGE_CLAMP, 276 | reflect = c.STBIR_EDGE_REFLECT, 277 | wrap = c.STBIR_EDGE_WRAP, 278 | zero = c.STBIR_EDGE_ZERO, 279 | }; 280 | 281 | pub const Filter = enum(c_uint) { 282 | // See https://github.com/Games-by-Mason/Zex/issues/20 283 | // box = c.STBIR_FILTER_BOX, 284 | default, 285 | triangle, 286 | cubic_b_spline, 287 | catmull_rom, 288 | mitchell, 289 | point_sample, 290 | 291 | fn sharpens(self: @This(), hdr: bool) bool { 292 | return switch (self) { 293 | .default => !hdr, 294 | .triangle, .point_sample, .cubic_b_spline => false, 295 | .mitchell, .catmull_rom => true, 296 | }; 297 | } 298 | 299 | fn toStbFilter(self: @This(), hdr: bool) c.stbir_filter { 300 | return switch (self) { 301 | .default => if (hdr) c.STBIR_FILTER_TRIANGLE else c.STBIR_FILTER_MITCHELL, 302 | .triangle => c.STBIR_FILTER_TRIANGLE, 303 | .cubic_b_spline => 
c.STBIR_FILTER_CUBICBSPLINE, 304 | .catmull_rom => c.STBIR_FILTER_CATMULLROM, 305 | .mitchell => c.STBIR_FILTER_MITCHELL, 306 | .point_sample => c.STBIR_FILTER_POINT_SAMPLE, 307 | }; 308 | } 309 | }; 310 | 311 | pub const ResizeError = error{ StbResizeFailure, OutOfMemory }; 312 | 313 | pub const ResizeOptions = struct { 314 | width: u32, 315 | height: u32, 316 | address_mode_u: AddressMode, 317 | address_mode_v: AddressMode, 318 | filter_u: Filter, 319 | filter_v: Filter, 320 | preserve_alpha_coverage_max_steps: u8 = 10, 321 | }; 322 | 323 | /// Resizes this image. 324 | pub fn rgbaF32Resize(self: *Image, options: ResizeOptions) ResizeError!void { 325 | const zone = Zone.begin(.{ .src = @src() }); 326 | defer zone.end(); 327 | self.assertIsUncompressedRgbaF32(); 328 | if (self.width != options.width or self.height != options.height) { 329 | const result = try self.rgbaF32Resized(options); 330 | self.deinit(); 331 | self.* = result; 332 | } 333 | } 334 | 335 | /// Returns a resized copy of this image. 
pub fn rgbaF32Resized(self: Image, options: ResizeOptions) ResizeError!Image {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    self.assertIsUncompressedRgbaF32();
    assert(options.width > 0 and options.height > 0);

    // The output buffer is malloc'd so it can later be freed through `stb_allocator`; we can't
    // hand STB a Zig allocator because STB doesn't provide a length when freeing.
    const output_samples_len = @as(usize, options.width) * @as(usize, options.height) * 4;
    const output_samples_ptr: [*]f32 = @ptrCast(@alignCast(c.malloc(
        output_samples_len * @sizeOf(f32),
    ) orelse return error.OutOfMemory));
    const output_samples = output_samples_ptr[0..output_samples_len];
    errdefer c.free(output_samples.ptr);

    var stbr_options: c.STBIR_RESIZE = undefined;
    c.stbir_resize_init(
        &stbr_options,
        self.rgbaF32Samples().ptr,
        @intCast(self.width),
        @intCast(self.height),
        0,
        output_samples.ptr,
        @intCast(options.width),
        @intCast(options.height),
        0,
        if (self.premultiplied or self.alpha == .other) b: {
            // We're either already premultiplied, or the alpha channel doesn't represent opacity
            // and therefore doesn't need to be premultiplied.
            break :b c.STBIR_RGBA_PM;
        } else b: {
            // We need to multiply by alpha for correct weighting, then unpremultiply after.
            break :b c.STBIR_RGBA;
        },
        c.STBIR_TYPE_FLOAT,
    );

    stbr_options.horizontal_edge = @intFromEnum(options.address_mode_u);
    stbr_options.vertical_edge = @intFromEnum(options.address_mode_v);
    stbr_options.horizontal_filter = options.filter_u.toStbFilter(self.hdr);
    stbr_options.vertical_filter = options.filter_v.toStbFilter(self.hdr);

    {
        const resize_zone = Zone.begin(.{ .name = "stbir_resize_extended", .src = @src() });
        defer resize_zone.end();
        if (c.stbir_resize_extended(&stbr_options) != 1) {
            return error.StbResizeFailure;
        }
    }

    // Sharpening filters can push values below zero. Clamp them before doing further processing.
    // We could alternatively use `STBIR_FLOAT_LOW_CLAMP`, see issue #18.
    if (options.filter_u.sharpens(self.hdr) or options.filter_v.sharpens(self.hdr)) {
        const clamp_zone = Zone.begin(.{ .name = "clamp", .src = @src() });
        defer clamp_zone.end();
        for (output_samples) |*d| {
            d.* = @max(d.*, 0.0);
        }
    }

    const buf = std.mem.sliceAsBytes(output_samples);
    const result: @This() = .{
        .width = options.width,
        .height = options.height,
        .uncompressed_byte_length = buf.len,
        .buf = buf,
        .hdr = self.hdr,
        .encoding = .r32g32b32_sfloat,
        .supercompression = .none,
        .allocator = stb_allocator,
        .alpha = self.alpha,
        .premultiplied = self.premultiplied,
    };

    // Fix: preserve alpha coverage on the *resized* image, not on `self`. Previously this was
    // called on the source image, which had no effect on the returned image (and mutated the
    // source's alpha, which callers such as `rgbaF32Resize` are about to free anyway).
    result.rgbaF32PreserveAlphaCoverage(options.preserve_alpha_coverage_max_steps);

    return result;
}

pub const SizeToFitOptions = struct {
    max_size: u32 = std.math.maxInt(u32),
    max_width: u32 = std.math.maxInt(u32),
    max_height: u32 = std.math.maxInt(u32),
};

/// Returns the largest size that fits within `options` while preserving the aspect ratio.
pub fn rgbaF32SizeToFit(self: Image, options: SizeToFitOptions) struct { u32, u32 } {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    self.assertIsUncompressedRgbaF32();

    // Clamp the limits to the current size up front; this function never scales up.
    const limit_w: u32 = @min(options.max_width, options.max_size, self.width);
    const limit_h: u32 = @min(options.max_height, options.max_size, self.height);

    const src_w: f64 = @floatFromInt(self.width);
    const src_h: f64 = @floatFromInt(self.height);
    const limit_w_f: f64 = @floatFromInt(limit_w);
    const limit_h_f: f64 = @floatFromInt(limit_h);

    // A single uniform scale preserves the aspect ratio: take whichever axis is more
    // constrained, and never exceed 1.0.
    const scale_x = @min(limit_w_f / src_w, 1.0);
    const scale_y = @min(limit_h_f / src_h, 1.0);
    const uniform_scale = @min(scale_x, scale_y);

    // Truncate back to integers, clamping once more to guard against float rounding pushing a
    // dimension past its limit.
    const fitted_w = @min(@as(u32, @intFromFloat(uniform_scale * src_w)), limit_w);
    const fitted_h = @min(@as(u32, @intFromFloat(uniform_scale * src_h)), limit_h);

    return .{ fitted_w, fitted_h };
}

pub const ResizeToFitOptions = struct {
    max_size: u32 = std.math.maxInt(u32),
    max_width: u32 = std.math.maxInt(u32),
    max_height: u32 = std.math.maxInt(u32),
    address_mode_u: AddressMode,
    address_mode_v: AddressMode,
    filter_u: Filter,
    filter_v: Filter,
};

/// Resizes the image to fit within `options` while preserving the aspect ratio.
pub fn rgbaF32ResizeToFit(self: *Image, options: ResizeToFitOptions) ResizeError!void {
    self.assertIsUncompressedRgbaF32();
    const width, const height = self.rgbaF32SizeToFit(.{
        .max_size = options.max_size,
        .max_width = options.max_width,
        .max_height = options.max_height,
    });

    try self.rgbaF32Resize(.{
        .width = width,
        .height = height,
        .address_mode_u = options.address_mode_u,
        .address_mode_v = options.address_mode_v,
        .filter_u = options.filter_u,
        .filter_v = options.filter_v,
    });
}

/// Returns a copy of the image resized to fit within `options` while preserving the aspect ratio.
pub fn rgbaF32ResizedToFit(self: Image, options: ResizeToFitOptions) ResizeError!Image {
    self.assertIsUncompressedRgbaF32();
    // Fix: this previously called `self.sizeToFit` and `self.resized`, neither of which exists;
    // the methods are named `rgbaF32SizeToFit` and `rgbaF32Resized`. Zig's lazy analysis hides
    // the compile error until this function is actually referenced.
    const width, const height = self.rgbaF32SizeToFit(.{
        .max_size = options.max_size,
        .max_width = options.max_width,
        .max_height = options.max_height,
    });

    return self.rgbaF32Resized(.{
        .width = width,
        .height = height,
        .address_mode_u = options.address_mode_u,
        .address_mode_v = options.address_mode_v,
        .filter_u = options.filter_u,
        .filter_v = options.filter_v,
    });
}

pub const GenerateMipMapsOptions = struct {
    /// Mip generation stops once both dimensions are at or below this size.
    block_size: u8,
    address_mode_u: AddressMode,
    address_mode_v: AddressMode,
    filter_u: Filter,
    filter_v: Filter,
};

/// Returns an iterator that yields successively halved mip levels of this image. The image itself
/// is not modified; it is copied by value into the iterator.
pub fn rgbaF32GenerateMipmaps(self: Image, options: GenerateMipMapsOptions) GenerateMipmaps {
    self.assertIsUncompressedRgbaF32();
    return .{
        .options = options,
        .image = self,
    };
}

pub const GenerateMipmaps = struct {
    options: GenerateMipMapsOptions,
    image: Image,

    /// Returns the next (half-size) mip level, or null once both dimensions have reached the
    /// block size. Each returned level is a fresh allocation; the iterator does not free previous
    /// levels, so the caller is responsible for deiniting each one.
    pub fn next(self: *@This()) ResizeError!?Image {
        // Stop once we're below the block size, there's no benefit to further mipmaps
        if (self.image.width <= self.options.block_size and
            self.image.height <= self.options.block_size)
        {
            return null;
        }

        // Halve the image size, clamping at 1 so a non-square image keeps producing levels until
        // both axes hit the stop condition.
        self.image = try self.image.rgbaF32Resized(.{
            .width = @max(1, self.image.width / 2),
            .height = @max(1, self.image.height / 2),
            .address_mode_u = self.options.address_mode_u,
            .address_mode_v = self.options.address_mode_v,
            .filter_u = self.options.filter_u,
            .filter_v = self.options.filter_v,
        });
        return self.image;
    }
};

/// Calculates the ratio of pixels that would pass the given alpha test threshold if the given
/// scaling was applied.
pub fn rgbaF32AlphaCoverage(self: Image, threshold: f32, scale: f32) f32 {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    self.assertIsUncompressedRgbaF32();

    // Quantize the threshold to the output type: LDR images will eventually be stored as 8-bit,
    // so compare against the threshold as it will round when quantized.
    const quantized_threshold = if (self.hdr) threshold else @round(threshold * 255.0) / 255.0;

    // Count the pixels whose scaled alpha passes the test, then normalize by the pixel count.
    var coverage: f32 = 0;
    const f32s = self.rgbaF32Samples();
    for (0..@as(usize, self.width) * @as(usize, self.height)) |i| {
        const alpha = f32s[i * 4 + 3];
        if (alpha * scale > quantized_threshold) coverage += 1.0;
    }
    coverage /= @floatFromInt(@as(usize, self.width) * @as(usize, self.height));
    return coverage;
}

/// Attempts to preserve the ratio of pixels that pass the alpha test. Automatically called on
/// resize for alpha tested images, exposed for use with custom image processing.
pub fn rgbaF32PreserveAlphaCoverage(self: Image, max_steps: u8) void {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    self.assertIsUncompressedRgbaF32();
    // No-op unless this image is alpha tested.
    const alpha_test = switch (self.alpha) {
        .alpha_test => |at| at,
        else => return,
    };

    // Binary search for the best scale parameter
    var best_scale: f32 = 1.0;
    var best_dist = std.math.inf(f32);
    var upper_threshold: f32 = 1.0;
    var lower_threshold: f32 = 0.0;
    var curr_threshold: f32 = alpha_test.threshold;
    {
        const search_zone = Zone.begin(.{ .name = "search", .src = @src() });
        defer search_zone.end();
        for (0..max_steps) |_| {
            // Scaling alpha by `threshold / curr_threshold` is equivalent to testing
            // against `curr_threshold` directly; we search over the threshold and keep
            // the scale whose coverage lands closest to the target.
            const curr_scale = alpha_test.threshold / curr_threshold;
            const coverage = self.rgbaF32AlphaCoverage(alpha_test.threshold, curr_scale);
            const dist_to_coverage = @abs(coverage - alpha_test.target_coverage);
            if (dist_to_coverage < best_dist) {
                best_dist = dist_to_coverage;
                best_scale = curr_scale;
            }

            // Narrow the bracket toward the target coverage; exact hit ends the search.
            if (coverage < alpha_test.target_coverage) {
                upper_threshold = curr_threshold;
            } else if (coverage > alpha_test.target_coverage) {
                lower_threshold = curr_threshold;
            } else {
                break;
            }

            curr_threshold = (lower_threshold + upper_threshold) / 2.0;
        }
    }

    // Apply the scaling
    if (best_scale != 1.0) {
        const search_zone = Zone.begin(.{ .name = "scale", .src = @src() });
        defer search_zone.end();
        const f32s = self.rgbaF32Samples();
        // Scale every alpha sample (every 4th float), clamping to 1.0.
        for (0..@as(usize, self.width) * @as(usize, self.height)) |i| {
            const a = &f32s[i * 4 + 3];
            a.* = @min(a.* * best_scale, 1.0);
        }
    }
}

/// Transfers ownership of the pixel data out of `self`, leaving `self` empty.
/// Freeing the emptied image is a no-op (see `moved_allocator`).
pub fn toOwned(self: *Image) Image {
    const owned: Image = self.*;
    self.width = 0;
    self.height = 0;
    self.buf = &.{};
    self.allocator = moved_allocator;
    return owned;
}

/// Per-encoding options for `rgbaF32Encode`, tagged by `Encoding`.
pub const EncodeOptions = union(Encoding) {
    r8g8b8a8_unorm: void,
    r8g8b8a8_srgb: void,
    r32g32b32_sfloat: void,
    bc7_unorm_block: Bc7Options,
    bc7_srgb_block: Bc7Options,
};

pub const EncodeError = EncodeRgbaU8Error || EncodeBc7Error;

/// Transcodes from rgba-f32 to the given encoding.
pub fn rgbaF32Encode(
    self: *@This(),
    gpa: Allocator,
    max_threads: ?u16,
    options: EncodeOptions,
) EncodeError!void {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    switch (options) {
        .r8g8b8a8_unorm => try self.rgbaF32EncodeRgbaU8(gpa),
        .r8g8b8a8_srgb => try self.rgbaF32EncodeRgbaSrgbU8(gpa),
        // Already rgba-f32; nothing to do.
        .r32g32b32_sfloat => {},
        .bc7_unorm_block => |bc7_options| try self.rgbaF32EncodeBc7(max_threads, bc7_options),
        .bc7_srgb_block => |bc7_options| try self.rgbaF32EncodeBc7Srgb(max_threads, bc7_options),
    }
}

pub const EncodeRgbaU8Error = error{OutOfMemory};

/// Quantizes rgba-f32 to 8-bit linear (unorm) RGBA in place.
pub fn rgbaF32EncodeRgbaU8(self: *@This(), gpa: Allocator) EncodeRgbaU8Error!void {
    try self.rgbaF32EncodeRgbaU8Ex(gpa, false);
}

/// Quantizes rgba-f32 to 8-bit sRGB RGBA in place (alpha stays linear).
pub fn rgbaF32EncodeRgbaSrgbU8(self: *@This(), gpa: Allocator) EncodeRgbaU8Error!void {
    try self.rgbaF32EncodeRgbaU8Ex(gpa, true);
}

// Shared implementation for the two u8 encoders above.
fn rgbaF32EncodeRgbaU8Ex(self: *@This(), gpa: Allocator, srgb: bool) EncodeRgbaU8Error!void {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();
    self.assertIsUncompressedRgbaF32();

    const f32s = self.rgbaF32Samples();
    // One output byte per input float sample.
    const buf = b: {
        const alloc_zone = Zone.begin(.{ .name = "alloc", .src = @src() });
        defer alloc_zone.end();
        break :b try gpa.alloc(u8, f32s.len);
    };

    {
        const encode_zone = Zone.begin(.{ .name = "encode", .src = @src() });
        defer encode_zone.end();
        for (0..f32s.len) |i| {
            var ldr = f32s[i];
            // Gamma-encode the color channels only (every 4th sample is alpha),
            // using the 1/2.2 power approximation of the sRGB transfer function.
            if (srgb and i % 4 != 3) {
                ldr = std.math.pow(f32, ldr, 1.0 / 2.2);
            }
            // Round-to-nearest quantization into [0, 255].
            ldr = std.math.clamp(ldr * 255.0 + 0.5, 0.0, 255.0);
            buf[i] = @intFromFloat(ldr);
        }
    }

    // Free the old f32 samples and adopt the new u8 buffer owned by `gpa`.
    self.allocator.free(self.rgbaF32Samples());
    self.* = .{
        .width = self.width,
        .height = self.height,
        .hdr = self.hdr,
        .encoding = if (srgb) .r8g8b8a8_srgb else .r8g8b8a8_unorm,
        .alpha = self.alpha,
        .premultiplied = self.premultiplied,
        .uncompressed_byte_length = buf.len,
        .buf = buf,
        .allocator = gpa,
        .supercompression = .none,
    };
}

/// Options for the bc7enc BC7 encoder; ranges are validated in `encodeBc7Ex`.
pub const Bc7Options = struct {
    uber_level: u8 = Bc7Enc.Params.max_uber_level,
    reduce_entropy: bool = false,
    max_partitions_to_scan: u16 = Bc7Enc.Params.max_partitions,
    mode_6_only: bool = false,
    // Rate-distortion optimization settings; null disables RDO.
    rdo: ?struct {
        lambda: f32 = 0.5,
        lookback_window: ?u17 = null,
        smooth_block_error_scale: ?f32 = 15.0,
        quantize_mode_6_endpoints: bool = true,
        weight_modes: bool = true,
        weight_low_frequency_partitions: bool = true,
        pbit1_weighting: bool = true,
        max_smooth_block_std_dev: f32 = 18.0,
        try_two_matches: bool = true,
        ultrasmooth_block_handling: bool = true,
    } = .{},
};

pub const EncodeBc7Error = error{ InvalidOption, EncoderFailed };

/// Encodes rgba-f32 to BC7 (linear) in place.
pub fn rgbaF32EncodeBc7(
    self: *@This(),
    max_threads: ?u16,
    options: Bc7Options,
) EncodeBc7Error!void {
    try self.encodeBc7Ex(max_threads, false, options);
}

/// Encodes rgba-f32 to BC7 (sRGB) in place.
pub fn rgbaF32EncodeBc7Srgb(
    self: *@This(),
    max_threads: ?u16,
    options: Bc7Options,
) EncodeBc7Error!void {
    try self.encodeBc7Ex(max_threads, true, options);
}

// Shared implementation for the two BC7 encoders above. Validates `options`,
// fills in the encoder's param struct, runs the native encoder, then replaces
// the pixel buffer with the encoder-owned block data.
fn encodeBc7Ex(
    self: *@This(),
    max_threads: ?u16,
    srgb: bool,
    options: Bc7Options,
) EncodeBc7Error!void {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();

    if (self.encoding != .r32g32b32_sfloat) @panic("can only encode from rgba-f32");
    if (self.supercompression != .none) @panic("can only encode uncompressed data");

    // Determine the bc7_unorm_block params
    var params: Bc7Enc.Params = .{};
    {
        const params_zone = Zone.begin(.{ .name = "params", .src = @src() });
        defer params_zone.end();

        if (options.uber_level > Bc7Enc.Params.max_uber_level) {
            log.err("Invalid uber level.", .{});
            return error.InvalidOption;
        }
        params.bc7_uber_level = options.uber_level;

        params.reduce_entropy = options.reduce_entropy;

        if (options.max_partitions_to_scan > Bc7Enc.Params.max_partitions) {
            log.err("Invalid max partitions to scan.", .{});
            return error.InvalidOption;
        }
        params.max_partitions_to_scan = options.max_partitions_to_scan;
        // Ignored when using RDO. However, we use it in our bindings. The actual encoder
        // just clears it so it doesn't matter that we set it regardless.
        params.perceptual = srgb;
        params.mode6_only = options.mode_6_only;

        if (max_threads) |v| {
            if (v == 0) {
                log.err("Invalid max threads.", .{});
                return error.InvalidOption;
            }
            params.rdo_max_threads = v;
        } else {
            // Default to one thread per logical CPU (at least one).
            params.rdo_max_threads = @intCast(std.math.clamp(
                std.Thread.getCpuCount() catch 1,
                1,
                std.math.maxInt(u32),
            ));
        }
        params.rdo_multithreading = params.rdo_max_threads > 1;

        if (options.rdo) |rdo| {
            if ((rdo.lambda < 0.0) or (rdo.lambda > 500.0)) {
                log.err("Invalid RDO lambda.", .{});
                return error.InvalidOption;
            }
            params.rdo_lambda = rdo.lambda;

            if (rdo.lookback_window) |lookback_window| {
                if (lookback_window < Bc7Enc.Params.min_lookback_window_size) {
                    log.err("Invalid lookback window.", .{});
                    return error.InvalidOption;
                }
                params.lookback_window_size = lookback_window;
                params.custom_lookback_window_size = true;
            }

            if (rdo.smooth_block_error_scale) |v| {
                if ((v < 1.0) or (v > 500.0)) {
                    log.err("Invalid smooth block error scale.", .{});
                    return error.InvalidOption;
                }
                params.rdo_smooth_block_error_scale = v;
                params.custom_rdo_smooth_block_error_scale = true;
            }

            params.rdo_bc7_quant_mode6_endpoints = rdo.quantize_mode_6_endpoints;
            params.rdo_bc7_weight_modes = rdo.weight_modes;
            params.rdo_bc7_weight_low_frequency_partitions = rdo.weight_low_frequency_partitions;
            params.rdo_bc7_pbit1_weighting = rdo.pbit1_weighting;

            if ((rdo.max_smooth_block_std_dev) < 0.000125 or (rdo.max_smooth_block_std_dev > 256.0)) {
                log.err("Invalid smooth block standard deviation.", .{});
                return error.InvalidOption;
            }
            params.rdo_max_smooth_block_std_dev = rdo.max_smooth_block_std_dev;
            params.rdo_try_2_matches = rdo.try_two_matches;
            params.rdo_ultrasmooth_block_handling = rdo.ultrasmooth_block_handling;
        }
    }

    // Encode the image
    const bc7_encoder = b: {
        const init_zone = Zone.begin(.{ .name = "init", .src = @src() });
        defer init_zone.end();
        break :b Bc7Enc.init() orelse return error.EncoderFailed;
    };

    {
        const encode_zone = Zone.begin(.{ .name = "encode", .src = @src() });
        defer encode_zone.end();
        if (!bc7_encoder.encode(&params, self.width, self.height, self.rgbaF32Samples().ptr)) {
            return error.EncoderFailed;
        }
    }

    // Free the f32 samples and adopt the encoder-owned block buffer. The custom
    // allocator tears down the encoder when the buffer is eventually freed.
    self.allocator.free(self.rgbaF32Samples());
    const buf = bc7_encoder.getBlocks();
    self.* = .{
        .width = self.width,
        .height = self.height,
        .uncompressed_byte_length = buf.len,
        .buf = buf,
        .encoding = if (srgb) .bc7_srgb_block else .bc7_unorm_block,
        .alpha = self.alpha,
        .premultiplied = self.premultiplied,
        .allocator = bc7EncAllocator(bc7_encoder),
        .supercompression = .none,
        .hdr = self.hdr,
    };
}

/// Opaque handle to the native BC7 encoder (see bindings.cpp).
pub const Bc7Enc = opaque {
    /// Encoder parameters. This is an `extern struct`: field order and types
    /// must stay in sync with the C++ side of the bindings — do not reorder.
    pub const Params = extern struct {
        pub const max_partitions = 64;
        pub const max_uber_level = 4;
        pub const max_level = 18;
        pub const min_lookback_window_size = 8;

        // Non-exhaustive: values are used as a bit mask by the native encoder.
        const Bc345ModeMask = enum(u32) {
            const bc4_use_all_modes: @This() = .bc4_default_search_rad;

            bc4_default_search_rad = 3,
            bc4_use_mode8_flag = 1,
            bc4_use_mode6_flag = 2,

            _,
        };

        pub const Bc1ApproxMode = enum(c_uint) {
            ideal = 0,
            nvidia = 1,
            amd = 2,
            ideal_round_4 = 3,
            _,
        };

        pub const DxgiFormat = enum(c_uint) {
            bc7_unorm = 98,
        };

        bc7_uber_level: c_int = max_uber_level,
        max_partitions_to_scan: c_int = max_partitions,
        perceptual: bool = false,
        bc45_channel0: u32 = 0,
        bc45_channel1: u32 = 1,

        bc1_mode: Bc1ApproxMode = .ideal,
        use_bc1_3color_mode: bool = true,

        use_bc1_3color_mode_for_black: bool = true,

        bc1_quality_level: c_int = max_level,

        dxgi_format: DxgiFormat = .bc7_unorm,

        rdo_lambda: f32 = 0.0,
        rdo_debug_output: bool = false,
        rdo_smooth_block_error_scale: f32 = 15.0,
        custom_rdo_smooth_block_error_scale: bool = false,
        lookback_window_size: u32 = 128,
        custom_lookback_window_size: bool = false,
        rdo_bc7_quant_mode6_endpoints: bool = true,
        rdo_bc7_weight_modes: bool = true,
        rdo_bc7_weight_low_frequency_partitions: bool = true,
        rdo_bc7_pbit1_weighting: bool = true,
        rdo_max_smooth_block_std_dev: f32 = 18.0,
        rdo_allow_relative_movement: bool = false,
        rdo_try_2_matches: bool = true,
        rdo_ultrasmooth_block_handling: bool = true,

        use_hq_bc345: bool = true,
        bc345_search_rad: c_int = 5,
        bc345_mode_mask: Bc345ModeMask = Bc345ModeMask.bc4_use_all_modes,

        mode6_only: bool = false,
        rdo_multithreading: bool = true,

        reduce_entropy: bool = false,

        m_use_bc7e: bool = false,
        status_output: bool = false,

        rdo_max_threads: u32 = 128,
    };

    pub const init = bc7enc_init;
    pub const deinit = bc7enc_deinit;
    pub const encode = bc7enc_encode;
    /// Returns the encoded blocks owned by the encoder; valid until `deinit`.
    pub fn getBlocks(self: *@This()) []u8 {
        const bytes = bc7enc_getTotalBlocksSizeInBytes(self);
        return bc7enc_getBlocks(self)[0..bytes];
    }

    extern fn bc7enc_init() callconv(.c) ?*@This();
    extern fn bc7enc_deinit(self: *@This()) callconv(.c) void;
    extern fn bc7enc_encode(
        self: *@This(),
        params: *const Params,
        width: u32,
        height: u32,
        pixels: [*]const f32,
    ) callconv(.c) bool;
    extern fn bc7enc_getBlocks(self: *@This()) callconv(.c) [*]u8;
    extern fn bc7enc_getTotalBlocksSizeInBytes(self: *@This()) callconv(.c) u32;
};

/// Options for zlib supercompression.
pub const CompressZlibOptions = union(enum) {
    /// Only levels matching what Zig's std could previously handle are supported so we have the
    /// option to switch back to the std implementation when it's re-implemented.
    pub const Level = enum(u4) {
        @"4" = 4,
        @"5" = 5,
        @"6" = 6,
        @"7" = 7,
        @"8" = 8,
        @"9" = 9,
    };

    level: Level,
};

pub const CompressZlibError = Allocator.Error || std.Io.Writer.Error;

/// Supercompresses the encoded data with zlib, replacing `buf` with the
/// compressed bytes (owned by `gpa`). Panics if already supercompressed.
pub fn compressZlib(
    self: *@This(),
    gpa: Allocator,
    options: CompressZlibOptions,
) CompressZlibError!void {
    const zone = Zone.begin(.{ .src = @src() });
    defer zone.end();

    if (self.supercompression != .none) std.debug.panic("expected {} found {}", .{
        Ktx2.Header.SupercompressionScheme.none,
        self.supercompression,
    });

    const zlib_zone = Zone.begin(.{ .name = "zlib", .src = @src() });
    defer zlib_zone.end();

    // Worst-case compressed size for this input; compress2 updates it in place
    // to the actual compressed size below.
    var compressed_len = c.compressBound(@intCast(self.buf.len));

    var compressed = b: {
        const alloc_zone = Zone.begin(.{ .name = "alloc", .src = @src() });
        defer alloc_zone.end();
        break :b try std.ArrayList(u8).initCapacity(gpa, compressed_len);
    };
    defer compressed.deinit(gpa);

    // compress2 writes directly into the list's reserved capacity.
    switch (c.compress2(
        compressed.items.ptr,
        &compressed_len,
        self.buf.ptr,
        @intCast(self.buf.len),
        @intFromEnum(options.level),
    )) {
        c.Z_OK => {},
        c.Z_STREAM_END => @panic("Z_STREAM_END"),
        c.Z_NEED_DICT => @panic("Z_NEED_DICT"),
        c.Z_ERRNO => @panic("Z_ERRNO"),
        c.Z_STREAM_ERROR => @panic("Z_STREAM_ERROR"),
        c.Z_DATA_ERROR => @panic("Z_DATA_ERROR"),
        c.Z_MEM_ERROR => @panic("Z_MEM_ERROR"),
        c.Z_BUF_ERROR => @panic("Z_BUF_ERROR"),
        c.Z_VERSION_ERROR => @panic("Z_VERSION_ERROR"),
        else => |n| std.debug.panic("unknown zlib error: {}", .{n}),
    }
    compressed.items.len = compressed_len;

    // Replace the old buffer with the compressed one, preserving metadata.
    const original = self.*;
    self.deinit();
    self.* = .{
        .width = original.width,
        .height = original.height,
        .uncompressed_byte_length = original.uncompressed_byte_length,
        .buf = b: {
            const to_owned_zone = Zone.begin(.{ .name = "toOwnedSlice", .src = @src() });
            defer to_owned_zone.end();
            break :b try compressed.toOwnedSlice(gpa);
        },
        .hdr = original.hdr,
        .encoding = original.encoding,
        .alpha = original.alpha,
        .premultiplied = original.premultiplied,
        .supercompression = .zlib,
        .allocator = gpa,
    };
}

// Allocator vtable stub: never remaps in place (forces alloc+copy paths).
fn remapNoop(
    ctx: *anyopaque,
    memory: []u8,
    alignment: std.mem.Alignment,
    new_len: usize,
    ret_addr: usize,
) ?[*]u8 {
    _ = ctx;
    _ = memory;
    _ = alignment;
    _ = new_len;
    _ = ret_addr;
    return null;
}

// Allocator vtable stub for free-only allocators: allocation is a bug.
fn unsupportedAlloc(
    ctx: *anyopaque,
    len: usize,
    alignment: std.mem.Alignment,
    ret_addr: usize,
) ?[*]u8 {
    _ = ctx;
    _ = len;
    _ = alignment;
    _ = ret_addr;
    @panic("unsupported");
}

// Allocator vtable stub for free-only allocators: resizing is a bug.
fn unsupportedResize(
    ctx: *anyopaque,
    buf: []u8,
    alignment: std.mem.Alignment,
    new_len: usize,
    ret_addr: usize,
) bool {
    _ = ctx;
    _ = buf;
    _ = alignment;
    _ = new_len;
    _ = ret_addr;
    @panic("unsupported");
}

// Frees buffers that were allocated by stb_image.
fn stbFree(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
    _ = ctx;
    _ = alignment;
    _ = ret_addr;
    c.stbi_image_free(buf.ptr);
}

// Free is a no-op: used for images whose data was moved out via `toOwned`.
fn movedFree(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
    _ = ctx;
    _ = alignment;
    _ = ret_addr;
    _ = buf;
}

// Free-only allocator for buffers owned by stb_image.
const stb_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &.{
        .alloc = &unsupportedAlloc,
        .resize = &unsupportedResize,
        .free = &stbFree,
        .remap = &remapNoop,
    },
};

// No-op allocator assigned to images emptied by `toOwned`.
const moved_allocator: Allocator = .{
    .ptr = undefined,
    .vtable = &.{
        .alloc = &unsupportedAlloc,
        .resize = &unsupportedResize,
        .free = &movedFree,
        .remap = &remapNoop,
    },
};

// Freeing the BC7 block buffer deinitializes the encoder that owns it.
fn bc7EncFree(ctx: *anyopaque, buf: []u8, alignment: std.mem.Alignment, ret_addr: usize) void {
    _ = alignment;
    _ = ret_addr;
    _ = buf;
    const bc7_encoder: *Bc7Enc = @ptrCast(ctx);
    bc7_encoder.deinit();
}

// Wraps a live encoder in a free-only Allocator so the encoder's lifetime is
// tied to the block buffer it returned from `getBlocks`.
fn bc7EncAllocator(bc7_encoder: *Bc7Enc) Allocator {
    return .{
        .ptr = bc7_encoder,
        .vtable = &.{
            .alloc = &unsupportedAlloc,
            .resize = &unsupportedResize,
            .free = &bc7EncFree,
            .remap = &remapNoop,
        },
    };
}