├── .gitattributes ├── .github ├── FUNDING.yml └── workflows │ └── validate.yml ├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── build.zig.zon ├── concept ├── cli.txt └── script.dis ├── data ├── rootfs.dis └── rootfs │ ├── README.md │ └── Windows │ ├── explorer.exe │ └── system32 │ └── calc.exe ├── justfile ├── src ├── BuildInterface.zig ├── Parser.zig ├── Tokenizer.zig ├── build.old.zig ├── components │ ├── EmptyData.zig │ ├── FillData.zig │ ├── PasteFile.zig │ ├── fs │ │ ├── FatFileSystem.zig │ │ └── common.zig │ └── part │ │ ├── GptPartitionTable.zig │ │ └── MbrPartitionTable.zig └── dim.zig └── tests ├── basic ├── empty.dis ├── fill-0x00.dis ├── fill-0xAA.dis ├── fill-0xFF.dis └── raw.dis ├── compound └── mbr-boot.dis ├── fs ├── fat12.dis ├── fat16.dis └── fat32.dis ├── part └── mbr │ ├── basic-single-part-sized.dis │ ├── basic-single-part-unsized.dis │ ├── minimal.dis │ └── no-part-bootloader.dis └── zig-build-interface ├── build.zig └── build.zig.zon /.gitattributes: -------------------------------------------------------------------------------- 1 | *.zig text=auto eol=lf 2 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: ikskuh 2 | -------------------------------------------------------------------------------- /.github/workflows/validate.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | push: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | strategy: 12 | matrix: 13 | os: 14 | - ubuntu-latest 15 | - macos-latest 16 | - windows-latest 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v2 21 | 22 | - name: Setup Zig 23 | uses: mlugg/setup-zig@v1 24 | with: 25 | version: 0.14.0 26 | 27 | - name: Basic Build 28 | run: | 29 | zig build 30 | 31 | - name: Compile and run tests 32 | run: | 33 | zig build test 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .zig-cache/ 2 | zig-out/ 3 | .vscode/ 4 | .dim-out/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Zig OSDev Community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 💡 Dimmer - The Disk Imager 2 | 3 | > *Realize bright ideas with less energy!* 4 | 5 | Dimmer is a tool that uses a simple textual description of a disk image to create actual images. 6 | 7 | This tool is incredibly valuable when implementing your own operating system, embedded systems or other kinds of deployment. 8 | 9 | ## Example 10 | 11 | ```rb 12 | mbr-part 13 | bootloader paste-file "./syslinux.bin" 14 | part # partition 1 15 | type fat16-lba 16 | size 25M 17 | contains vfat fat16 18 | label "BOOT" 19 | copy-dir "/syslinux" "./bootfs/syslinux" 20 | endfat 21 | endpart 22 | part # partition 2 23 | type fat32-lba 24 | contains vfat fat32 25 | label "OS" 26 | mkdir "/home/dimmer" 27 | copy-file "/home/dimmer/.config/dimmer.cfg" "./dimmer.cfg" 28 | !include "./rootfs/files.dis" 29 | endfat 30 | endpart 31 | ignore # partition 3 32 | ignore # partition 4 33 | ``` 34 | 35 | ## Available Content Types 36 | 37 | ### Empty Content (`empty`) 38 | 39 | This type of content does not change its range at all and keeps it empty. No bytes will be emitted. 40 | 41 | ```plain 42 | empty 43 | ``` 44 | 45 | ### Fill (`fill`) 46 | 47 | The *Fill* type will fill the remaining size in its space with the given `` value. 48 | 49 | ```plain 50 | fill 51 | ``` 52 | 53 | ### Paste File Contents (`paste-file`) 54 | 55 | The *Raw* type will include the file at `` verbatim and will error, if not enough space is available. 56 | 57 | `` is relative to the current file. 58 | 59 | ```plain 60 | paste-file 61 | ``` 62 | 63 | ### MBR Partition Table (`mbr-part`) 64 | 65 | ```plain 66 | mbr-part 67 | [bootloader ] 68 | [part <…> | ignore] # partition 1 69 | [part <…> | ignore] # partition 2 70 | [part <…> | ignore] # partition 3 71 | [part <…> | ignore] # partition 4 72 | ``` 73 | 74 | ```plain 75 | part 76 | type 77 | [bootable] 78 | [size ] 79 | [offset ] 80 | contains 81 | endpart 82 | ``` 83 | 84 | If `bootloader ` is given, will copy the `` into the boot block, setting the boot code. 85 | 86 | The `mbr-part` component will end after all 4 partitions are specified. 87 | 88 | - Each partition must specify the `` (see table below) to mark the partition type as well as `contains ` which defines what's stored in the partition. 89 | - If `bootable` is present, the partition is marked as bootable. 90 | - `size ` is required for all but the last partition and defines the size in bytes. It can use disk-size specifiers. 91 | - `offset ` is required for either all or no partition and defines the disk offset for the partitions. This can be used to explicitly place the partitions. 
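For illustration, here is a sketch of a table that places two partitions at explicit byte offsets; the offsets, sizes, and contents below are made up for this example and are not taken from the test suite:

```plain
mbr-part
  part # partition 1
    type fat16-lba
    size 16M
    offset 1048576
    contains fill 0x00
  endpart
  part # partition 2
    type linux-fs
    size 8M
    offset 17825792
    contains empty
  endpart
  ignore # partition 3
  ignore # partition 4
```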
92 | 93 | #### Partition Types 94 | 95 | | Type | ID | Description | 96 | | ------------ | ---- | -------------------------------------------------------------------------------------------------------------------------------------------------- | 97 | | `empty` | 0x00 | No content | 98 | | `fat12` | 0x01 | [FAT12](https://en.wikipedia.org/wiki/FAT12) | 99 | | `ntfs` | 0x07 | [NTFS](https://en.wikipedia.org/wiki/NTFS) | 100 | | `fat32-chs` | 0x0B | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [CHS](https://en.wikipedia.org/wiki/Cylinder-head-sector) addressing | 101 | | `fat32-lba` | 0x0C | [FAT32](https://en.wikipedia.org/wiki/FAT32) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | 102 | | `fat16-lba` | 0x0E | [FAT16B](https://en.wikipedia.org/wiki/File_Allocation_Table#FAT16B) with [LBA](https://en.wikipedia.org/wiki/Logical_block_addressing) addressing | 103 | | `linux-swap` | 0x82 | [Linux swap space](https://en.wikipedia.org/wiki/Swap_space#Linux) | 104 | | `linux-fs` | 0x83 | Any [Linux file system](https://en.wikipedia.org/wiki/File_system#Linux) | 105 | | `linux-lvm` | 0x8E | [Linux LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)) | 106 | 107 | A complete list can be [found on Wikipedia](https://en.wikipedia.org/wiki/Partition_type), but [we do not support that yet](https://github.com/zig-osdev/disk-image-step/issues/8). 108 | 109 | ### GPT Partition Table (`gpt-part`) 110 | 111 | ```plain 112 | 113 | ``` 114 | 115 | ### FAT File System (`vfat`) 116 | 117 | ```plain 118 | vfat 119 | [label ] 120 | [fats ] 121 | [root-size ] 122 | [sector-align ] 123 | [cluster-size ] 124 | 125 | endfat 126 | ``` 127 | 128 | | Parameter | Values | Description | 129 | | ------------ | ------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 130 | | `` | `fat12`, `fat16`, `fat32` | Selects the type of FAT filesystem that is created | 131 | | `` | `one`, `two` | Number of FAT count. Select between small and safe. | 132 | | `` | ascii string <= 11 chars | Display name of the volume. | 133 | | `` | integers <= 32768 | Number of entries in the root directory. | 134 | | `` | power of two >= 1 and <= 32768 | Specifies alignment of the volume data area (file allocation pool, usually erase block boundary of flash memory media) in unit of sector. The valid value for this member is between 1 and 32768 inclusive in power of 2. If a zero (the default value) or any invalid value is given, the function obtains the block size from lower layer with disk_ioctl function. | 135 | | `` | powers of two | Specifies size of the allocation unit (cluter) in unit of byte. | 136 | 137 | ## Standard Filesystem Operations 138 | 139 | All `` values use an absolute unix-style path, starting with a `/` and using `/` as a file separator. 140 | 141 | All operations do create the parent directories if necessary. 142 | 143 | ### Create Directory (`mkdir`) 144 | 145 | ```plain 146 | mkdir 147 | ``` 148 | 149 | Creates a directory. 150 | 151 | ### Create File (`create-file`) 152 | 153 | ```plain 154 | create-file 155 | ``` 156 | 157 | Creates a file in the file system with `` bytes (can use sized spec) and embeds another `` element. 
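For example, `data/rootfs.dis` in this repository creates a 512k file whose contents are filled with the byte `0x70`:

```plain
create-file /Users/xq/blob.data 512k fill 0x70
```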
158 | 159 | This can be used to construct special or nested files ad-hoc. 160 | 161 | ### Copy File (`copy-file`) 162 | 163 | ```plain 164 | copy-file 165 | ``` 166 | 167 | Copies a file from `` (relative to the current file) into the filesystem at ``. 168 | 169 | ### Copy Directory (`copy-dir`) 170 | 171 | ```plain 172 | copy-file 173 | ``` 174 | 175 | Copies a directory from `` (relative to the current file) *recursively* into the filesystem at ``. 176 | 177 | This will include *all files* from ``. 178 | 179 | ## Compiling 180 | 181 | 182 | - Install [Zig 0.14.0](https://ziglang.org/download/). 183 | - Invoke `zig build -Drelease` in the repository root. 184 | - Execute `./zig-out/bin/dim --help` to verify your compilation worked. 185 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub const BuildInterface = @import("src/BuildInterface.zig"); 4 | 5 | pub fn build(b: *std.Build) void { 6 | const target = b.standardTargetOptions(.{}); 7 | const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe }); 8 | 9 | const test_step = b.step("test", "Runs the test suite."); 10 | 11 | // Dependency Setup: 12 | const zfat_dep = b.dependency("zfat", .{ 13 | // .max_long_name_len = 121, 14 | .code_page = .us, 15 | .@"volume-count" = @as(u32, 1), 16 | .@"sector-size" = @as(u32, 512), 17 | // .rtc = .dynamic, 18 | .mkfs = true, 19 | .exfat = true, 20 | .label = true, 21 | }); 22 | 23 | const zfat_mod = zfat_dep.module("zfat"); 24 | 25 | const args_dep = b.dependency("args", .{}); 26 | const args_mod = args_dep.module("args"); 27 | 28 | const dim_mod = b.addModule("dim", .{ 29 | .root_source_file = b.path("src/dim.zig"), 30 | .target = target, 31 | .optimize = optimize, 32 | .link_libc = true, 33 | }); 34 | dim_mod.addImport("args", args_mod); 35 | dim_mod.addImport("zfat", zfat_mod); 36 | 37 | const dim_exe = b.addExecutable(.{ 38 | .name = "dimmer", 39 | .root_module = dim_mod, 40 | }); 41 | b.installArtifact(dim_exe); 42 | 43 | const dim_tests = b.addTest(.{ 44 | .root_module = dim_mod, 45 | }); 46 | const run_dim_tests = b.addRunArtifact(dim_tests); 47 | test_step.dependOn(&run_dim_tests.step); 48 | 49 | const behaviour_tests_step = b.step("behaviour", "Run all behaviour tests"); 50 | for (behaviour_tests) |script| { 51 | const step_name = b.dupe(script); 52 | std.mem.replaceScalar(u8, step_name, '/', '-'); 53 | const script_test = b.step(step_name, b.fmt("Run {s} behaviour test", .{script})); 54 | 55 | const run_behaviour = b.addRunArtifact(dim_exe); 56 | run_behaviour.addArg("--output"); 57 | _ = run_behaviour.addOutputFileArg("disk.img"); 58 | run_behaviour.addArg("--script"); 59 | run_behaviour.addFileArg(b.path(script)); 60 | run_behaviour.addArgs(&.{ "--size", "30M" }); 61 | script_test.dependOn(&run_behaviour.step); 62 | 63 | behaviour_tests_step.dependOn(script_test); 64 | } 65 | } 66 | 67 | const behaviour_tests: []const []const u8 = &.{ 68 | "tests/basic/empty.dis", 69 | "tests/basic/fill-0x00.dis", 70 | "tests/basic/fill-0xAA.dis", 71 | "tests/basic/fill-0xFF.dis", 72 | "tests/basic/raw.dis", 73 | "tests/part/mbr/minimal.dis", 74 | }; 75 | -------------------------------------------------------------------------------- /build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = .dimmer, 3 | .version = "2.0.0", 4 | .fingerprint = 
0x9947018c924eecb2, 5 | .dependencies = .{ 6 | .zfat = .{ 7 | .url = "https://github.com/ZigEmbeddedGroup/zfat/archive/3ce06d43a4e04d387034dcae2f486b050701f321.tar.gz", 8 | .hash = "zfat-0.0.0-AAAAAMYlcABdh06Mn9CNk8Ccy_3bBFgJr8wo4jKza1q-", 9 | }, 10 | .args = .{ 11 | .url = "git+https://github.com/ikskuh/zig-args.git#9425b94c103a031777fdd272c555ce93a7dea581", 12 | .hash = "args-0.0.0-CiLiqv_NAAC97fGpk9hS2K681jkiqPsWP6w3ucb_ctGH", 13 | }, 14 | }, 15 | .paths = .{ 16 | "build.zig", 17 | "build.zig.zon", 18 | "README.md", 19 | "src", 20 | }, 21 | } 22 | -------------------------------------------------------------------------------- /concept/cli.txt: -------------------------------------------------------------------------------- 1 | dim \ 2 | --output zig-cache/disk.img \ 3 | --size 64M \ 4 | --script zig-cache/script.dis \ 5 | PATH1=vendor/syslinux-6.03/…/mbr.bin \ 6 | PATH2=… \ 7 | PATH3=… \ 8 | PATH4=… -------------------------------------------------------------------------------- /concept/script.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | bootloader paste-file $PATH1 3 | part # partition 1 4 | type fat32-lba 5 | size 500M 6 | bootable 7 | contents 8 | vfat fat32 9 | label AshetOS 10 | copy-dir ../../rootfs . 11 | copy-dir $PATH2 . 12 | copy-file $PATH3 apps/hello-world.ashex 13 | copy-file $PATH3 apps/hello-gui.ashex 14 | copy-file $PATH4 apps/clock.ashex 15 | copy-file $PATH5 apps/paint.ashex 16 | copy-file $PATH6 apps/init.ashex 17 | copy-file $PATH7 apps/testing 18 | copy-file $PATH8 apps/desktop 19 | copy-file $PATH9 apps/testing/behaviour.ashex 20 | copy-file $PATH10 apps/desktop/classic.ashex 21 | copy-file $PATH11 ashet-os 22 | copy-file ../../rootfs-x86/syslinux/modules.alias syslinux/modules.alias 23 | copy-file ../../rootfs-x86/syslinux/pci.ids syslinux/pci.ids 24 | copy-file ../../rootfs-x86/syslinux/syslinux.cfg syslinux/syslinux.cfg 25 | copy-file $PATH12 syslinux/libmenu.c32 26 | … 27 | endfat 28 | endpart 29 | ignore # partition 2 30 | ignore # partition 3 31 | ignore # partition 4 -------------------------------------------------------------------------------- /data/rootfs.dis: -------------------------------------------------------------------------------- 1 | mkdir /boot/EFI/refind/icons 2 | mkdir /boot/EFI/nixos/.extra-files/ 3 | mkdir /Users/xq/ 4 | 5 | # copy-XXX uses syntax as it's consistent with other paths 6 | copy-dir /Windows ./rootfs/Windows 7 | copy-file /Users/xq/README.md ./rootfs/README.md 8 | 9 | # create-file creates nested data 10 | create-file /Users/xq/blob.data 512k fill 0x70 11 | -------------------------------------------------------------------------------- /data/rootfs/README.md: -------------------------------------------------------------------------------- 1 | # Dummy Files 2 | 3 | This folder contains files used in the build test. -------------------------------------------------------------------------------- /data/rootfs/Windows/explorer.exe: -------------------------------------------------------------------------------- 1 | This is an example file in a nested subdirectory. -------------------------------------------------------------------------------- /data/rootfs/Windows/system32/calc.exe: -------------------------------------------------------------------------------- 1 | This is an example file in a nested subdirectory. 
-------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | 2 | zig:="zig-0.14.0" 3 | 4 | out:=".dim-out" 5 | 6 | default: install test 7 | 8 | install: 9 | {{zig}} build install 10 | 11 | test: unit-test behaviour-tests build-test 12 | 13 | unit-test: 14 | {{zig}} build test 15 | 16 | behaviour-tests: \ 17 | (behaviour-test "tests/basic/empty.dis") \ 18 | (behaviour-test "tests/basic/fill-0x00.dis") \ 19 | (behaviour-test "tests/basic/fill-0xAA.dis") \ 20 | (behaviour-test "tests/basic/fill-0xFF.dis") \ 21 | (behaviour-test "tests/basic/raw.dis") \ 22 | (behaviour-test "tests/part/mbr/minimal.dis") \ 23 | (behaviour-test "tests/part/mbr/no-part-bootloader.dis") \ 24 | (behaviour-test "tests/part/mbr/basic-single-part-sized.dis") \ 25 | (behaviour-test "tests/fs/fat12.dis") \ 26 | (behaviour-test "tests/fs/fat16.dis") \ 27 | (behaviour-test "tests/fs/fat32.dis") \ 28 | (behaviour-test "tests/compound/mbr-boot.dis") 29 | 30 | behaviour-test script: install 31 | @mkdir -p {{ join(out, parent_directory(script)) }} 32 | ./zig-out/bin/dimmer --output {{ join(out, without_extension(script) + ".img") }} --script "{{script}}" --size 33M 33 | ./zig-out/bin/dimmer --output {{ join(out, without_extension(script) + ".img") }} --deps-file {{ join(out, without_extension(script) + ".d") }} --script "{{script}}" --size 33M 34 | 35 | # TODO(fqu): sfdisk --json .dim-out/tests/part/mbr/basic-single-part-unsized.img 36 | 37 | 38 | [working-directory: 'tests/zig-build-interface'] 39 | build-test: 40 | {{zig}} build 41 | 42 | fuzz: 43 | {{zig}} build install test --fuzz --port 35991 44 | -------------------------------------------------------------------------------- /src/BuildInterface.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! This file implements the Zig build system interface for Dimmer. 3 | //! 4 | //! It is included by it's build.zig 5 | //! 
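//!
//! Usage sketch (a hypothetical consumer `build.zig`; it assumes the package is
//! declared as a dependency named "dimmer" in the consumer's build.zig.zon,
//! everything else follows the declarations in this file):
//!
//!     const std = @import("std");
//!     const dimmer = @import("dimmer");
//!
//!     pub fn build(b: *std.Build) void {
//!         const dep = b.dependency("dimmer", .{});
//!         const dim: dimmer.BuildInterface = .init(b, dep);
//!
//!         // Create a 32 MiB image filled with 0xAA bytes and install it.
//!         const disk = dim.createDisk(32 * dimmer.BuildInterface.MiB, .{ .fill = 0xAA });
//!         const install = b.addInstallFile(disk, "disk.img");
//!         b.getInstallStep().dependOn(&install.step);
//!     }
//!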
6 | const std = @import("std"); 7 | 8 | const Interface = @This(); 9 | 10 | pub const kiB = 1024; 11 | pub const MiB = 1024 * 1024; 12 | pub const GiB = 1024 * 1024 * 1024; 13 | 14 | builder: *std.Build, 15 | dimmer_exe: *std.Build.Step.Compile, 16 | 17 | pub fn init(builder: *std.Build, dep: *std.Build.Dependency) Interface { 18 | return .{ 19 | .builder = builder, 20 | .dimmer_exe = dep.artifact("dimmer"), 21 | }; 22 | } 23 | 24 | pub fn createDisk(dimmer: Interface, size: u64, content: Content) std.Build.LazyPath { 25 | const b = dimmer.builder; 26 | 27 | const write_files = b.addWriteFiles(); 28 | 29 | const script_source, const variables = renderContent(write_files, b.allocator, content); 30 | 31 | const script_file = write_files.add("image.dis", script_source); 32 | 33 | const compile_script = b.addRunArtifact(dimmer.dimmer_exe); 34 | 35 | _ = compile_script.addPrefixedDepFileOutputArg("--deps-file=", "image.d"); 36 | 37 | compile_script.addArg(b.fmt("--size={d}", .{size})); 38 | 39 | compile_script.addPrefixedFileArg("--script=", script_file); 40 | 41 | const result_file = compile_script.addPrefixedOutputFileArg("--output=", "disk.img"); 42 | 43 | { 44 | var iter = variables.iterator(); 45 | while (iter.next()) |kvp| { 46 | const key = kvp.key_ptr.*; 47 | const path, const usage = kvp.value_ptr.*; 48 | 49 | switch (usage) { 50 | .file => compile_script.addPrefixedFileArg( 51 | b.fmt("{s}=", .{key}), 52 | path, 53 | ), 54 | .directory => compile_script.addPrefixedDirectoryArg( 55 | b.fmt("{s}=", .{key}), 56 | path, 57 | ), 58 | } 59 | } 60 | } 61 | 62 | return result_file; 63 | } 64 | 65 | fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, content: Content) struct { []const u8, ContentWriter.VariableMap } { 66 | var code: std.ArrayList(u8) = .init(allocator); 67 | defer code.deinit(); 68 | 69 | var variables: ContentWriter.VariableMap = .init(allocator); 70 | 71 | var cw: ContentWriter = .{ 72 | .code = code.writer(), 73 | .wfs = wfs, 74 | .vars = &variables, 75 | }; 76 | 77 | cw.render(content) catch @panic("out of memory"); 78 | 79 | const source = std.mem.trim( 80 | u8, 81 | code.toOwnedSlice() catch @panic("out of memory"), 82 | " \r\n\t", 83 | ); 84 | 85 | variables.sort(struct { 86 | map: *ContentWriter.VariableMap, 87 | 88 | pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool { 89 | return std.mem.lessThan(u8, ctx.map.keys()[lhs], ctx.map.keys()[rhs]); 90 | } 91 | }{ 92 | .map = &variables, 93 | }); 94 | 95 | return .{ source, variables }; 96 | } 97 | 98 | const ContentWriter = struct { 99 | pub const VariableMap = std.StringArrayHashMap(struct { std.Build.LazyPath, ContentWriter.UsageHint }); 100 | 101 | wfs: *std.Build.Step.WriteFile, 102 | code: std.ArrayList(u8).Writer, 103 | vars: *VariableMap, 104 | 105 | fn render(cw: ContentWriter, content: Content) !void { 106 | // Always insert some padding before and after: 107 | try cw.code.writeAll(" "); 108 | errdefer cw.code.writeAll(" ") catch {}; 109 | 110 | switch (content) { 111 | .empty => { 112 | try cw.code.writeAll("empty"); 113 | }, 114 | 115 | .fill => |data| { 116 | try cw.code.print("fill 0x{X:0>2}", .{data}); 117 | }, 118 | 119 | .paste_file => |data| { 120 | try cw.code.print("paste-file {}", .{cw.fmtLazyPath(data, .file)}); 121 | }, 122 | 123 | .mbr_part_table => |data| { 124 | try cw.code.writeAll("mbr-part\n"); 125 | 126 | if (data.bootloader) |loader| { 127 | try cw.code.writeAll(" bootloader "); 128 | try cw.render(loader.*); 129 | try cw.code.writeAll("\n"); 130 | } 131 | 
132 | for (data.partitions) |mpart| { 133 | if (mpart) |part| { 134 | try cw.code.writeAll(" part\n"); 135 | if (part.bootable) { 136 | try cw.code.print(" type {s}\n", .{@tagName(part.type)}); 137 | try cw.code.writeAll(" bootable\n"); 138 | if (part.offset) |offset| { 139 | try cw.code.print(" offset {d}\n", .{offset}); 140 | } 141 | if (part.size) |size| { 142 | try cw.code.print(" size {d}\n", .{size}); 143 | } 144 | try cw.code.writeAll(" contains"); 145 | try cw.render(part.data); 146 | try cw.code.writeAll("\n"); 147 | } 148 | try cw.code.writeAll(" endpart\n"); 149 | } else { 150 | try cw.code.writeAll(" ignore\n"); 151 | } 152 | } 153 | }, 154 | 155 | .vfat => |data| { 156 | try cw.code.print("vfat {s}\n", .{ 157 | @tagName(data.format), 158 | }); 159 | if (data.label) |label| { 160 | try cw.code.print(" label {}\n", .{ 161 | fmtPath(label), 162 | }); 163 | } 164 | 165 | try cw.renderFileSystemTree(data.tree); 166 | 167 | try cw.code.writeAll("endfat\n"); 168 | }, 169 | } 170 | } 171 | 172 | fn renderFileSystemTree(cw: ContentWriter, fs: FileSystem) !void { 173 | for (fs.items) |item| { 174 | switch (item) { 175 | .empty_dir => |dir| try cw.code.print("mkdir {}\n", .{ 176 | fmtPath(dir), 177 | }), 178 | 179 | .copy_dir => |copy| try cw.code.print("copy-dir {} {}\n", .{ 180 | fmtPath(copy.destination), 181 | cw.fmtLazyPath(copy.source, .directory), 182 | }), 183 | 184 | .copy_file => |copy| try cw.code.print("copy-file {} {}\n", .{ 185 | fmtPath(copy.destination), 186 | cw.fmtLazyPath(copy.source, .file), 187 | }), 188 | 189 | .include_script => |script| try cw.code.print("!include {}\n", .{ 190 | cw.fmtLazyPath(script, .file), 191 | }), 192 | } 193 | } 194 | } 195 | 196 | const PathFormatter = std.fmt.Formatter(formatPath); 197 | const LazyPathFormatter = std.fmt.Formatter(formatLazyPath); 198 | const UsageHint = enum { file, directory }; 199 | 200 | fn fmtLazyPath(cw: ContentWriter, path: std.Build.LazyPath, hint: UsageHint) LazyPathFormatter { 201 | return .{ .data = .{ cw, path, hint } }; 202 | } 203 | 204 | fn fmtPath(path: []const u8) PathFormatter { 205 | return .{ .data = path }; 206 | } 207 | 208 | fn formatLazyPath( 209 | data: struct { ContentWriter, std.Build.LazyPath, UsageHint }, 210 | comptime fmt: []const u8, 211 | options: std.fmt.FormatOptions, 212 | writer: anytype, 213 | ) !void { 214 | const cw, const path, const hint = data; 215 | _ = fmt; 216 | _ = options; 217 | 218 | switch (path) { 219 | .cwd_relative, 220 | .dependency, 221 | .src_path, 222 | => { 223 | // We can safely call getPath2 as we can fully resolve the path 224 | // already 225 | const full_path = path.getPath2(cw.wfs.step.owner, &cw.wfs.step); 226 | 227 | std.debug.assert(std.fs.path.isAbsolute(full_path)); 228 | 229 | try writer.print("{}", .{ 230 | fmtPath(full_path), 231 | }); 232 | }, 233 | 234 | .generated => { 235 | // this means we can't emit the variable just verbatim, but we 236 | // actually have a build-time dependency 237 | const var_id = cw.vars.count() + 1; 238 | const var_name = cw.wfs.step.owner.fmt("PATH{}", .{var_id}); 239 | 240 | try cw.vars.put(var_name, .{ path, hint }); 241 | 242 | try writer.print("${s}", .{var_name}); 243 | }, 244 | } 245 | } 246 | 247 | fn formatPath( 248 | path: []const u8, 249 | comptime fmt: []const u8, 250 | options: std.fmt.FormatOptions, 251 | writer: anytype, 252 | ) !void { 253 | _ = fmt; 254 | _ = options; 255 | 256 | const is_safe_word = for (path) |char| { 257 | switch (char) { 258 | 'A'...'Z', 259 | 'a'...'z', 260 | '0'...'9', 261 | '_', 262 
| '-', 263 | '/', 264 | '.', 265 | ':', 266 | => {}, 267 | else => break false, 268 | } 269 | } else true; 270 | 271 | if (is_safe_word) { 272 | try writer.writeAll(path); 273 | } else { 274 | try writer.writeAll("\""); 275 | 276 | for (path) |c| { 277 | if (c == '\\') { 278 | try writer.writeAll("/"); 279 | } else { 280 | try writer.print("{}", .{std.zig.fmtEscapes(&[_]u8{c})}); 281 | } 282 | } 283 | 284 | try writer.writeAll("\""); 285 | } 286 | } 287 | }; 288 | 289 | pub const Content = union(enum) { 290 | empty, 291 | fill: u8, 292 | paste_file: std.Build.LazyPath, 293 | mbr_part_table: MbrPartTable, 294 | vfat: FatFs, 295 | }; 296 | 297 | pub const MbrPartTable = struct { 298 | bootloader: ?*const Content = null, 299 | partitions: [4]?*const Partition, 300 | 301 | pub const Partition = struct { 302 | type: enum { 303 | empty, 304 | fat12, 305 | ntfs, 306 | @"fat32-chs", 307 | @"fat32-lba", 308 | @"fat16-lba", 309 | @"linux-swa", 310 | @"linux-fs", 311 | @"linux-lvm", 312 | }, 313 | bootable: bool = false, 314 | size: ?u64 = null, 315 | offset: ?u64 = null, 316 | data: Content, 317 | }; 318 | }; 319 | 320 | pub const FatFs = struct { 321 | format: enum { 322 | fat12, 323 | fat16, 324 | fat32, 325 | } = .fat32, 326 | 327 | label: ?[]const u8 = null, 328 | 329 | // TODO: fats 330 | // TODO: root-size 331 | // TODO: sector-align 332 | // TODO: cluster-size 333 | 334 | tree: FileSystem, 335 | }; 336 | 337 | pub const FileSystemBuilder = struct { 338 | b: *std.Build, 339 | list: std.ArrayListUnmanaged(FileSystem.Item), 340 | 341 | pub fn init(b: *std.Build) FileSystemBuilder { 342 | return FileSystemBuilder{ 343 | .b = b, 344 | .list = .{}, 345 | }; 346 | } 347 | 348 | pub fn finalize(fsb: *FileSystemBuilder) FileSystem { 349 | return .{ 350 | .items = fsb.list.toOwnedSlice(fsb.b.allocator) catch @panic("out of memory"), 351 | }; 352 | } 353 | 354 | pub fn includeScript(fsb: *FileSystemBuilder, source: std.Build.LazyPath) void { 355 | fsb.list.append(fsb.b.allocator, .{ 356 | .include_script = source.dupe(fsb.b), 357 | }) catch @panic("out of memory"); 358 | } 359 | 360 | pub fn copyFile(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { 361 | fsb.list.append(fsb.b.allocator, .{ 362 | .copy_file = .{ 363 | .source = source.dupe(fsb.b), 364 | .destination = fsb.b.dupe(destination), 365 | }, 366 | }) catch @panic("out of memory"); 367 | } 368 | 369 | pub fn copyDirectory(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { 370 | fsb.list.append(fsb.b.allocator, .{ 371 | .copy_dir = .{ 372 | .source = source.dupe(fsb.b), 373 | .destination = fsb.b.dupe(destination), 374 | }, 375 | }) catch @panic("out of memory"); 376 | } 377 | 378 | pub fn mkdir(fsb: *FileSystemBuilder, destination: []const u8) void { 379 | fsb.list.append(fsb.b.allocator, .{ 380 | .empty_dir = fsb.b.dupe(destination), 381 | }) catch @panic("out of memory"); 382 | } 383 | }; 384 | 385 | pub const FileSystem = struct { 386 | pub const Copy = struct { 387 | source: std.Build.LazyPath, 388 | destination: []const u8, 389 | }; 390 | 391 | pub const Item = union(enum) { 392 | empty_dir: []const u8, 393 | copy_dir: Copy, 394 | copy_file: Copy, 395 | include_script: std.Build.LazyPath, 396 | }; 397 | 398 | // format: Format, 399 | // label: []const u8, 400 | items: []const Item, 401 | 402 | // private: 403 | // executable: ?std.Build.LazyPath = null, 404 | }; 405 | -------------------------------------------------------------------------------- /src/Parser.zig: 
-------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Tokenizer = @import("Tokenizer.zig"); 4 | 5 | const Token = Tokenizer.Token; 6 | const TokenType = Tokenizer.TokenType; 7 | 8 | const Parser = @This(); 9 | 10 | pub const Error = Tokenizer.Error || error{ 11 | FileNotFound, 12 | InvalidPath, 13 | UnknownVariable, 14 | IoError, 15 | BadDirective, 16 | MaxIncludeDepthReached, 17 | ExpectedIncludePath, 18 | UnknownDirective, 19 | OutOfMemory, 20 | InvalidEscapeSequence, 21 | }; 22 | 23 | pub const IO = struct { 24 | fetch_file_fn: *const fn (io: *const IO, std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8, 25 | resolve_variable_fn: *const fn (io: *const IO, name: []const u8) error{UnknownVariable}![]const u8, 26 | 27 | pub fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { 28 | return io.fetch_file_fn(io, allocator, path); 29 | } 30 | 31 | pub fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { 32 | return io.resolve_variable_fn(io, name); 33 | } 34 | }; 35 | 36 | const File = struct { 37 | path: []const u8, 38 | tokenizer: Tokenizer, 39 | }; 40 | 41 | allocator: std.mem.Allocator, 42 | arena: std.heap.ArenaAllocator, 43 | io: *const IO, 44 | 45 | file_stack: []File, 46 | max_include_depth: usize, 47 | 48 | pub const InitOptions = struct { 49 | max_include_depth: usize, 50 | }; 51 | pub fn init(allocator: std.mem.Allocator, io: *const IO, options: InitOptions) error{OutOfMemory}!Parser { 52 | var slice = try allocator.alloc(File, options.max_include_depth); 53 | slice.len = 0; 54 | return .{ 55 | .arena = std.heap.ArenaAllocator.init(allocator), 56 | .allocator = allocator, 57 | .io = io, 58 | .max_include_depth = options.max_include_depth, 59 | .file_stack = slice, 60 | }; 61 | } 62 | 63 | pub fn deinit(parser: *Parser) void { 64 | parser.file_stack.len = parser.max_include_depth; 65 | parser.allocator.free(parser.file_stack); 66 | parser.arena.deinit(); 67 | parser.* = undefined; 68 | } 69 | 70 | pub fn push_source(parser: *Parser, options: struct { 71 | path: []const u8, 72 | contents: []const u8, 73 | }) !void { 74 | std.debug.assert(parser.file_stack.len <= parser.max_include_depth); 75 | if (parser.file_stack.len == parser.max_include_depth) 76 | return error.MaxIncludeDepthReached; 77 | 78 | const index = parser.file_stack.len; 79 | parser.file_stack.len += 1; 80 | 81 | parser.file_stack[index] = .{ 82 | .path = options.path, 83 | .tokenizer = .init(options.contents), 84 | }; 85 | } 86 | 87 | pub fn push_file(parser: *Parser, include_path: []const u8) !void { 88 | const abs_include_path = try parser.get_include_path(parser.arena.allocator(), include_path); 89 | 90 | const file_contents = try parser.io.fetch_file(parser.arena.allocator(), abs_include_path); 91 | 92 | const index = parser.file_stack.len; 93 | parser.file_stack.len += 1; 94 | 95 | parser.file_stack[index] = .{ 96 | .path = abs_include_path, 97 | .tokenizer = .init(file_contents), 98 | }; 99 | } 100 | 101 | pub fn get_include_path(parser: Parser, allocator: std.mem.Allocator, rel_include_path: []const u8) ![]const u8 { 102 | std.debug.assert(parser.file_stack.len <= parser.max_include_depth); 103 | if (parser.file_stack.len == parser.max_include_depth) 104 | return error.MaxIncludeDepthReached; 105 | 106 | const top_path = if (parser.file_stack.len > 0) 107 | 
parser.file_stack[parser.file_stack.len - 1].path 108 | else 109 | ""; 110 | 111 | const abs_include_path = try std.fs.path.resolvePosix( 112 | allocator, 113 | &.{ 114 | std.fs.path.dirnamePosix(top_path) orelse ".", 115 | rel_include_path, 116 | }, 117 | ); 118 | errdefer allocator.free(abs_include_path); 119 | 120 | return abs_include_path; 121 | } 122 | 123 | pub fn next(parser: *Parser) (Error || error{UnexpectedEndOfFile})![]const u8 { 124 | return if (try parser.next_or_eof()) |word| 125 | word 126 | else 127 | error.UnexpectedEndOfFile; 128 | } 129 | 130 | pub fn next_or_eof(parser: *Parser) Error!?[]const u8 { 131 | fetch_loop: while (parser.file_stack.len > 0) { 132 | const top = &parser.file_stack[parser.file_stack.len - 1]; 133 | 134 | const token = (try fetch_token(&top.tokenizer)) orelse { 135 | // we exhausted tokens in the current file, pop the stack and continue 136 | // on lower file 137 | parser.file_stack.len -= 1; 138 | continue :fetch_loop; 139 | }; 140 | 141 | switch (token.type) { 142 | .whitespace, .comment => unreachable, 143 | 144 | .word, .variable, .string => return try parser.resolve_value( 145 | token.type, 146 | top.tokenizer.get_text(token), 147 | ), 148 | 149 | .directive => { 150 | const directive = top.tokenizer.get_text(token); 151 | 152 | if (std.mem.eql(u8, directive, "!include")) { 153 | if (try fetch_token(&top.tokenizer)) |path_token| { 154 | const rel_include_path = switch (path_token.type) { 155 | .word, .variable, .string => try parser.resolve_value( 156 | path_token.type, 157 | top.tokenizer.get_text(path_token), 158 | ), 159 | .comment, .directive, .whitespace => return error.BadDirective, 160 | }; 161 | 162 | try parser.push_file(rel_include_path); 163 | } else { 164 | return error.ExpectedIncludePath; 165 | } 166 | } else { 167 | return error.UnknownDirective; 168 | } 169 | }, 170 | } 171 | } 172 | 173 | return null; 174 | } 175 | 176 | fn fetch_token(tok: *Tokenizer) Tokenizer.Error!?Token { 177 | while (true) { 178 | const token = if (try tok.next()) |t| 179 | t 180 | else 181 | return null; 182 | 183 | switch (token.type) { 184 | // Skipped: 185 | .whitespace, .comment => {}, 186 | 187 | else => return token, 188 | } 189 | } 190 | } 191 | 192 | fn resolve_value(parser: *Parser, token_type: TokenType, text: []const u8) ![]const u8 { 193 | return switch (token_type) { 194 | .word => text, 195 | 196 | .variable => try parser.io.resolve_variable( 197 | text[1..], 198 | ), 199 | 200 | .string => { 201 | const content_slice = text[1 .. 
text.len - 1]; 202 | 203 | const has_includes = for (content_slice) |c| { 204 | if (c == '\\') 205 | break true; 206 | } else false; 207 | 208 | if (!has_includes) 209 | return content_slice; 210 | 211 | var unescaped: std.ArrayList(u8) = .init(parser.arena.allocator()); 212 | defer unescaped.deinit(); 213 | 214 | try unescaped.ensureTotalCapacityPrecise(content_slice.len); 215 | 216 | { 217 | var i: usize = 0; 218 | while (i < content_slice.len) { 219 | const c = content_slice[i]; 220 | i += 1; 221 | 222 | if (c != '\\') { 223 | try unescaped.append(c); 224 | continue; 225 | } 226 | 227 | if (i == content_slice.len) 228 | return error.InvalidEscapeSequence; 229 | 230 | const esc_code = content_slice[i]; 231 | i += 1; 232 | 233 | errdefer std.log.err("invalid escape sequence: \\{s}", .{[_]u8{esc_code}}); 234 | 235 | switch (esc_code) { 236 | 'r' => try unescaped.append('\r'), 237 | 'n' => try unescaped.append('\n'), 238 | 't' => try unescaped.append('\t'), 239 | '\\' => try unescaped.append('\\'), 240 | '\"' => try unescaped.append('\"'), 241 | '\'' => try unescaped.append('\''), 242 | 'e' => try unescaped.append('\x1B'), 243 | 244 | else => return error.InvalidEscapeSequence, 245 | } 246 | } 247 | } 248 | 249 | return try unescaped.toOwnedSlice(); 250 | }, 251 | 252 | .comment, .directive, .whitespace => unreachable, 253 | }; 254 | } 255 | 256 | test Parser { 257 | const io: IO = .{ 258 | .fetch_file_fn = undefined, 259 | .resolve_variable_fn = undefined, 260 | }; 261 | 262 | var parser: Parser = try .init(std.testing.allocator, &io, .{ 263 | .max_include_depth = 8, 264 | }); 265 | defer parser.deinit(); 266 | 267 | try parser.push_source(.{ 268 | .path = "test.script", 269 | .contents = 270 | \\mbr-part 271 | \\ bootloader PATH1 272 | \\ part # partition 1 273 | \\ type fat32-lba 274 | \\ size 500M 275 | \\ bootable 276 | \\ contents 277 | \\ fat32 ... 
278 | , 279 | }); 280 | 281 | const sequence: []const []const u8 = &.{ 282 | "mbr-part", 283 | "bootloader", 284 | "PATH1", 285 | "part", 286 | "type", 287 | "fat32-lba", 288 | "size", 289 | "500M", 290 | "bootable", 291 | "contents", 292 | "fat32", 293 | "...", 294 | }; 295 | 296 | for (sequence) |item| { 297 | try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); 298 | } 299 | 300 | try std.testing.expectEqual(null, parser.next_or_eof()); 301 | } 302 | 303 | test "parser with variables" { 304 | const MyIO = struct { 305 | fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { 306 | _ = io; 307 | if (std.mem.eql(u8, name, "DISK")) 308 | return "./zig-out/disk.img"; 309 | if (std.mem.eql(u8, name, "KERNEL")) 310 | return "./zig-out/bin/kernel.elf"; 311 | return error.UnknownVariable; 312 | } 313 | }; 314 | const io: IO = .{ 315 | .fetch_file_fn = undefined, 316 | .resolve_variable_fn = MyIO.resolve_variable, 317 | }; 318 | 319 | var parser: Parser = try .init(std.testing.allocator, &io, .{ 320 | .max_include_depth = 8, 321 | }); 322 | defer parser.deinit(); 323 | 324 | try parser.push_source(.{ 325 | .path = "test.script", 326 | .contents = 327 | \\select-disk $DISK 328 | \\copy-file $KERNEL /BOOT/vzlinuz 329 | \\ 330 | , 331 | }); 332 | 333 | const sequence: []const []const u8 = &.{ 334 | "select-disk", 335 | "./zig-out/disk.img", 336 | "copy-file", 337 | "./zig-out/bin/kernel.elf", 338 | "/BOOT/vzlinuz", 339 | }; 340 | 341 | for (sequence) |item| { 342 | try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); 343 | } 344 | 345 | try std.testing.expectEqual(null, parser.next_or_eof()); 346 | } 347 | 348 | test "parser with variables and include files" { 349 | const MyIO = struct { 350 | fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { 351 | _ = io; 352 | if (std.mem.eql(u8, name, "DISK")) 353 | return "./zig-out/disk.img"; 354 | if (std.mem.eql(u8, name, "KERNEL")) 355 | return "./zig-out/bin/kernel.elf"; 356 | return error.UnknownVariable; 357 | } 358 | fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { 359 | _ = io; 360 | if (std.mem.eql(u8, path, "path/parent/kernel.script")) 361 | return try allocator.dupe(u8, "copy-file $KERNEL /BOOT/vzlinuz"); 362 | return error.FileNotFound; 363 | } 364 | }; 365 | const io: IO = .{ 366 | .fetch_file_fn = MyIO.fetch_file, 367 | .resolve_variable_fn = MyIO.resolve_variable, 368 | }; 369 | 370 | var parser: Parser = try .init(std.testing.allocator, &io, .{ 371 | .max_include_depth = 8, 372 | }); 373 | defer parser.deinit(); 374 | 375 | try parser.push_source(.{ 376 | .path = "path/to/test.script", 377 | .contents = 378 | \\select-disk $DISK 379 | \\!include "../parent/kernel.script" 380 | \\end-of sequence 381 | , 382 | }); 383 | 384 | const sequence: []const []const u8 = &.{ 385 | "select-disk", 386 | "./zig-out/disk.img", 387 | "copy-file", 388 | "./zig-out/bin/kernel.elf", 389 | "/BOOT/vzlinuz", 390 | "end-of", 391 | "sequence", 392 | }; 393 | 394 | for (sequence) |item| { 395 | try std.testing.expectEqualStrings(item, (try parser.next_or_eof()).?); 396 | } 397 | 398 | try std.testing.expectEqual(null, parser.next_or_eof()); 399 | } 400 | 401 | test "parse nothing" { 402 | const io: IO = .{ 403 | .fetch_file_fn = undefined, 404 | .resolve_variable_fn = undefined, 405 | }; 406 | 407 | var parser: Parser = try .init(std.testing.allocator, &io, .{ 408 | 
.max_include_depth = 8, 409 | }); 410 | defer parser.deinit(); 411 | 412 | try std.testing.expectEqual(null, parser.next_or_eof()); 413 | } 414 | 415 | fn fuzz_parser(_: void, input: []const u8) !void { 416 | const FuzzIO = struct { 417 | fn fetch_file(io: *const IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory }![]const u8 { 418 | _ = io; 419 | _ = allocator; 420 | _ = path; 421 | return error.FileNotFound; 422 | } 423 | fn resolve_variable(io: *const IO, name: []const u8) error{UnknownVariable}![]const u8 { 424 | _ = io; 425 | return name; 426 | } 427 | }; 428 | 429 | const io: IO = .{ 430 | .fetch_file_fn = FuzzIO.fetch_file, 431 | .resolve_variable_fn = FuzzIO.resolve_variable, 432 | }; 433 | 434 | var parser: Parser = try .init(std.testing.allocator, &io, .{ 435 | .max_include_depth = 8, 436 | }); 437 | defer parser.deinit(); 438 | 439 | try parser.push_source(.{ 440 | .path = "fuzz.script", 441 | .contents = input, 442 | }); 443 | 444 | while (true) { 445 | const res = parser.next_or_eof() catch |err| switch (err) { 446 | error.UnknownDirective, 447 | error.UnknownVariable, 448 | error.BadDirective, 449 | error.FileNotFound, 450 | error.ExpectedIncludePath, 451 | error.InvalidPath, 452 | => continue, 453 | 454 | error.MaxIncludeDepthReached, 455 | error.IoError, 456 | error.SourceInputTooLarge, 457 | => @panic("reached impossible case for fuzz testing"), 458 | 459 | error.OutOfMemory => |e| return e, 460 | 461 | // Fine, must just terminate the parse loop: 462 | error.InvalidSourceEncoding, 463 | error.BadStringLiteral, 464 | error.BadEscapeSequence, 465 | error.InvalidEscapeSequence, 466 | => return, 467 | }; 468 | if (res == null) 469 | break; 470 | } 471 | } 472 | 473 | test "fuzz parser" { 474 | try std.testing.fuzz({}, fuzz_parser, .{}); 475 | } 476 | -------------------------------------------------------------------------------- /src/Tokenizer.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Tokenizer = @This(); 4 | 5 | pub const TokenType = enum { 6 | /// `\S+` 7 | word, 8 | 9 | /// `\$\w+` 10 | variable, 11 | 12 | /// `!\w+` 13 | directive, 14 | 15 | /// `\s+` 16 | whitespace, 17 | 18 | /// `/#[^\n]*\n/` 19 | comment, 20 | 21 | /// `/"([^"]|\\")*"/` 22 | string, 23 | }; 24 | 25 | pub const Token = struct { 26 | offset: u32, 27 | len: u32, 28 | type: TokenType, 29 | }; 30 | 31 | source: []const u8, 32 | index: usize = 0, 33 | 34 | pub fn init(source: []const u8) Tokenizer { 35 | return .{ .source = source }; 36 | } 37 | 38 | pub const Error = error{ 39 | SourceInputTooLarge, 40 | InvalidSourceEncoding, 41 | BadEscapeSequence, 42 | BadStringLiteral, 43 | }; 44 | 45 | pub fn get_text(tk: Tokenizer, token: Token) []const u8 { 46 | return tk.source[token.offset..][0..token.len]; 47 | } 48 | 49 | pub fn next(tk: *Tokenizer) Error!?Token { 50 | const start = tk.index; 51 | const first = if (try tk.next_char()) |char| 52 | char 53 | else 54 | return null; 55 | 56 | if (std.ascii.isWhitespace(first)) { 57 | while (try tk.peek_char()) |c| { 58 | if (!std.ascii.isWhitespace(c)) 59 | break; 60 | tk.take_char(c); 61 | } 62 | return .{ 63 | .offset = @intCast(start), 64 | .len = @intCast(tk.index - start), 65 | .type = .whitespace, 66 | }; 67 | } 68 | 69 | if (first == '#') { 70 | while (try tk.peek_char()) |c| { 71 | if (c == '\n') 72 | break; 73 | tk.take_char(c); 74 | } 75 | return .{ 76 | .offset = @intCast(start), 77 | .len = @intCast(tk.index - start), 78 | 
.type = .comment, 79 | }; 80 | } 81 | 82 | if (first == '"') { 83 | tk.index += 1; 84 | 85 | var string_ok = false; 86 | while (try tk.peek_char()) |c| { 87 | tk.take_char(c); 88 | if (c == '"') { 89 | string_ok = true; 90 | break; 91 | } 92 | if (c == '\\') { 93 | if ((try tk.next_char()) == null) 94 | return error.BadEscapeSequence; 95 | } 96 | } 97 | if (!string_ok) 98 | return error.BadStringLiteral; 99 | 100 | return .{ 101 | .offset = @intCast(start), 102 | .len = @intCast(tk.index - start), 103 | .type = .string, 104 | }; 105 | } 106 | 107 | var ttype: TokenType = .word; 108 | if (first == '$') { 109 | tk.index += 1; 110 | ttype = .variable; 111 | } else if (first == '!') { 112 | tk.index += 1; 113 | ttype = .directive; 114 | } 115 | while (try tk.peek_char()) |c| { 116 | if (std.ascii.isWhitespace(c)) 117 | break; 118 | tk.take_char(c); 119 | } 120 | return .{ 121 | .offset = @intCast(start), 122 | .len = @intCast(tk.index - start), 123 | .type = ttype, 124 | }; 125 | } 126 | 127 | fn peek_char(tk: Tokenizer) error{ SourceInputTooLarge, InvalidSourceEncoding }!?u8 { 128 | if (tk.index >= tk.source.len) 129 | return null; 130 | 131 | if (tk.index >= std.math.maxInt(u32)) 132 | return error.SourceInputTooLarge; 133 | 134 | const char = tk.source[tk.index]; 135 | if (char < 0x20 and !std.ascii.isWhitespace(char)) 136 | return error.InvalidSourceEncoding; 137 | 138 | return char; 139 | } 140 | 141 | fn take_char(tk: *Tokenizer, c: u8) void { 142 | std.debug.assert(tk.source[tk.index] == c); 143 | tk.index += 1; 144 | } 145 | 146 | fn next_char(tk: *Tokenizer) error{ SourceInputTooLarge, InvalidSourceEncoding }!?u8 { 147 | const char = try tk.peek_char(); 148 | if (char) |c| 149 | tk.take_char(c); 150 | return char; 151 | } 152 | 153 | fn run_fuzz_test(_: void, input: []const u8) !void { 154 | var tokenizer = init(input); 155 | 156 | while (true) { 157 | const tok = tokenizer.next() catch return; 158 | if (tok == null) 159 | break; 160 | } 161 | } 162 | 163 | test "fuzz Tokenizer" { 164 | try std.testing.fuzz({}, run_fuzz_test, .{}); 165 | } 166 | 167 | test Tokenizer { 168 | const seq: []const struct { TokenType, []const u8 } = &.{ 169 | .{ .word, "hello" }, 170 | .{ .whitespace, " " }, 171 | .{ .word, "world" }, 172 | .{ .whitespace, "\n " }, 173 | .{ .variable, "$foobar" }, 174 | .{ .whitespace, " " }, 175 | .{ .comment, "# hello, this is a comment" }, 176 | .{ .whitespace, "\n" }, 177 | .{ .string, "\"stringy content\"" }, 178 | }; 179 | 180 | var tokenizer = init( 181 | \\hello world 182 | \\ $foobar # hello, this is a comment 183 | \\"stringy content" 184 | ); 185 | 186 | var offset: u32 = 0; 187 | for (seq) |expected| { 188 | const actual = (try tokenizer.next()) orelse return error.Unexpected; 189 | errdefer std.debug.print("unexpected token: .{} \"{}\"\n", .{ 190 | std.zig.fmtId(@tagName(actual.type)), 191 | std.zig.fmtEscapes(tokenizer.source[actual.offset..][0..actual.len]), 192 | }); 193 | try std.testing.expectEqualStrings(expected.@"1", tokenizer.get_text(actual)); 194 | try std.testing.expectEqual(offset, actual.offset); 195 | try std.testing.expectEqual(expected.@"0", actual.type); 196 | try std.testing.expectEqual(expected.@"1".len, actual.len); 197 | offset += actual.len; 198 | } 199 | try std.testing.expectEqual(null, try tokenizer.next()); 200 | } 201 | 202 | test "empty file" { 203 | var tokenizer = init(""); 204 | try std.testing.expectEqual(null, try tokenizer.next()); 205 | } 206 | -------------------------------------------------------------------------------- 
/src/build.old.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const builtin = @import("builtin"); 3 | 4 | fn root() []const u8 { 5 | return comptime (std.fs.path.dirname(@src().file) orelse "."); 6 | } 7 | const build_root = root(); 8 | 9 | pub const KiB = 1024; 10 | pub const MiB = 1024 * KiB; 11 | pub const GiB = 1024 * MiB; 12 | 13 | fn usageDemo( 14 | b: *std.Build, 15 | dependency: *std.Build.Dependency, 16 | debug_step: *std.Build.Step, 17 | ) void { 18 | installDebugDisk(dependency, debug_step, "uninitialized.img", 50 * MiB, .uninitialized); 19 | 20 | installDebugDisk(dependency, debug_step, "empty-mbr.img", 50 * MiB, .{ 21 | .mbr = .{ 22 | .partitions = .{ 23 | null, 24 | null, 25 | null, 26 | null, 27 | }, 28 | }, 29 | }); 30 | 31 | installDebugDisk(dependency, debug_step, "manual-offset-mbr.img", 50 * MiB, .{ 32 | .mbr = .{ 33 | .partitions = .{ 34 | &.{ .offset = 2048 + 0 * 10 * MiB, .size = 10 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, 35 | &.{ .offset = 2048 + 1 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, 36 | &.{ .offset = 2048 + 2 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, 37 | &.{ .offset = 2048 + 3 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, 38 | }, 39 | }, 40 | }); 41 | 42 | installDebugDisk(dependency, debug_step, "auto-offset-mbr.img", 50 * MiB, .{ 43 | .mbr = .{ 44 | .partitions = .{ 45 | &.{ .size = 7 * MiB, .bootable = true, .type = .fat32_lba, .data = .uninitialized }, 46 | &.{ .size = 8 * MiB, .bootable = false, .type = .ntfs, .data = .uninitialized }, 47 | &.{ .size = 9 * MiB, .bootable = false, .type = .linux_swap, .data = .uninitialized }, 48 | &.{ .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .uninitialized }, 49 | }, 50 | }, 51 | }); 52 | 53 | installDebugDisk(dependency, debug_step, "empty-fat32.img", 50 * MiB, .{ 54 | .fs = .{ 55 | .format = .fat32, 56 | .label = "EMPTY", 57 | .items = &.{}, 58 | }, 59 | }); 60 | 61 | installDebugDisk(dependency, debug_step, "initialized-fat32.img", 50 * MiB, .{ 62 | .fs = .{ 63 | .format = .fat32, 64 | .label = "ROOTFS", 65 | .items = &.{ 66 | .{ .empty_dir = "boot/EFI/refind/icons" }, 67 | .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, 68 | .{ .empty_dir = "Users/xq/" }, 69 | .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, 70 | .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, 71 | }, 72 | }, 73 | }); 74 | 75 | installDebugDisk(dependency, debug_step, "initialized-fat32-in-mbr-partitions.img", 100 * MiB, .{ 76 | .mbr = .{ 77 | .partitions = .{ 78 | &.{ 79 | .size = 90 * MiB, 80 | .bootable = true, 81 | .type = .fat32_lba, 82 | .data = .{ 83 | .fs = .{ 84 | .format = .fat32, 85 | .label = "ROOTFS", 86 | .items = &.{ 87 | .{ .empty_dir = "boot/EFI/refind/icons" }, 88 | .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, 89 | .{ .empty_dir = "Users/xq/" }, 90 | .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, 91 | .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, 92 | }, 93 | }, 94 | }, 95 | }, 96 | null, 97 | null, 98 | null, 99 | }, 100 | }, 101 | }); 102 | 103 | // TODO: Implement GPT partition support 104 | // installDebugDisk(debug_step, "empty-gpt.img", 50 * MiB, .{ 105 | // .gpt = .{ 106 | 
// .partitions = &.{}, 107 | // }, 108 | // }); 109 | } 110 | 111 | pub fn build(b: *std.Build) void { 112 | // Steps: 113 | 114 | const debug_step = b.step("debug", "Builds a basic exemplary disk image."); 115 | 116 | // Dependency Setup: 117 | 118 | const zfat_dep = b.dependency("zfat", .{ 119 | // .max_long_name_len = 121, 120 | .code_page = .us, 121 | .@"volume-count" = @as(u32, 1), 122 | .@"sector-size" = @as(u32, 512), 123 | // .rtc = .dynamic, 124 | .mkfs = true, 125 | .exfat = true, 126 | }); 127 | 128 | const zfat_mod = zfat_dep.module("zfat"); 129 | 130 | const mkfs_fat = b.addExecutable(.{ 131 | .name = "mkfs.fat", 132 | .target = b.graph.host, 133 | .optimize = .ReleaseSafe, 134 | .root_source_file = b.path("src/mkfs.fat.zig"), 135 | }); 136 | mkfs_fat.root_module.addImport("fat", zfat_mod); 137 | mkfs_fat.linkLibC(); 138 | b.installArtifact(mkfs_fat); 139 | 140 | // Usage: 141 | var self_dep: std.Build.Dependency = .{ 142 | .builder = b, 143 | }; 144 | usageDemo(b, &self_dep, debug_step); 145 | } 146 | 147 | fn resolveFilesystemMaker(dependency: *std.Build.Dependency, fs: FileSystem.Format) std.Build.LazyPath { 148 | return switch (fs) { 149 | .fat12, .fat16, .fat32, .exfat => dependency.artifact("mkfs.fat").getEmittedBin(), 150 | 151 | .custom => |path| path, 152 | 153 | else => std.debug.panic("Unsupported builtin file system: {s}", .{@tagName(fs)}), 154 | }; 155 | } 156 | 157 | fn relpath(b: *std.Build, path: []const u8) std.Build.LazyPath { 158 | return .{ 159 | .cwd_relative = b.pathFromRoot(path), 160 | }; 161 | } 162 | 163 | fn installDebugDisk( 164 | dependency: *std.Build.Dependency, 165 | install_step: *std.Build.Step, 166 | name: []const u8, 167 | size: u64, 168 | content: Content, 169 | ) void { 170 | const initialize_disk = initializeDisk(dependency, size, content); 171 | const install_disk = install_step.owner.addInstallFile(initialize_disk.getImageFile(), name); 172 | install_step.dependOn(&install_disk.step); 173 | } 174 | 175 | pub fn initializeDisk(dependency: *std.Build.Dependency, size: u64, content: Content) *InitializeDiskStep { 176 | const ids = dependency.builder.allocator.create(InitializeDiskStep) catch @panic("out of memory"); 177 | 178 | ids.* = .{ 179 | .step = std.Build.Step.init(.{ 180 | .owner = dependency.builder, // TODO: Is this correct? 
181 | .id = .custom, 182 | .name = "initialize disk", 183 | .makeFn = InitializeDiskStep.make, 184 | .first_ret_addr = @returnAddress(), 185 | .max_rss = 0, 186 | }), 187 | .disk_file = .{ .step = &ids.step }, 188 | .content = content.dupe(dependency.builder) catch @panic("out of memory"), 189 | .size = size, 190 | }; 191 | 192 | ids.content.resolveFileSystems(dependency); 193 | 194 | ids.content.pushDependenciesTo(&ids.step); 195 | 196 | return ids; 197 | } 198 | 199 | pub const InitializeDiskStep = struct { 200 | const IoPump = std.fifo.LinearFifo(u8, .{ .Static = 8192 }); 201 | 202 | step: std.Build.Step, 203 | 204 | content: Content, 205 | size: u64, 206 | 207 | disk_file: std.Build.GeneratedFile, 208 | 209 | pub fn getImageFile(ids: *InitializeDiskStep) std.Build.LazyPath { 210 | return .{ .generated = .{ 211 | .file = &ids.disk_file, 212 | } }; 213 | } 214 | 215 | fn addDirectoryToCache(b: *std.Build, manifest: *std.Build.Cache.Manifest, parent: std.fs.Dir, path: []const u8) !void { 216 | var dir = try parent.openDir(path, .{ .iterate = true }); 217 | defer dir.close(); 218 | 219 | var walker = try dir.walk(b.allocator); 220 | defer walker.deinit(); 221 | 222 | while (try walker.next()) |entry| { 223 | switch (entry.kind) { 224 | .file => { 225 | const abs_path = try entry.dir.realpathAlloc(b.allocator, entry.basename); 226 | defer b.allocator.free(abs_path); 227 | _ = try manifest.addFile(abs_path, null); 228 | }, 229 | .directory => try addDirectoryToCache(b, manifest, entry.dir, entry.basename), 230 | 231 | else => return error.Unsupported, 232 | } 233 | } 234 | } 235 | 236 | fn addToCacheManifest(b: *std.Build, asking: *std.Build.Step, manifest: *std.Build.Cache.Manifest, content: Content) !void { 237 | manifest.hash.addBytes(@tagName(content)); 238 | switch (content) { 239 | .uninitialized => {}, 240 | 241 | .mbr => |table| { // MbrTable 242 | manifest.hash.addBytes(&table.bootloader); 243 | for (table.partitions) |part_or_null| { 244 | const part = part_or_null orelse { 245 | manifest.hash.addBytes("none"); 246 | break; 247 | }; 248 | manifest.hash.add(part.bootable); 249 | manifest.hash.add(part.offset orelse 0x04_03_02_01); 250 | manifest.hash.add(part.size); 251 | manifest.hash.add(part.type); 252 | try addToCacheManifest(b, asking, manifest, part.data); 253 | } 254 | }, 255 | 256 | .gpt => |table| { // GptTable 257 | manifest.hash.addBytes(&table.disk_id); 258 | 259 | for (table.partitions) |part| { 260 | manifest.hash.addBytes(&part.part_id); 261 | manifest.hash.addBytes(&part.type); 262 | manifest.hash.addBytes(std.mem.sliceAsBytes(&part.name)); 263 | 264 | manifest.hash.add(part.offset orelse 0x04_03_02_01); 265 | manifest.hash.add(part.size); 266 | 267 | manifest.hash.add(@as(u32, @bitCast(part.attributes))); 268 | 269 | try addToCacheManifest(b, asking, manifest, part.data); 270 | } 271 | }, 272 | 273 | .fs => |fs| { // FileSystem 274 | manifest.hash.add(@as(u64, fs.items.len)); 275 | manifest.hash.addBytes(@tagName(fs.format)); 276 | manifest.hash.addBytes(fs.executable.?.getPath2(b, asking)); 277 | 278 | // TODO: Properly add internal file system 279 | for (fs.items) |entry| { 280 | manifest.hash.addBytes(@tagName(entry)); 281 | switch (entry) { 282 | .empty_dir => |dir| { 283 | manifest.hash.addBytes(dir); 284 | }, 285 | .copy_dir => |dir| { 286 | manifest.hash.addBytes(dir.destination); 287 | try addDirectoryToCache(b, manifest, std.fs.cwd(), dir.source.getPath2(b, asking)); 288 | }, 289 | .copy_file => |file| { 290 | manifest.hash.addBytes(file.destination); 291 | 
_ = try manifest.addFile(file.source.getPath2(b, asking), null); 292 | }, 293 | } 294 | } 295 | }, 296 | .data => |data| { 297 | const path = data.getPath2(b, asking); 298 | _ = try manifest.addFile(path, null); 299 | }, 300 | .binary => |binary| { 301 | const path = binary.getEmittedBin().getPath2(b, asking); 302 | _ = try manifest.addFile(path, null); 303 | }, 304 | } 305 | } 306 | 307 | const HumanContext = std.BoundedArray(u8, 256); 308 | 309 | const DiskImage = struct { 310 | path: []const u8, 311 | handle: *std.fs.File, 312 | }; 313 | 314 | fn writeDiskImage(b: *std.Build, asking: *std.Build.Step, disk: DiskImage, base: u64, length: u64, content: Content, context: *HumanContext) !void { 315 | try disk.handle.seekTo(base); 316 | 317 | const context_len = context.len; 318 | defer context.len = context_len; 319 | 320 | context.appendSliceAssumeCapacity("."); 321 | context.appendSliceAssumeCapacity(@tagName(content)); 322 | 323 | switch (content) { 324 | .uninitialized => {}, 325 | 326 | .mbr => |table| { // MbrTable 327 | { 328 | var boot_sector: [512]u8 = .{0} ** 512; 329 | 330 | @memcpy(boot_sector[0..table.bootloader.len], &table.bootloader); 331 | 332 | std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], if (table.disk_id) |disk_id| disk_id else 0x0000_0000, .little); 333 | std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); 334 | 335 | var all_auto = true; 336 | var all_manual = true; 337 | for (table.partitions) |part_or_null| { 338 | const part = part_or_null orelse continue; 339 | 340 | if (part.offset != null) { 341 | all_auto = false; 342 | } else { 343 | all_manual = false; 344 | } 345 | } 346 | 347 | if (!all_auto and !all_manual) { 348 | std.log.err("{s}: not all partitions have an explicit offset!", .{context.slice()}); 349 | return error.InvalidSectorBoundary; 350 | } 351 | 352 | const part_base = 0x01BE; 353 | var auto_offset: u64 = 2048; 354 | for (table.partitions, 0..) 
|part_or_null, part_id| { 355 | const reset_len = context.len; 356 | defer context.len = reset_len; 357 | 358 | var buffer: [64]u8 = undefined; 359 | context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); 360 | 361 | const desc = boot_sector[part_base + 16 * part_id ..][0..16]; 362 | 363 | if (part_or_null) |part| { 364 | // https://wiki.osdev.org/MBR#Partition_table_entry_format 365 | 366 | const part_offset = part.offset orelse auto_offset; 367 | 368 | if ((part_offset % 512) != 0) { 369 | std.log.err("{s}: .offset is not divisible by 512!", .{context.slice()}); 370 | return error.InvalidSectorBoundary; 371 | } 372 | if ((part.size % 512) != 0) { 373 | std.log.err("{s}: .size is not divisible by 512!", .{context.slice()}); 374 | return error.InvalidSectorBoundary; 375 | } 376 | 377 | const lba_u64 = @divExact(part_offset, 512); 378 | const size_u64 = @divExact(part.size, 512); 379 | 380 | const lba = std.math.cast(u32, lba_u64) orelse { 381 | std.log.err("{s}: .offset is out of bounds!", .{context.slice()}); 382 | return error.InvalidSectorBoundary; 383 | }; 384 | const size = std.math.cast(u32, size_u64) orelse { 385 | std.log.err("{s}: .size is out of bounds!", .{context.slice()}); 386 | return error.InvalidSectorBoundary; 387 | }; 388 | 389 | desc[0] = if (part.bootable) 0x80 else 0x00; 390 | 391 | desc[1..4].* = mbr.encodeMbrChsEntry(lba); // chs_start 392 | desc[4] = @intFromEnum(part.type); 393 | desc[5..8].* = mbr.encodeMbrChsEntry(lba + size - 1); // chs_end 394 | std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start 395 | std.mem.writeInt(u32, desc[12..16], size, .little); // block_count 396 | 397 | auto_offset += part.size; 398 | } else { 399 | @memset(desc, 0); // inactive 400 | } 401 | } 402 | boot_sector[0x01FE] = 0x55; 403 | boot_sector[0x01FF] = 0xAA; 404 | 405 | try disk.handle.writeAll(&boot_sector); 406 | } 407 | 408 | { 409 | var auto_offset: u64 = 2048; 410 | for (table.partitions, 0..) 
|part_or_null, part_id| { 411 | const part = part_or_null orelse continue; 412 | 413 | const reset_len = context.len; 414 | defer context.len = reset_len; 415 | 416 | var buffer: [64]u8 = undefined; 417 | context.appendSliceAssumeCapacity(std.fmt.bufPrint(&buffer, "[{}]", .{part_id}) catch unreachable); 418 | 419 | try writeDiskImage(b, asking, disk, base + auto_offset, part.size, part.data, context); 420 | 421 | auto_offset += part.size; 422 | } 423 | } 424 | }, 425 | 426 | .gpt => |table| { // GptTable 427 | _ = table; 428 | std.log.err("{s}: GPT partition tables not supported yet!", .{context.slice()}); 429 | return error.GptUnsupported; 430 | }, 431 | 432 | .fs => |fs| { 433 | const maker_exe = fs.executable.?.getPath2(b, asking); 434 | 435 | try disk.handle.sync(); 436 | 437 | // const disk_image_path = switch (builtin.os.tag) { 438 | // .linux => blk: { 439 | // const self_pid = std.os.linux.getpid(); 440 | // break :blk b.fmt("/proc/{}/fd/{}", .{ self_pid, disk.handle }); 441 | // }, 442 | 443 | // else => @compileError("TODO: Support this on other OS as well!"), 444 | // }; 445 | 446 | var argv = std.ArrayList([]const u8).init(b.allocator); 447 | defer argv.deinit(); 448 | 449 | try argv.appendSlice(&.{ 450 | maker_exe, // exe 451 | disk.path, // image file 452 | b.fmt("0x{X:0>8}", .{base}), // filesystem offset (bytes) 453 | b.fmt("0x{X:0>8}", .{length}), // filesystem length (bytes) 454 | @tagName(fs.format), // filesystem type 455 | "format", // cmd 1: format the disk 456 | "mount", // cmd 2: mount it internally 457 | }); 458 | 459 | for (fs.items) |item| { 460 | switch (item) { 461 | .empty_dir => |dir| { 462 | try argv.append(b.fmt("mkdir;{s}", .{dir})); 463 | }, 464 | .copy_dir => |src_dst| { 465 | try argv.append(b.fmt("dir;{s};{s}", .{ 466 | src_dst.source.getPath2(b, asking), 467 | src_dst.destination, 468 | })); 469 | }, 470 | .copy_file => |src_dst| { 471 | try argv.append(b.fmt("file;{s};{s}", .{ 472 | src_dst.source.getPath2(b, asking), 473 | src_dst.destination, 474 | })); 475 | }, 476 | } 477 | } 478 | 479 | // use shared access to the file: 480 | const stdout = b.run(argv.items); 481 | 482 | try disk.handle.sync(); 483 | 484 | _ = stdout; 485 | }, 486 | 487 | .data => |data| { 488 | const path = data.getPath2(b, asking); 489 | try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); 490 | }, 491 | 492 | .binary => |binary| { 493 | const path = binary.getEmittedBin().getPath2(b, asking); 494 | try copyFileToImage(disk, length, std.fs.cwd(), path, context.slice()); 495 | }, 496 | } 497 | } 498 | 499 | fn copyFileToImage(disk: DiskImage, max_length: u64, dir: std.fs.Dir, path: []const u8, context: []const u8) !void { 500 | errdefer std.log.err("{s}: failed to copy data to image.", .{context}); 501 | 502 | var file = try dir.openFile(path, .{}); 503 | defer file.close(); 504 | 505 | const stat = try file.stat(); 506 | if (stat.size > max_length) { 507 | var realpath_buffer: [std.fs.max_path_bytes]u8 = undefined; 508 | std.log.err("{s}: The file '{!s}' exceeds the size of the container. 
The file is {:.2} large, while the container only allows for {:.2}.", .{ 509 | context, 510 | dir.realpath(path, &realpath_buffer), 511 | std.fmt.fmtIntSizeBin(stat.size), 512 | std.fmt.fmtIntSizeBin(max_length), 513 | }); 514 | return error.FileTooLarge; 515 | } 516 | 517 | var pumper = IoPump.init(); 518 | 519 | try pumper.pump(file.reader(), disk.handle.writer()); 520 | 521 | const padding = max_length - stat.size; 522 | if (padding > 0) { 523 | try disk.handle.writer().writeByteNTimes(' ', padding); 524 | } 525 | } 526 | 527 | fn make(step: *std.Build.Step, options: std.Build.Step.MakeOptions) !void { 528 | const b = step.owner; 529 | _ = options; 530 | 531 | const ids: *InitializeDiskStep = @fieldParentPtr("step", step); 532 | 533 | var man = b.graph.cache.obtain(); 534 | defer man.deinit(); 535 | 536 | man.hash.addBytes(&.{ 232, 8, 75, 249, 2, 210, 51, 118, 171, 12 }); // Change when impl changes 537 | 538 | try addToCacheManifest(b, step, &man, ids.content); 539 | 540 | step.result_cached = try step.cacheHit(&man); 541 | const digest = man.final(); 542 | 543 | const output_components = .{ "o", &digest, "disk.img" }; 544 | const output_sub_path = b.pathJoin(&output_components); 545 | const output_sub_dir_path = std.fs.path.dirname(output_sub_path).?; 546 | b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { 547 | return step.fail("unable to make path '{}{s}': {s}", .{ 548 | b.cache_root, output_sub_dir_path, @errorName(err), 549 | }); 550 | }; 551 | 552 | ids.disk_file.path = try b.cache_root.join(b.allocator, &output_components); 553 | 554 | if (step.result_cached) 555 | return; 556 | 557 | { 558 | const disk_path = ids.disk_file.path.?; 559 | 560 | var disk = try std.fs.cwd().createFile(disk_path, .{}); 561 | defer disk.close(); 562 | 563 | try disk.seekTo(ids.size - 1); 564 | try disk.writeAll("\x00"); 565 | try disk.seekTo(0); 566 | 567 | var context: HumanContext = .{}; 568 | context.appendSliceAssumeCapacity("disk"); 569 | 570 | const disk_image = DiskImage{ 571 | .path = disk_path, 572 | .handle = &disk, 573 | }; 574 | 575 | try writeDiskImage(b, step, disk_image, 0, ids.size, ids.content, &context); 576 | } 577 | 578 | // if (!step.result_cached) 579 | try step.writeManifest(&man); 580 | } 581 | }; 582 | 583 | pub const Content = union(enum) { 584 | uninitialized, 585 | 586 | mbr: mbr.Table, 587 | gpt: gpt.Table, 588 | 589 | fs: FileSystem, 590 | 591 | data: std.Build.LazyPath, 592 | 593 | binary: *std.Build.Step.Compile, 594 | 595 | pub fn dupe(content: Content, b: *std.Build) !Content { 596 | const allocator = b.allocator; 597 | 598 | switch (content) { 599 | .uninitialized => return content, 600 | .mbr => |table| { 601 | var copy = table; 602 | for (©.partitions) |*part| { 603 | if (part.*) |*p| { 604 | const buf = try b.allocator.create(mbr.Partition); 605 | buf.* = p.*.*; 606 | buf.data = try buf.data.dupe(b); 607 | p.* = buf; 608 | } 609 | } 610 | return .{ .mbr = copy }; 611 | }, 612 | .gpt => |table| { 613 | var copy = table; 614 | const partitions = try allocator.dupe(gpt.Partition, table.partitions); 615 | for (partitions) |*part| { 616 | part.data = try part.data.dupe(b); 617 | } 618 | copy.partitions = partitions; 619 | return .{ .gpt = copy }; 620 | }, 621 | .fs => |fs| { 622 | var copy = fs; 623 | 624 | copy.label = try allocator.dupe(u8, fs.label); 625 | const items = try allocator.dupe(FileSystem.Item, fs.items); 626 | for (items) |*item| { 627 | switch (item.*) { 628 | .empty_dir => |*dir| { 629 | dir.* = try allocator.dupe(u8, dir.*); 630 | }, 
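                        // Note (added for clarity): .copy_dir and .copy_file carry the same
                        // `Copy` payload, so the single prong below duplicates both the
                        // destination string and the LazyPath source for either variant.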
631 | .copy_dir, .copy_file => |*cp| { 632 | const cp_new: FileSystem.Copy = .{ 633 | .destination = try allocator.dupe(u8, cp.destination), 634 | .source = cp.source.dupe(b), 635 | }; 636 | cp.* = cp_new; 637 | }, 638 | } 639 | } 640 | copy.items = items; 641 | 642 | switch (copy.format) { 643 | .custom => |*path| path.* = path.dupe(b), 644 | else => {}, 645 | } 646 | 647 | return .{ .fs = copy }; 648 | }, 649 | .data => |data| { 650 | return .{ .data = data.dupe(b) }; 651 | }, 652 | .binary => |binary| { 653 | return .{ .binary = binary }; 654 | }, 655 | } 656 | } 657 | 658 | pub fn pushDependenciesTo(content: Content, step: *std.Build.Step) void { 659 | switch (content) { 660 | .uninitialized => {}, 661 | .mbr => |table| { 662 | for (table.partitions) |part| { 663 | if (part) |p| { 664 | p.data.pushDependenciesTo(step); 665 | } 666 | } 667 | }, 668 | .gpt => |table| { 669 | for (table.partitions) |part| { 670 | part.data.pushDependenciesTo(step); 671 | } 672 | }, 673 | .fs => |fs| { 674 | for (fs.items) |item| { 675 | switch (item) { 676 | .empty_dir => {}, 677 | .copy_dir, .copy_file => |*cp| { 678 | cp.source.addStepDependencies(step); 679 | }, 680 | } 681 | } 682 | if (fs.format == .custom) { 683 | fs.format.custom.addStepDependencies(step); 684 | } 685 | fs.executable.?.addStepDependencies(step); // Must be resolved already, invoke resolveFileSystems before! 686 | }, 687 | .data => |data| data.addStepDependencies(step), 688 | .binary => |binary| step.dependOn(&binary.step), 689 | } 690 | } 691 | 692 | pub fn resolveFileSystems(content: *Content, dependency: *std.Build.Dependency) void { 693 | switch (content.*) { 694 | .uninitialized => {}, 695 | .mbr => |*table| { 696 | for (&table.partitions) |*part| { 697 | if (part.*) |p| { 698 | @constCast(&p.data).resolveFileSystems(dependency); 699 | } 700 | } 701 | }, 702 | .gpt => |*table| { 703 | for (table.partitions) |*part| { 704 | @constCast(&part.data).resolveFileSystems(dependency); 705 | } 706 | }, 707 | .fs => |*fs| { 708 | fs.executable = resolveFilesystemMaker(dependency, fs.format); 709 | }, 710 | .data, .binary => {}, 711 | } 712 | } 713 | }; 714 | 715 | pub const FileSystem = struct { 716 | pub const Format = union(enum) { 717 | pub const Tag = std.meta.Tag(@This()); 718 | 719 | fat12, 720 | fat16, 721 | fat32, 722 | 723 | ext2, 724 | ext3, 725 | ext4, 726 | 727 | exfat, 728 | ntfs, 729 | 730 | iso_9660, 731 | iso_13490, 732 | udf, 733 | 734 | /// usage: mkfs. 735 | /// is a path to the image file 736 | /// is the byte base of the file system 737 | /// is the byte length of the file system 738 | /// is the file system that should be used to format 739 | /// is a list of operations that should be performed on the file system: 740 | /// - format Formats the disk image. 741 | /// - mount Mounts the file system, must be before all following: 742 | /// - mkdir; Creates directory and all necessary parents. 743 | /// - file;; Copy to path . If exists, it will be overwritten. 744 | /// - dir;; Copy recursively into . If exists, they will be merged. 745 | /// 746 | /// paths are always rooted, even if they don't start with a /, and always use / as a path separator. 
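        /// Illustrative invocation (tool name, offsets and paths are hypothetical,
        /// but the argument layout mirrors the argv assembled in `writeDiskImage`):
        ///
        ///   my-mkfs ./zig-out/disk.img 0x00100000 0x04000000 fat32 format mount "mkdir;boot" "file;./zig-out/bin/kernel.elf;boot/kernel.elf"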
747 | /// 748 | custom: std.Build.LazyPath, 749 | }; 750 | 751 | pub const Copy = struct { 752 | source: std.Build.LazyPath, 753 | destination: []const u8, 754 | }; 755 | 756 | pub const Item = union(enum) { 757 | empty_dir: []const u8, 758 | copy_dir: Copy, 759 | copy_file: Copy, 760 | }; 761 | 762 | format: Format, 763 | label: []const u8, 764 | items: []const Item, 765 | 766 | // private: 767 | executable: ?std.Build.LazyPath = null, 768 | }; 769 | 770 | pub const FileSystemBuilder = struct { 771 | b: *std.Build, 772 | list: std.ArrayListUnmanaged(FileSystem.Item), 773 | 774 | pub fn init(b: *std.Build) FileSystemBuilder { 775 | return FileSystemBuilder{ 776 | .b = b, 777 | .list = .{}, 778 | }; 779 | } 780 | 781 | pub fn finalize(fsb: *FileSystemBuilder, options: struct { 782 | format: FileSystem.Format, 783 | label: []const u8, 784 | }) FileSystem { 785 | return .{ 786 | .format = options.format, 787 | .label = fsb.b.dupe(options.label), 788 | .items = fsb.list.toOwnedSlice(fsb.b.allocator) catch @panic("out of memory"), 789 | }; 790 | } 791 | 792 | pub fn addFile(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { 793 | fsb.list.append(fsb.b.allocator, .{ 794 | .copy_file = .{ 795 | .source = source.dupe(fsb.b), 796 | .destination = fsb.b.dupe(destination), 797 | }, 798 | }) catch @panic("out of memory"); 799 | } 800 | 801 | pub fn addDirectory(fsb: *FileSystemBuilder, source: std.Build.LazyPath, destination: []const u8) void { 802 | fsb.list.append(fsb.b.allocator, .{ 803 | .copy_dir = .{ 804 | .source = source.dupe(fsb.b), 805 | .destination = fsb.b.dupe(destination), 806 | }, 807 | }) catch @panic("out of memory"); 808 | } 809 | 810 | pub fn mkdir(fsb: *FileSystemBuilder, destination: []const u8) void { 811 | fsb.list.append(fsb.b.allocator, .{ 812 | .empty_dir = fsb.b.dupe(destination), 813 | }) catch @panic("out of memory"); 814 | } 815 | }; 816 | -------------------------------------------------------------------------------- /src/components/EmptyData.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! The `empty` content will just not touch anything in the output 3 | //! and serves as a placeholder. 4 | //! 5 | 6 | const std = @import("std"); 7 | const dim = @import("../dim.zig"); 8 | 9 | const EmptyData = @This(); 10 | 11 | pub fn parse(ctx: dim.Context) !dim.Content { 12 | _ = ctx; 13 | return .create_handle(undefined, .create(@This(), .{ 14 | .render_fn = render, 15 | })); 16 | } 17 | 18 | fn render(self: *EmptyData, stream: *dim.BinaryStream) dim.Content.RenderError!void { 19 | _ = self; 20 | _ = stream; 21 | } 22 | -------------------------------------------------------------------------------- /src/components/FillData.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! The `fill ` content will fill the remaining space with the given `` value. 3 | //! 
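//!
//! A minimal script-level example (the byte value is illustrative):
//!
//!     fill 0xAA
//!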
4 | 5 | const std = @import("std"); 6 | const dim = @import("../dim.zig"); 7 | 8 | const FillData = @This(); 9 | 10 | fill_value: u8, 11 | 12 | pub fn parse(ctx: dim.Context) !dim.Content { 13 | const pf = try ctx.alloc_object(FillData); 14 | pf.* = .{ 15 | .fill_value = try ctx.parse_integer(u8, 0), 16 | }; 17 | return .create_handle(pf, .create(@This(), .{ 18 | .render_fn = render, 19 | })); 20 | } 21 | 22 | fn render(self: *FillData, stream: *dim.BinaryStream) dim.Content.RenderError!void { 23 | try stream.writer().writeByteNTimes( 24 | self.fill_value, 25 | stream.length, 26 | ); 27 | } 28 | -------------------------------------------------------------------------------- /src/components/PasteFile.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const dim = @import("../dim.zig"); 3 | 4 | const PasteFile = @This(); 5 | 6 | file_handle: dim.FileName, 7 | 8 | pub fn parse(ctx: dim.Context) !dim.Content { 9 | const pf = try ctx.alloc_object(PasteFile); 10 | pf.* = .{ 11 | .file_handle = try ctx.parse_file_name(), 12 | }; 13 | return .create_handle(pf, .create(@This(), .{ 14 | .render_fn = render, 15 | })); 16 | } 17 | 18 | fn render(self: *PasteFile, stream: *dim.BinaryStream) dim.Content.RenderError!void { 19 | try self.file_handle.copy_to(stream); 20 | } 21 | -------------------------------------------------------------------------------- /src/components/fs/FatFileSystem.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const dim = @import("../../dim.zig"); 3 | const common = @import("common.zig"); 4 | 5 | const fatfs = @import("zfat"); 6 | 7 | const block_size = 512; 8 | const max_path_len = 8192; // this should be enough 9 | const max_label_len = 11; // see http://elm-chan.org/fsw/ff/doc/setlabel.html 10 | 11 | const FAT = @This(); 12 | 13 | format_as: FatType, 14 | label: ?[]const u8 = null, 15 | fats: ?fatfs.FatTables = null, 16 | rootdir_size: ?c_uint = null, 17 | ops: std.ArrayList(common.FsOperation), 18 | sector_align: ?c_uint = null, 19 | cluster_size: ?u32 = null, 20 | 21 | pub fn parse(ctx: dim.Context) !dim.Content { 22 | const fat_type = try ctx.parse_enum(FatType); 23 | 24 | const pf = try ctx.alloc_object(FAT); 25 | pf.* = .{ 26 | .format_as = fat_type, 27 | .ops = .init(ctx.get_arena()), 28 | }; 29 | 30 | var appender: Appender = .{ 31 | .fat = pf, 32 | .updater = .init(ctx, pf), 33 | }; 34 | 35 | try common.parse_ops(ctx, "endfat", &appender); 36 | 37 | try appender.updater.validate(); 38 | 39 | return .create_handle(pf, .create(@This(), .{ 40 | .render_fn = render, 41 | })); 42 | } 43 | 44 | const Appender = struct { 45 | fat: *FAT, 46 | updater: dim.FieldUpdater(FAT, &.{ 47 | .fats, 48 | .label, 49 | .rootdir_size, 50 | .sector_align, 51 | .cluster_size, 52 | 53 | // cannot be accessed: 54 | .format_as, 55 | .ops, 56 | }), 57 | 58 | pub fn append_common_op(self: @This(), op: common.FsOperation) !void { 59 | try self.fat.ops.append(op); 60 | } 61 | 62 | pub fn parse_custom_op(self: *@This(), ctx: dim.Context, str_op: []const u8) !void { 63 | const Op = enum { 64 | label, 65 | fats, 66 | @"root-size", 67 | @"sector-align", 68 | @"cluster-size", 69 | }; 70 | const op = std.meta.stringToEnum(Op, str_op) orelse return ctx.report_fatal_error( 71 | "Unknown file system operation '{s}'", 72 | .{str_op}, 73 | ); 74 | switch (op) { 75 | .label => try self.updater.set(.label, try ctx.parse_string()), 76 | .fats => try 
self.updater.set(.fats, try ctx.parse_enum(fatfs.FatTables)), 77 | .@"root-size" => try self.updater.set(.rootdir_size, try ctx.parse_integer(c_uint, 0)), 78 | .@"sector-align" => try self.updater.set(.sector_align, try ctx.parse_integer(c_uint, 0)), 79 | .@"cluster-size" => try self.updater.set(.cluster_size, try ctx.parse_integer(u32, 0)), 80 | } 81 | } 82 | }; 83 | 84 | fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { 85 | var bsd: BinaryStreamDisk = .{ .stream = stream }; 86 | 87 | const min_size, const max_size = self.format_as.get_size_limits(); 88 | 89 | if (stream.length < min_size) { 90 | // TODO(fqu): Report fatal erro! 91 | std.log.err("cannot format {} bytes with {s}: min required size is {}", .{ 92 | @as(dim.DiskSize, @enumFromInt(stream.length)), 93 | @tagName(self.format_as), 94 | @as(dim.DiskSize, @enumFromInt(min_size)), 95 | }); 96 | return; 97 | } 98 | 99 | if (stream.length > max_size) { 100 | // TODO(fqu): Report warning 101 | std.log.warn("will not use all available space: available space is {}, but maximum size for {s} is {}", .{ 102 | @as(dim.DiskSize, @enumFromInt(stream.length)), 103 | @tagName(self.format_as), 104 | @as(dim.DiskSize, @enumFromInt(min_size)), 105 | }); 106 | } 107 | 108 | var filesystem: fatfs.FileSystem = undefined; 109 | 110 | fatfs.disks[0] = &bsd.disk; 111 | defer fatfs.disks[0] = null; 112 | 113 | var workspace: [8192]u8 = undefined; 114 | fatfs.mkfs("0:", .{ 115 | .filesystem = self.format_as.get_zfat_type(), 116 | .fats = self.fats orelse .two, 117 | .sector_align = self.sector_align orelse 0, // default/auto 118 | .cluster_size = self.cluster_size orelse 0, 119 | .rootdir_size = self.rootdir_size orelse 512, // randomly chosen, might need adjustment 120 | .use_partitions = false, // we have other means for this 121 | }, &workspace) catch |err| switch (err) { 122 | error.OutOfMemory => return error.OutOfMemory, 123 | error.WriteProtected => @panic("bug in zfat"), 124 | error.InvalidParameter => @panic("bug in zfat disk wrapper"), 125 | error.DiskErr => return error.IoError, 126 | error.NotReady => @panic("bug in zfat disk wrapper"), 127 | error.InvalidDrive => @panic("bug in AtomicOps"), 128 | error.MkfsAborted => return error.IoError, 129 | }; 130 | 131 | const ops = self.ops.items; 132 | 133 | filesystem.mount("0:", true) catch |err| switch (err) { 134 | error.NotEnabled => @panic("bug in zfat"), 135 | error.DiskErr => return error.IoError, 136 | error.NotReady => @panic("bug in zfat disk wrapper"), 137 | error.InvalidDrive => @panic("bug in AtomicOps"), 138 | error.NoFilesystem => @panic("bug in zfat"), 139 | }; 140 | 141 | if (self.label) |label| { 142 | if (label.len <= max_label_len) { 143 | var label_buffer: [max_label_len + 3:0]u8 = undefined; 144 | const buf = std.fmt.bufPrintZ(&label_buffer, "0:{s}", .{label}) catch @panic("buffer too small"); 145 | 146 | if (fatfs.api.setlabel(buf.ptr) != 0) { 147 | return error.IoError; 148 | } 149 | } else { 150 | std.log.err("label \"{}\" is {} characters long, but only up to {} are permitted.", .{ 151 | std.zig.fmtEscapes(label), 152 | label.len, 153 | max_label_len, 154 | }); 155 | } 156 | } 157 | 158 | const wrapper = AtomicOps{}; 159 | 160 | for (ops) |op| { 161 | try op.execute(wrapper); 162 | } 163 | } 164 | 165 | const FatType = enum { 166 | fat12, 167 | fat16, 168 | fat32, 169 | // exfat, 170 | 171 | fn get_zfat_type(fat: FatType) fatfs.DiskFormat { 172 | return switch (fat) { 173 | .fat12 => .fat, 174 | .fat16 => .fat, 175 | .fat32 => .fat32, 176 | // 
.exfat => .exfat, 177 | }; 178 | } 179 | 180 | fn get_size_limits(fat: FatType) struct { u64, u64 } { 181 | // see https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system#Size_limits 182 | return switch (fat) { 183 | .fat12 => .{ 512, 133_824_512 }, // 512 B ... 127 MB 184 | .fat16 => .{ 2_091_520, 2_147_090_432 }, // 2042.5 kB ... 2047 MB 185 | .fat32 => .{ 33_548_800, 1_099_511_578_624 }, // 32762.5 kB ... 1024 GB 186 | }; 187 | } 188 | }; 189 | 190 | const AtomicOps = struct { 191 | pub fn mkdir(ops: AtomicOps, path: []const u8) dim.Content.RenderError!void { 192 | _ = ops; 193 | 194 | var path_buffer: [max_path_len:0]u8 = undefined; 195 | var fba: std.heap.FixedBufferAllocator = .init(&path_buffer); 196 | 197 | const joined = try std.mem.concatWithSentinel(fba.allocator(), u8, &.{ "0:/", path }, 0); 198 | fatfs.mkdir(joined) catch |err| switch (err) { 199 | error.Exist => {}, // this is good 200 | error.OutOfMemory => return error.OutOfMemory, 201 | error.Timeout => @panic("implementation bug in fatfs glue"), 202 | error.InvalidName => return error.ConfigurationError, 203 | error.WriteProtected => @panic("implementation bug in fatfs glue"), 204 | error.DiskErr => return error.IoError, 205 | error.NotReady => @panic("implementation bug in fatfs glue"), 206 | error.InvalidDrive => @panic("implementation bug in fatfs glue"), 207 | error.NotEnabled => @panic("implementation bug in fatfs glue"), 208 | error.NoFilesystem => @panic("implementation bug in fatfs glue"), 209 | error.IntErr => return error.IoError, 210 | error.NoPath => @panic("implementation bug in fatfs glue"), 211 | error.Denied => @panic("implementation bug in fatfs glue"), 212 | }; 213 | } 214 | 215 | pub fn mkfile(ops: AtomicOps, path: []const u8, reader: anytype) dim.Content.RenderError!void { 216 | _ = ops; 217 | 218 | var path_buffer: [max_path_len:0]u8 = undefined; 219 | if (path.len > path_buffer.len) 220 | return error.InvalidPath; 221 | @memcpy(path_buffer[0..path.len], path); 222 | path_buffer[path.len] = 0; 223 | 224 | const path_z = path_buffer[0..path.len :0]; 225 | 226 | var fs_file = fatfs.File.create(path_z) catch |err| switch (err) { 227 | error.OutOfMemory => return error.OutOfMemory, 228 | error.Timeout => @panic("implementation bug in fatfs glue"), 229 | error.InvalidName => return error.ConfigurationError, 230 | error.WriteProtected => @panic("implementation bug in fatfs glue"), 231 | error.DiskErr => return error.IoError, 232 | error.NotReady => @panic("implementation bug in fatfs glue"), 233 | error.InvalidDrive => @panic("implementation bug in fatfs glue"), 234 | error.NotEnabled => @panic("implementation bug in fatfs glue"), 235 | error.NoFilesystem => @panic("implementation bug in fatfs glue"), 236 | error.IntErr => return error.IoError, 237 | error.NoFile => @panic("implementation bug in fatfs glue"), 238 | error.NoPath => @panic("implementation bug in fatfs glue"), 239 | error.Denied => @panic("implementation bug in fatfs glue"), 240 | error.Exist => @panic("implementation bug in fatfs glue"), 241 | error.InvalidObject => @panic("implementation bug in fatfs glue"), 242 | error.Locked => @panic("implementation bug in fatfs glue"), 243 | error.TooManyOpenFiles => @panic("implementation bug in fatfs glue"), 244 | }; 245 | defer fs_file.close(); 246 | 247 | var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); 248 | fifo.pump( 249 | reader, 250 | fs_file.writer(), 251 | ) catch |err| switch (@as(dim.FileHandle.ReadError || fatfs.File.ReadError.Error, err)) { 252 | error.Overflow => 
return error.IoError, 253 | error.ReadFileFailed => return error.IoError, 254 | error.Timeout => @panic("implementation bug in fatfs glue"), 255 | error.DiskErr => return error.IoError, 256 | error.IntErr => return error.IoError, 257 | error.Denied => @panic("implementation bug in fatfs glue"), 258 | error.InvalidObject => @panic("implementation bug in fatfs glue"), 259 | }; 260 | } 261 | }; 262 | 263 | const BinaryStreamDisk = struct { 264 | disk: fatfs.Disk = .{ 265 | .getStatusFn = disk_getStatus, 266 | .initializeFn = disk_initialize, 267 | .readFn = disk_read, 268 | .writeFn = disk_write, 269 | .ioctlFn = disk_ioctl, 270 | }, 271 | stream: *dim.BinaryStream, 272 | 273 | fn disk_getStatus(intf: *fatfs.Disk) fatfs.Disk.Status { 274 | _ = intf; 275 | return .{ 276 | .initialized = true, 277 | .disk_present = true, 278 | .write_protected = false, 279 | }; 280 | } 281 | 282 | fn disk_initialize(intf: *fatfs.Disk) fatfs.Disk.Error!fatfs.Disk.Status { 283 | return disk_getStatus(intf); 284 | } 285 | 286 | fn disk_read(intf: *fatfs.Disk, buff: [*]u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { 287 | const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); 288 | 289 | bsd.stream.read(block_size * sector, buff[0 .. count * block_size]) catch return error.IoError; 290 | } 291 | 292 | fn disk_write(intf: *fatfs.Disk, buff: [*]const u8, sector: fatfs.LBA, count: c_uint) fatfs.Disk.Error!void { 293 | const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); 294 | 295 | bsd.stream.write(block_size * sector, buff[0 .. count * block_size]) catch return error.IoError; 296 | } 297 | 298 | fn disk_ioctl(intf: *fatfs.Disk, cmd: fatfs.IoCtl, buff: [*]u8) fatfs.Disk.Error!void { 299 | const bsd: *BinaryStreamDisk = @fieldParentPtr("disk", intf); 300 | 301 | switch (cmd) { 302 | .sync => {}, 303 | 304 | .get_sector_count => { 305 | const size: *fatfs.LBA = @ptrCast(@alignCast(buff)); 306 | size.* = @intCast(bsd.stream.length / block_size); 307 | }, 308 | .get_sector_size => { 309 | const size: *fatfs.WORD = @ptrCast(@alignCast(buff)); 310 | size.* = block_size; 311 | }, 312 | .get_block_size => { 313 | const size: *fatfs.DWORD = @ptrCast(@alignCast(buff)); 314 | size.* = 1; 315 | }, 316 | 317 | else => return error.InvalidParameter, 318 | } 319 | } 320 | }; 321 | -------------------------------------------------------------------------------- /src/components/fs/common.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! This file contains a common base implementation which should be valid for 3 | //! all typical path based file systems. 4 | //! 
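//!
//! The operations recognized by `parse_ops` below are `mkdir`, `copy-dir`,
//! `copy-file` and `create-file`; any other keyword is forwarded to the
//! calling file system's `parse_custom_op` handler.
//!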
5 | const std = @import("std"); 6 | const dim = @import("../../dim.zig"); 7 | 8 | pub const FsOperation = union(enum) { 9 | copy_file: struct { 10 | path: [:0]const u8, 11 | source: dim.FileName, 12 | }, 13 | 14 | copy_dir: struct { 15 | path: [:0]const u8, 16 | source: dim.FileName, 17 | }, 18 | 19 | make_dir: struct { 20 | path: [:0]const u8, 21 | }, 22 | 23 | create_file: struct { 24 | path: [:0]const u8, 25 | size: u64, 26 | contents: dim.Content, 27 | }, 28 | 29 | pub fn execute(op: FsOperation, executor: anytype) !void { 30 | const exec: Executor(@TypeOf(executor)) = .init(executor); 31 | 32 | try exec.execute(op); 33 | } 34 | }; 35 | 36 | fn Executor(comptime T: type) type { 37 | return struct { 38 | const Exec = @This(); 39 | 40 | inner: T, 41 | 42 | fn init(wrapped: T) Exec { 43 | return .{ .inner = wrapped }; 44 | } 45 | 46 | fn execute(exec: Exec, op: FsOperation) dim.Content.RenderError!void { 47 | switch (op) { 48 | .make_dir => |data| { 49 | try exec.recursive_mkdir(data.path); 50 | }, 51 | 52 | .copy_file => |data| { 53 | var handle = data.source.open() catch |err| switch (err) { 54 | error.FileNotFound => return, // open() already reporeted the error 55 | else => |e| return e, 56 | }; 57 | defer handle.close(); 58 | 59 | try exec.add_file(data.path, handle.reader()); 60 | }, 61 | .copy_dir => |data| { 62 | var iter_dir = data.source.open_dir() catch |err| switch (err) { 63 | error.FileNotFound => return, // open() already reporeted the error 64 | else => |e| return e, 65 | }; 66 | defer iter_dir.close(); 67 | 68 | var walker_memory: [16384]u8 = undefined; 69 | var temp_allocator: std.heap.FixedBufferAllocator = .init(&walker_memory); 70 | 71 | var path_memory: [8192]u8 = undefined; 72 | 73 | var walker = try iter_dir.walk(temp_allocator.allocator()); 74 | defer walker.deinit(); 75 | 76 | while (walker.next() catch |err| return walk_err(err)) |entry| { 77 | const path = std.fmt.bufPrintZ(&path_memory, "{s}/{s}", .{ 78 | data.path, 79 | entry.path, 80 | }) catch @panic("buffer too small!"); 81 | 82 | // std.log.debug("- {s}", .{path_buffer.items}); 83 | 84 | switch (entry.kind) { 85 | .file => { 86 | const fname: dim.FileName = .{ 87 | .root_dir = entry.dir, 88 | .rel_path = entry.basename, 89 | }; 90 | 91 | var file = try fname.open(); 92 | defer file.close(); 93 | 94 | try exec.add_file(path, file.reader()); 95 | }, 96 | 97 | .directory => { 98 | try exec.recursive_mkdir(path); 99 | }, 100 | 101 | else => { 102 | var realpath_buffer: [std.fs.max_path_bytes]u8 = undefined; 103 | std.log.warn("cannot copy file {!s}: {s} is not a supported file type!", .{ 104 | entry.dir.realpath(entry.path, &realpath_buffer), 105 | @tagName(entry.kind), 106 | }); 107 | }, 108 | } 109 | } 110 | }, 111 | 112 | .create_file => |data| { 113 | const buffer = try std.heap.page_allocator.alloc(u8, data.size); 114 | defer std.heap.page_allocator.free(buffer); 115 | 116 | var bs: dim.BinaryStream = .init_buffer(buffer); 117 | 118 | try data.contents.render(&bs); 119 | 120 | var fbs: std.io.FixedBufferStream([]u8) = .{ .buffer = buffer, .pos = 0 }; 121 | 122 | try exec.add_file(data.path, fbs.reader()); 123 | }, 124 | } 125 | } 126 | 127 | fn add_file(exec: Exec, path: [:0]const u8, reader: anytype) !void { 128 | if (std.fs.path.dirnamePosix(path)) |dir| { 129 | try exec.recursive_mkdir(dir); 130 | } 131 | 132 | try exec.inner_mkfile(path, reader); 133 | } 134 | 135 | fn recursive_mkdir(exec: Exec, path: []const u8) !void { 136 | var i: usize = 0; 137 | 138 | while (std.mem.indexOfScalarPos(u8, path, 
i, '/')) |index| { 139 | try exec.inner_mkdir(path[0..index]); 140 | i = index + 1; 141 | } 142 | 143 | try exec.inner_mkdir(path); 144 | } 145 | 146 | fn inner_mkfile(exec: Exec, path: []const u8, reader: anytype) dim.Content.RenderError!void { 147 | try exec.inner.mkfile(path, reader); 148 | } 149 | 150 | fn inner_mkdir(exec: Exec, path: []const u8) dim.Content.RenderError!void { 151 | try exec.inner.mkdir(path); 152 | } 153 | 154 | fn walk_err(err: (std.fs.Dir.OpenError || std.mem.Allocator.Error)) dim.Content.RenderError { 155 | return switch (err) { 156 | error.InvalidUtf8 => error.InvalidPath, 157 | error.InvalidWtf8 => error.InvalidPath, 158 | error.BadPathName => error.InvalidPath, 159 | error.NameTooLong => error.InvalidPath, 160 | 161 | error.OutOfMemory => error.OutOfMemory, 162 | error.FileNotFound => error.FileNotFound, 163 | 164 | error.DeviceBusy => error.IoError, 165 | error.AccessDenied => error.IoError, 166 | error.SystemResources => error.IoError, 167 | error.NoDevice => error.IoError, 168 | error.Unexpected => error.IoError, 169 | error.NetworkNotFound => error.IoError, 170 | error.SymLinkLoop => error.IoError, 171 | error.ProcessFdQuotaExceeded => error.IoError, 172 | error.SystemFdQuotaExceeded => error.IoError, 173 | error.NotDir => error.IoError, 174 | }; 175 | } 176 | }; 177 | } 178 | 179 | fn parse_path(ctx: dim.Context) ![:0]const u8 { 180 | const path = try ctx.parse_string(); 181 | 182 | if (path.len == 0) { 183 | try ctx.report_nonfatal_error("Path cannot be empty!", .{}); 184 | return ""; 185 | } 186 | 187 | if (!std.mem.startsWith(u8, path, "/")) { 188 | try ctx.report_nonfatal_error("Path '{}' did not start with a \"/\"", .{ 189 | std.zig.fmtEscapes(path), 190 | }); 191 | } 192 | 193 | for (path) |c| { 194 | if (c < 0x20 or c == 0x7F or c == '\\') { 195 | try ctx.report_nonfatal_error("Path '{}' contains invalid character 0x{X:0>2}", .{ 196 | std.zig.fmtEscapes(path), 197 | c, 198 | }); 199 | } 200 | } 201 | 202 | _ = std.unicode.Utf8View.init(path) catch |err| { 203 | try ctx.report_nonfatal_error("Path '{}' is not a valid UTF-8 string: {s}", .{ 204 | std.zig.fmtEscapes(path), 205 | @errorName(err), 206 | }); 207 | }; 208 | 209 | return try normalize(ctx.get_arena(), path); 210 | } 211 | 212 | pub fn parse_ops(ctx: dim.Context, end_seq: []const u8, handler: anytype) !void { 213 | while (true) { 214 | const opsel = try ctx.parse_string(); 215 | if (std.mem.eql(u8, opsel, end_seq)) 216 | return; 217 | 218 | if (std.mem.eql(u8, opsel, "mkdir")) { 219 | const path = try parse_path(ctx); 220 | try handler.append_common_op(FsOperation{ 221 | .make_dir = .{ .path = path }, 222 | }); 223 | } else if (std.mem.eql(u8, opsel, "copy-dir")) { 224 | const path = try parse_path(ctx); 225 | const src = try ctx.parse_file_name(); 226 | 227 | try handler.append_common_op(FsOperation{ 228 | .copy_dir = .{ .path = path, .source = src }, 229 | }); 230 | } else if (std.mem.eql(u8, opsel, "copy-file")) { 231 | const path = try parse_path(ctx); 232 | const src = try ctx.parse_file_name(); 233 | 234 | try handler.append_common_op(FsOperation{ 235 | .copy_file = .{ .path = path, .source = src }, 236 | }); 237 | } else if (std.mem.eql(u8, opsel, "create-file")) { 238 | const path = try parse_path(ctx); 239 | const size = try ctx.parse_mem_size(); 240 | const contents = try ctx.parse_content(); 241 | 242 | try handler.append_common_op(FsOperation{ 243 | .create_file = .{ .path = path, .size = size, .contents = contents }, 244 | }); 245 | } else { 246 | try 
handler.parse_custom_op(ctx, opsel); 247 | } 248 | } 249 | } 250 | 251 | fn normalize(allocator: std.mem.Allocator, src_path: []const u8) ![:0]const u8 { 252 | var list = std.ArrayList([]const u8).init(allocator); 253 | defer list.deinit(); 254 | 255 | var parts = std.mem.tokenizeAny(u8, src_path, "\\/"); 256 | 257 | while (parts.next()) |part| { 258 | if (std.mem.eql(u8, part, ".")) { 259 | // "cd same" is a no-op, we can remove it 260 | continue; 261 | } else if (std.mem.eql(u8, part, "..")) { 262 | // "cd up" is basically just removing the last pushed part 263 | _ = list.pop(); 264 | } else { 265 | // this is an actual "descend" 266 | try list.append(part); 267 | } 268 | } 269 | 270 | return try std.mem.joinZ(allocator, "/", list.items); 271 | } 272 | -------------------------------------------------------------------------------- /src/components/part/GptPartitionTable.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const dim = @import("../../dim.zig"); 3 | 4 | pub fn execute(ctx: dim.Context) !void { 5 | _ = ctx; 6 | @panic("gpt-part not implemented yet!"); 7 | } 8 | 9 | pub const gpt = struct { 10 | pub const Guid = [16]u8; 11 | 12 | pub const Table = struct { 13 | disk_id: Guid, 14 | 15 | partitions: []const Partition, 16 | }; 17 | 18 | pub const Partition = struct { 19 | type: Guid, 20 | part_id: Guid, 21 | 22 | offset: ?u64 = null, 23 | size: u64, 24 | 25 | name: [36]u16, 26 | 27 | attributes: Attributes, 28 | 29 | // data: Content, 30 | 31 | pub const Attributes = packed struct(u32) { 32 | system: bool, 33 | efi_hidden: bool, 34 | legacy: bool, 35 | read_only: bool, 36 | hidden: bool, 37 | no_automount: bool, 38 | 39 | padding: u26 = 0, 40 | }; 41 | }; 42 | 43 | /// https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs 44 | pub const PartitionType = struct { 45 | pub const unused: Guid = .{}; 46 | 47 | pub const microsoft_basic_data: Guid = .{}; 48 | pub const microsoft_reserved: Guid = .{}; 49 | 50 | pub const windows_recovery: Guid = .{}; 51 | 52 | pub const plan9: Guid = .{}; 53 | 54 | pub const linux_swap: Guid = .{}; 55 | pub const linux_fs: Guid = .{}; 56 | pub const linux_reserved: Guid = .{}; 57 | pub const linux_lvm: Guid = .{}; 58 | }; 59 | 60 | pub fn nameLiteral(comptime name: []const u8) [36]u16 { 61 | return comptime blk: { 62 | var buf: [36]u16 = undefined; 63 | const len = std.unicode.utf8ToUtf16Le(&buf, name) catch |err| @compileError(@tagName(err)); 64 | @memset(buf[len..], 0); 65 | break :blk &buf; 66 | }; 67 | } 68 | }; 69 | -------------------------------------------------------------------------------- /src/components/part/MbrPartitionTable.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! The `mbr-part` content will assembly a managed boot record partition table. 3 | //! 4 | //! 
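//! A minimal usage sketch (partition type, size and fill value are illustrative):
//!
//!     mbr-part
//!       part
//!         type fat32-lba
//!         size 100M
//!         contains fill 0x00
//!       endpart
//!       ignore
//!       ignore
//!       ignore
//!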
5 | const std = @import("std"); 6 | const dim = @import("../../dim.zig"); 7 | 8 | const PartTable = @This(); 9 | 10 | const block_size = 512; 11 | 12 | bootloader: ?dim.Content, 13 | disk_id: ?u32, 14 | partitions: [4]?Partition, 15 | 16 | pub fn parse(ctx: dim.Context) !dim.Content { 17 | const pf = try ctx.alloc_object(PartTable); 18 | pf.* = .{ 19 | .bootloader = null, 20 | .disk_id = null, 21 | .partitions = .{ 22 | null, 23 | null, 24 | null, 25 | null, 26 | }, 27 | }; 28 | 29 | var next_part_id: usize = 0; 30 | var last_part_id: ?usize = null; 31 | while (next_part_id < pf.partitions.len) { 32 | const kw = try ctx.parse_enum(enum { 33 | bootloader, 34 | part, 35 | ignore, 36 | }); 37 | switch (kw) { 38 | .bootloader => { 39 | const bootloader_content = try ctx.parse_content(); 40 | if (pf.bootloader != null) { 41 | try ctx.report_nonfatal_error("mbr-part.bootloader specified twice!", .{}); 42 | } 43 | pf.bootloader = bootloader_content; 44 | }, 45 | .ignore => { 46 | pf.partitions[next_part_id] = null; 47 | next_part_id += 1; 48 | }, 49 | .part => { 50 | pf.partitions[next_part_id] = try parse_partition(ctx); 51 | last_part_id = next_part_id; 52 | next_part_id += 1; 53 | }, 54 | } 55 | } 56 | 57 | if (last_part_id) |part_id| { 58 | for (0..part_id -| 1) |prev| { 59 | if (pf.partitions[prev].?.size == null) { 60 | try ctx.report_nonfatal_error("MBR partition {} does not have a size, but is not last.", .{prev}); 61 | } 62 | } 63 | 64 | var all_auto = true; 65 | var all_manual = true; 66 | for (pf.partitions) |part_or_null| { 67 | const part = part_or_null orelse continue; 68 | 69 | if (part.offset != null) { 70 | all_auto = false; 71 | } else { 72 | all_manual = false; 73 | } 74 | } 75 | 76 | if (!all_auto and !all_manual) { 77 | try ctx.report_nonfatal_error("not all partitions have an explicit offset!", .{}); 78 | } 79 | } 80 | 81 | return .create_handle(pf, .create(PartTable, .{ 82 | .render_fn = render, 83 | })); 84 | } 85 | 86 | fn parse_partition(ctx: dim.Context) !Partition { 87 | var part: Partition = .{ 88 | .offset = null, 89 | .size = null, 90 | .bootable = false, 91 | .type = 0x00, 92 | .contains = .empty, 93 | }; 94 | 95 | var updater: dim.FieldUpdater(Partition, &.{ 96 | .offset, 97 | .size, 98 | .bootable, 99 | }) = .init(ctx, &part); 100 | 101 | parse_loop: while (true) { 102 | const kw = try ctx.parse_enum(enum { 103 | type, 104 | bootable, 105 | size, 106 | offset, 107 | contains, 108 | endpart, 109 | }); 110 | try switch (kw) { 111 | .type => { 112 | const part_name = try ctx.parse_string(); 113 | 114 | const encoded = if (std.fmt.parseInt(u8, part_name, 0)) |value| 115 | value 116 | else |_| 117 | known_partition_types.get(part_name) orelse blk: { 118 | try ctx.report_nonfatal_error("unknown partition type '{}'", .{std.zig.fmtEscapes(part_name)}); 119 | break :blk 0x00; 120 | }; 121 | 122 | try updater.set(.type, encoded); 123 | }, 124 | .bootable => updater.set(.bootable, true), 125 | .size => updater.set(.size, try ctx.parse_mem_size()), 126 | .offset => updater.set(.offset, try ctx.parse_mem_size()), 127 | .contains => updater.set(.contains, try ctx.parse_content()), 128 | .endpart => break :parse_loop, 129 | }; 130 | } 131 | 132 | try updater.validate(); 133 | 134 | return part; 135 | } 136 | 137 | fn render(table: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderError!void { 138 | const last_part_id = blk: { 139 | var last: usize = 0; 140 | for (table.partitions, 0..) 
|p, i| { 141 | if (p != null) 142 | last = i; 143 | } 144 | break :blk last; 145 | }; 146 | 147 | const PartInfo = struct { 148 | offset: u64, 149 | size: u64, 150 | }; 151 | var part_infos: [4]?PartInfo = @splat(null); 152 | 153 | // Compute and write boot sector, based on the follow: 154 | // - https://en.wikipedia.org/wiki/Master_boot_record#Sector_layout 155 | { 156 | var boot_sector: [block_size]u8 = @splat(0); 157 | 158 | if (table.bootloader) |bootloader| { 159 | var sector: dim.BinaryStream = .init_buffer(&boot_sector); 160 | 161 | try bootloader.render(§or); 162 | 163 | const upper_limit: u64 = if (table.disk_id != null) 164 | 0x01B8 165 | else 166 | 0x1BE; 167 | 168 | if (sector.virtual_offset >= upper_limit) { 169 | // TODO(fqu): Emit warning diagnostics here that parts of the bootloader will be overwritten by the MBR data. 170 | } 171 | } 172 | 173 | if (table.disk_id) |disk_id| { 174 | std.mem.writeInt(u32, boot_sector[0x1B8..0x1BC], disk_id, .little); 175 | } 176 | 177 | // TODO(fqu): Implement "0x5A5A if copy-protected" 178 | std.mem.writeInt(u16, boot_sector[0x1BC..0x1BE], 0x0000, .little); 179 | 180 | const part_base = 0x01BE; 181 | var auto_offset: u64 = 2048 * block_size; // TODO(fqu): Make this configurable by allowing `offset` on the first partition, but still allow auto-layouting 182 | for (table.partitions, &part_infos, 0..) |part_or_null, *pinfo, part_id| { 183 | const desc: *[16]u8 = boot_sector[part_base + 16 * part_id ..][0..16]; 184 | 185 | // Initialize to "inactive" state 186 | desc.* = @splat(0); 187 | pinfo.* = null; 188 | 189 | if (part_or_null) |part| { 190 | // https://wiki.osdev.org/MBR#Partition_table_entry_format 191 | 192 | const part_offset = part.offset orelse auto_offset; 193 | const part_size = part.size orelse if (part_id == last_part_id) 194 | std.mem.alignBackward(u64, stream.length - part_offset, block_size) 195 | else 196 | return error.ConfigurationError; 197 | 198 | pinfo.* = .{ 199 | .offset = part_offset, 200 | .size = part_size, 201 | }; 202 | 203 | if ((part_offset % block_size) != 0) { 204 | std.log.err("partition offset is not divisible by {}!", .{block_size}); 205 | return error.ConfigurationError; 206 | } 207 | if ((part_size % block_size) != 0) { 208 | std.log.err("partition size is not divisible by {}!", .{block_size}); 209 | return error.ConfigurationError; 210 | } 211 | 212 | const lba_u64 = @divExact(part_offset, block_size); 213 | const size_u64 = @divExact(part_size, block_size); 214 | 215 | const lba = std.math.cast(u32, lba_u64) orelse { 216 | std.log.err("partition offset is out of bounds!", .{}); 217 | return error.ConfigurationError; 218 | }; 219 | const size = std.math.cast(u32, size_u64) orelse { 220 | std.log.err("partition size is out of bounds!", .{}); 221 | return error.ConfigurationError; 222 | }; 223 | 224 | desc[0] = if (part.bootable) 0x80 else 0x00; 225 | 226 | desc[1..4].* = encodeMbrChsEntry(lba); // chs_start 227 | desc[4] = part.type; 228 | desc[5..8].* = encodeMbrChsEntry(lba + size - 1); // chs_end 229 | std.mem.writeInt(u32, desc[8..12], lba, .little); // lba_start 230 | std.mem.writeInt(u32, desc[12..16], size, .little); // block_count 231 | 232 | auto_offset += part_size; 233 | } 234 | } 235 | boot_sector[0x01FE] = 0x55; 236 | boot_sector[0x01FF] = 0xAA; 237 | 238 | try stream.write(0, &boot_sector); 239 | } 240 | 241 | for (part_infos, table.partitions) |maybe_info, maybe_part| { 242 | const part = maybe_part orelse continue; 243 | const info = maybe_info orelse unreachable; 244 | 245 | var sub_view 
= try stream.slice(info.offset, info.size); 246 | 247 | try part.contains.render(&sub_view); 248 | } 249 | } 250 | 251 | pub const Partition = struct { 252 | offset: ?u64 = null, 253 | size: ?u64, 254 | 255 | bootable: bool, 256 | type: u8, 257 | 258 | contains: dim.Content, 259 | }; 260 | 261 | // TODO: Fill from https://en.wikipedia.org/wiki/Partition_type 262 | const known_partition_types = std.StaticStringMap(u8).initComptime(.{ 263 | .{ "empty", 0x00 }, 264 | 265 | .{ "fat12", 0x01 }, 266 | 267 | .{ "ntfs", 0x07 }, 268 | 269 | .{ "fat32-chs", 0x0B }, 270 | .{ "fat32-lba", 0x0C }, 271 | 272 | .{ "fat16-lba", 0x0E }, 273 | 274 | .{ "linux-swap", 0x82 }, 275 | .{ "linux-fs", 0x83 }, 276 | .{ "linux-lvm", 0x8E }, 277 | }); 278 | 279 | pub fn encodeMbrChsEntry(lba: u32) [3]u8 { 280 | var chs = lbaToChs(lba); 281 | 282 | if (chs.cylinder >= 1024) { 283 | chs = .{ 284 | .cylinder = 1023, 285 | .head = 255, 286 | .sector = 63, 287 | }; 288 | } 289 | 290 | const cyl: u10 = @intCast(chs.cylinder); 291 | const head: u8 = @intCast(chs.head); 292 | const sect: u6 = @intCast(chs.sector); 293 | 294 | const sect_cyl: u8 = @as(u8, 0xC0) & @as(u8, @truncate(cyl >> 2)) + sect; 295 | const sect_8: u8 = @truncate(cyl); 296 | 297 | return .{ head, sect_cyl, sect_8 }; 298 | } 299 | 300 | const CHS = struct { 301 | cylinder: u32, 302 | head: u8, // limit: 256 303 | sector: u6, // limit: 64 304 | 305 | pub fn init(c: u32, h: u8, s: u6) CHS { 306 | return .{ .cylinder = c, .head = h, .sector = s }; 307 | } 308 | }; 309 | 310 | pub fn lbaToChs(lba: u32) CHS { 311 | const hpc = 255; 312 | const spt = 63; 313 | 314 | // C, H and S are the cylinder number, the head number, and the sector number 315 | // LBA is the logical block address 316 | // HPC is the maximum number of heads per cylinder (reported by disk drive, typically 16 for 28-bit LBA) 317 | // SPT is the maximum number of sectors per track (reported by disk drive, typically 63 for 28-bit LBA) 318 | // LBA = (C * HPC + H) * SPT + (S - 1) 319 | 320 | const sector = (lba % spt); 321 | const cyl_head = (lba / spt); 322 | 323 | const head = (cyl_head % hpc); 324 | const cyl = (cyl_head / hpc); 325 | 326 | return CHS{ 327 | .sector = @intCast(sector + 1), 328 | .head = @intCast(head), 329 | .cylinder = cyl, 330 | }; 331 | } 332 | 333 | // test "lba to chs" { 334 | // // table from https://en.wikipedia.org/wiki/Logical_block_addressing#CHS_conversion 335 | // try std.testing.expectEqual(mbr.CHS.init(0, 0, 1), mbr.lbaToChs(0)); 336 | // try std.testing.expectEqual(mbr.CHS.init(0, 0, 2), mbr.lbaToChs(1)); 337 | // try std.testing.expectEqual(mbr.CHS.init(0, 0, 3), mbr.lbaToChs(2)); 338 | // try std.testing.expectEqual(mbr.CHS.init(0, 0, 63), mbr.lbaToChs(62)); 339 | // try std.testing.expectEqual(mbr.CHS.init(0, 1, 1), mbr.lbaToChs(63)); 340 | // try std.testing.expectEqual(mbr.CHS.init(0, 15, 1), mbr.lbaToChs(945)); 341 | // try std.testing.expectEqual(mbr.CHS.init(0, 15, 63), mbr.lbaToChs(1007)); 342 | // try std.testing.expectEqual(mbr.CHS.init(1, 0, 1), mbr.lbaToChs(1008)); 343 | // try std.testing.expectEqual(mbr.CHS.init(1, 0, 63), mbr.lbaToChs(1070)); 344 | // try std.testing.expectEqual(mbr.CHS.init(1, 1, 1), mbr.lbaToChs(1071)); 345 | // try std.testing.expectEqual(mbr.CHS.init(1, 1, 63), mbr.lbaToChs(1133)); 346 | // try std.testing.expectEqual(mbr.CHS.init(1, 2, 1), mbr.lbaToChs(1134)); 347 | // try std.testing.expectEqual(mbr.CHS.init(1, 15, 63), mbr.lbaToChs(2015)); 348 | // try std.testing.expectEqual(mbr.CHS.init(2, 0, 1), mbr.lbaToChs(2016)); 349 
| // try std.testing.expectEqual(mbr.CHS.init(15, 15, 63), mbr.lbaToChs(16127)); 350 | // try std.testing.expectEqual(mbr.CHS.init(16, 0, 1), mbr.lbaToChs(16128)); 351 | // try std.testing.expectEqual(mbr.CHS.init(31, 15, 63), mbr.lbaToChs(32255)); 352 | // try std.testing.expectEqual(mbr.CHS.init(32, 0, 1), mbr.lbaToChs(32256)); 353 | // try std.testing.expectEqual(mbr.CHS.init(16319, 15, 63), mbr.lbaToChs(16450559)); 354 | // try std.testing.expectEqual(mbr.CHS.init(16382, 15, 63), mbr.lbaToChs(16514063)); 355 | // } 356 | -------------------------------------------------------------------------------- /src/dim.zig: -------------------------------------------------------------------------------- 1 | //! 2 | //! Disk Imager Command Line 3 | //! 4 | const std = @import("std"); 5 | const builtin = @import("builtin"); 6 | 7 | const Tokenizer = @import("Tokenizer.zig"); 8 | const Parser = @import("Parser.zig"); 9 | const args = @import("args"); 10 | 11 | pub const std_options: std.Options = .{ 12 | .log_level = if (builtin.mode == .Debug) 13 | .debug 14 | else 15 | .info, 16 | .log_scope_levels = &.{ 17 | .{ .scope = .fatfs, .level = .info }, 18 | }, 19 | }; 20 | 21 | comptime { 22 | // Ensure zfat is linked to prevent compiler errors! 23 | _ = @import("zfat"); 24 | } 25 | 26 | const max_script_size = 10 * DiskSize.MiB; 27 | 28 | const Options = struct { 29 | output: ?[]const u8 = null, 30 | size: DiskSize = DiskSize.empty, 31 | script: ?[]const u8 = null, 32 | @"import-env": bool = false, 33 | @"deps-file": ?[]const u8 = null, 34 | }; 35 | 36 | const usage = 37 | \\dim OPTIONS [VARS] 38 | \\ 39 | \\OPTIONS: 40 | \\ --output 41 | \\ mandatory: where to store the output file 42 | \\ --size 43 | \\ mandatory: how big is the resulting disk image? allowed suffixes: k,K,M,G 44 | \\ --script 45 | \\ mandatory: which script file to execute? 
46 | \\[--import-env] 47 | \\ optional: if set, imports the current process environment into the variables 48 | \\VARS: 49 | \\{ KEY=VALUE }* 50 | \\ multiple ≥ 0: Sets variable KEY to VALUE 51 | \\ 52 | ; 53 | 54 | const VariableMap = std.StringArrayHashMapUnmanaged([]const u8); 55 | 56 | var global_deps_file: ?std.fs.File = null; 57 | 58 | pub fn main() !u8 { 59 | var gpa_impl: std.heap.DebugAllocator(.{}) = .init; 60 | defer _ = gpa_impl.deinit(); 61 | 62 | const gpa = gpa_impl.allocator(); 63 | 64 | const opts = try args.parseForCurrentProcess(Options, gpa, .print); 65 | defer opts.deinit(); 66 | 67 | const options = opts.options; 68 | 69 | const output_path = options.output orelse fatal("No output path specified"); 70 | const script_path = options.script orelse fatal("No script specified"); 71 | 72 | var var_map: VariableMap = .empty; 73 | defer var_map.deinit(gpa); 74 | 75 | var env_map = try std.process.getEnvMap(gpa); 76 | defer env_map.deinit(); 77 | 78 | if (options.@"import-env") { 79 | var iter = env_map.iterator(); 80 | while (iter.next()) |entry| { 81 | try var_map.putNoClobber(gpa, entry.key_ptr.*, entry.value_ptr.*); 82 | } 83 | } 84 | 85 | var bad_args = false; 86 | for (opts.positionals) |pos| { 87 | if (std.mem.indexOfScalar(u8, pos, '=')) |idx| { 88 | const key = pos[0..idx]; 89 | const val = pos[idx + 1 ..]; 90 | try var_map.put(gpa, key, val); 91 | } else { 92 | std.debug.print("unexpected argument positional '{}'\n", .{ 93 | std.zig.fmtEscapes(pos), 94 | }); 95 | bad_args = true; 96 | } 97 | } 98 | if (bad_args) 99 | return 1; 100 | 101 | const size_limit: u64 = options.size.size_in_bytes(); 102 | if (size_limit == 0) { 103 | return fatal("--size must be given!"); 104 | } 105 | 106 | var current_dir = try std.fs.cwd().openDir(".", .{}); 107 | defer current_dir.close(); 108 | 109 | const script_source = try current_dir.readFileAlloc(gpa, script_path, max_script_size); 110 | defer gpa.free(script_source); 111 | 112 | if (options.@"deps-file") |deps_file_path| { 113 | global_deps_file = try std.fs.cwd().createFile(deps_file_path, .{}); 114 | 115 | try global_deps_file.?.writer().print( 116 | \\{s}: {s} 117 | , .{ 118 | output_path, 119 | script_path, 120 | }); 121 | } 122 | defer if (global_deps_file) |deps_file| 123 | deps_file.close(); 124 | 125 | var mem_arena: std.heap.ArenaAllocator = .init(gpa); 126 | defer mem_arena.deinit(); 127 | 128 | var env = Environment{ 129 | .allocator = gpa, 130 | .arena = mem_arena.allocator(), 131 | .vars = &var_map, 132 | .include_base = current_dir, 133 | .parser = undefined, 134 | }; 135 | 136 | var parser = try Parser.init( 137 | gpa, 138 | &env.io, 139 | .{ 140 | .max_include_depth = 8, 141 | }, 142 | ); 143 | defer parser.deinit(); 144 | 145 | env.parser = &parser; 146 | 147 | try parser.push_source(.{ 148 | .path = script_path, 149 | .contents = script_source, 150 | }); 151 | 152 | const root_content: Content = env.parse_content() catch |err| switch (err) { 153 | error.FatalConfigError => return 1, 154 | 155 | else => |e| return e, 156 | }; 157 | 158 | if (env.error_flag) { 159 | return 1; 160 | } 161 | 162 | { 163 | var output_file = try current_dir.createFile(output_path, .{ .read = true }); 164 | defer output_file.close(); 165 | 166 | try output_file.setEndPos(size_limit); 167 | 168 | var stream: BinaryStream = .init_file(output_file, size_limit); 169 | 170 | try root_content.render(&stream); 171 | } 172 | 173 | if (global_deps_file) |deps_file| { 174 | try deps_file.writeAll("\n"); 175 | } 176 | 177 | return 0; 178 | } 179 | 
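/// Records `path` in the make-style dependency file requested via `--deps-file`
/// (a no-op when none was opened). Together with the `output: script` rule
/// emitted in `main`, the resulting file lets a build system rebuild the image
/// whenever any referenced input changes. Directories are skipped; only regular
/// files are appended.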
180 | pub fn declare_file_dependency(path: []const u8) !void { 181 | const deps_file = global_deps_file orelse return; 182 | 183 | const stat = std.fs.cwd().statFile(path) catch |err| switch (err) { 184 | error.IsDir => return, 185 | else => |e| return e, 186 | }; 187 | if (stat.kind != .directory) { 188 | try deps_file.writeAll(" \\\n "); 189 | try deps_file.writeAll(path); 190 | } 191 | } 192 | 193 | fn fatal(msg: []const u8) noreturn { 194 | std.debug.print("Error: {s}\n", .{msg}); 195 | std.debug.print("Usage: {s}", .{usage}); 196 | std.process.exit(1); 197 | } 198 | 199 | const content_types: []const struct { []const u8, type } = &.{ 200 | .{ "mbr-part", @import("components/part/MbrPartitionTable.zig") }, 201 | // .{ "gpt-part", @import("components/part/GptPartitionTable.zig") }, 202 | .{ "vfat", @import("components/fs/FatFileSystem.zig") }, 203 | .{ "paste-file", @import("components/PasteFile.zig") }, 204 | .{ "empty", @import("components/EmptyData.zig") }, 205 | .{ "fill", @import("components/FillData.zig") }, 206 | }; 207 | 208 | pub const Context = struct { 209 | env: *Environment, 210 | 211 | pub fn get_arena(ctx: Context) std.mem.Allocator { 212 | return ctx.env.arena; 213 | } 214 | 215 | pub fn alloc_object(ctx: Context, comptime T: type) error{OutOfMemory}!*T { 216 | return try ctx.env.arena.create(T); 217 | } 218 | 219 | pub fn report_nonfatal_error(ctx: Context, comptime msg: []const u8, params: anytype) error{OutOfMemory}!void { 220 | try ctx.env.report_error(msg, params); 221 | } 222 | 223 | pub fn report_fatal_error(ctx: Context, comptime msg: []const u8, params: anytype) error{ FatalConfigError, OutOfMemory } { 224 | try ctx.env.report_error(msg, params); 225 | return error.FatalConfigError; 226 | } 227 | 228 | pub fn parse_string(ctx: Context) Environment.ParseError![]const u8 { 229 | const str = try ctx.env.parser.next(); 230 | // std.debug.print("token: '{}'\n", .{std.zig.fmtEscapes(str)}); 231 | return str; 232 | } 233 | 234 | pub fn parse_file_name(ctx: Context) Environment.ParseError!FileName { 235 | const rel_path = try ctx.parse_string(); 236 | 237 | const abs_path = try ctx.env.parser.get_include_path(ctx.env.arena, rel_path); 238 | 239 | return .{ 240 | .root_dir = ctx.env.include_base, 241 | .rel_path = abs_path, 242 | }; 243 | } 244 | 245 | pub fn parse_enum(ctx: Context, comptime E: type) Environment.ParseError!E { 246 | if (@typeInfo(E) != .@"enum") 247 | @compileError("get_enum requires an enum type!"); 248 | const tag_name = try ctx.parse_string(); 249 | const converted = std.meta.stringToEnum( 250 | E, 251 | tag_name, 252 | ); 253 | if (converted) |ok| 254 | return ok; 255 | std.debug.print("detected invalid enum tag for {s}: \"{}\"\n", .{ @typeName(E), std.zig.fmtEscapes(tag_name) }); 256 | std.debug.print("valid options are:\n", .{}); 257 | 258 | for (std.enums.values(E)) |val| { 259 | std.debug.print("- '{s}'\n", .{@tagName(val)}); 260 | } 261 | 262 | return error.InvalidEnumTag; 263 | } 264 | 265 | pub fn parse_integer(ctx: Context, comptime I: type, base: u8) Environment.ParseError!I { 266 | if (@typeInfo(I) != .int) 267 | @compileError("get_integer requires an integer type!"); 268 | return std.fmt.parseInt( 269 | I, 270 | try ctx.parse_string(), 271 | base, 272 | ) catch return error.InvalidNumber; 273 | } 274 | 275 | pub fn parse_mem_size(ctx: Context) Environment.ParseError!u64 { 276 | const str = try ctx.parse_string(); 277 | 278 | const ds: DiskSize = try .parse(str); 279 | 280 | return ds.size_in_bytes(); 281 | } 282 | 283 | pub fn 
parse_content(ctx: Context) Environment.ParseError!Content { 284 | const content_type_str = try ctx.env.parser.next(); 285 | 286 | inline for (content_types) |tn| { 287 | const name, const impl = tn; 288 | 289 | if (std.mem.eql(u8, name, content_type_str)) { 290 | const content: Content = try impl.parse(ctx); 291 | 292 | return content; 293 | } 294 | } 295 | 296 | return ctx.report_fatal_error("unknown content type: '{}'", .{ 297 | std.zig.fmtEscapes(content_type_str), 298 | }); 299 | } 300 | }; 301 | 302 | pub fn FieldUpdater(comptime Obj: type, comptime optional_fields: []const std.meta.FieldEnum(Obj)) type { 303 | return struct { 304 | const FUP = @This(); 305 | const FieldName = std.meta.FieldEnum(Obj); 306 | 307 | ctx: Context, 308 | target: *Obj, 309 | 310 | updated_fields: std.EnumSet(FieldName) = .initEmpty(), 311 | 312 | pub fn init(ctx: Context, target: *Obj) FUP { 313 | return .{ 314 | .ctx = ctx, 315 | .target = target, 316 | }; 317 | } 318 | 319 | pub fn set(fup: *FUP, comptime field: FieldName, value: @FieldType(Obj, @tagName(field))) !void { 320 | if (fup.updated_fields.contains(field)) { 321 | try fup.ctx.report_nonfatal_error("duplicate assignment of {s}.{s}", .{ 322 | @typeName(Obj), 323 | @tagName(field), 324 | }); 325 | } 326 | 327 | @field(fup.target, @tagName(field)) = value; 328 | fup.updated_fields.insert(field); 329 | } 330 | 331 | pub fn validate(fup: FUP) !void { 332 | var missing_fields = fup.updated_fields; 333 | for (optional_fields) |fld| { 334 | missing_fields.insert(fld); 335 | } 336 | missing_fields = missing_fields.complement(); 337 | var iter = missing_fields.iterator(); 338 | while (iter.next()) |fld| { 339 | try fup.ctx.report_nonfatal_error("missing assignment of {s}.{s}", .{ 340 | @typeName(Obj), 341 | @tagName(fld), 342 | }); 343 | } 344 | } 345 | }; 346 | } 347 | 348 | const Environment = struct { 349 | const ParseError = Parser.Error || error{ 350 | OutOfMemory, 351 | UnexpectedEndOfFile, 352 | InvalidNumber, 353 | UnknownContentType, 354 | FatalConfigError, 355 | InvalidEnumTag, 356 | Overflow, 357 | InvalidSize, 358 | }; 359 | 360 | arena: std.mem.Allocator, 361 | allocator: std.mem.Allocator, 362 | parser: *Parser, 363 | include_base: std.fs.Dir, 364 | vars: *const VariableMap, 365 | error_flag: bool = false, 366 | 367 | io: Parser.IO = .{ 368 | .fetch_file_fn = fetch_file, 369 | .resolve_variable_fn = resolve_var, 370 | }, 371 | 372 | fn parse_content(env: *Environment) ParseError!Content { 373 | var ctx = Context{ .env = env }; 374 | 375 | return try ctx.parse_content(); 376 | } 377 | 378 | fn report_error(env: *Environment, comptime fmt: []const u8, params: anytype) error{OutOfMemory}!void { 379 | env.error_flag = true; 380 | std.log.err("PARSE ERROR: " ++ fmt, params); 381 | } 382 | 383 | fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { 384 | const env: *const Environment = @fieldParentPtr("io", io); 385 | 386 | const contents = env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { 387 | error.OutOfMemory => return error.OutOfMemory, 388 | error.FileNotFound => { 389 | const ctx = Context{ .env = @constCast(env) }; 390 | var buffer: [std.fs.max_path_bytes]u8 = undefined; 391 | try ctx.report_nonfatal_error("failed to open file: \"{}/{}\"", .{ 392 | std.zig.fmtEscapes(env.include_base.realpath(".", &buffer) catch return error.FileNotFound), 393 | std.zig.fmtEscapes(path), 394 | }); 395 | return 
error.FileNotFound; 396 | }, 397 | else => return error.IoError, 398 | }; 399 | errdefer allocator.free(contents); 400 | 401 | const name: FileName = .{ .root_dir = env.include_base, .rel_path = path }; 402 | try name.declare_dependency(); 403 | 404 | return contents; 405 | } 406 | 407 | fn resolve_var(io: *const Parser.IO, name: []const u8) error{UnknownVariable}![]const u8 { 408 | const env: *const Environment = @fieldParentPtr("io", io); 409 | return env.vars.get(name) orelse return error.UnknownVariable; 410 | } 411 | }; 412 | 413 | /// A "Content" is something that will fill a given space of a disk image. 414 | /// It can be raw data, a pattern, a file system, a partition table, ... 415 | /// 416 | /// 417 | pub const Content = struct { 418 | pub const RenderError = FileName.OpenError || FileHandle.ReadError || BinaryStream.WriteError || error{ 419 | ConfigurationError, 420 | OutOfBounds, 421 | OutOfMemory, 422 | }; 423 | pub const GuessError = FileName.GetSizeError; 424 | 425 | obj: *anyopaque, 426 | vtable: *const VTable, 427 | 428 | pub const empty: Content = @import("components/EmptyData.zig").parse(undefined) catch unreachable; 429 | 430 | pub fn create_handle(obj: *anyopaque, vtable: *const VTable) Content { 431 | return .{ .obj = obj, .vtable = vtable }; 432 | } 433 | 434 | /// Emits the content into a binary stream. 435 | pub fn render(content: Content, stream: *BinaryStream) RenderError!void { 436 | try content.vtable.render_fn(content.obj, stream); 437 | } 438 | 439 | pub const VTable = struct { 440 | render_fn: *const fn (*anyopaque, *BinaryStream) RenderError!void, 441 | 442 | pub fn create( 443 | comptime Container: type, 444 | comptime funcs: struct { 445 | render_fn: *const fn (*Container, *BinaryStream) RenderError!void, 446 | }, 447 | ) *const VTable { 448 | const Wrap = struct { 449 | fn render(self: *anyopaque, stream: *BinaryStream) RenderError!void { 450 | return funcs.render_fn( 451 | @ptrCast(@alignCast(self)), 452 | stream, 453 | ); 454 | } 455 | }; 456 | return comptime &.{ 457 | .render_fn = Wrap.render, 458 | }; 459 | } 460 | }; 461 | }; 462 | 463 | pub const FileName = struct { 464 | root_dir: std.fs.Dir, 465 | rel_path: []const u8, 466 | 467 | pub const OpenError = error{ FileNotFound, InvalidPath, IoError }; 468 | 469 | pub fn open(name: FileName) OpenError!FileHandle { 470 | const file = name.root_dir.openFile(name.rel_path, .{}) catch |err| switch (err) { 471 | error.FileNotFound => { 472 | var buffer: [std.fs.max_path_bytes]u8 = undefined; 473 | std.log.err("failed to open \"{}/{}\": not found", .{ 474 | std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), 475 | std.zig.fmtEscapes(name.rel_path), 476 | }); 477 | return error.FileNotFound; 478 | }, 479 | 480 | error.NameTooLong, 481 | error.InvalidWtf8, 482 | error.BadPathName, 483 | error.InvalidUtf8, 484 | => return error.InvalidPath, 485 | 486 | error.NoSpaceLeft, 487 | error.FileTooBig, 488 | error.DeviceBusy, 489 | error.AccessDenied, 490 | error.SystemResources, 491 | error.WouldBlock, 492 | error.NoDevice, 493 | error.Unexpected, 494 | error.SharingViolation, 495 | error.PathAlreadyExists, 496 | error.PipeBusy, 497 | error.NetworkNotFound, 498 | error.AntivirusInterference, 499 | error.SymLinkLoop, 500 | error.ProcessFdQuotaExceeded, 501 | error.SystemFdQuotaExceeded, 502 | error.IsDir, 503 | error.NotDir, 504 | error.FileLocksNotSupported, 505 | error.FileBusy, 506 | => return error.IoError, 507 | }; 508 | 509 | try name.declare_dependency(); 510 | 511 | return .{ 
.file = file }; 512 | } 513 | 514 | pub fn open_dir(name: FileName) OpenError!std.fs.Dir { 515 | const dir = name.root_dir.openDir(name.rel_path, .{ .iterate = true }) catch |err| switch (err) { 516 | error.FileNotFound => { 517 | var buffer: [std.fs.max_path_bytes]u8 = undefined; 518 | std.log.err("failed to open \"{}/{}\": not found", .{ 519 | std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), 520 | std.zig.fmtEscapes(name.rel_path), 521 | }); 522 | return error.FileNotFound; 523 | }, 524 | 525 | error.NameTooLong, 526 | error.InvalidWtf8, 527 | error.BadPathName, 528 | error.InvalidUtf8, 529 | => return error.InvalidPath, 530 | 531 | error.DeviceBusy, 532 | error.AccessDenied, 533 | error.SystemResources, 534 | error.NoDevice, 535 | error.Unexpected, 536 | error.NetworkNotFound, 537 | error.SymLinkLoop, 538 | error.ProcessFdQuotaExceeded, 539 | error.SystemFdQuotaExceeded, 540 | error.NotDir, 541 | => return error.IoError, 542 | }; 543 | 544 | try name.declare_dependency(); 545 | 546 | return dir; 547 | } 548 | 549 | pub fn declare_dependency(name: FileName) OpenError!void { 550 | var buffer: [std.fs.max_path_bytes]u8 = undefined; 551 | 552 | const realpath = name.root_dir.realpath( 553 | name.rel_path, 554 | &buffer, 555 | ) catch |e| std.debug.panic("failed to determine real path for dependency file: {s}", .{@errorName(e)}); 556 | declare_file_dependency(realpath) catch |e| std.debug.panic("Failed to write to deps file: {s}", .{@errorName(e)}); 557 | } 558 | 559 | pub const GetSizeError = error{ FileNotFound, InvalidPath, IoError }; 560 | pub fn get_size(name: FileName) GetSizeError!u64 { 561 | const stat = name.root_dir.statFile(name.rel_path) catch |err| switch (err) { 562 | error.FileNotFound => return error.FileNotFound, 563 | 564 | error.NameTooLong, 565 | error.InvalidWtf8, 566 | error.BadPathName, 567 | error.InvalidUtf8, 568 | => return error.InvalidPath, 569 | 570 | error.NoSpaceLeft, 571 | error.FileTooBig, 572 | error.DeviceBusy, 573 | error.AccessDenied, 574 | error.SystemResources, 575 | error.WouldBlock, 576 | error.NoDevice, 577 | error.Unexpected, 578 | error.SharingViolation, 579 | error.PathAlreadyExists, 580 | error.PipeBusy, 581 | 582 | error.NetworkNotFound, 583 | error.AntivirusInterference, 584 | error.SymLinkLoop, 585 | error.ProcessFdQuotaExceeded, 586 | error.SystemFdQuotaExceeded, 587 | error.IsDir, 588 | error.NotDir, 589 | error.FileLocksNotSupported, 590 | error.FileBusy, 591 | => return error.IoError, 592 | }; 593 | return stat.size; 594 | } 595 | 596 | pub fn copy_to(file: FileName, stream: *BinaryStream) (OpenError || FileHandle.ReadError || BinaryStream.WriteError)!void { 597 | var handle = try file.open(); 598 | defer handle.close(); 599 | 600 | var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); 601 | 602 | try fifo.pump( 603 | handle.reader(), 604 | stream.writer(), 605 | ); 606 | } 607 | }; 608 | 609 | pub const FileHandle = struct { 610 | pub const ReadError = error{ReadFileFailed}; 611 | 612 | pub const Reader = std.io.Reader(std.fs.File, ReadError, read_some); 613 | 614 | file: std.fs.File, 615 | 616 | pub fn close(fd: *FileHandle) void { 617 | fd.file.close(); 618 | fd.* = undefined; 619 | } 620 | 621 | pub fn reader(fd: FileHandle) Reader { 622 | return .{ .context = fd.file }; 623 | } 624 | 625 | fn read_some(file: std.fs.File, data: []u8) ReadError!usize { 626 | return file.read(data) catch |err| switch (err) { 627 | error.InputOutput, 628 | error.AccessDenied, 629 | error.BrokenPipe, 630 | 
error.SystemResources, 631 | error.OperationAborted, 632 | error.LockViolation, 633 | error.WouldBlock, 634 | error.ConnectionResetByPeer, 635 | error.ProcessNotFound, 636 | error.Unexpected, 637 | error.IsDir, 638 | error.ConnectionTimedOut, 639 | error.NotOpenForReading, 640 | error.SocketNotConnected, 641 | error.Canceled, 642 | => return error.ReadFileFailed, 643 | }; 644 | } 645 | }; 646 | 647 | pub const BinaryStream = struct { 648 | pub const WriteError = error{ Overflow, IoError }; 649 | pub const ReadError = error{ Overflow, IoError }; 650 | pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); 651 | 652 | backing: Backing, 653 | 654 | virtual_offset: u64 = 0, 655 | 656 | /// Max number of bytes that can be written 657 | length: u64, 658 | 659 | /// Constructs a BinaryStream from a slice. 660 | pub fn init_buffer(data: []u8) BinaryStream { 661 | return .{ 662 | .backing = .{ .buffer = data.ptr }, 663 | .length = data.len, 664 | }; 665 | } 666 | 667 | /// Constructs a BinaryStream from a file. 668 | pub fn init_file(file: std.fs.File, max_len: u64) BinaryStream { 669 | return .{ 670 | .backing = .{ 671 | .file = .{ 672 | .file = file, 673 | .base = 0, 674 | }, 675 | }, 676 | .length = max_len, 677 | }; 678 | } 679 | 680 | /// Returns a view into the stream. 681 | pub fn slice(bs: BinaryStream, offset: u64, length: ?u64) error{OutOfBounds}!BinaryStream { 682 | if (offset > bs.length) 683 | return error.OutOfBounds; 684 | const true_length = length orelse bs.length - offset; 685 | if (true_length > bs.length) 686 | return error.OutOfBounds; 687 | 688 | return .{ 689 | .length = true_length, 690 | .backing = switch (bs.backing) { 691 | .buffer => |old| .{ .buffer = old + offset }, 692 | .file => |old| .{ 693 | .file = .{ 694 | .file = old.file, 695 | .base = old.base + offset, 696 | }, 697 | }, 698 | }, 699 | }; 700 | } 701 | 702 | pub fn read(bs: *BinaryStream, offset: u64, data: []u8) ReadError!void { 703 | const end_pos = offset + data.len; 704 | if (end_pos > bs.length) 705 | return error.Overflow; 706 | 707 | switch (bs.backing) { 708 | .buffer => |ptr| @memcpy(data, ptr[@intCast(offset)..][0..data.len]), 709 | .file => |state| { 710 | state.file.seekTo(state.base + offset) catch return error.IoError; 711 | state.file.reader().readNoEof(data) catch |err| switch (err) { 712 | error.InputOutput, 713 | error.AccessDenied, 714 | error.BrokenPipe, 715 | error.SystemResources, 716 | error.OperationAborted, 717 | error.LockViolation, 718 | error.WouldBlock, 719 | error.ConnectionResetByPeer, 720 | error.ProcessNotFound, 721 | error.Unexpected, 722 | error.IsDir, 723 | error.ConnectionTimedOut, 724 | error.NotOpenForReading, 725 | error.SocketNotConnected, 726 | error.Canceled, 727 | error.EndOfStream, 728 | => return error.IoError, 729 | }; 730 | }, 731 | } 732 | } 733 | 734 | pub fn write(bs: *BinaryStream, offset: u64, data: []const u8) WriteError!void { 735 | const end_pos = offset + data.len; 736 | if (end_pos > bs.length) 737 | return error.Overflow; 738 | 739 | switch (bs.backing) { 740 | .buffer => |ptr| @memcpy(ptr[@intCast(offset)..][0..data.len], data), 741 | .file => |state| { 742 | state.file.seekTo(state.base + offset) catch return error.IoError; 743 | state.file.writeAll(data) catch |err| switch (err) { 744 | error.DiskQuota, error.NoSpaceLeft, error.FileTooBig => return error.Overflow, 745 | 746 | error.InputOutput, 747 | error.DeviceBusy, 748 | error.InvalidArgument, 749 | error.AccessDenied, 750 | error.BrokenPipe, 751 | error.SystemResources, 752 | 
error.OperationAborted, 753 | error.NotOpenForWriting, 754 | error.LockViolation, 755 | error.WouldBlock, 756 | error.ConnectionResetByPeer, 757 | error.ProcessNotFound, 758 | error.NoDevice, 759 | error.Unexpected, 760 | => return error.IoError, 761 | }; 762 | }, 763 | } 764 | } 765 | 766 | pub fn seek_to(bs: *BinaryStream, offset: u64) error{OutOfBounds}!void { 767 | if (offset > bs.length) 768 | return error.OutOfBounds; 769 | bs.virtual_offset = offset; 770 | } 771 | 772 | pub fn writer(bs: *BinaryStream) Writer { 773 | return .{ .context = bs }; 774 | } 775 | 776 | fn write_some(stream: *BinaryStream, data: []const u8) WriteError!usize { 777 | const remaining_len = stream.length - stream.virtual_offset; 778 | 779 | const written_len: usize = @intCast(@min(remaining_len, data.len)); 780 | 781 | try stream.write(stream.virtual_offset, data[0..written_len]); 782 | stream.virtual_offset += written_len; 783 | 784 | return written_len; 785 | } 786 | 787 | pub const Backing = union(enum) { 788 | file: struct { 789 | file: std.fs.File, 790 | base: u64, 791 | }, 792 | buffer: [*]u8, 793 | }; 794 | }; 795 | 796 | test { 797 | _ = Tokenizer; 798 | _ = Parser; 799 | } 800 | 801 | pub const DiskSize = enum(u64) { 802 | const KiB = 1024; 803 | const MiB = 1024 * 1024; 804 | const GiB = 1024 * 1024 * 1024; 805 | 806 | pub const empty: DiskSize = @enumFromInt(0); 807 | 808 | _, 809 | 810 | pub fn parse(str: []const u8) error{ InvalidSize, Overflow }!DiskSize { 811 | const suffix_scaling: ?u64 = if (std.mem.endsWith(u8, str, "K") or std.mem.endsWith(u8, str, "k")) 812 | KiB 813 | else if (std.mem.endsWith(u8, str, "M")) 814 | MiB 815 | else if (std.mem.endsWith(u8, str, "G")) 816 | GiB 817 | else 818 | null; 819 | 820 | const cutoff: usize = if (suffix_scaling != null) 1 else 0; 821 | 822 | const numeric_text = std.mem.trim(u8, str[0 .. str.len - cutoff], " \t\r\n"); 823 | 824 | const raw_number = std.fmt.parseInt(u64, numeric_text, 0) catch |err| switch (err) { 825 | error.Overflow => return error.Overflow, 826 | error.InvalidCharacter => return error.InvalidSize, 827 | }; 828 | 829 | const byte_size = if (suffix_scaling) |scale| 830 | try std.math.mul(u64, raw_number, scale) 831 | else 832 | raw_number; 833 | 834 | return @enumFromInt(byte_size); 835 | } 836 | 837 | pub fn size_in_bytes(ds: DiskSize) u64 { 838 | return @intFromEnum(ds); 839 | } 840 | 841 | pub fn format(ds: DiskSize, fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { 842 | _ = fmt; 843 | _ = opt; 844 | 845 | const size = ds.size_in_bytes(); 846 | 847 | const div: u64, const unit: []const u8 = if (size > GiB) 848 | .{ GiB, " GiBi" } 849 | else if (size > MiB) 850 | .{ MiB, " MeBi" } 851 | else if (size > KiB) 852 | .{ KiB, " KiBi" } 853 | else 854 | .{ 1, " B" }; 855 | 856 | if (size == 0) { 857 | try writer.writeAll("0 B"); 858 | return; 859 | } 860 | 861 | const scaled_value = (1000 * size) / div; 862 | 863 | var buf: [std.math.log2_int_ceil(u64, std.math.maxInt(u64))]u8 = undefined; 864 | const divided = try std.fmt.bufPrint(&buf, "{d}", .{scaled_value}); 865 | 866 | std.debug.assert(divided.len >= 3); 867 | 868 | const prefix, const suffix = .{ 869 | divided[0 .. 
divided.len - 3], 870 | std.mem.trimRight(u8, divided[divided.len - 3 ..], "0"), 871 | }; 872 | 873 | if (suffix.len > 0) { 874 | try writer.print("{s}.{s}{s}", .{ prefix, suffix, unit }); 875 | } else { 876 | try writer.print("{s}{s}", .{ prefix, unit }); 877 | } 878 | } 879 | }; 880 | 881 | test DiskSize { 882 | const KiB = 1024; 883 | const MiB = 1024 * 1024; 884 | const GiB = 1024 * 1024 * 1024; 885 | 886 | const patterns: []const struct { u64, []const u8 } = &.{ 887 | .{ 0, "0" }, 888 | .{ 1000, "1000" }, 889 | .{ 4096, "0x1000" }, 890 | .{ 4096 * MiB, "0x1000 M" }, 891 | .{ 1 * KiB, "1K" }, 892 | .{ 1 * KiB, "1K" }, 893 | .{ 1 * KiB, "1 K" }, 894 | .{ 150 * KiB, "150K" }, 895 | 896 | .{ 1 * MiB, "1M" }, 897 | .{ 1 * MiB, "1M" }, 898 | .{ 1 * MiB, "1 M" }, 899 | .{ 150 * MiB, "150M" }, 900 | 901 | .{ 1 * GiB, "1G" }, 902 | .{ 1 * GiB, "1G" }, 903 | .{ 1 * GiB, "1 G" }, 904 | .{ 150 * GiB, "150G" }, 905 | }; 906 | 907 | for (patterns) |pat| { 908 | const size_in_bytes, const stringified = pat; 909 | const actual_size = try DiskSize.parse(stringified); 910 | 911 | try std.testing.expectEqual(size_in_bytes, actual_size.size_in_bytes()); 912 | } 913 | } 914 | -------------------------------------------------------------------------------- /tests/basic/empty.dis: -------------------------------------------------------------------------------- 1 | empty 2 | -------------------------------------------------------------------------------- /tests/basic/fill-0x00.dis: -------------------------------------------------------------------------------- 1 | fill 0x00 2 | -------------------------------------------------------------------------------- /tests/basic/fill-0xAA.dis: -------------------------------------------------------------------------------- 1 | fill 0xAA 2 | -------------------------------------------------------------------------------- /tests/basic/fill-0xFF.dis: -------------------------------------------------------------------------------- 1 | fill 0xFF 2 | -------------------------------------------------------------------------------- /tests/basic/raw.dis: -------------------------------------------------------------------------------- 1 | paste-file ./raw.dis 2 | -------------------------------------------------------------------------------- /tests/compound/mbr-boot.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | bootloader empty 3 | part # partition 1 4 | type fat16-lba 5 | contains vfat fat16 6 | label "BOOT" 7 | endfat 8 | size 10M 9 | endpart 10 | part # partition 2 11 | type fat16-lba 12 | contains vfat fat16 13 | label "OS" 14 | !include "../../data/rootfs.dis" 15 | endfat 16 | endpart 17 | ignore # partition 3 18 | ignore # partition 4 19 | -------------------------------------------------------------------------------- /tests/fs/fat12.dis: -------------------------------------------------------------------------------- 1 | vfat fat12 2 | label "Demo FS" 3 | !include ../../data/rootfs.dis 4 | endfat 5 | -------------------------------------------------------------------------------- /tests/fs/fat16.dis: -------------------------------------------------------------------------------- 1 | vfat fat16 2 | label "Demo FS" 3 | !include ../../data/rootfs.dis 4 | endfat 5 | -------------------------------------------------------------------------------- /tests/fs/fat32.dis: -------------------------------------------------------------------------------- 1 | vfat fat32 2 | label "Demo FS" 3 | !include ../../data/rootfs.dis 
4 | endfat 5 | -------------------------------------------------------------------------------- /tests/part/mbr/basic-single-part-sized.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | part 3 | type empty 4 | contains fill 0xAA 5 | size 10M 6 | endpart 7 | ignore # partition 2 8 | ignore # partition 3 9 | ignore # partition 4 10 | -------------------------------------------------------------------------------- /tests/part/mbr/basic-single-part-unsized.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | part 3 | type empty 4 | contains fill 0xAA 5 | endpart 6 | ignore # partition 2 7 | ignore # partition 3 8 | ignore # partition 4 9 | -------------------------------------------------------------------------------- /tests/part/mbr/minimal.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | ignore # partition 1 3 | ignore # partition 2 4 | ignore # partition 3 5 | ignore # partition 4 6 | -------------------------------------------------------------------------------- /tests/part/mbr/no-part-bootloader.dis: -------------------------------------------------------------------------------- 1 | mbr-part 2 | bootloader paste-file ./minimal.dis 3 | ignore # partition 1 4 | ignore # partition 2 5 | ignore # partition 3 6 | ignore # partition 4 7 | -------------------------------------------------------------------------------- /tests/zig-build-interface/build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Dimmer = @import("dimmer").BuildInterface; 3 | 4 | pub const KiB = 1024; 5 | pub const MiB = 1024 * KiB; 6 | pub const GiB = 1024 * MiB; 7 | 8 | pub fn build(b: *std.Build) void { 9 | const dimmer_dep = b.dependency("dimmer", .{}); 10 | 11 | const dimmer: Dimmer = .init(b, dimmer_dep); 12 | 13 | const install_step = b.getInstallStep(); 14 | 15 | installDebugDisk(dimmer, install_step, "empty.img", 50 * KiB, .empty); 16 | installDebugDisk(dimmer, install_step, "fill-0x00.img", 50 * KiB, .{ .fill = 0x00 }); 17 | installDebugDisk(dimmer, install_step, "fill-0xAA.img", 50 * KiB, .{ .fill = 0xAA }); 18 | installDebugDisk(dimmer, install_step, "fill-0xFF.img", 50 * KiB, .{ .fill = 0xFF }); 19 | installDebugDisk(dimmer, install_step, "paste-file.img", 50 * KiB, .{ .paste_file = b.path("build.zig.zon") }); 20 | 21 | // installDebugDisk(dimmer, install_step, "empty-mbr.img", 50 * MiB, .{ 22 | // .mbr_part_table = .{ 23 | // .partitions = .{ 24 | // null, 25 | // null, 26 | // null, 27 | // null, 28 | // }, 29 | // }, 30 | // }); 31 | 32 | // installDebugDisk(dimmer, install_step, "manual-offset-mbr.img", 50 * MiB, .{ 33 | // .mbr_part_table = .{ 34 | // .partitions = .{ 35 | // &.{ .offset = 2048 + 0 * 10 * MiB, .size = 10 * MiB, .bootable = true, .type = .fat32_lba, .data = .empty }, 36 | // &.{ .offset = 2048 + 1 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .ntfs, .data = .empty }, 37 | // &.{ .offset = 2048 + 2 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_swap, .data = .empty }, 38 | // &.{ .offset = 2048 + 3 * 10 * MiB, .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .empty }, 39 | // }, 40 | // }, 41 | // }); 42 | 43 | // installDebugDisk(dimmer, install_step, "auto-offset-mbr.img", 50 * MiB, .{ 44 | // .mbr_part_table = .{ 45 | // .partitions = .{ 46 | // &.{ .size = 7 * MiB, .bootable = true, .type = .fat32_lba, .data = 
.empty }, 47 | // &.{ .size = 8 * MiB, .bootable = false, .type = .ntfs, .data = .empty }, 48 | // &.{ .size = 9 * MiB, .bootable = false, .type = .linux_swap, .data = .empty }, 49 | // &.{ .size = 10 * MiB, .bootable = false, .type = .linux_fs, .data = .empty }, 50 | // }, 51 | // }, 52 | // }); 53 | 54 | // installDebugDisk(dimmer, install_step, "empty-fat32.img", 50 * MiB, .{ 55 | // .vfat = .{ 56 | // .format = .fat32, 57 | // .label = "EMPTY", 58 | // .items = &.{}, 59 | // }, 60 | // }); 61 | 62 | // installDebugDisk(dimmer, install_step, "initialized-fat32.img", 50 * MiB, .{ 63 | // .vfat = .{ 64 | // .format = .fat32, 65 | // .label = "ROOTFS", 66 | // .items = &.{ 67 | // .{ .empty_dir = "boot/EFI/refind/icons" }, 68 | // .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, 69 | // .{ .empty_dir = "Users/xq/" }, 70 | // .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, 71 | // .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, 72 | // }, 73 | // }, 74 | // }); 75 | 76 | // installDebugDisk(dimmer, install_step, "initialized-fat32-in-mbr-partitions.img", 100 * MiB, .{ 77 | // .mbr = .{ 78 | // .partitions = .{ 79 | // &.{ 80 | // .size = 90 * MiB, 81 | // .bootable = true, 82 | // .type = .fat32_lba, 83 | // .data = .{ 84 | // .vfat = .{ 85 | // .format = .fat32, 86 | // .label = "ROOTFS", 87 | // .items = &.{ 88 | // .{ .empty_dir = "boot/EFI/refind/icons" }, 89 | // .{ .empty_dir = "/boot/EFI/nixos/.extra-files/" }, 90 | // .{ .empty_dir = "Users/xq/" }, 91 | // .{ .copy_dir = .{ .source = b.path("dummy/Windows"), .destination = "Windows" } }, 92 | // .{ .copy_file = .{ .source = b.path("dummy/README.md"), .destination = "Users/xq/README.md" } }, 93 | // }, 94 | // }, 95 | // }, 96 | // }, 97 | // null, 98 | // null, 99 | // null, 100 | // }, 101 | // }, 102 | // }); 103 | 104 | // TODO: Implement GPT partition support 105 | // installDebugDisk(debug_step, "empty-gpt.img", 50 * MiB, .{ 106 | // .gpt = .{ 107 | // .partitions = &.{}, 108 | // }, 109 | // }); 110 | } 111 | 112 | fn installDebugDisk( 113 | dimmer: Dimmer, 114 | install_step: *std.Build.Step, 115 | name: []const u8, 116 | size: u64, 117 | content: Dimmer.Content, 118 | ) void { 119 | const disk_file = dimmer.createDisk(size, content); 120 | 121 | const install_disk = install_step.owner.addInstallFile( 122 | disk_file, 123 | name, 124 | ); 125 | install_step.dependOn(&install_disk.step); 126 | } 127 | -------------------------------------------------------------------------------- /tests/zig-build-interface/build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = .dimmer_usage_demo, 3 | .fingerprint = 0x6a630b1cfa8384c, 4 | .version = "1.0.0", 5 | .dependencies = .{ 6 | .dimmer = .{ 7 | .path = "../..", 8 | }, 9 | }, 10 | .paths = .{ 11 | ".", 12 | }, 13 | } 14 | --------------------------------------------------------------------------------