├── .gitignore ├── src ├── net │ ├── lib.zig │ └── socket.zig ├── runtime │ ├── timer.zig │ ├── task.zig │ ├── storage.zig │ ├── scheduler.zig │ └── lib.zig ├── tests.zig ├── frame │ ├── asm │ │ ├── x86_64_sysv.asm │ │ ├── aarch64_gen.asm │ │ └── x86_64_win.asm │ └── lib.zig ├── cross │ ├── lib.zig │ ├── fd.zig │ └── socket.zig ├── fs │ ├── lib.zig │ └── dir.zig ├── core │ ├── queue.zig │ ├── atomic_ring.zig │ ├── ring.zig │ ├── atomic_bitset.zig │ ├── zero_copy.zig │ └── pool.zig ├── aio │ ├── job.zig │ ├── completion.zig │ ├── lib.zig │ └── apis │ │ └── poll.zig ├── channel │ └── spsc.zig └── lib.zig ├── test ├── e2e │ ├── lib.zig │ ├── second.zig │ ├── first.zig │ ├── main.zig │ ├── file_chain.zig │ └── tcp_chain.zig └── e2e.sh ├── examples ├── basic │ └── main.zig ├── rmdir │ └── main.zig ├── stat │ └── main.zig ├── shove │ └── main.zig ├── channel │ └── main.zig ├── cat │ └── main.zig ├── http │ └── main.zig ├── echo │ └── main.zig └── stream │ └── main.zig ├── flake.nix ├── flake.lock ├── .github └── workflows │ └── ci.yaml ├── README.md └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | zig-out/ 2 | .zig-cache/ 3 | perf*.data* 4 | heaptrack* 5 | -------------------------------------------------------------------------------- /src/net/lib.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub const Socket = @import("socket.zig").Socket; 4 | -------------------------------------------------------------------------------- /src/runtime/timer.zig: -------------------------------------------------------------------------------- 1 | const Frame = @import("../frame/lib.zig").Frame; 2 | const Timespec = @import("../lib.zig").Timespec; 3 | const Runtime = @import("lib.zig").Runtime; 4 | 5 | pub const Timer = struct { 6 | pub fn delay(rt: *Runtime, timespec: Timespec) !void { 7 | try rt.scheduler.io_await(.{ .timer = timespec }); 8 | } 9 
| }; 10 | -------------------------------------------------------------------------------- /test/e2e/lib.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Atomic = std.atomic.Value; 3 | 4 | pub const log = std.log.scoped(.@"tardy/e2e"); 5 | 6 | pub const SharedParams = struct { 7 | // Seed Info 8 | seed_string: [:0]const u8, 9 | seed: u64, 10 | 11 | // Tardy Initalization 12 | size_tasks_initial: usize, 13 | size_aio_reap_max: usize, 14 | }; 15 | -------------------------------------------------------------------------------- /src/tests.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const testing = std.testing; 3 | 4 | test "tardy unit tests" { 5 | // Core 6 | testing.refAllDecls(@import("./core/atomic_ring.zig")); 7 | testing.refAllDecls(@import("./core/pool.zig")); 8 | testing.refAllDecls(@import("./core/ring.zig")); 9 | testing.refAllDecls(@import("./core/zero_copy.zig")); 10 | 11 | // Runtime 12 | testing.refAllDecls(@import("./runtime/storage.zig")); 13 | } 14 | -------------------------------------------------------------------------------- /src/frame/asm/x86_64_sysv.asm: -------------------------------------------------------------------------------- 1 | .global _tardy_swap_frame 2 | .global tardy_swap_frame 3 | 4 | _tardy_swap_frame: 5 | tardy_swap_frame: 6 | pushq %rbx 7 | pushq %rbp 8 | pushq %r12 9 | pushq %r13 10 | pushq %r14 11 | pushq %r15 12 | 13 | // swap stacks 14 | movq %rsp, (%rdi) 15 | movq (%rsi), %rsp 16 | 17 | popq %r15 18 | popq %r14 19 | popq %r13 20 | popq %r12 21 | popq %rbp 22 | popq %rbx 23 | retq 24 | -------------------------------------------------------------------------------- /src/cross/lib.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub const fd = @import("fd.zig"); 4 | pub const socket = 
@import("socket.zig"); 5 | 6 | /// Get the `fd_t` for `stdin`. 7 | pub fn get_std_in() std.posix.fd_t { 8 | return std.fs.File.stdin().handle; 9 | } 10 | 11 | /// Get the `fd_t` for `stdout`. 12 | pub fn get_std_out() std.posix.fd_t { 13 | return std.fs.File.stdout().handle; 14 | } 15 | 16 | /// Get the `fd_t` for `stderr`. 17 | pub fn get_std_err() std.posix.fd_t { 18 | return std.fs.File.stderr().handle; 19 | } 20 | -------------------------------------------------------------------------------- /test/e2e.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # TODO: implement this test in Zig 3 | 4 | set -e 5 | count=$1 6 | 7 | for ((i = 1; i <= count; i++)) 8 | do 9 | rand64=$((RANDOM)) 10 | rand64=$((rand64 | RANDOM << 15)) 11 | rand64=$((rand64 | RANDOM << 30)) 12 | rand64=$((rand64 | RANDOM << 45)) 13 | 14 | echo "running e2e with argument $rand64" 15 | zig build -Dasync=io_uring test_e2e -- "$rand64" 16 | zig build -Dasync=epoll test_e2e -- "$rand64" 17 | zig build -Dasync=poll test_e2e -- "$rand64" 18 | echo "$rand64 passed" 19 | done 20 | -------------------------------------------------------------------------------- /src/runtime/task.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | 4 | const Result = @import("../aio/completion.zig").Result; 5 | const Frame = @import("../frame/lib.zig").Frame; 6 | const Runtime = @import("../runtime/lib.zig").Runtime; 7 | 8 | const log = std.log.scoped(.@"tardy/runtime/task"); 9 | 10 | pub const Task = struct { 11 | pub const State = union(enum) { 12 | /// Waiting for a Runtime Trigger. 13 | wait_for_trigger, 14 | /// Waiting for an Async I/O Event. 15 | wait_for_io, 16 | /// Immediately Runnable. 17 | runnable, 18 | /// Dead. 19 | dead, 20 | }; 21 | // 1 byte 22 | state: State = .dead, 23 | // no idea on bytes. 
24 | result: Result = .none, 25 | // 8 bytes 26 | index: usize, 27 | // 8 bytes 28 | frame: *Frame, 29 | }; 30 | -------------------------------------------------------------------------------- /src/frame/asm/aarch64_gen.asm: -------------------------------------------------------------------------------- 1 | .global _tardy_swap_frame 2 | .global tardy_swap_frame 3 | 4 | _tardy_swap_frame: 5 | tardy_swap_frame: 6 | stp lr, fp, [sp, #-20*8]! 7 | stp d8, d9, [sp, #2*8] 8 | stp d10, d11, [sp, #4*8] 9 | stp d12, d13, [sp, #6*8] 10 | stp d14, d15, [sp, #8*8] 11 | stp x19, x20, [sp, #10*8] 12 | stp x21, x22, [sp, #12*8] 13 | stp x23, x24, [sp, #14*8] 14 | stp x25, x26, [sp, #16*8] 15 | stp x27, x28, [sp, #18*8] 16 | 17 | mov x9, sp 18 | str x9, [x0] 19 | ldr x9, [x1] 20 | mov sp, x9 21 | 22 | ldp x27, x28, [sp, #18*8] 23 | ldp x25, x26, [sp, #16*8] 24 | ldp x23, x24, [sp, #14*8] 25 | ldp x21, x22, [sp, #12*8] 26 | ldp x19, x20, [sp, #10*8] 27 | ldp d14, d15, [sp, #8*8] 28 | ldp d12, d13, [sp, #6*8] 29 | ldp d10, d11, [sp, #4*8] 30 | ldp d8, d9, [sp, #2*8] 31 | ldp lr, fp, [sp], #20*8 32 | 33 | ret 34 | -------------------------------------------------------------------------------- /src/cross/fd.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const builtin = @import("builtin"); 3 | const os = builtin.os.tag; 4 | 5 | /// Invalid `fd_t`. 6 | pub const INVALID_FD = if (os == .windows) std.os.windows.INVALID_HANDLE_VALUE else -1; 7 | 8 | /// Ensures that the `std.posix.fd_t` is valid. 9 | pub fn is_valid(fd: std.posix.fd_t) bool { 10 | switch (comptime os) { 11 | .windows => return fd != std.os.windows.INVALID_HANDLE_VALUE, 12 | else => return fd >= 0, 13 | } 14 | } 15 | 16 | pub fn to_nonblock(fd: std.posix.fd_t) !void { 17 | if (comptime os == .windows) { 18 | // windows doesn't have non-blocking I/O w/o overlapped. 
19 | } else { 20 | const current_flags = try std.posix.fcntl(fd, std.posix.F.GETFL, 0); 21 | var new_flags = @as( 22 | std.posix.O, 23 | @bitCast(@as(u32, @intCast(current_flags))), 24 | ); 25 | new_flags.NONBLOCK = true; 26 | const arg: u32 = @bitCast(new_flags); 27 | _ = try std.posix.fcntl(fd, std.posix.F.SETFL, arg); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/fs/lib.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Timespec = @import("../lib.zig").Timespec; 4 | pub const Dir = @import("dir.zig").Dir; 5 | pub const File = @import("file.zig").File; 6 | 7 | pub const Path = union(enum) { 8 | /// Relative to given Directory 9 | rel: struct { 10 | dir: std.posix.fd_t, 11 | path: [:0]const u8, 12 | }, 13 | /// Absolute Path 14 | abs: [:0]const u8, 15 | 16 | pub fn dupe(self: *const Path, allocator: std.mem.Allocator) !Path { 17 | switch (self.*) { 18 | .rel => |inner| { 19 | const path_dupe = try allocator.dupeZ(u8, inner.path); 20 | errdefer allocator.free(path_dupe); 21 | return .{ .rel = .{ .dir = inner.dir, .path = path_dupe } }; 22 | }, 23 | .abs => |path| return .{ .abs = try allocator.dupeZ(u8, path) }, 24 | } 25 | } 26 | }; 27 | 28 | pub const Stat = struct { 29 | size: u64, 30 | mode: u64 = 0, 31 | accessed: ?Timespec = null, 32 | modified: ?Timespec = null, 33 | changed: ?Timespec = null, 34 | }; 35 | -------------------------------------------------------------------------------- /examples/basic/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Runtime = @import("tardy").Runtime; 4 | const Task = @import("tardy").Task; 5 | const Timer = @import("tardy").Timer; 6 | 7 | const Tardy = @import("tardy").Tardy(.auto); 8 | const log = std.log.scoped(.@"tardy/example/basic"); 9 | 10 | fn log_frame(rt: *Runtime) !void { 11 | var count: 
usize = 0;
12 |
13 |     while (count < 10) : (count += 1) {
14 |         log.debug("{d} - tardy example | {d}", .{ std.time.milliTimestamp(), count });
15 |         try Timer.delay(rt, .{ .seconds = 1 });
16 |     }
17 | }
18 |
19 | pub fn main() !void {
20 |     const allocator = std.heap.page_allocator;
21 |
22 |     var tardy: Tardy = try .init(allocator, .{
23 |         .threading = .single,
24 |         .pooling = .static,
25 |         .size_tasks_initial = 2,
26 |         .size_aio_reap_max = 2,
27 |     });
28 |     defer tardy.deinit();
29 |
30 |     try tardy.entry(
31 |         {},
32 |         struct {
33 |             fn init(rt: *Runtime, _: void) !void {
34 |                 try rt.spawn(.{rt}, log_frame, 1024 * 16);
35 |             }
36 |         }.init,
37 |     );
38 | }
39 |
--------------------------------------------------------------------------------
/src/core/queue.zig:
--------------------------------------------------------------------------------
 1 | const std = @import("std");
 2 |
 3 | /// Allocator-backed FIFO queue of `T`, built on a doubly-linked list.
 4 | /// Each `append` allocates one node; `pop`/`deinit` free them.
 5 | /// NOTE(review): `std.DoublyLinkedList(T)` became an intrusive, non-generic
 6 | /// list in newer Zig std releases — confirm against the std version this
 7 | /// project targets (the flake pins 0.15.1).
 8 | pub fn Queue(comptime T: type) type {
 9 |     const List = std.DoublyLinkedList(T);
10 |     const Node = List.Node;
11 |
12 |     return struct {
13 |         // Fix: the original signatures used `Queue` as a type, but inside
14 |         // this scope `Queue` names the outer generic *function*
15 |         // (`fn (comptime type) type`), not the returned struct — any
16 |         // instantiation would fail to compile. The struct type must be
17 |         // referenced via `@This()`.
18 |         const Self = @This();
19 |
20 |         allocator: std.mem.Allocator,
21 |         items: List,
22 |
23 |         /// Creates an empty queue. `deinit` must be called to free any
24 |         /// nodes still enqueued.
25 |         pub fn init(allocator: std.mem.Allocator) Self {
26 |             return .{ .allocator = allocator, .items = List{} };
27 |         }
28 |
29 |         /// Destroys every remaining node. Items of `T` are not deinitialized.
30 |         pub fn deinit(self: *Self) void {
31 |             while (self.items.pop()) |node| self.allocator.destroy(node);
32 |         }
33 |
34 |         /// Enqueues `item` at the back. Fails only on allocation failure.
35 |         pub fn append(self: *Self, item: T) !void {
36 |             const node = try self.allocator.create(Node);
37 |             node.* = .{ .data = item };
38 |             self.items.append(node);
39 |         }
40 |
41 |         /// Dequeues from the front, or returns null when empty.
42 |         pub fn pop(self: *Self) ?T {
43 |             const node = self.items.popFirst() orelse return null;
44 |             defer self.allocator.destroy(node);
45 |             return node.data;
46 |         }
47 |
48 |         /// Dequeues from the front, asserting the queue is non-empty.
49 |         pub fn pop_assert(self: *Self) T {
50 |             const node = self.items.popFirst().?;
51 |             defer self.allocator.destroy(node);
52 |             return node.data;
53 |         }
54 |     };
55 | }
56 |
--------------------------------------------------------------------------------
/flake.nix:
-------------------------------------------------------------------------------- 1 | { 2 | description = "an asynchronous runtime for Zig"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:nixos/nixpkgs/release-25.05"; 6 | 7 | zigPkgs.url = "github:mitchellh/zig-overlay"; 8 | zigPkgs.inputs.nixpkgs.follows = "nixpkgs"; 9 | 10 | zlsPkg.url = "github:zigtools/zls/0.15.0"; 11 | zlsPkg.inputs.zig-overlay.follows = "zigPkgs"; 12 | zlsPkg.inputs.nixpkgs.follows = "nixpkgs"; 13 | 14 | flake-utils.url = "github:numtide/flake-utils"; 15 | }; 16 | 17 | outputs = 18 | { 19 | nixpkgs, 20 | zigPkgs, 21 | zlsPkg, 22 | flake-utils, 23 | ... 24 | }: 25 | flake-utils.lib.eachDefaultSystem ( 26 | system: 27 | let 28 | overlays = [ 29 | (final: prev: { 30 | zigpkgs = zigPkgs.packages.${prev.system}; 31 | zls = zlsPkg.packages.${prev.system}.default; 32 | }) 33 | ]; 34 | pkgs = import nixpkgs { inherit system overlays; }; 35 | in 36 | { 37 | devShells.default = pkgs.mkShell { 38 | packages = with pkgs; [ 39 | zigpkgs."0.15.1" 40 | zls 41 | openssl 42 | inetutils 43 | wrk 44 | ]; 45 | }; 46 | } 47 | ); 48 | } 49 | -------------------------------------------------------------------------------- /src/frame/asm/x86_64_win.asm: -------------------------------------------------------------------------------- 1 | .global tardy_swap_frame 2 | 3 | tardy_swap_frame: 4 | pushq %gs:0x10 5 | pushq %gs:0x08 6 | 7 | pushq %rbx 8 | pushq %rbp 9 | pushq %rdi 10 | pushq %rsi 11 | pushq %r12 12 | pushq %r13 13 | pushq %r14 14 | pushq %r15 15 | 16 | subq $160, %rsp 17 | movups %xmm6, 0x00(%rsp) 18 | movups %xmm7, 0x10(%rsp) 19 | movups %xmm8, 0x20(%rsp) 20 | movups %xmm9, 0x30(%rsp) 21 | movups %xmm10, 0x40(%rsp) 22 | movups %xmm11, 0x50(%rsp) 23 | movups %xmm12, 0x60(%rsp) 24 | movups %xmm13, 0x70(%rsp) 25 | movups %xmm14, 0x80(%rsp) 26 | movups %xmm15, 0x90(%rsp) 27 | 28 | movq %rsp, (%rcx) 29 | movq (%rdx), %rsp 30 | 31 | movups 0x00(%rsp), %xmm6 32 | movups 0x10(%rsp), %xmm7 33 | movups 0x20(%rsp), %xmm8 
34 | movups 0x30(%rsp), %xmm9 35 | movups 0x40(%rsp), %xmm10 36 | movups 0x50(%rsp), %xmm11 37 | movups 0x60(%rsp), %xmm12 38 | movups 0x70(%rsp), %xmm13 39 | movups 0x80(%rsp), %xmm14 40 | movups 0x90(%rsp), %xmm15 41 | addq $160, %rsp 42 | 43 | popq %r15 44 | popq %r14 45 | popq %r13 46 | popq %r12 47 | popq %rsi 48 | popq %rdi 49 | popq %rbp 50 | popq %rbx 51 | 52 | popq %gs:0x08 53 | popq %gs:0x10 54 | 55 | retq 56 | -------------------------------------------------------------------------------- /examples/rmdir/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Cross = @import("tardy").Cross; 4 | const Dir = @import("tardy").Dir; 5 | const Runtime = @import("tardy").Runtime; 6 | const Task = @import("tardy").Task; 7 | 8 | const Tardy = @import("tardy").Tardy(.auto); 9 | const log = std.log.scoped(.@"tardy/example/rmdir"); 10 | 11 | fn main_frame(rt: *Runtime, name: [:0]const u8) !void { 12 | try Dir.cwd().delete_tree(rt, name); 13 | log.debug("deleted tree :)", .{}); 14 | } 15 | 16 | pub fn main() !void { 17 | var gpa: std.heap.DebugAllocator(.{ .thread_safe = true }) = .init; 18 | const allocator = gpa.allocator(); 19 | defer _ = gpa.deinit(); 20 | 21 | var tardy: Tardy = try .init(allocator, .{ 22 | .threading = .single, 23 | .pooling = .static, 24 | .size_tasks_initial = 1, 25 | .size_aio_reap_max = 1, 26 | }); 27 | defer tardy.deinit(); 28 | 29 | var i: usize = 0; 30 | var args = try std.process.argsWithAllocator(allocator); 31 | defer args.deinit(); 32 | 33 | const tree_name: [:0]const u8 = blk: { 34 | while (args.next()) |arg| : (i += 1) { 35 | if (i == 1) break :blk arg; 36 | } 37 | 38 | try std.fs.File.stdout().writeAll("tree name not passed in: ./rmdir [tree name]"); 39 | return; 40 | }; 41 | 42 | try tardy.entry( 43 | tree_name, 44 | struct { 45 | fn start(rt: *Runtime, name: [:0]const u8) !void { 46 | try rt.spawn(.{ rt, name }, main_frame, 1024 * 1024 * 2); 47 | } 
48 | }.start, 49 | ); 50 | } 51 | -------------------------------------------------------------------------------- /examples/stat/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Dir = @import("tardy").Dir; 4 | const File = @import("tardy").File; 5 | const Runtime = @import("tardy").Runtime; 6 | const Stat = @import("tardy").Stat; 7 | const StatResult = @import("tardy").StatResult; 8 | const Task = @import("tardy").Task; 9 | 10 | const Tardy = @import("tardy").Tardy(.auto); 11 | const log = std.log.scoped(.@"tardy/example/stat"); 12 | 13 | fn main_frame(rt: *Runtime, name: [:0]const u8) !void { 14 | const file = try Dir.cwd().open_file(rt, name, .{}); 15 | defer file.close_blocking(); 16 | 17 | const stat = try file.stat(rt); 18 | std.debug.print("stat: {any}\n", .{stat}); 19 | } 20 | 21 | pub fn main() !void { 22 | var gpa: std.heap.DebugAllocator(.{}) = .init; 23 | const allocator = gpa.allocator(); 24 | defer _ = gpa.deinit(); 25 | 26 | var tardy: Tardy = try .init(allocator, .{ 27 | .threading = .single, 28 | }); 29 | defer tardy.deinit(); 30 | 31 | var i: usize = 0; 32 | var args = try std.process.argsWithAllocator(allocator); 33 | defer args.deinit(); 34 | 35 | const file_name: [:0]const u8 = blk: { 36 | while (args.next()) |arg| : (i += 1) { 37 | if (i == 1) break :blk arg; 38 | } 39 | 40 | try std.fs.File.stdout().writeAll("file name not passed in: ./stat [file name]"); 41 | return; 42 | }; 43 | 44 | try tardy.entry( 45 | file_name, 46 | struct { 47 | fn init(rt: *Runtime, path: [:0]const u8) !void { 48 | try rt.spawn(.{ rt, path }, main_frame, 1024 * 1024 * 2); 49 | } 50 | }.init, 51 | ); 52 | } 53 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | 
"locked": { 8 | "lastModified": 1731533236, 9 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1752148952, 24 | "narHash": "sha256-DjAgyOTqqg97s/eKIeLrLBHNLPhtul8e58i9MkC1L/c=", 25 | "owner": "nixos", 26 | "repo": "nixpkgs", 27 | "rev": "fd771c56ad7059fc6c9c6ea43cb7a6ba26c8159f", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "nixos", 32 | "ref": "release-25.05", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "root": { 38 | "inputs": { 39 | "flake-utils": "flake-utils", 40 | "nixpkgs": "nixpkgs" 41 | } 42 | }, 43 | "systems": { 44 | "locked": { 45 | "lastModified": 1681028828, 46 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 47 | "owner": "nix-systems", 48 | "repo": "default", 49 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "nix-systems", 54 | "repo": "default", 55 | "type": "github" 56 | } 57 | } 58 | }, 59 | "root": "root", 60 | "version": 7 61 | } 62 | -------------------------------------------------------------------------------- /src/cross/socket.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const builtin = @import("builtin"); 3 | const os = builtin.os.tag; 4 | 5 | const Timespec = @import("../lib.zig").Timespec; 6 | 7 | /// Invalid `socket_t`. 8 | pub const INVALID_SOCKET = if (os == .windows) std.os.windows.ws2_32.INVALID_SOCKET else -1; 9 | 10 | /// Ensures that the `std.posix.socket_t` is valid. 
11 | pub fn is_valid(socket: std.posix.socket_t) bool { 12 | switch (comptime os) { 13 | .windows => return socket != std.os.windows.ws2_32.INVALID_SOCKET, 14 | else => return socket >= 0, 15 | } 16 | } 17 | 18 | /// Sets the `std.posix.socket_t` to nonblocking. 19 | pub fn to_nonblock(socket: std.posix.socket_t) !void { 20 | if (comptime os == .windows) { 21 | var mode: u32 = 1; 22 | _ = std.os.windows.ws2_32.ioctlsocket( 23 | socket, 24 | std.os.windows.ws2_32.FIONBIO, 25 | &mode, 26 | ); 27 | } else { 28 | const current_flags = try std.posix.fcntl(socket, std.posix.F.GETFL, 0); 29 | var new_flags = @as( 30 | std.posix.O, 31 | @bitCast(@as(u32, @intCast(current_flags))), 32 | ); 33 | new_flags.NONBLOCK = true; 34 | const arg: u32 = @bitCast(new_flags); 35 | _ = try std.posix.fcntl(socket, std.posix.F.SETFL, arg); 36 | } 37 | } 38 | 39 | pub fn disable_nagle(socket: std.posix.socket_t) !void { 40 | if (comptime os.isBSD()) { 41 | // system.TCP is weird on MacOS. 42 | try std.posix.setsockopt( 43 | socket, 44 | std.posix.IPPROTO.TCP, 45 | 1, 46 | &std.mem.toBytes(@as(c_int, 1)), 47 | ); 48 | } else { 49 | try std.posix.setsockopt( 50 | socket, 51 | std.posix.IPPROTO.TCP, 52 | std.posix.TCP.NODELAY, 53 | &std.mem.toBytes(@as(c_int, 1)), 54 | ); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/aio/job.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Path = @import("../fs/lib.zig").Path; 4 | const Timespec = @import("../lib.zig").Timespec; 5 | const Socket = @import("../net/lib.zig").Socket; 6 | const AsyncOpenFlags = @import("lib.zig").AsyncOpenFlags; 7 | 8 | pub const Job = struct { 9 | type: union(enum) { 10 | wake, 11 | timer: TimerJob, 12 | open: OpenJob, 13 | mkdir: MkdirJob, 14 | delete: DeleteJob, 15 | stat: std.posix.fd_t, 16 | read: ReadJob, 17 | write: WriteJob, 18 | close: std.posix.fd_t, 19 | accept: AcceptJob, 20 | 
connect: ConnectJob, 21 | send: SendJob, 22 | recv: RecvJob, 23 | }, 24 | 25 | index: usize = 0, 26 | task: usize = 0, 27 | }; 28 | 29 | const TimerJob = union(enum) { 30 | none, 31 | fd: std.posix.fd_t, 32 | ns: i128, 33 | }; 34 | 35 | const OpenJob = struct { 36 | path: Path, 37 | kind: enum { file, dir }, 38 | flags: AsyncOpenFlags, 39 | }; 40 | 41 | const MkdirJob = struct { 42 | path: Path, 43 | mode: isize, 44 | }; 45 | 46 | const DeleteJob = struct { 47 | path: Path, 48 | is_dir: bool, 49 | }; 50 | 51 | const ReadJob = struct { 52 | fd: std.posix.fd_t, 53 | buffer: []u8, 54 | offset: ?usize, 55 | }; 56 | 57 | const WriteJob = struct { 58 | fd: std.posix.fd_t, 59 | buffer: []const u8, 60 | offset: ?usize, 61 | }; 62 | 63 | const AcceptJob = struct { 64 | socket: std.posix.socket_t, 65 | addr: std.net.Address, 66 | addr_len: usize = @sizeOf(std.net.Address), 67 | kind: Socket.Kind, 68 | }; 69 | 70 | const ConnectJob = struct { 71 | socket: std.posix.socket_t, 72 | addr: std.net.Address, 73 | kind: Socket.Kind, 74 | }; 75 | 76 | const SendJob = struct { 77 | socket: std.posix.socket_t, 78 | buffer: []const u8, 79 | }; 80 | 81 | const RecvJob = struct { 82 | socket: std.posix.socket_t, 83 | buffer: []u8, 84 | }; 85 | -------------------------------------------------------------------------------- /examples/shove/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Cross = @import("tardy").Cross; 4 | const Dir = @import("tardy").Dir; 5 | const File = @import("tardy").File; 6 | const OpenFileResult = @import("tardy").OpenFileResult; 7 | const ReadResult = @import("tardy").ReadResult; 8 | const Runtime = @import("tardy").Runtime; 9 | const Task = @import("tardy").Task; 10 | const WriteResult = @import("tardy").WriteResult; 11 | 12 | const Tardy = @import("tardy").Tardy(.auto); 13 | const log = std.log.scoped(.@"tardy/example/shove"); 14 | pub const std_options: std.Options = .{ 
.log_level = .debug }; 15 | 16 | fn main_frame(rt: *Runtime, name: [:0]const u8) !void { 17 | const file = try Dir.cwd().create_file(rt, name, .{}); 18 | for (0..8) |_| _ = try file.write_all(rt, "*shoved*\n", null); 19 | 20 | const stat = try file.stat(rt); 21 | std.debug.print("size: {d}\n", .{stat.size}); 22 | 23 | try file.close(rt); 24 | } 25 | 26 | pub fn main() !void { 27 | var gpa: std.heap.DebugAllocator(.{ .thread_safe = true }) = .init; 28 | const allocator = gpa.allocator(); 29 | defer _ = gpa.deinit(); 30 | 31 | var tardy: Tardy = try .init(allocator, .{ 32 | .threading = .single, 33 | .pooling = .grow, 34 | .size_tasks_initial = 1, 35 | .size_aio_reap_max = 1, 36 | }); 37 | defer tardy.deinit(); 38 | 39 | var i: usize = 0; 40 | var args = try std.process.argsWithAllocator(allocator); 41 | defer args.deinit(); 42 | 43 | const file_name: [:0]const u8 = blk: { 44 | while (args.next()) |arg| : (i += 1) if (i == 1) break :blk arg; 45 | try std.fs.File.stdout().writeAll("file name not passed in: ./shove [file name]"); 46 | return; 47 | }; 48 | 49 | try tardy.entry( 50 | file_name, 51 | struct { 52 | fn start(rt: *Runtime, name: [:0]const u8) !void { 53 | try rt.spawn(.{ rt, name }, main_frame, 1024 * 1024 * 2); 54 | } 55 | }.start, 56 | ); 57 | } 58 | -------------------------------------------------------------------------------- /test/e2e/second.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | 4 | const Runtime = @import("tardy").Runtime; 5 | const Socket = @import("tardy").Socket; 6 | 7 | const log = @import("lib.zig").log; 8 | const SharedParams = @import("lib.zig").SharedParams; 9 | const TcpClientChain = @import("tcp_chain.zig").TcpClientChain; 10 | const TcpServerChain = @import("tcp_chain.zig").TcpServerChain; 11 | 12 | pub const STACK_SIZE = 1024 * 1024 * 8; 13 | threadlocal var tcp_client_chain_count: usize = 1; 14 | threadlocal var 
tcp_server_chain_count: usize = 1; 15 | 16 | pub fn start_frame(rt: *Runtime, shared_params: *const SharedParams) !void { 17 | var prng: std.Random.DefaultPrng = .init(shared_params.seed); 18 | const rand = prng.random(); 19 | 20 | const port: u16 = rand.intRangeLessThan(u16, 30000, @intCast(std.math.maxInt(u16))); 21 | log.debug("tcp chain port: {d}", .{port}); 22 | const socket = try Socket.init(.{ .tcp = .{ .host = "127.0.0.1", .port = port } }); 23 | try socket.bind(); 24 | try socket.listen(128); 25 | 26 | const chain = try TcpServerChain.generate_random_chain(rt.allocator, shared_params.seed); 27 | log.info("creating tcp chain... ({d})", .{chain.len}); 28 | defer rt.allocator.free(chain); 29 | 30 | const server_chain_ptr = try rt.allocator.create(TcpServerChain); 31 | errdefer rt.allocator.destroy(server_chain_ptr); 32 | 33 | const client_chain_ptr = try rt.allocator.create(TcpClientChain); 34 | errdefer rt.allocator.destroy(client_chain_ptr); 35 | 36 | server_chain_ptr.* = try .init(rt.allocator, chain, 4096); 37 | client_chain_ptr.* = try server_chain_ptr.derive_client_chain(); 38 | 39 | try rt.spawn( 40 | .{ client_chain_ptr, rt, &tcp_client_chain_count, port }, 41 | TcpClientChain.chain_frame, 42 | STACK_SIZE, 43 | ); 44 | try rt.spawn( 45 | .{ server_chain_ptr, rt, &tcp_server_chain_count, socket }, 46 | TcpServerChain.chain_frame, 47 | STACK_SIZE, 48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /examples/channel/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Runtime = @import("tardy").Runtime; 4 | const Spsc = @import("tardy").Spsc; 5 | const Task = @import("tardy").Task; 6 | const Timer = @import("tardy").Timer; 7 | 8 | const Tardy = @import("tardy").Tardy(.auto); 9 | 10 | const log = std.log.scoped(.@"tardy/example/channel"); 11 | pub const std_options: std.Options = .{ .log_level = .err }; 12 | 13 | const 
MAX_COUNT = 100; 14 | 15 | fn producer_frame(rt: *Runtime, producer: Spsc(usize).Producer) !void { 16 | _ = rt; 17 | defer producer.close(); 18 | 19 | var count: usize = 0; 20 | while (count <= MAX_COUNT) : (count += 1) { 21 | try producer.send(count); 22 | try producer.send(count); 23 | try producer.send(count); 24 | //try Timer.delay(rt, .{ .nanos = std.time.ns_per_ms * 10 }); 25 | } 26 | 27 | log.debug("producer frame done running!", .{}); 28 | } 29 | 30 | fn consumer_frame(_: *Runtime, consumer: Spsc(usize).Consumer) !void { 31 | defer consumer.close(); 32 | 33 | while (true) { 34 | const recvd = consumer.recv() catch break; 35 | std.debug.print("{d} - tardy example | {d}\n", .{ std.time.milliTimestamp(), recvd }); 36 | } 37 | 38 | log.debug("consumer frame done running!", .{}); 39 | } 40 | 41 | pub fn main() !void { 42 | const allocator = std.heap.smp_allocator; 43 | 44 | var tardy: Tardy = try .init(allocator, .{ 45 | .threading = .{ .multi = 2 }, 46 | .pooling = .static, 47 | .size_tasks_initial = 1, 48 | .size_aio_reap_max = 1, 49 | }); 50 | defer tardy.deinit(); 51 | 52 | var channel: Spsc(usize) = try .init(allocator, 2); 53 | defer channel.deinit(); 54 | 55 | try tardy.entry( 56 | &channel, 57 | struct { 58 | fn init(rt: *Runtime, spsc: *Spsc(usize)) !void { 59 | switch (rt.id) { 60 | 0 => try rt.spawn(.{ rt, spsc.producer(rt) }, producer_frame, 1024 * 32), 61 | 1 => try rt.spawn(.{ rt, spsc.consumer(rt) }, consumer_frame, 1024 * 32), 62 | else => unreachable, 63 | } 64 | } 65 | }.init, 66 | ); 67 | } 68 | -------------------------------------------------------------------------------- /test/e2e/first.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const Atomic = std.atomic.Value; 4 | 5 | const Dir = @import("tardy").Dir; 6 | const Runtime = @import("tardy").Runtime; 7 | 8 | const FileChain = @import("file_chain.zig").FileChain; 9 | const log = 
@import("lib.zig").log; 10 | const SharedParams = @import("lib.zig").SharedParams; 11 | 12 | pub const STACK_SIZE = 1024 * 1024 * 8; 13 | threadlocal var file_chain_counter: usize = 0; 14 | 15 | pub fn start_frame(rt: *Runtime, shared_params: *const SharedParams) !void { 16 | errdefer unreachable; 17 | 18 | const new_dir = try Dir.cwd().create_dir(rt, shared_params.seed_string); 19 | log.debug("created new shared dir (seed={d})", .{shared_params.seed}); 20 | 21 | var prng: std.Random.DefaultPrng = .init(shared_params.seed); 22 | const rand = prng.random(); 23 | 24 | const chain_count = shared_params.size_tasks_initial * rand.intRangeAtMost(usize, 1, 2); 25 | file_chain_counter = chain_count; 26 | 27 | log.info("creating file chains... ({d})", .{chain_count}); 28 | for (0..chain_count) |i| { 29 | var prng2: std.Random.DefaultPrng = .init(shared_params.seed + i); 30 | const rand2 = prng2.random(); 31 | 32 | const chain_ptr = try rt.allocator.create(FileChain); 33 | errdefer rt.allocator.destroy(chain_ptr); 34 | 35 | const sub_chain = try FileChain.generate_random_chain( 36 | rt.allocator, 37 | (shared_params.seed + i) % std.math.maxInt(usize), 38 | ); 39 | defer rt.allocator.free(sub_chain); 40 | const subpath = try std.fmt.allocPrintSentinel(rt.allocator, "{s}-{d}", .{ shared_params.seed_string, i }, 0x0); 41 | defer rt.allocator.free(subpath); 42 | 43 | chain_ptr.* = try .init( 44 | rt.allocator, 45 | sub_chain, 46 | .{ .rel = .{ .dir = new_dir.handle, .path = subpath } }, 47 | rand2.intRangeLessThan(usize, 1, 64), 48 | ); 49 | errdefer chain_ptr.deinit(); 50 | 51 | try rt.spawn( 52 | .{ chain_ptr, rt, &file_chain_counter, shared_params.seed_string }, 53 | FileChain.chain_frame, 54 | STACK_SIZE, 55 | ); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/core/atomic_ring.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = 
std.debug.assert; 3 | const Atomic = std.atomic.Value; 4 | const testing = std.testing; 5 | 6 | pub fn SpscAtomicRing(comptime T: type) type { 7 | return struct { 8 | const Self = @This(); 9 | 10 | allocator: std.mem.Allocator, 11 | items: []T, 12 | 13 | write_index: Atomic(usize) align(std.atomic.cache_line), 14 | read_index: Atomic(usize) align(std.atomic.cache_line), 15 | 16 | pub fn init(allocator: std.mem.Allocator, size: usize) !Self { 17 | assert(size >= 2); 18 | assert(std.math.isPowerOfTwo(size)); 19 | 20 | const items = try allocator.alloc(T, size); 21 | errdefer allocator.free(items); 22 | 23 | return .{ 24 | .allocator = allocator, 25 | .items = items, 26 | .write_index = .{ .raw = 0 }, 27 | .read_index = .{ .raw = 0 }, 28 | }; 29 | } 30 | 31 | pub fn deinit(self: Self) void { 32 | self.allocator.free(self.items); 33 | } 34 | 35 | pub fn push(self: *Self, item: T) !void { 36 | const write = self.write_index.load(.acquire); 37 | const next: usize = (write + 1) % self.items.len; 38 | if (next == self.read_index.load(.acquire)) return error.RingFull; 39 | self.items[write] = item; 40 | self.write_index.store(next, .release); 41 | } 42 | 43 | pub fn pop(self: *Self) !T { 44 | const read = self.read_index.load(.acquire); 45 | if (read == self.write_index.load(.acquire)) return error.RingEmpty; 46 | const item = self.items[read]; 47 | self.read_index.store((read + 1) % self.items.len, .release); 48 | return item; 49 | } 50 | }; 51 | } 52 | 53 | test "SpscAtomicRing: Fill and Empty" { 54 | const size: u32 = 128; 55 | var ring = try SpscAtomicRing(usize).init(testing.allocator, size); 56 | defer ring.deinit(); 57 | 58 | try testing.expectError(error.RingEmpty, ring.pop()); 59 | for (0..size - 1) |i| try ring.push(i); 60 | try testing.expectError(error.RingFull, ring.push(1)); 61 | for (0..size - 1) |i| try testing.expectEqual(i, try ring.pop()); 62 | try testing.expectError(error.RingEmpty, ring.pop()); 63 | } 64 | 
-------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | branches: [ main ] 5 | push: 6 | branches: [ main ] 7 | workflow_dispatch: 8 | 9 | concurrency: 10 | group: ${{ github.workflow }}-${{ github.ref }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | unit-tests: 15 | name: Build and Unit Tests 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v5 19 | - uses: mlugg/setup-zig@v2 20 | with: 21 | version: 0.15.1 22 | - name: Build Examples 23 | run: zig build -Dexample=all 24 | - name: Run unit tests 25 | run: zig build test --summary all 26 | 27 | linux-pipeline: 28 | name: Linux Pipeline 29 | needs: unit-tests 30 | runs-on: ubuntu-latest 31 | strategy: 32 | matrix: 33 | async: [io_uring, epoll, poll] 34 | steps: 35 | - uses: actions/checkout@v5 36 | - uses: mlugg/setup-zig@v2 37 | with: 38 | version: 0.15.1 39 | - name: Generate random 64-bit integer 40 | id: random 41 | shell: bash 42 | run: echo "RANDOM_INT=$(shuf -i 1-9223372036854775807 -n 1)" >> $GITHUB_ENV 43 | - name: Run E2E tests with ${{ matrix.async }} 44 | run: zig build -Dasync=${{ matrix.async }} test_e2e -- ${{ env.RANDOM_INT }} 45 | 46 | macos-pipeline: 47 | name: macOS Pipeline 48 | needs: unit-tests 49 | runs-on: macos-latest 50 | strategy: 51 | matrix: 52 | async: [kqueue, poll] 53 | steps: 54 | - uses: actions/checkout@v5 55 | - uses: mlugg/setup-zig@v2 56 | with: 57 | version: 0.15.1 58 | - name: Generate random 64-bit integer 59 | id: random 60 | shell: bash 61 | run: echo "RANDOM_INT=$((RANDOM + (RANDOM << 15) + (RANDOM << 30) + (RANDOM << 45)))" >> $GITHUB_ENV 62 | - name: Run E2E tests with ${{ matrix.async }} 63 | run: zig build -Dasync=${{ matrix.async }} test_e2e -- ${{ env.RANDOM_INT }} 64 | 65 | windows-pipeline: 66 | name: Windows Pipeline 67 | needs: unit-tests 68 | runs-on: 
windows-latest 69 | steps: 70 | - uses: actions/checkout@v5 71 | - uses: mlugg/setup-zig@v2 72 | with: 73 | version: 0.15.1 74 | - name: Generate random 64-bit integer 75 | id: random 76 | shell: pwsh 77 | run: echo "RANDOM_INT=$([long]::Parse([math]::Floor([decimal](Get-Random -Minimum 1 -Maximum 9223372036854775807)).ToString()))" >> $env:GITHUB_ENV 78 | - name: Run E2E tests with poll 79 | run: zig build -Dasync=poll test_e2e -- ${{ env.RANDOM_INT }} 80 | -------------------------------------------------------------------------------- /examples/cat/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Cross = @import("tardy").Cross; 4 | const Dir = @import("tardy").Dir; 5 | const File = @import("tardy").File; 6 | const Frame = @import("tardy").Frame; 7 | const Runtime = @import("tardy").Runtime; 8 | const Task = @import("tardy").Task; 9 | 10 | const log = std.log.scoped(.@"tardy/example/cat"); 11 | pub const std_options: std.Options = .{ .log_level = .err }; 12 | 13 | const Tardy = @import("tardy").Tardy(.auto); 14 | const EntryParams = struct { file_name: [:0]const u8 }; 15 | 16 | fn main_frame(rt: *Runtime, p: *EntryParams) !void { 17 | const file = Dir.cwd().open_file(rt, p.file_name, .{}) catch |e| switch (e) { 18 | error.NotFound => { 19 | std.debug.print("{s}: No such file!", .{p.file_name}); 20 | return; 21 | }, 22 | else => return e, 23 | }; 24 | 25 | var file_reader = file.reader(rt, &.{}); 26 | const file_r = &file_reader.interface; 27 | 28 | var std_out = File.std_out().writer(rt, &.{}); 29 | const stdout_w = &std_out.interface; 30 | defer stdout_w.flush() catch unreachable; 31 | 32 | var buffer: [1024 * 32]u8 = undefined; 33 | var done: bool = false; 34 | while (!done) { 35 | const length = file_r.readSliceShort(&buffer) catch unreachable; 36 | done = length < buffer.len; 37 | stdout_w.writeAll(buffer[0..length]) catch unreachable; 38 | } 39 | } 40 | 41 | pub fn main() 
!void { 42 | var gpa: std.heap.DebugAllocator(.{ .thread_safe = true }) = .init; 43 | const allocator = gpa.allocator(); 44 | defer _ = gpa.deinit(); 45 | 46 | var tardy: Tardy = try .init(allocator, .{ 47 | .threading = .single, 48 | .pooling = .static, 49 | .size_tasks_initial = 1, 50 | .size_aio_reap_max = 1, 51 | }); 52 | defer tardy.deinit(); 53 | 54 | var i: usize = 0; 55 | var args = try std.process.argsWithAllocator(allocator); 56 | defer args.deinit(); 57 | 58 | const file_name: [:0]const u8 = blk: { 59 | while (args.next()) |arg| : (i += 1) { 60 | if (i == 1) break :blk arg; 61 | } 62 | 63 | try std.fs.File.stdout().writeAll("file name not passed in: ./cat [file name]"); 64 | return; 65 | }; 66 | 67 | var params: EntryParams = .{ 68 | .file_name = file_name, 69 | }; 70 | 71 | try tardy.entry( 72 | ¶ms, 73 | struct { 74 | fn start(rt: *Runtime, p: *EntryParams) !void { 75 | try rt.spawn(.{ rt, p }, main_frame, 1024 * 1024 * 4); 76 | } 77 | }.start, 78 | ); 79 | } 80 | -------------------------------------------------------------------------------- /examples/http/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const AcceptResult = @import("tardy").AcceptResult; 4 | const Cross = @import("tardy").Cross; 5 | const Pool = @import("tardy").Pool; 6 | const RecvResult = @import("tardy").RecvResult; 7 | const Runtime = @import("tardy").Runtime; 8 | const SendResult = @import("tardy").SendResult; 9 | const Socket = @import("tardy").Socket; 10 | const Task = @import("tardy").Task; 11 | const Timer = @import("tardy").Timer; 12 | 13 | const Tardy = @import("tardy").Tardy(.auto); 14 | const log = std.log.scoped(.@"tardy/example/echo"); 15 | 16 | const STACK_SIZE: usize = 1024 * 16; 17 | const HTTP_RESPONSE = "HTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Length: 27\r\nContent-Type: text/plain\r\n\r\nThis is an HTTP benchmark\r\n"; 18 | 19 | fn main_frame(rt: *Runtime, server: 
*const Socket) !void { 20 | const socket = try server.accept(rt); 21 | defer socket.close_blocking(); 22 | 23 | log.debug( 24 | "{d} - accepted socket [{f}]", 25 | .{ std.time.milliTimestamp(), socket.addr }, 26 | ); 27 | 28 | // spawn off a new frame. 29 | try rt.spawn(.{ rt, server }, main_frame, STACK_SIZE); 30 | 31 | var buffer: [1024]u8 = undefined; 32 | var recv_length: usize = 0; 33 | while (true) { 34 | recv_length += socket.recv(rt, &buffer) catch |e| { 35 | log.err("Failed to recv on socket | {}", .{e}); 36 | return; 37 | }; 38 | 39 | if (std.mem.indexOf(u8, buffer[0..recv_length], "\r\n\r\n")) |_| { 40 | _ = socket.send_all(rt, HTTP_RESPONSE[0..]) catch |e| { 41 | log.err("Failed to send on socket | {}", .{e}); 42 | return; 43 | }; 44 | recv_length = 0; 45 | } 46 | } 47 | } 48 | 49 | pub fn main() !void { 50 | var gpa: std.heap.DebugAllocator(.{}) = .init; 51 | const allocator = gpa.allocator(); 52 | defer _ = gpa.deinit(); 53 | 54 | var tardy: Tardy = try .init(allocator, .{ 55 | .threading = .auto, 56 | .pooling = .grow, 57 | .size_tasks_initial = 256, 58 | .size_aio_reap_max = 1024, 59 | }); 60 | defer tardy.deinit(); 61 | 62 | const host = "0.0.0.0"; 63 | const port = 9862; 64 | 65 | const server: Socket = try .init(.{ .tcp = .{ .host = host, .port = port } }); 66 | try server.bind(); 67 | try server.listen(1024); 68 | 69 | try tardy.entry( 70 | &server, 71 | struct { 72 | fn start(rt: *Runtime, tcp_server: *const Socket) !void { 73 | try rt.spawn(.{ rt, tcp_server }, main_frame, STACK_SIZE); 74 | } 75 | }.start, 76 | ); 77 | } 78 | -------------------------------------------------------------------------------- /examples/echo/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const AcceptResult = @import("tardy").AcceptResult; 4 | const Cross = @import("tardy").Cross; 5 | const Pool = @import("tardy").Pool; 6 | const RecvResult = @import("tardy").RecvResult; 7 | const 
Runtime = @import("tardy").Runtime; 8 | const SendResult = @import("tardy").SendResult; 9 | const Socket = @import("tardy").Socket; 10 | const Task = @import("tardy").Task; 11 | const Timer = @import("tardy").Timer; 12 | 13 | const Tardy = @import("tardy").Tardy(.auto); 14 | const log = std.log.scoped(.@"tardy/example/echo"); 15 | 16 | fn echo_frame(rt: *Runtime, server: *const Socket) !void { 17 | const socket = try server.accept(rt); 18 | defer socket.close_blocking(); 19 | 20 | var sock_reader = socket.reader(rt, &.{}); 21 | const sock_r = &sock_reader.interface; 22 | 23 | var sock_writer = socket.writer(rt, &.{}); 24 | const sock_w = &sock_writer.interface; 25 | defer sock_w.flush() catch unreachable; 26 | 27 | log.debug( 28 | "{d} - accepted socket [{f}]", 29 | .{ std.time.milliTimestamp(), socket.addr }, 30 | ); 31 | 32 | // spawn off a new frame. 33 | try rt.spawn(.{ rt, server }, echo_frame, 1024 * 16); 34 | 35 | //TODO: investigate why using readSliceShort with a buffer bigger 36 | // than reader size leads to a connection reset (try to get a repro) 37 | var buffer: [501]u8 = undefined; 38 | while (true) { 39 | const recv_length = sock_r.readSliceShort(&buffer) catch |e| { 40 | log.err("Failed to recv on socket | {t}", .{e}); 41 | break; 42 | }; 43 | 44 | if (recv_length == 0) { 45 | break; 46 | } 47 | 48 | sock_w.writeAll(buffer[0..recv_length]) catch |e| { 49 | log.err("Failed to send on socket | {t}", .{e}); 50 | break; 51 | }; 52 | 53 | log.debug("Echoed: {s}", .{buffer[0..recv_length]}); 54 | } 55 | } 56 | 57 | pub fn main() !void { 58 | var gpa: std.heap.DebugAllocator(.{}) = .init; 59 | const allocator = gpa.allocator(); 60 | defer _ = gpa.deinit(); 61 | 62 | var tardy: Tardy = try .init(allocator, .{ 63 | .threading = .single, 64 | .pooling = .static, 65 | .size_tasks_initial = 256, 66 | .size_aio_reap_max = 256, 67 | }); 68 | defer tardy.deinit(); 69 | 70 | const host = "0.0.0.0"; 71 | const port = 9862; 72 | 73 | const server: Socket = try 
.init(.{ .tcp = .{ .host = host, .port = port } }); 74 | try server.bind(); 75 | try server.listen(1024); 76 | 77 | try tardy.entry( 78 | &server, 79 | struct { 80 | fn start(rt: *Runtime, tcp_server: *const Socket) !void { 81 | try rt.spawn(.{ rt, tcp_server }, echo_frame, 1024 * 16); 82 | } 83 | }.start, 84 | ); 85 | } 86 | -------------------------------------------------------------------------------- /examples/stream/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const AcceptResult = @import("tardy").AcceptResult; 4 | const Cross = @import("tardy").Cross; 5 | const Dir = @import("tardy").Dir; 6 | const File = @import("tardy").File; 7 | const Pool = @import("tardy").Pool; 8 | const RecvResult = @import("tardy").RecvResult; 9 | const Runtime = @import("tardy").Runtime; 10 | const SendResult = @import("tardy").SendResult; 11 | const Socket = @import("tardy").Socket; 12 | const Task = @import("tardy").Task; 13 | const Timer = @import("tardy").Timer; 14 | 15 | const Tardy = @import("tardy").Tardy(.auto); 16 | const EntryParams = struct { 17 | file_name: [:0]const u8, 18 | server_socket: *const Socket, 19 | }; 20 | const log = std.log.scoped(.@"tardy/example/echo"); 21 | 22 | fn stream_frame(rt: *Runtime, server: *const Socket, file_name: [:0]const u8) !void { 23 | defer rt.spawn(.{ rt, server, file_name }, stream_frame, 1024 * 1024 * 4) catch unreachable; 24 | 25 | const socket = try server.accept(rt); 26 | defer socket.close_blocking(); 27 | 28 | const file = try Dir.cwd().open_file(rt, file_name, .{}); 29 | defer file.close_blocking(); 30 | 31 | log.debug( 32 | "{d} - accepted socket [{f}]", 33 | .{ std.time.milliTimestamp(), socket.addr }, 34 | ); 35 | 36 | var buffer: [1024]u8 = undefined; 37 | var socket_w = socket.writer(rt, &buffer); 38 | const socket_sw = &socket_w.interface; 39 | defer socket_sw.flush() catch unreachable; 40 | 41 | file.stream_to( 42 | socket_sw, 43 | rt, 
44 | ) catch unreachable; 45 | } 46 | 47 | pub fn main() !void { 48 | var gpa: std.heap.DebugAllocator(.{}) = .init; 49 | const allocator = gpa.allocator(); 50 | defer _ = gpa.deinit(); 51 | 52 | var tardy: Tardy = try .init(allocator, .{ 53 | .threading = .single, 54 | .pooling = .static, 55 | .size_tasks_initial = 2, 56 | .size_aio_reap_max = 1, 57 | }); 58 | defer tardy.deinit(); 59 | 60 | const host = "0.0.0.0"; 61 | const port = 9862; 62 | 63 | const server: Socket = try .init(.{ .tcp = .{ .host = host, .port = port } }); 64 | try server.bind(); 65 | try server.listen(1024); 66 | 67 | var i: usize = 0; 68 | var args = try std.process.argsWithAllocator(allocator); 69 | defer args.deinit(); 70 | 71 | const file_name: [:0]const u8 = blk: { 72 | while (args.next()) |arg| : (i += 1) { 73 | if (i == 1) break :blk arg; 74 | } 75 | 76 | try std.fs.File.stdout().writeAll("file name not passed in: ./stream [file name]"); 77 | return; 78 | }; 79 | 80 | var params: EntryParams = .{ 81 | .file_name = file_name, 82 | .server_socket = &server, 83 | }; 84 | 85 | try tardy.entry( 86 | ¶ms, 87 | struct { 88 | fn start(rt: *Runtime, p: *EntryParams) !void { 89 | try rt.spawn(.{ rt, p.server_socket, p.file_name }, stream_frame, 1024 * 1024 * 4); 90 | } 91 | }.start, 92 | ); 93 | } 94 | -------------------------------------------------------------------------------- /src/core/ring.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const testing = std.testing; 4 | 5 | pub fn Ring(comptime T: type) type { 6 | return struct { 7 | const Self = @This(); 8 | 9 | allocator: std.mem.Allocator, 10 | items: []T, 11 | // This is where we will read off of. 12 | read_index: usize = 0, 13 | // This is where we will write into. 14 | write_index: usize = 0, 15 | // Total count of elements. 
16 | count: usize = 0, 17 | 18 | pub fn init(allocator: std.mem.Allocator, size: usize) !Self { 19 | assert(size >= 1); 20 | const items = try allocator.alloc(T, size); 21 | return .{ 22 | .allocator = allocator, 23 | .items = items, 24 | }; 25 | } 26 | 27 | pub fn deinit(self: Self) void { 28 | self.allocator.free(self.items); 29 | } 30 | 31 | pub fn full(self: Self) bool { 32 | return self.count == self.items.len; 33 | } 34 | 35 | pub fn empty(self: Self) bool { 36 | return self.count == 0; 37 | } 38 | 39 | pub fn push(self: *Self, message: T) !void { 40 | if (self.full()) return error.RingFull; 41 | self.items[self.write_index] = message; 42 | self.write_index = (self.write_index + 1) % self.items.len; 43 | self.count += 1; 44 | } 45 | 46 | pub fn push_assert(self: *Self, message: T) void { 47 | assert(!self.full()); 48 | self.items[self.write_index] = message; 49 | self.write_index = (self.write_index + 1) % self.items.len; 50 | self.count += 1; 51 | } 52 | 53 | pub fn pop(self: *Self) !T { 54 | if (self.empty()) return error.RingEmpty; 55 | const message = self.items[self.read_index]; 56 | self.read_index = (self.read_index + 1) % self.items.len; 57 | self.count -= 1; 58 | return message; 59 | } 60 | 61 | pub fn pop_assert(self: *Self) T { 62 | assert(!self.empty()); 63 | const message = self.items[self.read_index]; 64 | self.read_index = (self.read_index + 1) % self.items.len; 65 | self.count -= 1; 66 | return message; 67 | } 68 | 69 | pub fn pop_ptr(self: *Self) !*T { 70 | if (self.empty()) return error.RingEmpty; 71 | const message = &self.items[self.read_index]; 72 | self.read_index = (self.read_index + 1) % self.items.len; 73 | self.count -= 1; 74 | return message; 75 | } 76 | }; 77 | } 78 | 79 | test "Ring Send and Recv" { 80 | const size: u32 = 100; 81 | var ring = try Ring(usize).init(testing.allocator, size); 82 | defer ring.deinit(); 83 | 84 | for (0..size) |i| { 85 | for (0..i) |j| { 86 | try ring.push(j); 87 | } 88 | 89 | for (0..i) |j| { 90 | try 
testing.expectEqual(j, try ring.pop()); 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/runtime/storage.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const testing = std.testing; 4 | 5 | /// Storage is deleteless and clobberless. 6 | pub const Storage = struct { 7 | arena: std.heap.ArenaAllocator, 8 | map: std.StringHashMapUnmanaged(*anyopaque), 9 | 10 | pub fn init(allocator: std.mem.Allocator) Storage { 11 | return .{ 12 | .arena = std.heap.ArenaAllocator.init(allocator), 13 | .map = std.StringHashMapUnmanaged(*anyopaque){}, 14 | }; 15 | } 16 | 17 | pub fn deinit(self: Storage) void { 18 | self.arena.deinit(); 19 | } 20 | 21 | /// Store a pointer that is not managed. 22 | /// This will NOT CLONE the item. 23 | /// This asserts that no other item has the same name. 24 | pub fn store_ptr(self: *Storage, name: []const u8, item: anytype) !void { 25 | assert(@typeInfo(@TypeOf(item)) == .pointer); 26 | try self.map.putNoClobber(self.arena.allocator(), name, @ptrCast(item)); 27 | } 28 | 29 | pub fn store_alloc_ret(self: *Storage, name: []const u8, item: anytype) !*@TypeOf(item) { 30 | const allocator = self.arena.allocator(); 31 | const clone = try allocator.create(@TypeOf(item)); 32 | errdefer allocator.destroy(clone); 33 | clone.* = item; 34 | try self.map.putNoClobber(allocator, name, @ptrCast(clone)); 35 | return clone; 36 | } 37 | 38 | /// Store a new item in the Storage. 39 | /// This will CLONE (allocate) the item that you pass in and manage the clone. 40 | /// This asserts that no other item has the same name. 41 | pub fn store_alloc(self: *Storage, name: []const u8, item: anytype) !void { 42 | _ = try self.store_alloc_ret(name, item); 43 | } 44 | 45 | /// Get an item that is within the Storage. 46 | /// This asserts that the item you are looking for exists. 
47 | pub fn get(self: *Storage, name: []const u8, comptime T: type) T { 48 | return self.get_ptr(name, T).*; 49 | } 50 | 51 | /// Get a const (immutable) pointer to an item that is within the Storage. 52 | /// This asserts that the item you are looking for exists. 53 | pub fn get_const_ptr(self: *Storage, name: []const u8, comptime T: type) *const T { 54 | const got = self.map.get(name).?; 55 | return @ptrCast(@alignCast(got)); 56 | } 57 | 58 | /// Get a (mutable) pointer to an item that is within the Storage. 59 | /// This asserts that the item you are looking for exists. 60 | pub fn get_ptr(self: *Storage, name: []const u8, comptime T: type) *T { 61 | const got = self.map.get(name).?; 62 | return @ptrCast(@alignCast(got)); 63 | } 64 | }; 65 | 66 | test "Storage Storing" { 67 | var storage = Storage.init(testing.allocator); 68 | defer storage.deinit(); 69 | 70 | const byte: u8 = 100; 71 | try storage.store_alloc("byte", byte); 72 | 73 | const index: usize = 9447721; 74 | try storage.store_alloc("index", index); 75 | 76 | const value: u32 = 100; 77 | try storage.store_alloc("value", value); 78 | 79 | const value_ptr = storage.get_ptr("value", u32); 80 | try testing.expectEqual(value_ptr.*, 100); 81 | value_ptr.* += 100; 82 | 83 | try testing.expectEqual(byte, storage.get("byte", u8)); 84 | try testing.expectEqual(index, storage.get("index", usize)); 85 | try testing.expectEqual(value + 100, storage.get("value", u32)); 86 | } 87 | -------------------------------------------------------------------------------- /src/runtime/scheduler.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | 4 | pub const AsyncSubmission = @import("../aio/lib.zig").AsyncSubmission; 5 | const AtomicDynamicBitSet = @import("../core/atomic_bitset.zig").AtomicDynamicBitSet; 6 | const Pool = @import("../core/pool.zig").Pool; 7 | const PoolKind = @import("../core/pool.zig").PoolKind; 8 | 
const Queue = @import("../core/queue.zig").Queue; 9 | const Frame = @import("../frame/lib.zig").Frame; 10 | const Runtime = @import("lib.zig").Runtime; 11 | const Task = @import("task.zig").Task; 12 | 13 | const TaskWithJob = struct { 14 | task: Task, 15 | job: ?AsyncSubmission = null, 16 | }; 17 | 18 | pub const Scheduler = struct { 19 | allocator: std.mem.Allocator, 20 | tasks: Pool(Task), 21 | runnable: usize, 22 | released: std.ArrayListUnmanaged(usize), 23 | triggers: AtomicDynamicBitSet, 24 | 25 | pub fn init(allocator: std.mem.Allocator, size: usize, pooling: PoolKind) !Scheduler { 26 | var tasks = try Pool(Task).init(allocator, size, pooling); 27 | errdefer tasks.deinit(); 28 | 29 | var released = try std.ArrayListUnmanaged(usize).initCapacity(allocator, size); 30 | errdefer released.deinit(allocator); 31 | 32 | const triggers = try AtomicDynamicBitSet.init(allocator, size, false); 33 | errdefer triggers.deinit(allocator); 34 | 35 | return .{ 36 | .allocator = allocator, 37 | .tasks = tasks, 38 | .runnable = 0, 39 | .released = released, 40 | .triggers = triggers, 41 | }; 42 | } 43 | 44 | pub fn deinit(self: *Scheduler) void { 45 | self.tasks.deinit(); 46 | self.released.deinit(self.allocator); 47 | self.triggers.deinit(self.allocator); 48 | } 49 | 50 | pub fn set_runnable(self: *Scheduler, index: usize) !void { 51 | const task = self.tasks.get_ptr(index); 52 | assert(task.state != .runnable); 53 | task.state = .runnable; 54 | self.runnable += 1; 55 | } 56 | 57 | pub fn trigger_await(self: *Scheduler) !void { 58 | const rt: *Runtime = @fieldParentPtr("scheduler", self); 59 | const index = rt.current_task.?; 60 | const task = self.tasks.get_ptr(index); 61 | 62 | // To waiting... 63 | task.state = .wait_for_trigger; 64 | self.runnable -= 1; 65 | 66 | Frame.yield(); 67 | } 68 | 69 | // NOTE: This can spuriously trigger a Task later in the Run Loop. 70 | /// Safe to call from a different Runtime. 
71 | pub fn trigger(self: *Scheduler, index: usize) !void { 72 | try self.triggers.set(index); 73 | } 74 | 75 | // This is only safe to call from the Runtime that the Frame is running on. 76 | pub fn io_await(self: *Scheduler, job: AsyncSubmission) !void { 77 | const rt: *Runtime = @fieldParentPtr("scheduler", self); 78 | const index = rt.current_task.?; 79 | const task = self.tasks.get_ptr(index); 80 | 81 | // To waiting... 82 | task.state = .wait_for_io; 83 | self.runnable -= 1; 84 | 85 | // Queue the related I/O job. 86 | try rt.aio.queue_job(index, job); 87 | Frame.yield(); 88 | } 89 | 90 | pub fn spawn(self: *Scheduler, frame_ctx: anytype, comptime frame_fn: anytype, stack_size: usize) !void { 91 | const index = blk: { 92 | if (self.released.pop()) |index| { 93 | break :blk self.tasks.borrow_assume_unset(index); 94 | } else { 95 | break :blk try self.tasks.borrow(); 96 | } 97 | }; 98 | 99 | const frame = Frame.init(self.allocator, stack_size, frame_ctx, frame_fn) catch |err| { self.tasks.release(index); self.released.append(self.allocator, index) catch {}; return err; }; 100 | 101 | const item: Task = .{ .index = index, .frame = frame, .state = .dead }; 102 | const item_ptr = self.tasks.get_ptr(index); 103 | item_ptr.* = item; 104 | try self.set_runnable(index); 105 | } 106 | 107 | pub fn release(self: *Scheduler, index: usize) !void { 108 | // must be runnable to set? 
109 | const task = self.tasks.get_ptr(index); 110 | assert(task.state == .runnable); 111 | task.state = .dead; 112 | self.runnable -= 1; 113 | 114 | self.tasks.release(index); 115 | try self.released.append(self.allocator, index); 116 | } 117 | }; 118 | -------------------------------------------------------------------------------- /src/core/atomic_bitset.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const Atomic = std.atomic.Value; 4 | 5 | pub const AtomicDynamicBitSet = struct { 6 | allocator: std.mem.Allocator, 7 | words: []Atomic(usize), 8 | lock: std.Thread.RwLock, 9 | /// Not safe to access. Use `get_bit_length`. 10 | bit_length: usize, 11 | 12 | pub fn init(allocator: std.mem.Allocator, size: usize, default: bool) !AtomicDynamicBitSet { 13 | const word_count = try std.math.divCeil(usize, size, @bitSizeOf(usize)); 14 | const words = try allocator.alloc(Atomic(usize), word_count); 15 | errdefer allocator.free(words); 16 | const value: usize = if (default) std.math.maxInt(usize) else 0; 17 | for (words) |*word| word.* = .{ .raw = value }; 18 | return .{ .allocator = allocator, .words = words, .lock = .{}, .bit_length = size }; 19 | } 20 | 21 | pub fn deinit(self: *AtomicDynamicBitSet, allocator: std.mem.Allocator) void { 22 | self.lock.lock(); 23 | defer self.lock.unlock(); 24 | allocator.free(self.words); 25 | } 26 | 27 | fn resize(self: *AtomicDynamicBitSet, allocator: std.mem.Allocator, new_size: usize, default: bool) !void { 28 | self.lock.lock(); 29 | defer self.lock.unlock(); 30 | 31 | const new_word_count = try std.math.divCeil(usize, new_size, @bitSizeOf(usize)); 32 | if (new_word_count <= self.words.len) { self.bit_length = @max(self.bit_length, new_size); return; } 33 | 34 | const value: usize = if (default) std.math.maxInt(usize) else 0; 35 | const old_words = self.words; 36 | if (allocator.resize(self.words, new_word_count)) { 37 | self.words = old_words.ptr[0..new_word_count]; for (self.words[old_words.len..]) |*word| word.* = .{ .raw = value }; self.bit_length = new_size; 38 | } else { 39 | defer allocator.free(old_words); 40 | const new_words = try allocator.alloc(Atomic(usize), new_word_count); 41 | std.mem.copyForwards(Atomic(usize), new_words[0..old_words.len], old_words[0..]); 42 | for (new_words[old_words.len..]) |*word| word.* = .{ .raw = value }; 43 | self.words = new_words; 44 | self.bit_length = new_size; 45 | } 46 | } 47 | 48 | pub fn is_empty(self: *AtomicDynamicBitSet) bool { 49 | self.lock.lockShared(); 50 | defer self.lock.unlockShared(); 51 | for (self.words) |*word| if (word.load(.acquire) != 0) return false; 52 | return true; 53 | } 54 | 55 | pub fn get_bit_length(self: *AtomicDynamicBitSet) usize { 56 | self.lock.lockShared(); 57 | defer self.lock.unlockShared(); 58 | return self.bit_length; 59 | } 60 | 61 | pub fn set(self: *AtomicDynamicBitSet, index: usize) !void { 62 | self.lock.lockShared(); 63 | defer self.lock.unlockShared(); 64 | 65 | if (index >= self.words.len * @bitSizeOf(usize)) { 66 | self.lock.unlockShared(); 67 | try self.resize(self.allocator, try std.math.ceilPowerOfTwo(usize, index + 1), false); 68 | self.lock.lockShared(); 69 | } 70 | assert(index < self.words.len * @bitSizeOf(usize)); 71 | 72 | const word = index / @bitSizeOf(usize); 73 | assert(word < self.words.len); 74 | const mask: usize = @as(usize, 1) << @intCast(@mod(index, @bitSizeOf(usize))); 75 | _ = self.words[word].fetchOr(mask, .release); 76 | } 77 | 78 | pub fn is_set(self: *AtomicDynamicBitSet, index: usize) bool { 79 | self.lock.lockShared(); 80 | defer self.lock.unlockShared(); 81 | assert(index < self.words.len * @bitSizeOf(usize)); 82 | 83 | const word = index / @bitSizeOf(usize); 84 | assert(word < self.words.len); 85 | const mask: usize = @as(usize, 1) << @intCast(@mod(index, @bitSizeOf(usize))); 86 | return (self.words[word].load(.acquire) & mask) != 0; 87 | } 88 | 89 | pub fn unset(self: *AtomicDynamicBitSet, index: usize) void { 90 | self.lock.lockShared(); 91 | defer self.lock.unlockShared(); 92 | assert(index < self.words.len * @bitSizeOf(usize)); 93 | 94 | const word = index / 
@bitSizeOf(usize); 95 | assert(word < self.words.len); 96 | var mask: usize = std.math.maxInt(usize); 97 | mask ^= @as(usize, 1) << @intCast(@mod(index, @bitSizeOf(usize))); 98 | _ = self.words[word].fetchAnd(mask, .release); 99 | } 100 | 101 | pub fn unset_all(self: *AtomicDynamicBitSet) void { 102 | self.lock.lockShared(); 103 | defer self.lock.unlockShared(); 104 | for (self.words) |*word| word.store(0, .release); 105 | } 106 | }; 107 | -------------------------------------------------------------------------------- /test/e2e/main.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | 4 | const AsyncType = @import("tardy").AsyncType; 5 | const Dir = @import("tardy").Dir; 6 | const options = @import("options"); 7 | const Runtime = @import("tardy").Runtime; 8 | const Task = @import("tardy").Task; 9 | const Timer = @import("tardy").Timer; 10 | 11 | const First = @import("first.zig"); 12 | const log = @import("lib.zig").log; 13 | const Second = @import("second.zig"); 14 | const SharedParams = @import("lib.zig").SharedParams; 15 | 16 | const backend: AsyncType = switch (options.async_option) { 17 | .auto => .auto, 18 | .kqueue => .kqueue, 19 | .io_uring => .io_uring, 20 | .epoll => .epoll, 21 | .poll => .poll, 22 | .custom => unreachable, 23 | }; 24 | const Tardy = @import("tardy").Tardy(backend); 25 | 26 | pub const std_options: std.Options = .{ .log_level = .debug }; 27 | 28 | pub fn main() !void { 29 | var gpa: std.heap.DebugAllocator(.{}) = .init; 30 | const allocator = gpa.allocator(); 31 | defer _ = gpa.deinit(); 32 | 33 | var args = try std.process.argsWithAllocator(allocator); 34 | defer args.deinit(); 35 | 36 | _ = args.next().?; 37 | 38 | // max u64 is 21 characters long :p 39 | var maybe_seed_buffer: [21]u8 = undefined; 40 | const seed_string = args.next() orelse blk: { 41 | const stdin: std.fs.File = .stdin(); 42 | const bytes = try 
stdin.readToEndAlloc(allocator, std.math.maxInt(usize)); 43 | defer allocator.free(bytes); 44 | 45 | var iter = std.mem.splitScalar(u8, bytes, '\n'); 46 | const not_passed_in = "seed not passed in: ./e2e [seed]"; 47 | const pre_new = iter.next() orelse @panic(not_passed_in); 48 | const length = pre_new.len; 49 | 50 | if (length <= 1) @panic(not_passed_in); 51 | if (length >= maybe_seed_buffer.len) @panic("seed too long to be a u64"); 52 | 53 | assert(length < maybe_seed_buffer.len); 54 | std.mem.copyForwards(u8, &maybe_seed_buffer, pre_new); 55 | maybe_seed_buffer[length] = 0; 56 | break :blk maybe_seed_buffer[0..length :0]; 57 | }; 58 | 59 | const seed = std.fmt.parseUnsigned(u64, seed_string, 10) catch @panic("seed passed in is not u64"); 60 | var prng: std.Random.DefaultPrng = .init(seed); 61 | const rand = prng.random(); 62 | 63 | const shared: SharedParams = blk: { 64 | var p: SharedParams = undefined; 65 | p.seed_string = seed_string; 66 | p.seed = seed; 67 | 68 | p.size_tasks_initial = rand.intRangeAtMost(usize, 1, 64); 69 | p.size_aio_reap_max = rand.intRangeAtMost(usize, 1, p.size_tasks_initial * 2); 70 | break :blk p; 71 | }; 72 | log.debug("{f}", .{std.json.fmt(shared, .{ .whitespace = .indent_1 })}); 73 | 74 | var tardy: Tardy = try .init(allocator, .{ 75 | .threading = .{ .multi = 2 }, 76 | .pooling = .grow, 77 | .size_tasks_initial = shared.size_tasks_initial, 78 | .size_aio_reap_max = shared.size_aio_reap_max, 79 | }); 80 | defer tardy.deinit(); 81 | 82 | const EntryParams = struct { 83 | runtime: ?*Runtime, 84 | shared: *const SharedParams, 85 | }; 86 | 87 | var params: EntryParams = .{ .runtime = null, .shared = &shared }; 88 | 89 | try tardy.entry( 90 | ¶ms, 91 | struct { 92 | fn start(rt: *Runtime, p: *EntryParams) !void { 93 | switch (rt.id) { 94 | 0 => { 95 | p.runtime = rt; 96 | try rt.spawn(.{ rt, p.shared }, First.start_frame, First.STACK_SIZE); 97 | try rt.spawn(.{ rt, p.shared }, Second.start_frame, Second.STACK_SIZE); 98 | }, 99 | 1 => 
/// Watchdog frame: checks once per second whether the runtime under test has
/// finished, and panics if it never started or ran past the timeout.
fn timeout_task(rt: *Runtime, other: *const ?*Runtime) !void {
    const timeout_s: usize = std.time.s_per_min;

    var elapsed: usize = 0;
    while (elapsed < timeout_s) : (elapsed += 1) {
        try Timer.delay(rt, .{ .seconds = 1 });
        const target = other.* orelse continue;
        if (!target.running) break;
    }

    // Past this point the other runtime must exist and must have stopped.
    const target = other.* orelse @panic("e2e test failed | test runtime didn't start");
    if (target.running) @panic("e2e test failed! | timed out");
}
// The Frame currently executing on this thread, if any. Set by `proceed`
// before switching stacks and restored after the Frame yields back.
threadlocal var active_frame: ?*Frame = null;

/// A stackful coroutine. The Frame header itself lives at the top of the
/// heap-allocated stack it describes, so freeing that stack also frees the
/// Frame (see `init`/`deinit`).
pub const Frame = extern struct {
    const Status = enum(u8) {
        in_progress,
        done,
        errored,
    };

    /// The previous SP.
    caller_sp: [*]u8,
    /// The current SP.
    current_sp: [*]u8,
    /// Stack Info
    stack_ptr: [*]u8,
    stack_len: usize,
    /// Is the Frame done?
    status: Status = .in_progress,

    /// Carves a new Frame out of a freshly allocated stack of `stack_size`
    /// bytes. Layout, from the highest address downward:
    ///   [ Frame header | copy of `args` | saved-register area + entry fn ptr | usable stack ]
    /// Returns error.StackTooSmall when the stack cannot hold the header,
    /// the args, and the saved-register area.
    pub fn init(
        allocator: std.mem.Allocator,
        stack_size: usize,
        args: anytype,
        comptime func: anytype,
    ) !*Frame {
        const stack = try allocator.alloc(u8, stack_size);
        errdefer allocator.free(stack);
        const Args = @TypeOf(args);

        if (comptime builtin.mode == .Debug) {
            // this should mark it easily for the debugger.
            for (stack) |*byte| byte.* = 0xAA;
        }

        const stack_base = @intFromPtr(stack.ptr);
        const stack_end = @intFromPtr(stack.ptr + stack.len);

        // space for the frame
        var stack_ptr = std.mem.alignBackward(
            usize,
            stack_end - @sizeOf(Frame),
            Hardware.alignment,
        );
        if (stack_ptr < stack_base) return error.StackTooSmall;
        const frame: *Frame = @ptrFromInt(stack_ptr);

        // space for the args
        stack_ptr -= @sizeOf(Args);
        const arg_ptr: *Args = @ptrFromInt(stack_ptr);
        arg_ptr.* = args;

        // space for the saved registers (pushed)
        stack_ptr = std.mem.alignBackward(
            usize,
            stack_ptr - @sizeOf(usize) * Hardware.stack_count,
            Hardware.alignment,
        );
        if (stack_ptr < stack_base) return error.StackTooSmall;
        assert(std.mem.isAligned(stack_ptr, Hardware.alignment));

        // set the return address appropriately
        // NOTE(review): `Hardware.entry` selects which saved slot the asm swap
        // routine restores into the program counter — verify against the
        // matching asm/*.asm file before changing any stack_count.
        const entries: [*]FrameEntryFn = @ptrFromInt(stack_ptr);
        entries[Hardware.entry] = EntryFn(args, func);

        frame.* = .{
            .caller_sp = undefined,
            .current_sp = @ptrFromInt(stack_ptr),
            .stack_ptr = stack.ptr,
            .stack_len = stack.len,
        };

        return frame;
    }

    /// Frees the stack backing this Frame (and therefore the Frame itself:
    /// `self` is dangling after this returns).
    pub fn deinit(self: *const Frame, allocator: std.mem.Allocator) void {
        const stack = self.stack_ptr[0..self.stack_len];
        allocator.free(stack);
    }

    /// This runs/continues a Frame.
    /// Swaps onto the Frame's stack; returns when the Frame yields or finishes.
    /// Check `frame.status` afterwards to see whether it completed.
    pub fn proceed(frame: *Frame) void {
        const old_frame = active_frame;
        assert(old_frame != frame);
        active_frame = frame;
        defer active_frame = old_frame;

        Hardware.tardy_swap_frame(&frame.caller_sp, &frame.current_sp);
    }

    /// This yields/pauses a Frame.
    /// Must only be called from inside a running Frame (asserts otherwise).
    pub fn yield() void {
        const current = active_frame.?;
        Hardware.tardy_swap_frame(&current.current_sp, &current.caller_sp);
    }
};

// Per-ABI constants used by `Frame.init`: how many register slots the asm
// swap routine saves (`stack_count`), which slot holds the entry address
// (`entry`), and the required stack alignment.

const x64SysV = struct {
    pub const stack_count = 7;
    pub const entry = stack_count - 1;
    pub const alignment = 16;
    extern fn tardy_swap_frame(noalias *[*]u8, noalias *[*]u8) callconv(.c) void;

    comptime {
        asm (@embedFile("asm/x86_64_sysv.asm"));
    }
};

const x64Windows = struct {
    pub const stack_count = 31;
    pub const entry = stack_count - 1;
    pub const alignment = 16;
    extern fn tardy_swap_frame(noalias *[*]u8, noalias *[*]u8) callconv(.c) void;

    comptime {
        asm (@embedFile("asm/x86_64_win.asm"));
    }
};

const aarch64General = struct {
    pub const stack_count = 20;
    pub const entry = 0;
    pub const alignment = 16;
    extern fn tardy_swap_frame(noalias *[*]u8, noalias *[*]u8) callconv(.c) void;

    comptime {
        asm (@embedFile("asm/aarch64_gen.asm"));
    }
};
delaying or delayed beyond the right or expected time; late.)* is an asynchronous runtime for writing applications and services in Zig. 4 | Most of the code for this project originated in [zzz](https://github.com/tardy-org/zzz), a performance oriented networking framework. 5 | 6 | - tardy utilizes the latest Asynchronous APIs while minimizing allocations. 7 | - tardy natively supports Linux, Mac, BSD, and Windows. 8 | - tardy is configurable, allowing you to optimize the runtime for your specific use-case. 9 | 10 | [![Discord](https://img.shields.io/discord/1294761432922980392?logo=discord)](https://discord.gg/FP9Xb7WGPK) 11 | 12 | ## Summary 13 | tardy is a thread-local, I/O driven runtime for Zig, providing the core implementation for asynchronous libraries and services. 14 | - Per-thread Runtime isolation for minimal contention 15 | - Native async I/O (io_uring, epoll, kqueue, poll, etc.) 16 | - Asynchronous `Socket`s and `File`s. 17 | - Coroutines (internally called Frames). 18 | 19 | ## Installing 20 | Compatible Zig Version: `0.14.0` 21 | 22 | Latest Release: `0.3.0` 23 | ``` 24 | zig fetch --save git+https://github.com/tardy-org/tardy#v0.3.0 25 | ``` 26 | 27 | You can then add the dependency in your `build.zig` file: 28 | ```zig 29 | const tardy = b.dependency("tardy", .{ 30 | .target = target, 31 | .optimize = optimize, 32 | }).module("tardy"); 33 | 34 | exe_mod.addImport("tardy", tardy); 35 | ``` 36 | 37 | ## Building and Running Examples 38 | - NOTE: by default build/install step uses `-Dexample=none` , meaning it wont build any examples 39 | 40 | - List available examples 41 | ```sh 42 | zig build --help 43 | ``` 44 | 45 | - Build/run a specific example 46 | ```sh 47 | zig build -Dexample=[nameOfExample] 48 | ``` 49 | ```sh 50 | zig build run -Dexample=[nameOfExample] 51 | ``` 52 | 53 | - Build all examples 54 | ```sh 55 | zig build -Dexample=all 56 | ``` 57 | 58 | ## TCP Example 59 | A basic multi-threaded TCP echo server. 
60 | 61 | ```zig 62 | const std = @import("std"); 63 | const log = std.log.scoped(.@"tardy/example/echo"); 64 | 65 | const Pool = @import("tardy").Pool; 66 | const Runtime = @import("tardy").Runtime; 67 | const Task = @import("tardy").Task; 68 | const Tardy = @import("tardy").Tardy(.auto); 69 | const Cross = @import("tardy").Cross; 70 | 71 | const Socket = @import("tardy").Socket; 72 | const Timer = @import("tardy").Timer; 73 | 74 | const AcceptResult = @import("tardy").AcceptResult; 75 | const RecvResult = @import("tardy").RecvResult; 76 | const SendResult = @import("tardy").SendResult; 77 | 78 | fn echo_frame(rt: *Runtime, server: *const Socket) !void { 79 | const socket = try server.accept(rt); 80 | defer socket.close_blocking(); 81 | 82 | // you can use the standard Zig Reader/Writer if you want! 83 | const reader = socket.reader(rt); 84 | const writer = socket.writer(rt); 85 | 86 | log.debug( 87 | "{d} - accepted socket [{}]", 88 | .{ std.time.milliTimestamp(), socket.addr }, 89 | ); 90 | 91 | try rt.spawn(.{ rt, server }, echo_frame, 1024 * 16); 92 | 93 | var buffer: [1024]u8 = undefined; 94 | while (true) { 95 | const recv_length = reader.read(&buffer) catch |e| { 96 | log.err("Failed to recv on socket | {}", .{e}); 97 | return; 98 | }; 99 | 100 | writer.writeAll(buffer[0..recv_length]) catch |e| { 101 | log.err("Failed to send on socket | {}", .{e}); 102 | return; 103 | }; 104 | 105 | log.debug("Echoed: {s}", .{buffer[0..recv_length]}); 106 | } 107 | } 108 | 109 | pub fn main() !void { 110 | var gpa = std.heap.GeneralPurposeAllocator(.{}){}; 111 | const allocator = gpa.allocator(); 112 | defer _ = gpa.deinit(); 113 | 114 | // tardy by default is 115 | // - multithreaded 116 | // - unbounded in terms of spawnable tasks 117 | var tardy = try Tardy.init(allocator, .{}); 118 | defer tardy.deinit(); 119 | 120 | const server = try Socket.init(.{ .tcp = .{ .host = "127.0.0.1", .port = 9862 } }); 121 | try server.bind(); 122 | try server.listen(256); 123 | 124 | 
try tardy.entry( 125 | &server, 126 | struct { 127 | fn start(rt: *Runtime, tcp_server: *const Socket) !void { 128 | try rt.spawn(.{ rt, tcp_server }, echo_frame, 1024 * 1024 * 4); 129 | } 130 | }.start, 131 | ); 132 | } 133 | ``` 134 | 135 | There exist a lot more examples, highlighting a variety of use cases and features [here](https://github.com/tardy-org/tardy/tree/main/examples). For an example of tardy in use, you can check out any of the projects in the [ecosystem](#ecosystem). 136 | 137 | ## Ecosystem 138 | - [zzz](https://github.com/tardy-org/zzz): a framework for writing performant and reliable networked services. 139 | - [secsock](https://github.com/tardy-org/secsock): Async TLS for the Tardy Socket. 140 | 141 | ## Contribution 142 | We use Nix Flakes for managing the development environment. Nix Flakes provide a reproducible, declarative approach to managing dependencies and development tools. 143 | 144 | ### Prerequisites 145 | - Install [Nix](https://nixos.org/download/) 146 | ```bash 147 | sh <(curl -L https://nixos.org/nix/install) --daemon 148 | ``` 149 | - Enable [Flake support](https://nixos.wiki/wiki/Flakes) in your Nix config (`~/.config/nix/nix.conf`): `experimental-features = nix-command flakes` 150 | 151 | ### Getting Started 152 | 1. Clone this repository: 153 | ```bash 154 | git clone https://github.com/tardy-org/tardy.git 155 | cd tardy 156 | ``` 157 | 158 | 2. Enter the development environment: 159 | ```bash 160 | nix develop 161 | ``` 162 | 163 | This will provide you with a shell that contains all of the necessary tools and dependencies for development. 164 | 165 | Once you are inside of the development shell, you can update the development dependencies by: 166 | 1. Modifying the `flake.nix` 167 | 2. Running `nix flake update` 168 | 3. 
/// A result union carrying either a successful value of type `T` or an error
/// value of type `E`. Unlike a bare error union, the error is stored as data
/// so it can travel through completion queues before being unwrapped.
pub fn Resulted(comptime T: type, comptime E: type) type {
    return union(enum) {
        const Self = @This();
        actual: T,
        err: E,

        /// Converts the stored result back into a regular Zig error union.
        pub fn unwrap(self: *const Self) E!T {
            return switch (self.*) {
                .actual => |value| value,
                .err => |e| e,
            };
        }
    };
}
/// Errors surfaced by an async send operation.
pub const SendError = error{
    AccessDenied,
    WouldBlock,
    OpenInProgress,
    InvalidFd,
    Closed,
    NoDestinationAddress,
    InvalidAddress,
    Interrupted,
    InvalidArguments,
    AlreadyConnected,
    InvalidSize,
    OutOfMemory,
    NotConnected,
    OperationNotSupported,
    BrokenPipe,
    Unexpected,
};

/// Errors surfaced when opening a file or directory.
pub const OpenError = error{
    AccessDenied,
    InvalidFd,
    Busy,
    DiskQuotaExceeded,
    AlreadyExists,
    InvalidAddress,
    FileTooBig,
    Interrupted,
    InvalidArguments,
    IsDirectory,
    Loop,
    ProcessFdQuotaExceeded,
    NameTooLong,
    SystemFdQuotaExceeded,
    DeviceNotFound,
    NotFound,
    OutOfMemory,
    NoSpace,
    NotADirectory,
    OperationNotSupported,
    ReadOnlyFileSystem,
    FileLocked,
    WouldBlock,
    Unexpected,
};

/// Errors surfaced by an async read.
pub const ReadError = error{
    AccessDenied,
    EndOfFile,
    WouldBlock,
    InvalidFd,
    InvalidAddress,
    Interrupted,
    InvalidArguments,
    IoError,
    IsDirectory,
    Unexpected,
};

/// Errors surfaced by an async write.
pub const WriteError = error{
    WouldBlock,
    InvalidFd,
    NoDestinationAddress,
    DiskQuotaExceeded,
    InvalidAddress,
    FileTooBig,
    Interrupted,
    InvalidArguments,
    IoError,
    NoSpace,
    AccessDenied,
    BrokenPipe,
    Unexpected,
};

/// Errors surfaced by a stat operation.
pub const StatError = error{
    AccessDenied,
    InvalidFd,
    InvalidAddress,
    InvalidArguments,
    Loop,
    NameTooLong,
    NotFound,
    OutOfMemory,
    NotADirectory,
    Unexpected,
    PermissionDenied,
};

/// Errors surfaced when creating a directory.
pub const MkdirError = error{
    AccessDenied,
    AlreadyExists,
    Loop,
    NameTooLong,
    NotFound,
    NoSpace,
    NotADirectory,
    ReadOnlyFileSystem,
    Unexpected,
};

/// Errors surfaced when deleting a file or directory.
pub const DeleteError = error{
    AccessDenied,
    Busy,
    InvalidAddress,
    IoError,
    IsDirectory,
    Loop,
    NameTooLong,
    NotFound,
    OutOfMemory,
    IsNotDirectory,
    ReadOnlyFileSystem,
    InvalidArguments,
    NotEmpty,
    InvalidFd,
    Unexpected,
};

// Concrete Resulted aliases, one per async operation.

pub const AcceptResult = Resulted(Socket, AcceptError);

pub const ConnectResult = Resulted(void, ConnectError);
pub const RecvResult = Resulted(usize, RecvError);

pub const SendResult = Resulted(usize, SendError);

// This is ONLY used internally. This helps us avoid Result enum bloat
// by encoding multiple possibilities within one Result.
const OpenResultType = union(enum) { file: File, dir: Dir };
pub const InnerOpenResult = Resulted(OpenResultType, OpenError);
pub const OpenFileResult = Resulted(File, OpenError);
pub const OpenDirResult = Resulted(Dir, OpenError);

pub const MkdirResult = Resulted(void, MkdirError);
// Composite operations can also fail with InternalFailure.
pub const CreateDirError = MkdirError || OpenError || error{InternalFailure};
pub const CreateDirResult = Resulted(Dir, CreateDirError);

pub const DeleteResult = Resulted(void, DeleteError);
pub const DeleteTreeError = OpenError || DeleteError || error{InternalFailure};
pub const DeleteTreeResult = Resulted(void, DeleteTreeError);

pub const ReadResult = Resulted(usize, ReadError);
pub const WriteResult = Resulted(usize, WriteError);

pub const StatResult = Resulted(Stat, StatError);
/// A growable buffer designed for zero-copy I/O: callers reserve a write
/// area with `get_write_area`, hand it to I/O, then commit however many
/// items were actually produced with `mark_written`.
pub fn ZeroCopy(comptime T: type) type {
    return struct {
        const Self = @This();
        allocator: std.mem.Allocator,
        // Raw backing storage: `len` items are committed, `capacity` allocated.
        ptr: [*]T,
        len: usize,
        capacity: usize,

        pub fn init(allocator: std.mem.Allocator, capacity: usize) !Self {
            const slice = try allocator.alloc(T, capacity);
            return .{
                .allocator = allocator,
                .ptr = slice.ptr,
                .len = 0,
                .capacity = capacity,
            };
        }

        pub fn deinit(self: *Self) void {
            self.allocator.free(self.ptr[0..self.capacity]);
        }

        /// Returns the committed portion of the buffer.
        pub fn as_slice(self: *const Self) []T {
            return self.ptr[0..self.len];
        }

        const SubsliceOptions = struct {
            start: ?usize = null,
            end: ?usize = null,
        };

        /// Returns a view into the committed data. `start` defaults to 0 and
        /// `end` to the committed length.
        pub fn subslice(self: *const Self, options: SubsliceOptions) []T {
            const start: usize = options.start orelse 0;
            const end: usize = options.end orelse self.len;
            assert(start <= end);
            assert(end <= self.len);

            return self.ptr[start..end];
        }

        /// This returns a slice that you can write into for zero-copy uses.
        /// This is mostly used when we are passing a buffer to I/O then acting on it.
        ///
        /// The write area that is returned is ONLY valid until the next call of get_write_area
        /// or mark_written.
        pub fn get_write_area(self: *Self, size: usize) ![]T {
            const available_space = self.capacity - self.len;
            if (available_space >= size) {
                return self.ptr[self.len .. self.len + size];
            } else {
                // Grow to the next power of two that fits, preferring the
                // cheapest path: remap (may move, no copy), then in-place
                // resize, then a fresh alloc + copy as the last resort.
                const old_slice = self.ptr[0..self.capacity];
                const new_size = try std.math.ceilPowerOfTwo(usize, self.capacity + size);

                if (self.allocator.remap(self.ptr[0..self.capacity], new_size)) |new| {
                    self.ptr = new.ptr;
                    self.capacity = new.len;
                } else if (self.allocator.resize(self.ptr[0..self.capacity], new_size)) {
                    self.capacity = new_size;
                } else {
                    const new_slice = try self.allocator.alloc(T, new_size);
                    @memcpy(new_slice[0..self.len], self.ptr[0..self.len]);
                    self.allocator.free(old_slice);

                    self.ptr = new_slice.ptr;
                    self.capacity = new_slice.len;
                }

                assert(self.capacity - self.len >= size);
                return self.ptr[self.len .. self.len + size];
            }
        }

        /// Like `get_write_area`, but asserts (rather than grows) that the
        /// capacity is already sufficient.
        pub fn get_write_area_assume_space(self: *const Self, size: usize) []T {
            assert(self.capacity - self.len >= size);
            return self.ptr[self.len .. self.len + size];
        }

        /// Commits `length` items previously written into a write area.
        pub fn mark_written(self: *Self, length: usize) void {
            assert(self.len + length <= self.capacity);
            self.len += length;
        }

        /// Drops committed items past `new_size` without freeing storage.
        pub fn shrink_retaining_capacity(self: *Self, new_size: usize) void {
            assert(new_size <= self.len);
            self.len = new_size;
        }

        /// Shrinks the allocation down to `new_size` items and resets the
        /// committed length to zero.
        // NOTE(review): the first `new_size` items survive in storage but
        // `len` is reset to 0 — confirm callers expect the cleared length.
        pub fn shrink_clear_and_free(self: *Self, new_size: usize) !void {
            assert(new_size <= self.len);
            if (!self.allocator.resize(self.ptr[0..self.capacity], new_size)) {
                const slice = try self.allocator.realloc(
                    self.ptr[0..self.capacity],
                    new_size,
                );
                self.ptr = slice.ptr;
            }
            self.capacity = new_size;
            self.len = 0;
        }

        /// Resets the committed length without touching the allocation.
        pub fn clear_retaining_capacity(self: *Self) void {
            self.len = 0;
        }

        /// Frees the backing storage and resets the buffer to empty.
        pub fn clear_and_free(self: *Self) void {
            self.allocator.free(self.ptr[0..self.capacity]);
            self.len = 0;
            self.capacity = 0;
        }
    };
}
/// A single-producer, single-consumer channel built on an atomic ring.
/// Exactly one `Producer` and one `Consumer` may attach, each potentially
/// on a different Runtime. When the ring is full (or empty) the blocked
/// side parks its task and triggers the opposite side to make progress.
pub fn Spsc(comptime T: type) type {
    return struct {
        const Self = @This();

        // Wakes the task parked on the consumer side.
        fn trigger_consumer(self: *Self) !void {
            try self.consumer_rt.load(.acquire).?.trigger(self.consumer_index.load(.acquire));
        }

        // Wakes the task parked on the producer side.
        fn trigger_producer(self: *Self) !void {
            try self.producer_rt.load(.acquire).?.trigger(self.producer_index.load(.acquire));
        }

        pub const Producer = struct {
            inner: *Self,
            rt: *Runtime,

            /// Pushes `message`, parking until ring space is available.
            /// Returns error.Closed when the channel is closed or the
            /// consumer side has hung up.
            pub fn send(self: Producer, message: T) !void {
                // Scoped logger for consistency with `Consumer.recv`
                // (previously went through the unscoped default `std.log`).
                log.debug("producer sending...", .{});
                while (true) switch (self.inner.state.load(.acquire)) {
                    // Both ends must be open.
                    .starting => try self.rt.scheduler.trigger_await(),
                    // Channel was cleaned up.
                    .closed => return error.Closed,
                    .running => {
                        if (!self.inner.consumer_open.load(.acquire)) return error.Closed;
                        self.inner.ring.push(message) catch |e| switch (e) {
                            error.RingFull => {
                                // Ring is full: record which task to wake,
                                // let the consumer drain, then park.
                                self.inner.producer_index.store(self.rt.current_task.?, .release);
                                try self.inner.trigger_consumer();
                                try self.rt.scheduler.trigger_await();
                                continue;
                            },
                        };

                        return;
                    },
                };
            }

            /// Hangs up the producer side and wakes the consumer.
            pub fn close(self: Producer) void {
                self.inner.producer_open.store(false, .release);
                self.inner.trigger_consumer() catch unreachable;
            }
        };

        pub const Consumer = struct {
            inner: *Self,
            rt: *Runtime,

            /// Pops the next message, parking until one is available.
            /// Returns error.Closed when the channel is closed or the
            /// producer has hung up and the ring is drained.
            pub fn recv(self: Consumer) !T {
                log.debug("consumer recving...", .{});
                while (true) switch (self.inner.state.load(.acquire)) {
                    // Both ends must be open.
                    .starting => try self.rt.scheduler.trigger_await(),
                    // Channel was cleaned up.
                    .closed => return error.Closed,
                    .running => {
                        const data = self.inner.ring.pop() catch |e| switch (e) {
                            // If we are empty, trigger the producer to run.
                            error.RingEmpty => {
                                if (!self.inner.producer_open.load(.acquire)) return error.Closed;
                                self.inner.consumer_index.store(self.rt.current_task.?, .release);
                                try self.inner.trigger_producer();
                                try self.rt.scheduler.trigger_await();
                                continue;
                            },
                        };

                        return data;
                    },
                };
            }

            /// Hangs up the consumer side and wakes the producer.
            pub fn close(self: Consumer) void {
                self.inner.consumer_open.store(false, .release);
                self.inner.trigger_producer() catch unreachable;
            }
        };

        ring: SpscAtomicRing(T),

        // Each endpoint's fields sit on their own cache line to avoid
        // false sharing between the two sides.
        producer_rt: Atomic(?*Runtime) align(std.atomic.cache_line),
        producer_index: Atomic(usize) align(std.atomic.cache_line),
        producer_open: Atomic(bool) align(std.atomic.cache_line),

        consumer_rt: Atomic(?*Runtime) align(std.atomic.cache_line),
        consumer_index: Atomic(usize) align(std.atomic.cache_line),
        consumer_open: Atomic(bool) align(std.atomic.cache_line),

        state: std.atomic.Value(State) align(std.atomic.cache_line),

        pub fn init(allocator: std.mem.Allocator, size: usize) !Self {
            return .{
                .ring = try SpscAtomicRing(T).init(allocator, size),

                .producer_rt = .{ .raw = null },
                .producer_index = .{ .raw = 0 },
                .producer_open = .{ .raw = false },

                .consumer_rt = .{ .raw = null },
                .consumer_index = .{ .raw = 0 },
                .consumer_open = .{ .raw = false },
                .state = .{ .raw = .starting },
            };
        }

        pub fn deinit(self: *Self) void {
            self.producer_open.store(false, .release);
            self.consumer_open.store(false, .release);

            // NOTE(review): if the channel never reached .running (only one
            // side ever attached), this early return skips ring.deinit —
            // confirm that leak is intended before relying on it.
            if (self.state.cmpxchgStrong(.running, .closed, .acq_rel, .acquire)) |_| {
                return; // Someone else is handling deinit
            }

            self.ring.deinit();
        }

        /// Attaches the producer endpoint; panics on a second attach.
        pub fn producer(self: *Self, runtime: *Runtime) Producer {
            if (self.producer_rt.cmpxchgStrong(
                null,
                runtime,
                .acq_rel,
                .acquire,
            )) |_| @panic("Only one producer can exist for a Spsc");

            self.producer_open.store(true, .release);
            if (self.consumer_rt.load(.acquire) != null) self.state.store(.running, .release);
            return .{ .inner = self, .rt = runtime };
        }

        /// Attaches the consumer endpoint; panics on a second attach.
        pub fn consumer(self: *Self, runtime: *Runtime) Consumer {
            if (self.consumer_rt.cmpxchgStrong(
                null,
                runtime,
                .acq_rel,
                .acquire,
            )) |_| @panic("Only one consumer can exist for a Spsc");

            self.consumer_open.store(true, .release);
            if (self.producer_rt.load(.acquire) != null) self.state.store(.running, .release);
            return .{ .inner = self, .rt = runtime };
        }
    };
}
    // null whenever no task is mid-execution.
    current_task: ?usize = null,

    /// Builds a Runtime around the provided Async backend.
    /// `aio` is stored by value; its completion buffer is freed in `deinit`.
    pub fn init(allocator: std.mem.Allocator, aio: Async, options: RuntimeOptions) !Runtime {
        const scheduler = try Scheduler.init(
            allocator,
            options.size_tasks_initial,
            options.pooling,
        );
        const storage = Storage.init(allocator);

        return .{
            .allocator = allocator,
            .storage = storage,
            .scheduler = scheduler,
            .aio = aio,
            .id = options.id,
            .current_task = null,
            .running = false,
        };
    }

    pub fn deinit(self: *Runtime) void {
        self.storage.deinit();
        self.scheduler.deinit();
        // NOTE(review): assumes `aio.completions` was allocated with this same
        // allocator — confirm against the attach/setup site.
        self.allocator.free(self.aio.completions);
        self.aio.deinit(self.allocator);
    }

    /// Wake the given Runtime.
    /// Safe to call from a different Runtime.
    pub fn wake(self: *Runtime) !void {
        if (self.running) try self.aio.wake();
    }

    /// Trigger a waiting (`.wait_for_trigger`) Task.
    /// Safe to call from a different Runtime.
    pub fn trigger(self: *Runtime, index: usize) !void {
        if (self.running) {
            log.debug("{d} - triggering {d}", .{ self.id, index });
            try self.scheduler.trigger(index);
            try self.wake();
        }
    }

    /// Stop the given Runtime.
    /// Safe to call from a different Runtime.
    pub fn stop(self: *Runtime) void {
        if (self.running) {
            self.running = false;
            self.aio.wake() catch unreachable;
        }
    }

    /// Spawns a new Frame. This creates a new heap-allocated stack for the Frame to run.
    pub fn spawn(
        self: *Runtime,
        frame_ctx: anytype,
        comptime frame_fn: anytype,
        stack_size: usize,
    ) !void {
        try self.scheduler.spawn(frame_ctx, frame_fn, stack_size);
    }

    /// Resumes one task's frame and cleans up if it finished or errored.
    /// Sets `current_task` for the duration; the caller resets it.
    fn run_task(self: *Runtime, task: *Task) !void {
        self.current_task = task.index;

        const frame = task.frame;
        frame.proceed();

        switch (frame.status) {
            else => {},
            .done => {
                // remember: task is invalid IF it resizes.
                // so we only hit that condition sometimes in here.
                const index = self.current_task.?;
                // If the frame is done, clean it up.
                try self.scheduler.release(index);
                // frees the heap-allocated stack.
                //
                // this should be evaluated as it does have a perf impact but
                // if frames are long lived (as they should be) and most data is
                // stack allocated within that context, i think it should be ok?
                frame.deinit(self.allocator);

                // if we have no more tasks, we are done and can set our running status to false.
                if (self.scheduler.tasks.empty()) self.running = false;
            },
            .errored => {
                const index = self.current_task.?;
                log.warn("cleaning up failed frame...", .{});
                try self.scheduler.release(index);
                frame.deinit(self.allocator);
            },
        }
    }

    /// Main event loop: alternates between running ready tasks and
    /// submitting/reaping async I/O until stopped or out of work.
    pub fn run(self: *Runtime) !void {
        defer self.running = false;
        self.running = true;

        while (true) {
            var force_woken = false;

            // Processing Section
            var iter = self.scheduler.tasks.dirty.iterator(.{ .kind = .set });
            while (iter.next()) |index| {
                log.debug("{d} - processing index={d}", .{ self.id, index });
                const task = self.scheduler.tasks.get_ptr(index);
                switch (task.state) {
                    .runnable => {
                        log.debug("{d} - running index={d}", .{ self.id, index });
                        try self.run_task(task);
                        self.current_task = null;
                    },
                    .wait_for_trigger => if (self.scheduler.triggers.is_set(index)) {
                        log.debug("{d} - trigger={d} | state={t}", .{
                            self.id,
                            index,
                            task.state,
                        });

                        self.scheduler.triggers.unset(index);
                        try self.scheduler.set_runnable(index);
                    },
                    .wait_for_io => continue,
                    .dead => unreachable,
                }
            }

            if (!self.running) break;
            // If we have no tasks, we might as well exit.
            if (self.scheduler.tasks.empty()) break;

            // I/O Section
            try self.aio.submit();

            // If we don't have any runnable tasks, we just want to wait for an Async I/O.
            // Otherwise, we want to just reap whatever completion we have and continue running.
            const wait_for_io = self.scheduler.runnable == 0;
            log.debug("{d} - Wait for I/O: {}", .{ self.id, wait_for_io });

            const completions = try self.aio.reap(wait_for_io);
            for (completions) |completion| {
                // A .wake completion is a cross-runtime nudge, not tied to a task.
                if (completion.result == .wake) {
                    force_woken = true;
                    log.debug("{d} - waking up", .{self.id});
                    if (!self.running) return;
                    continue;
                }

                const index = completion.task;
                log.debug("{d} - completion={d}", .{ self.id, index });
                const task = self.scheduler.tasks.get_ptr(index);
                assert(task.state == .wait_for_io);
                task.result = completion.result;
                try self.scheduler.set_runnable(index);
            }

            // Nothing became runnable and we weren't woken: the loop is starved.
            if (self.scheduler.runnable == 0 and !force_woken) {
                log.warn("no more runnable tasks", .{});
                break;
            }
        }
    }
};

// ==================== test/e2e/file_chain.zig ====================

const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;

const DeleteResult = @import("tardy").DeleteResult;
const Dir = @import("tardy").Dir;
const File = @import("tardy").File;
const OpenFileResult = @import("tardy").OpenFileResult;
const Path = @import("tardy").Path;
const ReadResult = @import("tardy").ReadResult;
const Runtime = @import("tardy").Runtime;
const WriteResult = @import("tardy").WriteResult;

// NOTE(review): scope says "first" in the file file_chain.zig — possibly copied
// from first.zig; confirm intended.
const log = std.log.scoped(.@"tardy/e2e/first");
/// Randomized filesystem-operation chain used by the e2e tests: a sequence of
/// Steps (create/open/read/write/stat/close/delete) executed against one file.
pub const FileChain = struct {
    const Step = enum {
        create,
        open,
        read,
        write,
        stat,
        close,
        delete,
    };

    allocator: std.mem.Allocator,
    file: ?File = null,
    path: Path,
    steps: []Step,
    index: usize = 0,
    buffer: []u8,

    /// Legal successor steps for `current` (empty slice terminates a chain).
    pub fn next_steps(current: Step) []const Step {
        switch (current) {
            .create,
            .open, .read, .write, .stat => return &.{ .read, .write, .stat, .close },
            .close => return &.{ .open, .delete },
            .delete => return &.{},
        }
    }

    /// A chain is valid iff it starts with .create, ends with .delete, and every
    /// adjacent pair is allowed by `next_steps`.
    pub fn validate_chain(chain: []const Step) bool {
        if (chain.len < 3) return false;
        if (chain[0] != .create) return false;
        if (chain[chain.len - 1] != .delete) return false;

        // Walk adjacent (prev, curr) pairs; bail on the first illegal transition.
        chain: for (chain[0 .. chain.len - 1], chain[1..]) |prev, curr| {
            const steps = next_steps(prev);
            for (steps[0..]) |step| if (curr == step) continue :chain;
            return false;
        }

        return true;
    }

    /// Builds a random (valid-by-construction) chain; caller frees the slice.
    pub fn generate_random_chain(allocator: std.mem.Allocator, seed: u64) ![]Step {
        var prng: std.Random.DefaultPrng = .init(seed);
        const rand = prng.random();

        var list: std.ArrayList(Step) = try .initCapacity(allocator, 0);
        defer list.deinit(allocator);
        try list.append(allocator, .create);

        // Append random legal successors until a terminal step (.delete) is hit.
        while (true) {
            const potentials = next_steps(list.getLast());
            if (potentials.len == 0) break;
            const potential = rand.intRangeLessThan(usize, 0, potentials.len);
            try list.append(allocator, potentials[potential]);
        }

        return try list.toOwnedSlice(allocator);
    }

    // Path is expected to remain valid.
    // `chain` and `path` are duplicated; the caller keeps ownership of its copies.
    pub fn init(allocator: std.mem.Allocator, chain: []const Step, path: Path, buffer_size: usize) !FileChain {
        assert(chain.len > 0);

        const chain_dupe = try allocator.dupe(Step, chain);
        errdefer allocator.free(chain_dupe);

        const path_dupe = try path.dupe(allocator);
        errdefer switch (path_dupe) {
            .rel => |inner| allocator.free(inner.path),
            .abs => |p| allocator.free(p),
        };

        assert(validate_chain(chain));

        const buffer = try allocator.alloc(u8, buffer_size);
        errdefer allocator.free(buffer);

        return .{
            .allocator = allocator,
            .steps = chain_dupe,
            .path = path_dupe,
            .buffer = buffer,
        };
    }

    pub fn deinit(self: *const FileChain) void {
        defer self.allocator.free(self.steps);
        defer self.allocator.free(self.buffer);
        defer switch (self.path) {
            .rel => |inner| self.allocator.free(inner.path),
            .abs => |p| self.allocator.free(p),
        };
    }

    /// Frame entry point: executes every step of the chain, asserting the file
    /// contents (all bytes == 123) stay consistent; the last chain to finish
    /// (counter hits 0) deletes the whole e2e tree.
    pub fn chain_frame(chain: *FileChain, rt: *Runtime, counter: *usize, seed_string: [:0]const u8) !void {
        defer rt.allocator.destroy(chain);
        defer chain.deinit();

        var read_head: usize = 0;
        var write_head: usize = 0;

        while (chain.index < chain.steps.len) : (chain.index += 1) {
            switch (chain.steps[chain.index]) {
                .create => {
                    const file: File = try .create(rt, chain.path, .{ .mode = .read_write });
                    chain.file = file;
                },
                .open => {
                    const file: File = try .open(rt, chain.path, .{ .mode = .read_write });
                    chain.file = file;
                },
                .read => {
                    const length = try chain.file.?.read_all(rt, chain.buffer, read_head);
                    // We can never read past what has been written so far.
                    assert(length == @min(chain.buffer.len, write_head - read_head));
                    for (chain.buffer[0..length]) |item| assert(item == 123);
                    read_head += length;
                },
                .write => {
                    for (chain.buffer[0..]) |*item| item.* = 123;
                    write_head += try chain.file.?.write_all(rt, chain.buffer, write_head);
                },
                .stat => {
                    const stat = try chain.file.?.stat(rt);
                    assert(stat.size == write_head);
                },
                .close => try chain.file.?.close(rt),
                .delete => {
                    // .delete only appears in valid chains, and paths here are
                    // relative — hence the direct .rel access.
                    const dir = Dir{ .handle = chain.path.rel.dir };
                    try dir.delete_file(rt, chain.path.rel.path);
                    counter.* -= 1;
                },
            }
        }

        if (counter.* == 0) {
            log.debug("deleting the e2e tree...", .{});
            try Dir.cwd().delete_tree(rt, seed_string);
        }
    }
};

test "FileChain: Invalid Exists" {
    const chain: []const FileChain.Step = &.{
        .open,
        .read,
        .write,
        .close,
        .delete,
    };

    try testing.expect(!FileChain.validate_chain(chain));
}

test "FileChain: Invalid Opened" {
    const chain: []const FileChain.Step = &.{
        .create,
        .close,
        .read,
        .write,
    };

    try testing.expect(!FileChain.validate_chain(chain));
}

test "FileChain: Never Closed" {
    const chain: []const FileChain.Step = &.{
        .create,
        .delete,
    };

    try testing.expect(!FileChain.validate_chain(chain));
}

test "FileChain: Never Deleted" {
    const chain: []const FileChain.Step = &.{
        .create,
        .read,
        .stat,
        .write,
        .close,
    };

    try testing.expect(!FileChain.validate_chain(chain));
}

test "FileChain: Verify Double Close" {
    const chain: []const FileChain.Step = &.{
        .create,
        .read,
        .write,
        .close,
        .open,
        .read,
        .read,
        .read,
        .close,
        .delete,
    };

    try testing.expect(FileChain.validate_chain(chain));
}

test "FileChain: Validate Random Chain" {
    // Actually generates and tests a random FileChain :)
    var seed: u64 = undefined;
    try
    std.posix.getrandom(std.mem.asBytes(&seed));
    errdefer std.debug.print("failed seed: {d}\n", .{seed});

    const chain = try FileChain.generate_random_chain(testing.allocator, seed);
    defer testing.allocator.free(chain);
    try testing.expect(FileChain.validate_chain(chain));
}

// ==================== test/e2e/tcp_chain.zig ====================

const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;

const DeleteResult = @import("tardy").DeleteResult;
const OpenFileResult = @import("tardy").OpenFileResult;
const ReadResult = @import("tardy").ReadResult;
const Runtime = @import("tardy").Runtime;
const Socket = @import("tardy").Socket;
const WriteResult = @import("tardy").WriteResult;

const log = std.log.scoped(.@"tardy/e2e/tcp_chain");
/// Server half of the randomized TCP e2e test: a sequence of
/// accept/recv/send/close steps mirrored by a derived client chain.
pub const TcpServerChain = struct {
    const Step = enum {
        accept,
        recv,
        send,
        close,
    };

    allocator: std.mem.Allocator,
    socket: ?Socket = null,
    steps: []Step,
    index: usize = 0,
    buffer: []u8,

    /// Legal successor steps for `current` (empty slice terminates a chain).
    pub fn next_steps(current: Step) []const Step {
        switch (current) {
            .accept, .recv, .send => return &.{ .recv, .send, .close },
            .close => return &.{},
        }
    }

    /// A chain is valid iff it starts with .accept, ends with .close, and every
    /// adjacent pair is allowed by `next_steps`.
    pub fn validate_chain(chain: []const Step) bool {
        if (chain.len < 2) return false;
        if (chain[0] != .accept) return false;
        if (chain[chain.len - 1] != .close) return false;

        chain: for (chain[0 .. chain.len - 1], chain[1..]) |prev, curr| {
            const steps = next_steps(prev);
            for (steps[0..]) |step| if (curr == step) continue :chain;
            return false;
        }

        return true;
    }

    /// Builds a random (valid-by-construction) chain; caller frees the slice.
    pub fn generate_random_chain(allocator: std.mem.Allocator, seed: u64) ![]Step {
        var prng: std.Random.DefaultPrng = .init(seed);
        const rand = prng.random();

        var list: std.ArrayList(Step) = try .initCapacity(allocator, 0);
        defer list.deinit(allocator);
        try list.append(allocator, .accept);

        while (true) {
            const potentials = next_steps(list.getLast());
            if (potentials.len == 0) break;
            const potential = rand.intRangeLessThan(usize, 0, potentials.len);
            try list.append(allocator, potentials[potential]);
        }

        return try list.toOwnedSlice(allocator);
    }

    /// Produces the mirror-image client chain: accept<->connect, recv<->send.
    /// Caller owns (and must deinit) the returned chain.
    pub fn derive_client_chain(self: *const TcpServerChain) !TcpClientChain {
        assert(self.steps.len > 0);

        const client_steps = try self.allocator.alloc(TcpClientChain.Step, self.steps.len);
        errdefer self.allocator.free(client_steps);

        for (self.steps, 0..) |step, i| {
            switch (step) {
                .accept => client_steps[i] = .connect,
                .recv => client_steps[i] = .send,
                .send => client_steps[i] = .recv,
                .close => client_steps[i] = .close,
            }
        }

        const buffer = try self.allocator.alloc(u8, self.buffer.len);
        errdefer self.allocator.free(buffer);

        return .{
            .allocator = self.allocator,
            .steps = client_steps,
            .buffer = buffer,
        };
    }

    // `chain` is duplicated; the caller keeps ownership of the passed-in slice.
    // (Original comment said "Path is expected to remain valid." — appears
    // copy-pasted from FileChain.init; this init takes no Path.)
    pub fn init(allocator: std.mem.Allocator, chain: []const Step, buffer_size: usize) !TcpServerChain {
        assert(chain.len > 0);

        const chain_dupe = try allocator.dupe(Step, chain);
        errdefer allocator.free(chain_dupe);
        assert(validate_chain(chain));

        const buffer = try allocator.alloc(u8, buffer_size);
        errdefer allocator.free(buffer);

        return .{
            .allocator = allocator,
            .steps = chain_dupe,
            .buffer = buffer,
        };
    }

    pub fn deinit(self: *const TcpServerChain) void {
        defer self.allocator.free(self.steps);
        defer self.allocator.free(self.buffer);
    }

    /// Frame entry point: runs the server steps against one accepted
    /// connection; the last chain to finish closes the listening socket.
    pub fn chain_frame(chain: *TcpServerChain, rt: *Runtime, counter: *usize, server_socket: Socket) !void {
        defer rt.allocator.destroy(chain);
        defer chain.deinit();
        // Test frame: any unexpected error is a test failure, not a condition to handle.
        errdefer unreachable;

        chain: while (chain.index < chain.steps.len) : (chain.index += 1) {
            const current_step = chain.steps[chain.index];
            log.debug("server chain step: {t}", .{current_step});
            switch (current_step) {
                .accept => {
                    const socket = try server_socket.accept(rt);
                    chain.socket = socket;
                },
                .recv => {
                    // Peer closing early is a legitimate end of the chain.
                    const length = chain.socket.?.recv(rt, chain.buffer) catch |e| switch (e) {
                        error.Closed => break :chain,
                        else => return e,
                    };

                    for (chain.buffer[0..length]) |item| assert(item == 123);
                },
                .send => {
                    for (chain.buffer[0..]) |*item| item.* = 123;
                    _ = try chain.socket.?.send_all(rt, chain.buffer);
                },
                .close => try chain.socket.?.close(rt),
            }
        }
        counter.* -= 1;

        if (counter.* == 0) {
            log.debug("closing main accept socket", .{});
            server_socket.close_blocking();
        }
    }
};

/// Client half, produced by `TcpServerChain.derive_client_chain`.
pub const TcpClientChain = struct {
    const Step = enum {
        connect,
        recv,
        send,
        close,
    };

    allocator: std.mem.Allocator,
    steps: []Step,
    index: usize = 0,
    buffer: []u8,

    pub fn deinit(self: *const TcpClientChain) void {
        defer self.allocator.free(self.steps);
        defer self.allocator.free(self.buffer);
    }

    /// Frame entry point: connects to localhost:`port` and mirrors the server chain.
    pub fn chain_frame(chain: *TcpClientChain, rt: *Runtime, counter: *usize, port: u16) !void {
        defer rt.allocator.destroy(chain);
        defer chain.deinit();
        errdefer unreachable;

        var socket: Socket = try .init(.{ .tcp = .{ .host = "127.0.0.1", .port = port } });

        chain: while (chain.index < chain.steps.len) : (chain.index += 1) {
            const current_step = chain.steps[chain.index];
            log.debug("client chain step: {t}", .{current_step});
            switch (current_step) {
                .connect => try socket.connect(rt),
                .recv => {
                    const length = socket.recv(rt, chain.buffer) catch |e| switch (e) {
                        error.Closed => break :chain,
                        else => return e,
                    };

                    for (chain.buffer[0..length]) |item| assert(item == 123);
                },
                .send => {
                    for (chain.buffer[0..]) |*item| item.* = 123;
                    _ = try socket.send_all(rt, chain.buffer);
                },
                .close => {
                    log.debug("closing client socket", .{});
                    socket.close_blocking();
                },
            }
        }
        counter.* -= 1;

        if (counter.* == 0) {
            log.debug("tcp client chain done!", .{});
        }
    }
};

test "TcpServerChain: Proper Chain" {
    const chain: []const TcpServerChain.Step = &.{
        .accept,
        .recv,
        .send,
        .close,
    };

    try testing.expect(TcpServerChain.validate_chain(chain));
}

test "TcpServerChain: Validate Random Chain" {
    // Actually generates and tests a random TcpServerChain :)
    var seed: u64 = undefined;
    try std.posix.getrandom(std.mem.asBytes(&seed));

    const chain = try TcpServerChain.generate_random_chain(testing.allocator, seed);
    defer testing.allocator.free(chain);

    errdefer {
        std.debug.print("failed seed: {d}\n", .{seed});
        for (chain) |item| {
            std.debug.print("action={t}\n", .{item});
        }
    }

    try testing.expect(TcpServerChain.validate_chain(chain));
}

// ==================== src/aio/lib.zig ====================

const std = @import("std");
const assert = std.debug.assert;
const Atomic = std.atomic.Value;
const builtin = @import("builtin");

const PoolKind = @import("../core/pool.zig").PoolKind;
const Path = @import("../fs/lib.zig").Path;
const Timespec = @import("../lib.zig").Timespec;
const Socket = @import("../net/lib.zig").Socket;
const Completion = @import("completion.zig").Completion;

const log = std.log.scoped(.@"tardy/aio");
pub const AsyncKind = enum {
    auto,
    io_uring,
    epoll,
    kqueue,
    poll,
    custom,
};

pub const AsyncType = union(AsyncKind) {
    /// Attempts to automatically match
    /// the best backend.
    ///
    /// Linux: io_uring
    /// Windows: poll
    /// Darwin & BSD: kqueue
    /// Solaris: poll
    /// POSIX-compliant: poll
    auto,
    /// Available on Linux >= 5.1
    ///
    /// Utilizes the io_uring API for handling I/O.
    io_uring,
    /// Available on Linux >= 2.5.45
    ///
    /// Utilizes the epoll API for handling I/O.
    epoll,
    /// Available on Darwin & BSD systems
    ///
    /// Utilizes the kqueue API for handling I/O.
    kqueue,
    /// Available on all POSIX targets.
    ///
    /// Utilizes the poll API for handling I/O.
    poll,
    /// Available on all targets.
    custom: type,
};

/// Picks the best backend for the compile target (see `AsyncType.auto` docs).
pub fn auto_async_match() AsyncType {
    switch (comptime builtin.target.os.tag) {
        .linux => {
            if (comptime builtin.os.isAtLeast(
                .linux,
                .{ .major = 5, .minor = 1, .patch = 0 },
            ) orelse false) {
                return AsyncType.io_uring;
            }

            return AsyncType.epoll;
        },
        .windows => return AsyncType.poll,
        .ios, .macos, .watchos, .tvos, .visionos => return AsyncType.kqueue,
        .freebsd, .openbsd, .netbsd, .dragonfly => return AsyncType.kqueue,
        .solaris, .illumos => return AsyncType.poll,
        else => @compileError("Unsupported platform! Provide a custom Async I/O backend."),
    }
}

/// Maps an AsyncType tag to its backend implementation type.
/// `.custom` types must expose init/inner_deinit/queue_job/to_async.
pub fn async_to_type(comptime aio: AsyncType) type {
    return comptime switch (aio) {
        .io_uring => @import("../aio/apis/io_uring.zig").AsyncIoUring,
        .epoll => @import("../aio/apis/epoll.zig").AsyncEpoll,
        .poll => @import("../aio/apis/poll.zig").AsyncPoll,
        .kqueue => @import("../aio/apis/kqueue.zig").AsyncKqueue,
        .custom => |inner| {
            assert(std.meta.hasMethod(inner, "init"));
            assert(std.meta.hasMethod(inner, "inner_deinit"));
            assert(std.meta.hasMethod(inner, "queue_job"));
            assert(std.meta.hasMethod(inner, "to_async"));
            return inner;
        },
        // `.auto` must be resolved via auto_async_match before reaching here.
        .auto => unreachable,
    };
}

pub const AsyncOptions = struct {
    /// The parent AsyncIO that this should
    /// inherit parameters from.
    parent_async: ?*const Async = null,
    // Pooling
    pooling: PoolKind,
    size_tasks_initial: usize,
    /// Maximum number of completions reaped.
    size_aio_reap_max: usize,
};

// Each op is a distinct bit so a backend's capabilities fit in one u16 bitmask.
const AsyncOp = enum(u16) {
    timer = 1 << 0,
    open = 1 << 1,
    delete = 1 << 2,
    mkdir = 1 << 3,
    stat = 1 << 4,
    read = 1 << 5,
    write = 1 << 6,
    close = 1 << 7,
    accept = 1 << 8,
    connect = 1 << 9,
    recv = 1 << 10,
    send = 1 << 11,
};

/// Bitmask of AsyncOps that a backend supports.
pub const AsyncFeatures = struct {
    bitmask: u16,

    pub fn init(features: []const AsyncOp) AsyncFeatures {
        var mask: u16 = 0;
        for (features) |op| mask |= @intFromEnum(op);
        return .{ .bitmask = mask };
    }

    /// Every operation supported (mask computed at comptime).
    pub fn all() AsyncFeatures {
        const mask: u16 = comptime blk: {
            var value: u16 = 0;
            for (std.meta.tags(AsyncOp)) |op| value |= @intFromEnum(op);
            break :blk value;
        };

        return .{ .bitmask = mask };
    }

    pub fn has_capability(self: AsyncFeatures, op: AsyncOp) bool {
        return (self.bitmask & @intFromEnum(op)) != 0;
    }
};

/// One queued I/O request, tagged by operation.
pub const AsyncSubmission = union(AsyncOp) {
    timer: Timespec,
    open: struct {
        path: Path,
        flags: AsyncOpenFlags,
    },
    delete: struct {
        path: Path,
        is_dir: bool,
    },
    mkdir: struct {
        path: Path,
        mode: isize,
    },
    stat: std.posix.fd_t,
    read: struct {
        fd: std.posix.fd_t,
        buffer: []u8,
        offset: ?usize,
    },
    write: struct {
        fd: std.posix.fd_t,
        buffer: []const u8,
        offset: ?usize,
    },
    close: std.posix.fd_t,
    accept: struct {
        socket: std.posix.socket_t,
        kind: Socket.Kind,
    },
    connect: struct {
        socket: std.posix.socket_t,
        addr: std.net.Address,
        kind: Socket.Kind,
    },
    recv: struct {
        socket: std.posix.socket_t,
        buffer: []u8,
    },
    send: struct {
        socket: std.posix.socket_t,
        buffer: []const u8,
    },
};

/// Type-erased Async I/O backend interface (vtable over `runner`).
pub const Async = struct {
    const VTable = struct {
        queue_job: *const fn (*anyopaque, usize, AsyncSubmission) anyerror!void,
        deinit: *const fn (*anyopaque, std.mem.Allocator) void,
        wake: *const fn (*anyopaque) anyerror!void,
        reap: *const fn (*anyopaque, []Completion, bool) anyerror![]Completion,
        submit: *const fn (*anyopaque) anyerror!void,
    };

    runner: *anyopaque,
    vtable: VTable,
    features: AsyncFeatures = .{ .bitmask = 0 },

    attached: bool = false,
    completions: []Completion = undefined,
    // Guards wake/deinit, which are callable from other runtimes.
    mutex: std.Thread.Mutex = .{},

    // List of Async features that this Async I/O backend has.
    // Stored as a bitmask.

    /// This provides the completions that the backend will utilize when
    /// submitting and reaping. This MUST be called before any other
    /// methods on this AsyncIO instance.
    pub fn attach(self: *Async, completions: []Completion) void {
        self.completions = completions;
        self.attached = true;
    }

    pub fn deinit(self: *Async, allocator: std.mem.Allocator) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.vtable.deinit(self.runner, allocator);
    }

    pub fn queue_job(self: *Async, task: usize, job: AsyncSubmission) !void {
        assert(self.attached);
        log.debug("queuing up job={t} at index={d}", .{ job, task });
        try self.vtable.queue_job(self.runner, task, job);
    }

    pub fn wake(self: *Async) !void {
        self.mutex.lock();
        defer self.mutex.unlock();
        assert(self.attached);
        try self.vtable.wake(self.runner);
    }

    pub fn reap(self: *Async, wait: bool) ![]Completion {
        assert(self.attached);
        return try self.vtable.reap(self.runner, self.completions, wait);
    }

    pub fn submit(self: *Async) !void {
        assert(self.attached);
        try self.vtable.submit(self.runner);
    }
};

pub const FileMode = enum {
    read,
    write,
    read_write,
};

/// These are the OpenFlags used internally.
/// This allows us to abstract out various different FS calls
/// that are all backed by the same underlying call.
pub const AsyncOpenFlags = struct {
    mode: FileMode = .read,
    /// Permissions used for creating files.
    perms: ?isize = null,
    /// Open the file for appending.
    /// This will force writing permissions.
    append: bool = false,
    /// Create the file if it doesn't exist.
    create: bool = false,
    /// Truncate the file to the start.
    truncate: bool = false,
    /// Fail if the file already exists.
    exclusive: bool = false,
    /// Open the file for non-blocking I/O.
    non_block: bool = true,
    /// Ensure data is physically written to disk immediately.
    sync: bool = false,
    /// Ensure that the file is a directory.
    directory: bool = false,
};

// ==================== src/fs/dir.zig ====================

const std = @import("std");
const assert = std.debug.assert;
const StdDir = std.fs.Dir;
const builtin = @import("builtin");

const Resulted = @import("../aio/completion.zig").Resulted;
const OpenFileResult = @import("../aio/completion.zig").OpenFileResult;
const OpenDirResult = @import("../aio/completion.zig").OpenDirResult;
const OpenError = @import("../aio/completion.zig").OpenError;
const DeleteError = @import("../aio/completion.zig").DeleteError;
const DeleteResult = @import("../aio/completion.zig").DeleteResult;
const DeleteTreeResult = @import("../aio/completion.zig").DeleteTreeResult;
const StatResult = @import("../aio/completion.zig").StatResult;
const ReadResult = @import("../aio/completion.zig").ReadResult;
const WriteResult =
@import("../aio/completion.zig").WriteResult;
const CreateDirResult = @import("../aio/completion.zig").CreateDirResult;
const MkdirResult = @import("../aio/completion.zig").MkdirResult;
const MkdirError = @import("../aio/completion.zig").MkdirError;
const StatError = @import("../aio/completion.zig").StatError;
const FileMode = @import("../aio/lib.zig").FileMode;
const AsyncOpenFlags = @import("../aio/lib.zig").AsyncOpenFlags;
const Runtime = @import("../runtime/lib.zig").Runtime;
const File = @import("lib.zig").File;
const Path = @import("lib.zig").Path;
const Stat = @import("lib.zig").Stat;

const log = std.log.scoped(.@"tardy/fs/dir");
/// A directory handle; a packed wrapper around a single `std.posix.fd_t`.
pub const Dir = packed struct {
    handle: std.posix.fd_t,

    /// Create a std.fs.Dir from a Dir.
    pub fn to_std(self: Dir) std.fs.Dir {
        return std.fs.Dir{ .fd = self.handle };
    }

    /// Create a Dir from the std.fs.Dir
    pub fn from_std(self: std.fs.Dir) Dir {
        return .{ .handle = self.fd };
    }

    /// Get `cwd` as a Dir.
    pub fn cwd() Dir {
        return .{ .handle = std.fs.cwd().fd };
    }

    /// Close the underlying Handle of this Dir.
    /// Uses the async backend when it supports `.close`,
    /// otherwise closes synchronously.
    pub fn close(self: Dir, rt: *Runtime) !void {
        if (rt.aio.features.has_capability(.close))
            try rt.scheduler.io_await(.{ .close = self.handle })
        else
            std.posix.close(self.handle);
    }

    /// Close synchronously, without touching the runtime.
    pub fn close_blocking(self: Dir) void {
        std.posix.close(self.handle);
    }

    /// Open a Directory.
    /// Opens read-only and requires the target to actually be a directory.
    /// Uses the async backend when it supports `.open`; otherwise falls back
    /// to blocking `std.fs` calls.
    pub fn open(rt: *Runtime, path: Path) !Dir {
        // Directory handles are never created here (create = false).
        const flags: AsyncOpenFlags = .{
            .mode = .read,
            .create = false,
            .directory = true,
        };

        if (rt.aio.features.has_capability(.open)) {
            // Suspend this task until the async open completes.
            try rt.scheduler.io_await(.{ .open = .{ .path = path, .flags = flags } });

            // The completion result is stored on our task slot.
            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get_ptr(index);

            // Narrow the generic open result down to the Dir payload.
            const result: OpenDirResult = switch (task.result.open) {
                .actual => |actual| .{ .actual = actual.dir },
                .err => |err| .{ .err = err },
            };

            return try result.unwrap();
        } else {
            switch (path) {
                .rel => |inner| {
                    // Re-wrap the parent fd as a std.fs.Dir to open relative to it.
                    const dir: StdDir = .{ .fd = inner.dir };
                    const opened = dir.openDirZ(inner.path, .{ .iterate = true }) catch |e| {
                        // Only AccessDenied is surfaced precisely; every other
                        // failure collapses into Unexpected.
                        return switch (e) {
                            StdDir.OpenError.AccessDenied => OpenError.AccessDenied,
                            else => OpenError.Unexpected,
                        };
                    };

                    return .{ .handle = opened.fd };
                },
                .abs => |inner| {
                    const opened = std.fs.openDirAbsoluteZ(inner, .{ .iterate = true }) catch |e| {
                        return switch (e) {
                            StdDir.OpenError.AccessDenied => OpenError.AccessDenied,
                            else => OpenError.Unexpected,
                        };
                    };

                    return .{ .handle = opened.fd };
                },
            }
        }
    }

    /// Creates and opens a Directory.
    /// Uses async `.mkdir` (mode 0o775) when available, then re-opens the
    /// freshly-made directory; otherwise falls back to blocking std.fs.
    pub fn create(rt: *Runtime, path: Path) !Dir {
        if (rt.aio.features.has_capability(.mkdir)) {
            try rt.scheduler.io_await(.{ .mkdir = .{ .path = path, .mode = 0o775 } });

            // Check the mkdir completion stored on our task slot.
            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get_ptr(index);
            try task.result.mkdir.unwrap();

            // A second open is required: mkdir does not yield a handle.
            return try Dir.open(rt, path);
        } else {
            switch (path) {
                .rel => |p| {
                    const dir: StdDir = .{ .fd = p.dir };
                    dir.makeDirZ(p.path) catch |e| {
                        // All blocking-path failures collapse into Unexpected.
                        return switch (e) {
                            else => MkdirError.Unexpected,
                        };
                    };
                },
                .abs => |p| {
                    std.fs.makeDirAbsoluteZ(p) catch |e| {
                        return switch (e) {
                            else => MkdirError.Unexpected,
                        };
                    };
                },
            }

            return try Dir.open(rt, path);
        }
    }

    /// Create a File relative to this Dir.
    pub fn create_file(self: Dir, rt: *Runtime, subpath: [:0]const u8, flags: File.CreateFlags) !File {
        return try File.create(rt, .{ .rel = .{ .dir = self.handle, .path = subpath } }, flags);
    }

    /// Open a File relative to this Dir.
    pub fn open_file(self: Dir, rt: *Runtime, subpath: [:0]const u8, flags: File.OpenFlags) !File {
        return try File.open(rt, .{ .rel = .{ .dir = self.handle, .path = subpath } }, flags);
    }

    /// Create a Dir relative to this Dir.
    pub fn create_dir(self: Dir, rt: *Runtime, subpath: [:0]const u8) !Dir {
        return try Dir.create(rt, .{ .rel = .{ .dir = self.handle, .path = subpath } });
    }

    /// Open a Dir relative to this Dir.
    pub fn open_dir(self: Dir, rt: *Runtime, subpath: [:0]const u8) !Dir {
        return try Dir.open(rt, .{ .rel = .{ .dir = self.handle, .path = subpath } });
    }

    /// Get Stat information of this Dir.
    /// Falls back to a blocking `std.fs.Dir.stat` when the backend
    /// lacks the `.stat` capability.
    pub fn stat(self: Dir, rt: *Runtime) !Stat {
        if (rt.aio.features.has_capability(.stat)) {
            try rt.scheduler.io_await(.{ .stat = self.handle });

            // Unwrap the completion stored on our task slot.
            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get_ptr(index);
            return try task.result.stat.unwrap();
        } else {
            const std_dir = self.to_std();
            const dir_stat = std_dir.stat() catch |e| {
                return switch (e) {
                    StdDir.StatError.AccessDenied => StatError.AccessDenied,
                    StdDir.StatError.SystemResources => StatError.OutOfMemory,
                    StdDir.StatError.Unexpected => StatError.Unexpected,
                };
            };

            // Split each nanosecond timestamp into seconds + remainder nanos.
            // NOTE(review): the @intCast assumes non-negative timestamps —
            // pre-1970 times would trap; confirm Timespec's field types.
            return Stat{
                .size = dir_stat.size,
                .mode = dir_stat.mode,
                .changed = .{
                    .seconds = @intCast(@divTrunc(dir_stat.ctime, std.time.ns_per_s)),
                    .nanos = @intCast(@mod(dir_stat.ctime, std.time.ns_per_s)),
                },
                .modified = .{
                    .seconds = @intCast(@divTrunc(dir_stat.mtime, std.time.ns_per_s)),
                    .nanos = @intCast(@mod(dir_stat.mtime, std.time.ns_per_s)),
                },
                .accessed = .{
                    .seconds = @intCast(@divTrunc(dir_stat.atime, std.time.ns_per_s)),
                    .nanos = @intCast(@mod(dir_stat.atime, std.time.ns_per_s)),
                },
            };
        }
    }

    /// Delete a File within this Dir.
    pub fn delete_file(self: Dir, rt: *Runtime, subpath: [:0]const u8) !void {
        if (rt.aio.features.has_capability(.delete)) {
            // NOTE(review): unlike stat/open above, the delete completion's
            // result is never unwrapped here, so backend errors appear to be
            // silently dropped — confirm whether io_await surfaces them.
            try rt.scheduler.io_await(.{
                .delete = .{
                    .path = .{ .rel = .{ .dir = self.handle, .path = subpath } },
                    .is_dir = false,
                },
            });
        } else {
            const std_dir = self.to_std();
            return std_dir.deleteFileZ(subpath) catch |e| switch (e) {
                else => DeleteError.Unexpected,
            };
        }
    }

    /// Delete a Dir within this Dir.
213 | pub fn delete_dir(self: Dir, rt: *Runtime, subpath: [:0]const u8) !void { 214 | if (rt.aio.features.has_capability(.delete)) { 215 | try rt.scheduler.io_await(.{ 216 | .delete = .{ 217 | .path = .{ .rel = .{ .dir = self.handle, .path = subpath } }, 218 | .is_dir = true, 219 | }, 220 | }); 221 | } else { 222 | const std_dir = self.to_std(); 223 | return std_dir.deleteDirZ(subpath) catch |e| switch (e) { 224 | else => DeleteError.Unexpected, 225 | }; 226 | } 227 | } 228 | 229 | /// This will iterate through the Directory at the path given, 230 | /// deleting all files within it and then deleting the Directory. 231 | /// 232 | /// This does allocate within it using the `rt.allocator`. 233 | pub fn delete_tree(self: Dir, rt: *Runtime, subpath: [:0]const u8) !void { 234 | const base_dir = try self.open_dir(rt, subpath); 235 | 236 | const base_std_dir = base_dir.to_std(); 237 | var walker = try base_std_dir.walk(rt.allocator); 238 | defer walker.deinit(); 239 | 240 | while (try walker.next()) |entry| { 241 | const new_dir = Dir.from_std(entry.dir); 242 | switch (entry.kind) { 243 | .directory => try new_dir.delete_tree(rt, entry.basename), 244 | else => try new_dir.delete_file(rt, entry.basename), 245 | } 246 | } 247 | 248 | try base_dir.close(rt); 249 | try self.delete_dir(rt, subpath); 250 | } 251 | }; 252 | -------------------------------------------------------------------------------- /src/core/pool.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const testing = std.testing; 4 | 5 | pub const PoolKind = enum { 6 | /// This keeps the Pool at a static size, never growing. 7 | static, 8 | /// This allows the Pool to grow but never shrink. 
grow,
};

pub fn Pool(comptime T: type) type {
    return struct {
        pub const Kind = PoolKind;

        /// Walks only the *taken* (dirty) slots of the Pool.
        pub const Iterator = struct {
            items: []T,
            iter: std.DynamicBitSetUnmanaged.Iterator(.{
                .kind = .set,
                .direction = .forward,
            }),

            /// Next taken item by value, or null when exhausted.
            pub fn next(self: *Iterator) ?T {
                const index = self.iter.next() orelse return null;
                return self.items[index];
            }

            /// Next taken item by pointer, or null when exhausted.
            pub fn next_ptr(self: *Iterator) ?*T {
                const index = self.iter.next() orelse return null;
                return &self.items[index];
            }

            /// Next taken slot index, or null when exhausted.
            pub fn next_index(self: *Iterator) ?usize {
                return self.iter.next();
            }
        };

        const Self = @This();
        allocator: std.mem.Allocator,
        // Buffer for the Pool.
        items: []T,
        // One bit per slot; set == borrowed ("dirty"), unset == available.
        dirty: std.DynamicBitSetUnmanaged,
        kind: PoolKind,

        /// Initalizes our items buffer as undefined.
        pub fn init(allocator: std.mem.Allocator, size: usize, kind: PoolKind) !Self {
            return .{
                .allocator = allocator,
                .items = try allocator.alloc(T, size),
                .dirty = try .initEmpty(allocator, size),
                .kind = kind,
            };
        }

        /// Frees the buffer and bitset. Elements are NOT deinitialized;
        /// use `deinit_with_hook` if the elements own resources.
        pub fn deinit(self: *Self) void {
            self.allocator.free(self.items);
            self.dirty.deinit(self.allocator);
        }

        /// Deinitalizes our items buffer with a passed in hook.
        pub fn deinit_with_hook(
            self: *Self,
            args: anytype,
            deinit_hook: ?*const fn (buffer: []T, args: @TypeOf(args)) void,
        ) void {
            // The hook runs before the buffer is freed so it can clean up elements.
            if (deinit_hook) |hook| {
                @call(.auto, hook, .{ self.items, args });
            }

            self.allocator.free(self.items);
            self.dirty.deinit(self.allocator);
        }

        /// Returns the item at `index` by value. Asserts it is in bounds.
        pub fn get(self: *const Self, index: usize) T {
            assert(index < self.items.len);
            return self.items[index];
        }

        /// Returns a pointer to the item at `index`. Asserts it is in bounds.
        pub fn get_ptr(self: *const Self, index: usize) *T {
            assert(index < self.items.len);
            return &self.items[index];
        }

        /// Is this empty?
85 | pub fn empty(self: *const Self) bool { 86 | return self.dirty.count() == 0; 87 | } 88 | 89 | /// Is this full? 90 | pub fn full(self: *const Self) bool { 91 | return self.dirty.count() == self.list.len; 92 | } 93 | 94 | /// Returns the number of clean (or available) slots. 95 | pub fn clean(self: *const Self) usize { 96 | return self.items.len - self.dirty.count(); 97 | } 98 | 99 | fn grow(self: *Self) !void { 100 | assert(self.kind == .grow); 101 | 102 | const old_slice = self.items; 103 | const new_size = std.math.ceilPowerOfTwoAssert(usize, self.items.len + 1); 104 | 105 | if (self.allocator.remap(self.items, new_size)) |new_slice| { 106 | self.items = new_slice; 107 | } else if (self.allocator.resize(self.items, new_size)) { 108 | self.items = self.items.ptr[0..new_size]; 109 | } else { 110 | const new_slice = try self.allocator.alloc(T, new_size); 111 | errdefer self.allocator.free(new_slice); 112 | @memcpy(new_slice[0..self.items.len], self.items); 113 | self.items = new_slice; 114 | self.allocator.free(old_slice); 115 | } 116 | try self.dirty.resize(self.allocator, new_size, false); 117 | 118 | assert(self.items.len == new_size); 119 | assert(self.dirty.bit_length == new_size); 120 | } 121 | 122 | /// Linearly probes for an available slot in the pool. 123 | /// If dynamic, this *might* grow the Pool. 124 | /// 125 | /// Returns the index into the Pool. 126 | pub fn borrow(self: *Self) !usize { 127 | var iter = self.dirty.iterator(.{ .kind = .unset }); 128 | const index = iter.next() orelse switch (self.kind) { 129 | .static => return error.Full, 130 | .grow => { 131 | const last_index = self.items.len; 132 | try self.grow(); 133 | return self.borrow_assume_unset(last_index); 134 | }, 135 | }; 136 | 137 | self.dirty.set(index); 138 | return index; 139 | } 140 | 141 | /// Linearly probes for an available slot in the pool. 142 | /// Uses a provided hint value as the starting index. 143 | /// 144 | /// Returns the index into the Pool. 
145 | pub fn borrow_hint(self: *Self, hint: usize) !usize { 146 | const length = self.items.len; 147 | for (0..length) |i| { 148 | const index = @mod(hint + i, length); 149 | if (!self.dirty.isSet(index)) { 150 | self.dirty.set(index); 151 | return index; 152 | } 153 | } 154 | 155 | switch (self.kind) { 156 | .static => return error.Full, 157 | .grow => { 158 | const last_index = self.items.len; 159 | try self.grow(); 160 | return self.borrow_assume_unset(last_index); 161 | }, 162 | } 163 | } 164 | 165 | /// Attempts to borrow at the given index. 166 | /// Asserts that it is an available slot. 167 | /// This will never grow the Pool. 168 | pub fn borrow_assume_unset(self: *Self, index: usize) usize { 169 | assert(!self.dirty.isSet(index)); 170 | self.dirty.set(index); 171 | return index; 172 | } 173 | 174 | /// Releases the item with the given index back to the Pool. 175 | /// Asserts that the given index was borrowed. 176 | pub fn release(self: *Self, index: usize) void { 177 | assert(self.dirty.isSet(index)); 178 | self.dirty.unset(index); 179 | } 180 | 181 | /// Returns an iterator over the taken values in the Pool. 
        pub fn iterator(self: *const Self) Iterator {
            // Default bitset iterator options (set bits, forward) match
            // the Iterator field's declared type.
            const iter = self.dirty.iterator(.{});
            return .{ .iter = iter, .items = self.items };
        }
    };
}

test "Pool: Initalization (integer)" {
    var byte_pool: Pool(u8) = try .init(testing.allocator, 1024, .static);
    defer byte_pool.deinit();

    // hint == i makes each probe land immediately on a fresh slot.
    for (0..1024) |i| {
        const index = try byte_pool.borrow_hint(i);
        const byte_ptr = byte_pool.get_ptr(index);
        byte_ptr.* = 2;
    }

    for (byte_pool.items) |item| {
        try testing.expectEqual(item, 2);
    }
}

test "Pool: Dynamic Growth (integer)" {
    // Start at a single slot; borrowing 1024 times must trigger growth.
    var byte_pool: Pool(u8) = try .init(testing.allocator, 1, .grow);
    defer byte_pool.deinit();

    const count = 1024;

    for (0..count) |i| {
        const index = try byte_pool.borrow_hint(i);
        const byte_ptr = byte_pool.get_ptr(index);
        byte_ptr.* = 2;
    }

    // Growth rounds up to powers of two, so len may exceed count.
    try testing.expect(byte_pool.items.len >= count);

    for (byte_pool.items[0..count]) |item| {
        try testing.expectEqual(item, 2);
    }
}

test "Pool: Initalization & Deinit (ArrayList)" {
    var list_pool: Pool(std.ArrayList(u8)) = try .init(testing.allocator, 256, .static);
    defer list_pool.deinit();

    for (list_pool.items, 0..) |*item, i| {
        item.* = .empty;
        try item.appendNTimes(testing.allocator, 0, i);
    }

    for (list_pool.items, 0..)
|item, i| {
        try testing.expectEqual(item.items.len, i);
    }

    // Element cleanup is the caller's job; the Pool only owns the buffer.
    for (list_pool.items) |*item| {
        item.deinit(testing.allocator);
    }
}

test "Pool: BufferPool ([][]u8)" {
    var buffer_pool: Pool([1024]u8) = try .init(testing.allocator, 1024, .static);
    defer buffer_pool.deinit();

    for (buffer_pool.items) |*item| {
        std.mem.copyForwards(u8, item, "ABCDEF");
    }

    for (buffer_pool.items) |item| {
        try testing.expectEqualStrings("ABCDEF", item[0..6]);
    }
}

test "Pool: Borrowing" {
    var byte_pool: Pool(u8) = try .init(testing.allocator, 1024, .static);
    defer byte_pool.deinit();

    for (0..byte_pool.items.len) |_| {
        _ = try byte_pool.borrow();
    }

    // Expect a Full.
    try testing.expectError(error.Full, byte_pool.borrow());

    for (0..byte_pool.items.len) |i| {
        byte_pool.release(i);
    }
}

test "Pool: Borrowing Hint" {
    var byte_pool: Pool(u8) = try .init(testing.allocator, 1024, .static);
    defer byte_pool.deinit();

    for (0..byte_pool.items.len) |i| {
        _ = try byte_pool.borrow_hint(i);
    }

    for (0..byte_pool.items.len) |i| {
        byte_pool.release(i);
    }
}

test "Pool: Borrowing Unset" {
    var byte_pool: Pool(u8) = try .init(testing.allocator, 1024, .static);
    defer byte_pool.deinit();

    for (0..byte_pool.items.len) |i| {
        _ = byte_pool.borrow_assume_unset(i);
    }

    for (0..byte_pool.items.len) |i| {
        byte_pool.release(i);
    }
}

test "Pool Iterator" {
    var int_pool: Pool(usize) = try .init(testing.allocator, 1024, .static);
    defer int_pool.deinit();

    // Take half the slots, storing each borrowed index as its own value.
    for (0..(1024 / 2)) |_| {
        const borrowed = try int_pool.borrow();
        const item_ptr = int_pool.get_ptr(borrowed);
        item_ptr.* = borrowed;
    }

    var iter =
int_pool.iterator(); 307 | while (iter.next()) |item| { 308 | try testing.expect(int_pool.dirty.isSet(item)); 309 | int_pool.release(item); 310 | } 311 | 312 | try testing.expect(int_pool.empty()); 313 | } 314 | -------------------------------------------------------------------------------- /src/lib.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const Atomic = std.atomic.Value; 4 | const builtin = @import("builtin"); 5 | 6 | pub const AcceptResult = @import("aio/completion.zig").AcceptResult; 7 | pub const ConnectResult = @import("aio/completion.zig").ConnectResult; 8 | pub const RecvResult = @import("aio/completion.zig").RecvResult; 9 | pub const SendResult = @import("aio/completion.zig").SendResult; 10 | pub const OpenFileResult = @import("aio/completion.zig").OpenFileResult; 11 | pub const OpenDirResult = @import("aio/completion.zig").OpenDirResult; 12 | pub const ReadResult = @import("aio/completion.zig").ReadResult; 13 | pub const WriteResult = @import("aio/completion.zig").WriteResult; 14 | pub const StatResult = @import("aio/completion.zig").StatResult; 15 | pub const CreateDirResult = @import("aio/completion.zig").CreateDirResult; 16 | pub const DeleteResult = @import("aio/completion.zig").DeleteResult; 17 | pub const DeleteTreeResult = @import("aio/completion.zig").DeleteTreeResult; 18 | const Completion = @import("aio/completion.zig").Completion; 19 | pub const auto_async_match = @import("aio/lib.zig").auto_async_match; 20 | const async_to_type = @import("aio/lib.zig").async_to_type; 21 | const AsyncIO = @import("aio/lib.zig").Async; 22 | pub const AsyncType = @import("aio/lib.zig").AsyncType; 23 | const AsyncOptions = @import("aio/lib.zig").AsyncOptions; 24 | pub const Spsc = @import("channel/spsc.zig").Spsc; 25 | pub const Pool = @import("core/pool.zig").Pool; 26 | pub const PoolKind = @import("core/pool.zig").PoolKind; 27 | pub const Queue = 
@import("core/queue.zig").Queue; 28 | pub const ZeroCopy = @import("core/zero_copy.zig").ZeroCopy; 29 | /// Cross-platform abstractions. 30 | /// For the `std.posix` interface types. 31 | pub const Cross = @import("cross/lib.zig"); 32 | pub const Frame = @import("frame/lib.zig").Frame; 33 | pub const File = @import("fs/lib.zig").File; 34 | pub const Dir = @import("fs/lib.zig").Dir; 35 | pub const Path = @import("fs/lib.zig").Path; 36 | pub const Stat = @import("fs/lib.zig").Stat; 37 | pub const Socket = @import("net/lib.zig").Socket; 38 | pub const Runtime = @import("runtime/lib.zig").Runtime; 39 | pub const Task = @import("runtime/task.zig").Task; 40 | pub const Timer = @import("runtime/timer.zig").Timer; 41 | 42 | const log = std.log.scoped(.tardy); 43 | 44 | // Results 45 | pub const TardyThreading = union(enum) { 46 | single, 47 | multi: usize, 48 | all, 49 | /// Calculated by `@max((cpu_count / 2) - 1, 1)` 50 | auto, 51 | }; 52 | 53 | const TardyOptions = struct { 54 | /// Threading that Tardy runtime will use. 55 | /// 56 | /// Default = .auto 57 | threading: TardyThreading = .auto, 58 | /// Pooling Style 59 | /// 60 | /// By default (`.grow`), this means the internal pools 61 | /// will grow to fit however many tasks/async jobs 62 | /// you feed it until an OOM condition 63 | /// 64 | /// You can also set it to `.static` to lock the 65 | /// maximum number of tasks and aio jobs. 66 | /// 67 | /// Default = .grow 68 | pooling: PoolKind = .grow, 69 | /// Number of initial Tasks. 70 | /// 71 | /// If our pooling is grow, this will be the upper-limit 72 | /// before any allocations happen. 73 | /// 74 | /// If our pooling is static, this will be the maximum limit. 75 | /// 76 | /// Default: 1024 77 | size_tasks_initial: usize = 1024, 78 | /// Maximum number of aio completions we can reap 79 | /// with a single call of reap(). 
    ///
    /// Default: 1024
    size_aio_reap_max: usize = 1024,
};

pub fn Tardy(comptime selected_aio_type: AsyncType) type {
    // Resolve `.auto` to a concrete backend at comptime.
    const aio_type: AsyncType = comptime if (selected_aio_type == .auto)
        auto_async_match()
    else
        selected_aio_type;

    const AioInnerType = comptime async_to_type(aio_type);
    return struct {
        const Self = @This();
        // One backend instance per spawned Runtime; owned by this struct.
        aios: std.ArrayList(*AioInnerType),
        allocator: std.mem.Allocator,
        options: TardyOptions,
        // Guards runtime spawning across threads (see spawn_runtime).
        mutex: std.Thread.Mutex = .{},

        pub fn init(allocator: std.mem.Allocator, options: TardyOptions) !Self {
            log.debug("aio backend: {t}", .{aio_type});

            return .{
                .allocator = allocator,
                .options = options,
                .aios = try .initCapacity(allocator, 0),
            };
        }

        /// Destroys every backend instance created via spawn_runtime.
        pub fn deinit(self: *Self) void {
            for (self.aios.items) |aio| self.allocator.destroy(aio);
            self.aios.deinit(self.allocator);
        }

        /// This will spawn a new Runtime.
        /// Creates a backend instance plus its completion buffer, then
        /// builds a Runtime on top of them. Serialized by `self.mutex`.
        fn spawn_runtime(self: *Self, id: usize, options: AsyncOptions) !Runtime {
            self.mutex.lock();
            defer self.mutex.unlock();

            var aio: AsyncIO = blk: {
                var io = try self.allocator.create(AioInnerType);
                errdefer self.allocator.destroy(io);

                io.* = try .init(self.allocator, options);
                errdefer io.inner_deinit(self.allocator);

                // Ownership of `io` transfers to `self.aios` here; it is
                // destroyed later in Tardy.deinit.
                try self.aios.append(self.allocator, io);
                var aio = io.to_async();

                const completions = try self.allocator.alloc(Completion, self.options.size_aio_reap_max);
                errdefer self.allocator.free(completions);

                aio.attach(completions);
                break :blk aio;
            };
            // NOTE(review): the errdefers above expire when the block ends,
            // so if Runtime.init below fails, only aio.deinit runs — the
            // `completions` slice looks like it leaks on that path; confirm.
            errdefer aio.deinit(self.allocator);

            return try .init(self.allocator, aio, .{
                .id = id,
                .pooling = self.options.pooling,
                .size_tasks_initial = self.options.size_tasks_initial,
                .size_aio_reap_max = self.options.size_aio_reap_max,
            });
        }

        /// This is the entry into all of the runtimes.
        ///
        /// The provided func needs to have a signature of (*Runtime, anytype) !void;
        ///
        /// The provided allocator is meant to just initialize any structures that will exist throughout the lifetime
        /// of the runtime. It happens in an arena and is cleaned up after the runtime terminates.
151 | pub fn entry( 152 | self: *Self, 153 | entry_params: anytype, 154 | comptime entry_func: *const fn (*Runtime, @TypeOf(entry_params)) anyerror!void, 155 | ) !void { 156 | const runtime_count: usize = switch (self.options.threading) { 157 | .single => 1, 158 | .multi => |count| count, 159 | .auto => @max(try std.Thread.getCpuCount() / 2 - 1, 1), 160 | .all => try std.Thread.getCpuCount(), 161 | }; 162 | 163 | // for post-spawn syncing 164 | var spawned_count: Atomic(usize) = .init(0); 165 | const spawning_count = runtime_count - 1; 166 | 167 | var runtime = try self.spawn_runtime(0, .{ 168 | .parent_async = null, 169 | .pooling = self.options.pooling, 170 | .size_tasks_initial = self.options.size_tasks_initial, 171 | .size_aio_reap_max = self.options.size_aio_reap_max, 172 | }); 173 | defer runtime.deinit(); 174 | 175 | assert(runtime_count > 0); 176 | log.info("thread count: {d}", .{runtime_count}); 177 | 178 | var threads: std.ArrayList(std.Thread) = try .initCapacity( 179 | self.allocator, 180 | runtime_count -| 1, 181 | ); 182 | defer { 183 | log.debug("waiting for the remaining threads to terminate", .{}); 184 | for (threads.items) |thread| thread.join(); 185 | threads.deinit(self.allocator); 186 | } 187 | // for in-spawn id assignment 188 | var spawn_id: Atomic(usize) = .init(1); 189 | 190 | for (0..spawning_count) |_| { 191 | const current_index = spawn_id.fetchAdd(1, .monotonic); 192 | const handle: std.Thread = try .spawn(.{}, struct { 193 | fn thread_init( 194 | tardy: *Self, 195 | options: TardyOptions, 196 | parent: *AsyncIO, 197 | entry_parameters: @TypeOf(entry_params), 198 | count: *Atomic(usize), 199 | total_count: usize, 200 | current_id: usize, 201 | ) void { 202 | var thread_rt = tardy.spawn_runtime(current_id, .{ 203 | .parent_async = parent, 204 | .pooling = options.pooling, 205 | .size_tasks_initial = options.size_tasks_initial, 206 | .size_aio_reap_max = options.size_aio_reap_max, 207 | }) catch return; 208 | defer thread_rt.deinit(); 209 
| 210 | _ = count.fetchAdd(1, .acquire); 211 | while (count.load(.acquire) < total_count) {} 212 | 213 | @call(.auto, entry_func, .{ &thread_rt, entry_parameters }) catch |e| { 214 | log.err("{d} - entry error={}", .{ thread_rt.id, e }); 215 | thread_rt.stop(); 216 | }; 217 | 218 | thread_rt.run() catch |e| log.err("{d} - runtime error={}", .{ thread_rt.id, e }); 219 | 220 | // wait for the rest to stop before cleaning ourselves up. 221 | // this is because the runtime is allocate on our stack and others might be checking 222 | // our running status or attempting to wake us. 223 | _ = count.fetchSub(1, .acquire); 224 | while (count.load(.acquire) > 0) std.Thread.sleep(std.time.ns_per_s); 225 | } 226 | }.thread_init, .{ 227 | self, 228 | self.options, 229 | &runtime.aio, 230 | entry_params, 231 | &spawned_count, 232 | spawning_count, 233 | current_index, 234 | }); 235 | 236 | threads.appendAssumeCapacity(handle); 237 | } 238 | 239 | while (spawned_count.load(.acquire) < spawning_count) {} 240 | log.debug("all runtimes spawned, initalizing...", .{}); 241 | 242 | @call(.auto, entry_func, .{ &runtime, entry_params }) catch |e| { 243 | log.err("0 - entry error={}", .{e}); 244 | runtime.stop(); 245 | }; 246 | runtime.run() catch |e| log.err("0 - runtime error={}", .{e}); 247 | } 248 | 249 | /// This spawns in and enters into the runtime 250 | /// in a new Thread, allowing for more code to 251 | /// execute even after the runtime spawns. 
252 | pub fn entry_in_new_thread( 253 | self: *Self, 254 | entry_params: anytype, 255 | comptime entry_func: *const fn ( 256 | *Runtime, 257 | @TypeOf(entry_params), 258 | ) anyerror!void, 259 | ) !void { 260 | const handle: std.Thread = try .spawn(.{}, struct { 261 | fn entry_in_new_thread(tardy: *Self, ip: @TypeOf(entry_params)) void { 262 | tardy.entry(ip, entry_func) catch unreachable; 263 | } 264 | }.entry_in_new_thread, .{ self, entry_params }); 265 | handle.detach(); 266 | } 267 | }; 268 | } 269 | 270 | pub const Timespec = struct { 271 | seconds: u64 = 0, 272 | nanos: u64 = 0, 273 | }; 274 | -------------------------------------------------------------------------------- /src/net/socket.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const debug = std.debug; 3 | const builtin = @import("builtin"); 4 | 5 | const AcceptResult = @import("../aio/completion.zig").AcceptResult; 6 | const AcceptError = @import("../aio/completion.zig").AcceptError; 7 | const ConnectResult = @import("../aio/completion.zig").ConnectResult; 8 | const ConnectError = @import("../aio/completion.zig").ConnectError; 9 | const RecvResult = @import("../aio/completion.zig").RecvResult; 10 | const RecvError = @import("../aio/completion.zig").RecvError; 11 | const SendResult = @import("../aio/completion.zig").SendResult; 12 | const SendError = @import("../aio/completion.zig").SendError; 13 | const Frame = @import("../frame/lib.zig").Frame; 14 | const Runtime = @import("../runtime/lib.zig").Runtime; 15 | 16 | pub const Socket = struct { 17 | pub const Kind = enum { 18 | tcp, 19 | udp, 20 | unix, 21 | 22 | pub fn listenable(self: Kind) bool { 23 | return switch (self) { 24 | .tcp, .unix => true, 25 | else => false, 26 | }; 27 | } 28 | }; 29 | 30 | const HostPort = struct { 31 | host: []const u8, 32 | port: u16, 33 | }; 34 | 35 | pub const InitKind = union(Kind) { 36 | tcp: HostPort, 37 | udp: HostPort, 38 | unix: []const 
u8,
    };

    // The underlying OS socket handle (created non-blocking).
    handle: std.posix.socket_t,
    addr: std.net.Address,
    kind: Kind,

    /// Resolves/parses the address for the given kind, then creates the socket.
    pub fn init(kind: InitKind) !Socket {
        const addr = switch (kind) {
            .tcp, .udp => |inner| blk: {
                // resolveIp is only taken on the Linux path.
                break :blk if (comptime builtin.os.tag == .linux)
                    try std.net.Address.resolveIp(inner.host, inner.port)
                else
                    try std.net.Address.parseIp(inner.host, inner.port);
            },
            // Not supported on Windows at the moment.
            .unix => |path| if (builtin.os.tag == .windows) unreachable else try std.net.Address.initUnix(path),
        };

        return try init_with_address(kind, addr);
    }

    /// Creates a non-blocking, close-on-exec socket for `addr`.
    /// Does not bind or connect.
    pub fn init_with_address(kind: Kind, addr: std.net.Address) !Socket {
        const sock_type: u32 = switch (kind) {
            .tcp, .unix => std.posix.SOCK.STREAM,
            .udp => std.posix.SOCK.DGRAM,
        };

        const protocol: u32 = switch (kind) {
            .tcp => std.posix.IPPROTO.TCP,
            .udp => std.posix.IPPROTO.UDP,
            .unix => 0,
        };

        const flags: u32 = sock_type | std.posix.SOCK.CLOEXEC | std.posix.SOCK.NONBLOCK;
        const socket = try std.posix.socket(addr.any.family, flags, protocol);

        // Prefer load-balanced port reuse where the OS declares it
        // (REUSEPORT_LB), then REUSEPORT, then plain REUSEADDR.
        if (kind != .unix) {
            if (@hasDecl(std.posix.SO, "REUSEPORT_LB")) {
                try std.posix.setsockopt(
                    socket,
                    std.posix.SOL.SOCKET,
                    std.posix.SO.REUSEPORT_LB,
                    &std.mem.toBytes(@as(c_int, 1)),
                );
            } else if (@hasDecl(std.posix.SO, "REUSEPORT")) {
                try std.posix.setsockopt(
                    socket,
                    std.posix.SOL.SOCKET,
                    std.posix.SO.REUSEPORT,
                    &std.mem.toBytes(@as(c_int, 1)),
                );
            } else {
                try std.posix.setsockopt(
                    socket,
                    std.posix.SOL.SOCKET,
                    std.posix.SO.REUSEADDR,
                    &std.mem.toBytes(@as(c_int, 1)),
                );
            }
        }

        return .{ .handle = socket, .addr = addr, .kind = kind };
    }

    /// Bind the current Socket
    pub fn bind(self: Socket) !void {
        try std.posix.bind(self.handle, &self.addr.any, self.addr.getOsSockLen());
    }

    /// Listen on the Current Socket.
    pub fn listen(self: Socket, backlog: usize) !void {
        debug.assert(self.kind.listenable());
        // NOTE(review): backlog is truncated to the narrower OS type —
        // very large values wrap; confirm callers pass sane sizes.
        try std.posix.listen(self.handle, @truncate(backlog));
    }

    /// Close the socket, asynchronously when the backend supports `.close`.
    pub fn close(self: Socket, rt: *Runtime) !void {
        if (rt.aio.features.has_capability(.close))
            try rt.scheduler.io_await(.{ .close = self.handle })
        else
            std.posix.close(self.handle);
    }

    /// Close synchronously, without touching the runtime.
    pub fn close_blocking(self: Socket) void {
        // todo: delete the unix socket if the
        // server is being closed
        std.posix.close(self.handle);
    }

    /// Accept a new connection on this (listening) Socket.
    /// Falls back to a yield-and-retry loop on backends without `.accept`.
    pub fn accept(self: Socket, rt: *Runtime) !Socket {
        debug.assert(self.kind.listenable());
        if (rt.aio.features.has_capability(.accept)) {
            try rt.scheduler.io_await(.{
                .accept = .{
                    .socket = self.handle,
                    .kind = self.kind,
                },
            });

            // The accepted socket is stored on our task slot.
            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get(index);
            return try task.result.accept.unwrap();
        } else {
            var addr: std.net.Address = undefined;
            // NOTE(review): getOsSockLen() is read from an undefined address
            // here — confirm this yields a valid in/out length for accept.
            var addr_len = addr.getOsSockLen();

            const socket: std.posix.socket_t = blk: while (true) {
                break :blk std.posix.accept(
                    self.handle,
                    &addr.any,
                    &addr_len,
                    std.posix.SOCK.NONBLOCK,
                ) catch |e| return switch (e) {
                    // Not ready yet: hand control back to the scheduler, retry.
                    std.posix.AcceptError.WouldBlock => {
                        Frame.yield();
                        continue;
                    },
                    std.posix.AcceptError.ConnectionAborted,
                    std.posix.AcceptError.ConnectionResetByPeer,
                    => AcceptError.ConnectionAborted,
                    std.posix.AcceptError.SocketNotListening => AcceptError.NotListening,
                    std.posix.AcceptError.ProcessFdQuotaExceeded => AcceptError.ProcessFdQuotaExceeded,
                    std.posix.AcceptError.SystemFdQuotaExceeded => AcceptError.SystemFdQuotaExceeded,
                    std.posix.AcceptError.FileDescriptorNotASocket => AcceptError.NotASocket,
                    std.posix.AcceptError.OperationNotSupported => AcceptError.OperationNotSupported,
                    else => AcceptError.Unexpected,
                };
            };

            return .{
                .handle = socket,
                .addr = addr,
                .kind = self.kind,
            };
        }
    }

    /// Connect this Socket to its configured address.
    pub fn connect(self: Socket, rt: *Runtime) !void {
        if (rt.aio.features.has_capability(.connect)) {
            try rt.scheduler.io_await(.{
                .connect = .{
                    .socket = self.handle,
                    .addr = self.addr,
                    .kind = self.kind,
                },
            });

            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get(index);
            try task.result.connect.unwrap();
        } else {
            while (true) {
                break std.posix.connect(
                    self.handle,
                    &self.addr.any,
                    self.addr.getOsSockLen(),
                ) catch |e| return switch (e) {
                    std.posix.ConnectError.WouldBlock => {
                        Frame.yield();
                        continue;
                    },
                    else => ConnectError.Unexpected,
                };
            }
        }
    }

    /// Receive into `buffer`; returns the byte count.
    /// A zero-byte read maps to `RecvError.Closed`.
    pub fn recv(self: Socket, rt: *Runtime, buffer: []u8) !usize {
        if (rt.aio.features.has_capability(.recv)) {
            try rt.scheduler.io_await(.{
                .recv = .{
                    .socket = self.handle,
                    .buffer = buffer,
                },
            });

            const index = rt.current_task.?;
            const task = rt.scheduler.tasks.get(index);
            return try task.result.recv.unwrap();
        } else {
            const count: usize = blk: while (true) {
                break :blk std.posix.recv(self.handle, buffer, 0) catch |e| return switch (e) {
                    std.posix.RecvFromError.WouldBlock => {
                        Frame.yield();
                        continue;
                    },
                    else => RecvError.Unexpected,
                };
            };

            if (count == 0) return RecvError.Closed;
            return count;
        }
    }

    /// Receive until `buffer` is full or the peer closes; returns bytes read.
    pub fn recv_all(self: Socket, rt: *Runtime, buffer: []u8) !usize {
        var length: usize = 0;

        while (length < buffer.len) {
            const result = self.recv(rt, buffer[length..]) catch |e| switch (e) {
                // Peer closed: report the bytes gathered so far, not an error.
                RecvError.Closed => return length,
else => return e, 240 | }; 241 | 242 | length += result; 243 | } 244 | 245 | return length; 246 | } 247 | 248 | pub fn send(self: Socket, rt: *Runtime, buffer: []const u8) !usize { 249 | if (rt.aio.features.has_capability(.send)) { 250 | try rt.scheduler.io_await(.{ 251 | .send = .{ 252 | .socket = self.handle, 253 | .buffer = buffer, 254 | }, 255 | }); 256 | 257 | const index = rt.current_task.?; 258 | const task = rt.scheduler.tasks.get(index); 259 | return try task.result.send.unwrap(); 260 | } else { 261 | const count: usize = blk: while (true) { 262 | break :blk std.posix.send(self.handle, buffer, 0) catch |e| return switch (e) { 263 | std.posix.SendError.WouldBlock => { 264 | Frame.yield(); 265 | continue; 266 | }, 267 | std.posix.SendError.ConnectionResetByPeer, 268 | std.posix.SendError.BrokenPipe, 269 | => SendError.Closed, 270 | else => SendError.Unexpected, 271 | }; 272 | }; 273 | 274 | return count; 275 | } 276 | } 277 | 278 | pub fn send_all(self: Socket, rt: *Runtime, buffer: []const u8) !usize { 279 | var length: usize = 0; 280 | 281 | while (length < buffer.len) { 282 | const result = self.send(rt, buffer[length..]) catch |e| switch (e) { 283 | SendError.Closed => return length, 284 | else => return e, 285 | }; 286 | length += result; 287 | } 288 | 289 | return length; 290 | } 291 | 292 | pub const Writer = struct { 293 | socket: Socket, 294 | err: ?anyerror = null, 295 | pos: u64 = 0, 296 | rt: *Runtime, 297 | interface: std.Io.Writer, 298 | 299 | pub fn init(socket: Socket, rt: *Runtime, buffer: []u8) Writer { 300 | return .{ 301 | .socket = socket, 302 | .rt = rt, 303 | .interface = initInterface(buffer), 304 | }; 305 | } 306 | 307 | pub fn initInterface(buffer: []u8) std.Io.Writer { 308 | return .{ 309 | .vtable = &.{ 310 | .drain = drain, 311 | .sendFile = sendFile, 312 | }, 313 | .buffer = buffer, 314 | }; 315 | } 316 | 317 | pub fn drain(io_w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize { 318 | const w: 
*Writer = @alignCast(@fieldParentPtr("interface", io_w)); 319 | const buffered = io_w.buffered(); 320 | 321 | if (buffered.len != 0) { 322 | const n = w.socket.send(w.rt, buffered) catch |err| { 323 | w.err = err; 324 | return error.WriteFailed; 325 | }; 326 | w.pos += n; 327 | return io_w.consume(n); 328 | } 329 | for (data[0 .. data.len - 1]) |buf| { 330 | if (buf.len == 0) continue; 331 | const n = w.socket.send(w.rt, buf) catch |err| { 332 | w.err = err; 333 | return error.WriteFailed; 334 | }; 335 | w.pos += n; 336 | return io_w.consume(n); 337 | } 338 | const pattern = data[data.len - 1]; 339 | if (pattern.len == 0 or splat == 0) return 0; 340 | const n = w.socket.send(w.rt, pattern) catch |err| { 341 | w.err = err; 342 | return error.WriteFailed; 343 | }; 344 | w.pos += n; 345 | return io_w.consume(n); 346 | } 347 | 348 | pub fn sendFile( 349 | io_w: *std.Io.Writer, 350 | file_reader: *std.fs.File.Reader, 351 | limit: std.Io.Limit, 352 | ) std.Io.Writer.FileError!usize { 353 | _ = io_w; // autofix 354 | _ = file_reader; // autofix 355 | _ = limit; // autofix 356 | return error.Unimplemented; 357 | } 358 | }; 359 | 360 | pub const Reader = struct { 361 | socket: Socket, 362 | err: ?anyerror = null, 363 | pos: u64 = 0, 364 | rt: *Runtime, 365 | interface: std.Io.Reader, 366 | 367 | pub fn init(socket: Socket, rt: *Runtime, buffer: []u8) Reader { 368 | return .{ 369 | .socket = socket, 370 | .rt = rt, 371 | .interface = initInterface(buffer), 372 | }; 373 | } 374 | 375 | pub fn initInterface(buffer: []u8) std.Io.Reader { 376 | return .{ 377 | .vtable = &.{ 378 | .stream = Reader.stream, 379 | }, 380 | .buffer = buffer, 381 | .seek = 0, 382 | .end = 0, 383 | }; 384 | } 385 | 386 | fn stream(io_reader: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize { 387 | const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader)); 388 | const w_dest = limit.slice(try w.writableSliceGreedy(1)); 389 | 390 | const n = 
r.socket.recv(r.rt, w_dest) catch |err| switch (err) { 391 | error.Closed => { 392 | return error.EndOfStream; 393 | }, 394 | else => { 395 | r.err = err; 396 | return error.ReadFailed; 397 | }, 398 | }; 399 | r.pos += n; 400 | w.advance(n); 401 | return n; 402 | } 403 | }; 404 | 405 | pub fn writer(sock: Socket, rt: *Runtime, buffer: []u8) Writer { 406 | return .init(sock, rt, buffer); 407 | } 408 | 409 | pub fn reader(sock: Socket, rt: *Runtime, buffer: []u8) Reader { 410 | return .init(sock, rt, buffer); 411 | } 412 | 413 | // TODO: sendFile like api is a more appropriate for this 414 | pub fn stream_to(from: Socket, to_w: *std.Io.Writer, rt: *Runtime) !void { 415 | debug.assert(to_w.buffer.len > 0); 416 | 417 | var file = from.reader(rt, &.{}); 418 | const file_r = &file.interface; 419 | while (true) { 420 | _ = Reader.stream(file_r, to_w, .limited(to_w.buffer.len)) catch |e| switch (e) { 421 | error.EndOfStream => break, 422 | else => { 423 | return e; 424 | }, 425 | }; 426 | _ = to_w.vtable.drain(to_w, &.{}, 0) catch break; 427 | } 428 | } 429 | }; 430 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. 
"Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. 
"Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. 
Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. 
Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. 
If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. 
Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. 
If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. 
Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. 
Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at https://mozilla.org/MPL/2.0/. 
361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 374 | 375 | -------------------------------------------------------------------------------- /src/aio/apis/poll.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const assert = std.debug.assert; 3 | const builtin = @import("builtin"); 4 | 5 | const Pool = @import("../../core/pool.zig").Pool; 6 | const Cross = @import("../../cross/lib.zig"); 7 | const Stat = @import("../../fs/lib.zig").Stat; 8 | const Timespec = @import("../../lib.zig").Timespec; 9 | const Socket = @import("../../net/lib.zig").Socket; 10 | const Completion = @import("../completion.zig").Completion; 11 | const Result = @import("../completion.zig").Result; 12 | const AcceptResult = @import("../completion.zig").AcceptResult; 13 | const AcceptError = @import("../completion.zig").AcceptError; 14 | const ConnectResult = @import("../completion.zig").ConnectResult; 15 | const ConnectError = @import("../completion.zig").ConnectError; 16 | const RecvResult = @import("../completion.zig").RecvResult; 17 | const RecvError = @import("../completion.zig").RecvError; 18 | const SendResult = @import("../completion.zig").SendResult; 19 | const SendError = @import("../completion.zig").SendError; 20 | const Job = @import("../job.zig").Job; 21 | const Async = @import("../lib.zig").Async; 22 | const AsyncOptions = @import("../lib.zig").AsyncOptions; 23 | 
const AsyncFeatures = @import("../lib.zig").AsyncFeatures;
const AsyncSubmission = @import("../lib.zig").AsyncSubmission;

const log = std.log.scoped(.@"tardy/aio/poll");

/// A pending timer: absolute expiry (wall-clock milliseconds, from
/// `std.time.milliTimestamp`) plus the task to wake when it fires.
const TimerPair = struct {
    milliseconds: usize,
    task: usize,
};

/// Min-heap keyed on expiry so the soonest deadline is always at `peek()`.
const TimerQueue = std.PriorityQueue(TimerPair, void, struct {
    fn compare(_: void, a: TimerPair, b: TimerPair) std.math.Order {
        return std.math.order(a.milliseconds, b.milliseconds);
    }
}.compare);

/// poll(2)-backed async backend. Advertises only timers and socket
/// readiness operations (accept/connect/recv/send); filesystem submissions
/// are never routed here (see the features list in `to_async`).
pub const AsyncPoll = struct {
    allocator: std.mem.Allocator,

    // Self-wake mechanism: index 0 is the read end, 1 is the write end.
    // On POSIX this is a pipe; on Windows it is a loopback TCP socket pair
    // (WSAPoll cannot wait on anonymous pipes).
    wake_pipe: [2]std.posix.fd_t,

    // Parallel structures: every fd handed to poll() lives in `fd_list`
    // and has a matching Job entry keyed by that fd in `fd_job_map`.
    fd_list: std.ArrayList(std.posix.pollfd),
    fd_job_map: std.AutoHashMap(std.posix.fd_t, Job),
    timers: TimerQueue,

    pub fn init(allocator: std.mem.Allocator, options: AsyncOptions) !AsyncPoll {
        // +1 reserves a slot for the wake fd itself.
        const size = options.size_tasks_initial + 1;

        // 0 is read, 1 is write.
        const pipe: [2]std.posix.fd_t = blk: {
            if (comptime builtin.os.tag == .windows) {
                // Emulate pipe() with a connected loopback TCP pair:
                // bind to an ephemeral port, connect to it, accept.
                const server_socket = try std.posix.socket(std.posix.AF.INET, std.posix.SOCK.STREAM, 0);
                defer std.posix.close(server_socket);

                const addr = std.net.Address.initIp4(.{ 127, 0, 0, 1 }, 0);
                try std.posix.bind(server_socket, &addr.any, addr.getOsSockLen());

                var binded_addr: std.posix.sockaddr = undefined;
                var binded_size: std.posix.socklen_t = @sizeOf(std.posix.sockaddr);
                try std.posix.getsockname(server_socket, &binded_addr, &binded_size);

                try std.posix.listen(server_socket, 1);

                const write_end = try std.posix.socket(std.posix.AF.INET, std.posix.SOCK.STREAM, 0);
                errdefer std.posix.close(write_end);
                try std.posix.connect(write_end, &binded_addr, binded_size);

                const read_end = try std.posix.accept(server_socket, null, null, 0);
                errdefer std.posix.close(read_end);

                break :blk .{ read_end, write_end };
            } else break :blk try std.posix.pipe();
        };
        errdefer for (pipe) |fd| std.posix.close(fd);

        var fd_list: std.ArrayList(std.posix.pollfd) = try .initCapacity(allocator, size);
        errdefer fd_list.deinit(allocator);

        var fd_job_map: std.AutoHashMap(std.posix.fd_t, Job) = .init(allocator);
        errdefer fd_job_map.deinit();
        try fd_job_map.ensureTotalCapacity(@intCast(size));

        // Register the read end of the wake pipe as a permanent `.wake` job.
        if (comptime builtin.os.tag == .windows) {
            try fd_list.append(allocator, .{ .fd = @ptrCast(pipe[0]), .events = std.posix.POLL.IN, .revents = 0 });
            try fd_job_map.put(@ptrCast(pipe[0]), .{ .index = 0, .type = .wake, .task = 0 });
        } else {
            try fd_list.append(allocator, .{ .fd = pipe[0], .events = std.posix.POLL.IN, .revents = 0 });
            try fd_job_map.put(pipe[0], .{ .index = 0, .type = .wake, .task = 0 });
        }

        const timers = TimerQueue.init(allocator, {});
        errdefer timers.deinit();

        return AsyncPoll{
            .allocator = allocator,
            .wake_pipe = pipe,
            .fd_list = fd_list,
            .fd_job_map = fd_job_map,
            .timers = timers,
        };
    }

    pub fn inner_deinit(self: *AsyncPoll, allocator: std.mem.Allocator) void {
        self.fd_list.deinit(allocator);
        self.fd_job_map.deinit();
        self.timers.deinit();
        for (self.wake_pipe) |fd| std.posix.close(fd);
    }

    fn deinit(runner: *anyopaque, allocator: std.mem.Allocator) void {
        const poll: *AsyncPoll = @ptrCast(@alignCast(runner));
        poll.inner_deinit(allocator);
    }

    /// Dispatch a submission to the matching queue_* helper. Filesystem
    /// operations are unreachable here: this backend does not advertise
    /// them in its AsyncFeatures, so the scheduler never sends them.
    pub fn queue_job(runner: *anyopaque, task: usize, job: AsyncSubmission) !void {
        const poll: *AsyncPoll = @ptrCast(@alignCast(runner));

        try switch (job) {
            .timer => |inner| queue_timer(poll, task, inner),
            .accept => |inner| queue_accept(poll, task, inner.socket, inner.kind),
            .connect => |inner| queue_connect(poll, task, inner.socket, inner.addr, inner.kind),
            .recv => |inner| queue_recv(poll, task, inner.socket, inner.buffer),
            .send => |inner| queue_send(poll, task, inner.socket, inner.buffer),
            .open, .delete, .mkdir, .stat, .read, .write, .close => unreachable,
        };
    }

    /// Convert the relative timespec into an absolute wall-clock deadline
    /// in milliseconds and enqueue it.
    fn queue_timer(self: *AsyncPoll, task: usize, timespec: Timespec) !void {
        const current: usize = @intCast(std.time.milliTimestamp());
        const seconds_to_ms: usize = @intCast(timespec.seconds * 1000);
        const nanos_to_ms: usize = @intCast(@divFloor(timespec.nanos, std.time.ns_per_ms));
        const milliseconds: usize = current + seconds_to_ms + nanos_to_ms;

        try self.timers.add(.{ .milliseconds = milliseconds, .task = task });
    }

    /// Watch `socket` for readability; the accept(2) itself happens in `reap`.
    fn queue_accept(
        self: *AsyncPoll,
        task: usize,
        socket: std.posix.socket_t,
        kind: Socket.Kind,
    ) !void {
        try self.fd_list.append(self.allocator, .{ .fd = socket, .events = std.posix.POLL.IN, .revents = 0 });
        try self.fd_job_map.put(socket, .{
            .index = 0,
            .type = .{
                .accept = .{
                    .socket = socket,
                    .kind = kind,
                    .addr = undefined,
                    .addr_len = @sizeOf(std.net.Address),
                },
            },
            .task = task,
        });
    }

    /// Start a non-blocking connect and watch `socket` for writability,
    /// which signals that the connect has finished (or failed).
    fn queue_connect(
        self: *AsyncPoll,
        task: usize,
        socket: std.posix.socket_t,
        addr: std.net.Address,
        kind: Socket.Kind,
    ) !void {
        // WouldBlock is the expected in-progress result for a non-blocking
        // socket; anything else is a hard failure surfaced immediately.
        std.posix.connect(
            socket,
            &addr.any,
            addr.getOsSockLen(),
        ) catch |e| switch (e) {
            std.posix.ConnectError.WouldBlock => {},
            else => return e,
        };

        try self.fd_list.append(self.allocator, .{ .fd = socket, .events = std.posix.POLL.OUT, .revents = 0 });
        try self.fd_job_map.put(socket, .{
            .index = 0,
            .type = .{
                .connect = .{
                    .socket = socket,
                    .addr = addr,
                    .kind = kind,
                },
            },
            .task = task,
        });
    }

    /// Watch `socket` for readability; the recv(2) itself happens in `reap`.
    fn queue_recv(self: *AsyncPoll, task: usize, socket: std.posix.socket_t, buffer: []u8) !void {
        try self.fd_list.append(self.allocator, .{ .fd = socket, .events = std.posix.POLL.IN, .revents = 0 });
        try self.fd_job_map.put(socket, .{
            .index = 0,
            .type = .{
                .recv = .{
                    .socket = socket,
                    .buffer = buffer,
                },
            },
            .task = task,
        });
    }

    /// Watch `socket` for writability; the send(2) itself happens in `reap`.
    fn queue_send(self: *AsyncPoll, task: usize, socket: std.posix.socket_t, buffer: []const u8) !void {
        try self.fd_list.append(self.allocator, .{ .fd = socket, .events = std.posix.POLL.OUT, .revents = 0 });
        try self.fd_job_map.put(socket, .{
            .index = 0,
            .type = .{
                .send = .{
                    .socket = socket,
                    .buffer = buffer,
                },
            },
            .task = task,
        });
    }

    /// Interrupt a blocked `reap` from another thread by writing to the
    /// wake pipe. The short-write loop covers partial pipe writes.
    pub fn wake(runner: *anyopaque) !void {
        const poll: *AsyncPoll = @ptrCast(@alignCast(runner));

        const bytes: []const u8 = "00000000";
        var i: usize = 0;
        while (i < bytes.len) i += try std.posix.write(poll.wake_pipe[1], bytes[i..]);
    }

    // Jobs are armed directly in queue_*; there is no submission batch to flush.
    pub fn submit(_: *anyopaque) !void {}

    /// Reap completed operations into `completions`, returning the filled
    /// prefix. When `wait` is true, blocks until at least one completion
    /// (timer expiry or fd readiness) is available.
    pub fn reap(runner: *anyopaque, completions: []Completion, wait: bool) ![]Completion {
        const poll: *AsyncPoll = @ptrCast(@alignCast(runner));
        var reaped: usize = 0;

        poll_loop: while (reaped == 0 and wait) {
            const current: usize = @intCast(std.time.milliTimestamp());

            // Reap all already-expired Timers (bounded by buffer space).
            while (poll.timers.peek()) |peeked| {
                if (peeked.milliseconds > current) break;
                if (completions.len - reaped == 0) break;

                const timer = poll.timers.remove();
                completions[reaped] = .{
                    .result = .none,
                    .task = timer.task,
                };
                reaped += 1;
            }

            var timeout: i32 = if (!wait or reaped > 0) 0 else -1;

            // Bound the poll() wait by the next timer deadline — but only
            // when nothing has been reaped yet. If completions are already
            // gathered, the zero timeout above must stand so they are
            // delivered without waiting on the next timer. The subtraction
            // saturates because an expired timer can remain queued when the
            // completion buffer filled up above; a plain `-` would underflow
            // the usize. The cast is checked so a far-future deadline clamps
            // instead of overflowing i32.
            if (reaped == 0) {
                if (poll.timers.peek()) |peeked| {
                    const delta_ms = peeked.milliseconds -| current;
                    timeout = std.math.cast(i32, delta_ms) orelse std.math.maxInt(i32);
                }
            }

            log.debug("timeout = {d}", .{timeout});
            const poll_result = if (comptime builtin.os.tag == .windows)
                std.os.windows.poll(poll.fd_list.items.ptr, @intCast(poll.fd_list.items.len), timeout)
            else
                try std.posix.poll(poll.fd_list.items, timeout);

            // Timed out waiting for a timer deadline: loop to reap it.
            if (poll_result == 0 and timeout > 0) continue :poll_loop;

            var ready = poll_result;
            // Iterate backwards so swapRemove never disturbs unvisited slots.
            var i = poll.fd_list.items.len;
            while (i > 0) : (i -= 1) {
                const index = i - 1;
                if (reaped >= completions.len) break;
                if (ready == 0) break;

                const pfd = poll.fd_list.items[index];
                log.debug("revents={x}", .{pfd.revents});
                if (pfd.revents == 0) continue;
                const job = poll.fd_job_map.getPtr(pfd.fd).?;

                // One-shot by default: the fd is deregistered after firing
                // unless the handler below flips `remove` (wake fd stays
                // registered forever; spurious wakeups re-arm).
                var remove: bool = true;
                defer if (remove) {
                    _ = poll.fd_list.swapRemove(index);
                    _ = poll.fd_job_map.remove(pfd.fd);
                    ready -= 1;
                };

                const result: Result = result: {
                    switch (job.type) {
                        .wake => {
                            assert(pfd.revents & std.posix.POLL.IN != 0 or pfd.revents & std.posix.POLL.RDNORM != 0);

                            // Drain the wake bytes so the fd goes quiet again.
                            var buf: [8]u8 = undefined;
                            _ = std.posix.read(poll.wake_pipe[0], &buf) catch unreachable;
                            remove = false;
                            break :result .wake;
                        },
                        .accept => |*inner| {
                            assert(pfd.revents & std.posix.POLL.IN != 0 or pfd.revents & std.posix.POLL.RDNORM != 0);

                            const socket = std.posix.accept(
                                inner.socket,
                                &inner.addr.any,
                                @ptrCast(&inner.addr_len),
                                std.posix.SOCK.NONBLOCK,
                            ) catch |e| {
                                const err = switch (e) {
                                    // Spurious readiness: keep the fd armed.
                                    std.posix.AcceptError.WouldBlock => {
                                        log.debug("accept wouldblock - not removing", .{});
                                        remove = false;
                                        continue;
                                    },
                                    std.posix.AcceptError.ConnectionAborted,
                                    std.posix.AcceptError.ConnectionResetByPeer,
                                    => AcceptError.ConnectionAborted,
                                    std.posix.AcceptError.SocketNotListening => AcceptError.NotListening,
                                    std.posix.AcceptError.ProcessFdQuotaExceeded => AcceptError.ProcessFdQuotaExceeded,
                                    std.posix.AcceptError.SystemFdQuotaExceeded => AcceptError.SystemFdQuotaExceeded,
                                    std.posix.AcceptError.FileDescriptorNotASocket => AcceptError.NotASocket,
                                    std.posix.AcceptError.OperationNotSupported => AcceptError.OperationNotSupported,
                                    else => AcceptError.Unexpected,
                                };

                                break :result .{ .accept = .{ .err = err } };
                            };

                            break :result .{
                                .accept = .{
                                    .actual = .{
                                        .handle = socket,
                                        .addr = inner.addr,
                                        .kind = inner.kind,
                                    },
                                },
                            };
                        },
                        .connect => {
                            assert(pfd.revents & std.posix.POLL.OUT != 0);

                            if (pfd.revents & std.posix.POLL.ERR != 0) {
                                break :result .{ .connect = .{ .err = ConnectError.Unexpected } };
                            } else {
                                break :result .{ .connect = .actual };
                            }
                        },
                        .recv => |inner| {
                            if (pfd.revents & std.posix.POLL.HUP != 0) break :result .{
                                .recv = .{ .err = RecvError.Closed },
                            };

                            assert(pfd.revents & std.posix.POLL.IN != 0 or pfd.revents & std.posix.POLL.RDNORM != 0);
                            const count = std.posix.recv(inner.socket, inner.buffer, 0) catch |e| {
                                const err = switch (e) {
                                    // Spurious readiness: keep the fd armed.
                                    std.posix.RecvFromError.WouldBlock => {
                                        log.debug("recv wouldblock - not removing", .{});
                                        remove = false;
                                        continue;
                                    },
                                    std.posix.RecvFromError.ConnectionResetByPeer => RecvError.Closed,
                                    else => RecvError.Unexpected,
                                };

                                break :result .{ .recv = .{ .err = err } };
                            };

                            // recv of 0 bytes means orderly peer shutdown.
                            if (count == 0) break :result .{ .recv = .{ .err = RecvError.Closed } };
                            break :result .{ .recv = .{ .actual = count } };
                        },
                        .send => |inner| {
                            if (pfd.revents & std.posix.POLL.HUP != 0) break :result .{
                                .send = .{ .err = SendError.Closed },
                            };

                            assert(pfd.revents & std.posix.POLL.OUT != 0);
                            const count = std.posix.send(inner.socket, inner.buffer, 0) catch |e| {
                                log.err("send failed with {}", .{e});
                                const err = switch (e) {
                                    // Spurious readiness: keep the fd armed.
                                    std.posix.SendError.WouldBlock => {
                                        log.debug("send wouldblock - not removing", .{});
                                        remove = false;
                                        continue;
                                    },
                                    std.posix.SendError.ConnectionResetByPeer,
                                    std.posix.SendError.BrokenPipe,
                                    => SendError.Closed,
                                    else => SendError.Unexpected,
                                };

                                break :result .{ .send = .{ .err = err } };
                            };

                            break :result .{ .send = .{ .actual = count } };
                        },
                        // Timers never enter fd_job_map; fs ops never reach
                        // this backend (not advertised in AsyncFeatures).
                        .timer,
                        .open,
                        .delete,
                        .mkdir,
                        .stat,
                        .read,
                        .write,
                        .close,
                        => unreachable,
                    }
                };

                completions[reaped] = .{ .result = result, .task = job.task };
                reaped += 1;
            }
        }

        return completions[0..reaped];
    }

    /// Wrap this backend in the generic Async vtable interface.
    pub fn to_async(self: *AsyncPoll) Async {
        return Async{
            .runner = self,
            .features = AsyncFeatures.init(&.{
                .timer,
                .accept,
                .connect,
                .recv,
                .send,
            }),
            .vtable = .{
                .queue_job = queue_job,
                .deinit = deinit,
                .wake = wake,
                .submit = submit,
                .reap = reap,
            },
        };
    }
};