├── .gitignore
├── .gitmodules
├── LICENSE
├── README.md
├── Timer.zig
├── Timer2.zig
├── async_socket.zig
├── async_wait_group.zig
├── async_wait_group_allocator.zig
├── blake3
│   ├── blake3.zig
│   └── build.zig
├── build.zig
├── circuit_breaker.zig
├── ctrl_c.zig
├── ed25519_donna
│   ├── build.zig
│   ├── ed25519.c
│   └── ed25519.zig
├── example_http_client.zig
├── example_http_server.zig
├── example_tcp_client.zig
├── example_tcp_server.zig
├── hyperia.zig
├── mpmc.zig
├── mpsc.zig
├── net.zig
├── object_pool.zig
├── oneshot.zig
├── picohttp
│   └── picohttp.zig
├── reactor.zig
├── select.zig
├── socket.zig
├── spsc.zig
└── sync.zig
/.gitignore: -------------------------------------------------------------------------------- 1 | **/zig-cache -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "zap"] 2 | path = zap 3 | url = https://github.com/kprotty/zap 4 | [submodule "picohttp/lib"] 5 | path = picohttp/lib 6 | url = https://github.com/h2o/picohttpparser.git 7 | [submodule "blake3/lib"] 8 | path = blake3/lib 9 | url = https://github.com/BLAKE3-team/BLAKE3.git 10 | [submodule "ed25519_donna/lib"] 11 | path = ed25519_donna/lib 12 | url = https://github.com/floodyberry/ed25519-donna.git 13 | [submodule "clap"] 14 | path = clap 15 | url = https://github.com/Hejsil/zig-clap 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Kenta Iwasaki 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hyperia 2 | 3 | A playground of Zig async code. 4 | 5 | The intent is to work towards eventually upstreaming the majority of the code in this playground to the Zig standard library (e.g. async sockets, timers, event loops, lockless/lock-free synchronization primitives). 6 | 7 | All other code present in this playground comprises utilities that are intended to make writing correct high-performance async Zig applications simpler (e.g. a lock-free thread-safe object pool, a thread-safe memory allocator that suspends until all allocations are freed, etc.).
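For instance, the suspending allocator mentioned above can be exercised as follows (a minimal sketch mirroring the test in async_wait_group_allocator.zig further down; the test name is illustrative):

```zig
const std = @import("std");
const hyperia = @import("hyperia");

test "wait until every allocation has been freed" {
    hyperia.init(); // spin up the thread pool that resumes suspended waiters
    defer hyperia.deinit();

    var wga = hyperia.AsyncWaitGroupAllocator{ .backing_allocator = std.testing.allocator };
    const allocator = &wga.allocator;

    const buf = try allocator.alloc(u8, 16);
    var waiter = async wga.wait(); // suspends while any allocated bytes remain live

    allocator.free(buf); // freeing the last allocation resumes the waiter
    nosuspend await waiter;
}
```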
8 | 9 | All of the code is very much a work in progress - a lot of work is being put into 1) writing a thorough unit test suite to check for correctness, and 2) ensuring that a flexible-enough API surface is provided by introducing a slew of comprehensive async code examples. 10 | 11 | The code in this playground only supports Linux for the time being, though work will eventually be done to support macOS and Windows as well. 12 | 13 | If you would like to take part in contributing to the playground, and help make Zig's async/await story production-ready for high-performance HTTP/TCP/UDP/WebSocket clients and servers sooner, please do reach out to me (lithdew#6092) on the Zig Programming Language Discord :). 14 | 15 | ## [TCP Client Example](example_tcp_client.zig) 16 | 17 | An example that demonstrates what it takes to make a production-ready TCP client. 18 | 19 | The TCP client points to a single destination address (by default 127.0.0.1:9000), and comprises a bounded pool of connections (bounded by default to a maximum of 4 connections; configurable). 20 | 21 | Pooling is performed in order to reduce the likelihood of TCP head-of-line blocking, which significantly hampers message throughput in the case of packet loss. 22 | 23 | Messages to be written to the client's destination are queued into a multiple-producer/multiple-consumer queue. Connections in the pool pop messages from the queue and directly send the messages to the client's destination, as shown in the sketch below. 24 | 25 | Connections are spun up and registered to the pool dynamically. When a message is queued to be written, if the pool still has capacity for spawning another connection and the client's write queue appears to still contain messages that have yet to be written, a new connection is spun up and registered. 26 | 27 | The TCP client additionally supports auto-reconnection that complements the pooling strategy outlined above. If all connections in the pool spontaneously close, the last connection that was closed attempts to reconnect to the client's destination up to a configurable number of times. If all reconnection attempts fail, all pending attempts to queue messages to be written to the client's destination are cancelled. 28 | 29 | Work still needs to be done to implement async timers to time out socket reads/writes/connect attempts that take too long. 30 | 31 | Work additionally needs to be done in order to thoroughly audit the correctness of the TCP client's state machine.
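The queueing flow described above looks roughly like this (a hedged sketch: it reuses the `mpmc.AsyncQueue` API from example_http_client.zig, while `Message`, `enqueue`, and `connectionWriteLoop` are illustrative names that do not exist in this repository):

```zig
const std = @import("std");
const hyperia = @import("hyperia");

const mpmc = hyperia.mpmc;

// Illustrative message type; real payloads carry whatever the protocol needs.
const Message = struct {
    payload: []const u8,
};

// Bounded multiple-producer/multiple-consumer queue shared by every pooled
// connection, mirroring the WriteQueue declaration in example_http_client.zig.
const WriteQueue = mpmc.AsyncQueue(*Message, 4096);

fn enqueue(queue: *WriteQueue, message: *Message) !void {
    // Producers push onto the shared queue; push() returns false once the
    // queue has been closed (e.g. after all reconnection attempts failed).
    if (!queue.pusher().push(message)) return error.Closed;
}

fn connectionWriteLoop(queue: *WriteQueue, socket: *hyperia.AsyncSocket) !void {
    var popper = queue.popper();

    // Each connection in the pool pops messages off the shared queue and
    // sends them directly to the destination; pop() returns null once the
    // queue has been closed and drained.
    while (popper.pop()) |message| {
        var index: usize = 0;
        while (index < message.payload.len) {
            index += try socket.send(message.payload[index..], std.os.MSG_NOSIGNAL);
        }
    }
}
```

Since every connection drains the same queue, a connection stalled by packet loss only delays the messages it has already popped - the rest of the queue keeps flowing through its healthy siblings.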
-------------------------------------------------------------------------------- /Timer.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const mem = std.mem; 6 | const math = std.math; 7 | const time = std.time; 8 | const mpsc = hyperia.mpsc; 9 | const testing = std.testing; 10 | 11 | const assert = std.debug.assert; 12 | 13 | const SpinLock = hyperia.sync.SpinLock; 14 | 15 | const Timer = @This(); 16 | 17 | pub const Handle = struct { 18 | event: mpsc.AsyncAutoResetEvent(void) = .{}, 19 | expires_at: usize = 0, 20 | 21 | pub fn start(self: *Timer.Handle, timer: *Timer, expires_at: usize) !void { 22 | self.event = .{}; 23 | self.expires_at = expires_at; 24 | 25 | try timer.add(self); 26 | } 27 | 28 | pub fn cancel(self: *Timer.Handle, timer: *Timer) void { 29 | if (!timer.cancel(self)) return; 30 | hyperia.pool.schedule(.{}, self.set()); 31 | } 32 | 33 | pub fn wait(self: *Timer.Handle) void { 34 | return self.event.wait(); 35 | } 36 | 37 | pub fn set(self: *Timer.Handle) ?*zap.Pool.Runnable { 38 | return self.event.set(); 39 | } 40 | }; 41 | 42 | lock: SpinLock = .{}, 43 | entries: std.PriorityQueue(*Timer.Handle), 44 | 45 | pub fn init(allocator: *mem.Allocator) Timer { 46 | return Timer{ 47 | .entries = std.PriorityQueue(*Timer.Handle).init(allocator, struct { 48 | fn lessThan(a: *Timer.Handle, b: *Timer.Handle) math.Order { 49 | return math.order(a.expires_at, b.expires_at); 50 | } 51 | }.lessThan), 52 | }; 53 | } 54 | 55 | pub fn deinit(self: *Timer, allocator: *mem.Allocator) void { 56 | self.entries.deinit(); 57 | } 58 | 59 | pub fn add(self: *Timer, handle: *Timer.Handle) !void { 60 | const held = self.lock.acquire(); 61 | defer held.release(); 62 | 63 | try self.entries.add(handle); 64 | } 65 | 66 | pub fn cancel(self: *Timer, handle: *Timer.Handle) bool { 67 | const held = self.lock.acquire(); 68 | defer held.release(); 69 | 70 | const i = mem.indexOfScalar(*Timer.Handle, self.entries.items, handle) orelse return false; 71 | assert(self.entries.removeIndex(i) == handle); 72 | 73 | return true; 74 | } 75 | 76 | pub fn delay(self: *Timer, current_time: usize) ?usize { 77 | const held = self.lock.acquire(); 78 | defer held.release(); 79 | 80 | const head = self.entries.peek() orelse return null; 81 | return math.sub(usize, head.expires_at, current_time) catch 0; 82 | } 83 | 84 | pub fn update(self: *Timer, current_time: usize, callback: anytype) void { 85 | const held = self.lock.acquire(); 86 | defer held.release(); 87 | 88 | while (true) { 89 | const head = self.entries.peek() orelse break; 90 | if (head.expires_at > current_time) break; 91 | 92 | callback.call(self.entries.remove()); 93 | } 94 | } 95 | 96 | test { 97 | testing.refAllDecls(@This()); 98 | } 99 | 100 | test "timer/async: add timers and execute them" { 101 | hyperia.init(); 102 | defer hyperia.deinit(); 103 | 104 | const allocator = testing.allocator; 105 | 106 | var timer = Timer.init(allocator); 107 | defer timer.deinit(allocator); 108 | 109 | var a: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 10 }; 110 | var b: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 20 }; 111 | var c: Timer.Handle = .{ .expires_at = @intCast(usize, time.milliTimestamp()) + 30 }; 112 | 113 | try timer.add(&a); 114 | try timer.add(&b); 115 | try timer.add(&c); 116 | 117 | var fa = async a.wait(); 118 | var fb = async b.wait(); 119 | var 
fc = async c.wait(); 120 | 121 | while (true) { 122 | time.sleep(timer.delay(@intCast(usize, time.milliTimestamp())) orelse break); 123 | 124 | var batch: zap.Pool.Batch = .{}; 125 | defer while (batch.pop()) |runnable| runnable.run(); 126 | 127 | timer.update(@intCast(usize, time.milliTimestamp()), struct { 128 | batch: *zap.Pool.Batch, 129 | 130 | pub fn call(self: @This(), handle: *Timer.Handle) void { 131 | self.batch.push(handle.set()); 132 | } 133 | }{ .batch = &batch }); 134 | } 135 | 136 | nosuspend await fa; 137 | nosuspend await fb; 138 | nosuspend await fc; 139 | } 140 | 141 | test "timer: add timers and update latest time" { 142 | const allocator = testing.allocator; 143 | 144 | var timer = Timer.init(allocator); 145 | defer timer.deinit(allocator); 146 | 147 | var a: Timer.Handle = .{ .expires_at = 10 }; 148 | var b: Timer.Handle = .{ .expires_at = 20 }; 149 | var c: Timer.Handle = .{ .expires_at = 30 }; 150 | 151 | try timer.add(&a); 152 | try timer.add(&b); 153 | try timer.add(&c); 154 | 155 | timer.update(25, struct { 156 | fn call(handle: *Timer.Handle) void { 157 | return {}; 158 | } 159 | }); 160 | 161 | testing.expect(timer.delay(25) == c.expires_at - 25); 162 | } 163 | -------------------------------------------------------------------------------- /Timer2.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const os = std.os; 6 | const mem = std.mem; 7 | const math = std.math; 8 | const time = std.time; 9 | const testing = std.testing; 10 | 11 | const assert = std.debug.assert; 12 | 13 | const Reactor = hyperia.Reactor; 14 | 15 | const Timer = @This(); 16 | 17 | pub const Handle = struct { 18 | pub const Error = error{Cancelled} || mem.Allocator.Error; 19 | 20 | parent: *Timer, 21 | expires_at: u64, 22 | result: ?Error = null, 23 | 24 | runnable: zap.Pool.Runnable = .{ .runFn = resumeWaiter }, 25 | frame: anyframe = undefined, 26 | 27 | fn resumeWaiter(runnable: *zap.Pool.Runnable) void { 28 | const self = @fieldParentPtr(Timer.Handle, "runnable", runnable); 29 | resume self.frame; 30 | } 31 | 32 | pub fn cancel(self: *Timer.Handle) void { 33 | const i = mem.indexOfScalar(*Timer.Handle, self.parent.pending.items[0..self.parent.pending.len], self) orelse return; 34 | assert(self.parent.pending.removeIndex(i) == self); 35 | 36 | self.result = error.Cancelled; 37 | hyperia.pool.schedule(.{}, &self.runnable); 38 | } 39 | 40 | pub fn wait(self: *Timer.Handle) callconv(.Async) !void { 41 | if (self.result) |err| { 42 | self.result = null; 43 | return err; 44 | } 45 | 46 | suspend { 47 | self.frame = @frame(); 48 | 49 | if (self.parent.pending.add(self)) { 50 | self.parent.event.post(); 51 | } else |err| { 52 | self.result = err; 53 | hyperia.pool.schedule(.{}, &self.runnable); 54 | } 55 | } 56 | 57 | if (self.result) |err| { 58 | self.result = null; 59 | return err; 60 | } 61 | } 62 | }; 63 | 64 | event: *Reactor.AutoResetEvent, 65 | pending: std.PriorityQueue(*Handle), 66 | 67 | pub fn init(allocator: *mem.Allocator, event: *Reactor.AutoResetEvent) Timer { 68 | return Timer{ 69 | .event = event, 70 | .pending = std.PriorityQueue(*Handle).init(allocator, struct { 71 | fn compare(a: *Handle, b: *Handle) math.Order { 72 | return math.order(a.expires_at, b.expires_at); 73 | } 74 | }.compare), 75 | }; 76 | } 77 | 78 | pub fn deinit(self: *Timer, allocator: *mem.Allocator) void { 79 | var batch: zap.Pool.Batch = 
.{}; 80 | defer hyperia.pool.schedule(.{}, batch); 81 | 82 | while (self.pending.removeOrNull()) |handle| { 83 | handle.result = error.Cancelled; 84 | batch.push(&handle.runnable); 85 | } 86 | 87 | self.pending.deinit(); 88 | } 89 | 90 | pub fn after(self: *Timer, duration_ms: u64) Handle { 91 | return Handle{ .parent = self, .expires_at = @intCast(u64, time.milliTimestamp()) + duration_ms }; 92 | } 93 | 94 | pub fn at(self: *Timer, timestamp_ms: u64) Handle { 95 | return Handle{ .parent = self, .expires_at = timestamp_ms }; 96 | } 97 | 98 | pub fn delay(self: *Timer) ?u64 { 99 | const head = self.pending.peek() orelse return null; 100 | return math.sub(u64, head.expires_at, @intCast(u64, time.milliTimestamp())) catch 0; 101 | } 102 | 103 | pub fn update(self: *Timer, closure: anytype) void { 104 | const current_time = @intCast(u64, time.milliTimestamp()); 105 | 106 | while (true) { 107 | const head = self.pending.peek() orelse break; 108 | if (head.expires_at > current_time) break; 109 | 110 | assert(self.pending.remove() == head); 111 | 112 | closure.call(head); 113 | } 114 | } 115 | 116 | test { 117 | testing.refAllDecls(@This()); 118 | } 119 | 120 | test "timer2: register timer" { 121 | hyperia.init(); 122 | defer hyperia.deinit(); 123 | 124 | const allocator = testing.allocator; 125 | 126 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 127 | defer reactor.deinit(); 128 | 129 | var reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 130 | defer reactor_event.deinit(); 131 | 132 | try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); 133 | 134 | var timer = Timer.init(allocator, &reactor_event); 135 | defer timer.deinit(allocator); 136 | 137 | var handle = timer.after(100); // 100 milliseconds 138 | var handle_frame = async handle.wait(); 139 | 140 | while (timer.delay()) |delay_duration| { 141 | try reactor.poll(1, struct { 142 | pub fn call(event: Reactor.Event) void { 143 | var batch: zap.Pool.Batch = .{}; 144 | defer while (batch.pop()) |runnable| runnable.run(); 145 | 146 | const event_handle = @intToPtr(*Reactor.Handle, event.data); 147 | event_handle.call(&batch, event); 148 | } 149 | }, delay_duration); 150 | 151 | timer.update(struct { 152 | pub fn call(timer_handle: *Timer.Handle) void { 153 | timer_handle.runnable.run(); 154 | } 155 | }); 156 | } 157 | 158 | try nosuspend await handle_frame; 159 | } 160 | -------------------------------------------------------------------------------- /async_socket.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | const Socket = @import("socket.zig").Socket; 5 | const Reactor = @import("reactor.zig").Reactor; 6 | 7 | const io = std.io; 8 | const os = std.os; 9 | const net = std.net; 10 | const mpsc = hyperia.mpsc; 11 | const testing = std.testing; 12 | 13 | pub const AsyncSocket = struct { 14 | pub const Error = error{Cancelled}; 15 | 16 | const Self = @This(); 17 | 18 | const READY = 0; 19 | const CANCELLED = 1; 20 | 21 | socket: Socket, 22 | readable: mpsc.AsyncAutoResetEvent(usize) = .{}, 23 | writable: mpsc.AsyncAutoResetEvent(usize) = .{}, 24 | handle: Reactor.Handle = .{ .onEventFn = onEvent }, 25 | 26 | pub fn init(domain: u32, socket_type: u32, flags: u32) !Self { 27 | return Self{ .socket = try Socket.init(domain, socket_type | os.SOCK_NONBLOCK, flags) }; 28 | } 29 | 30 | pub fn deinit(self: *Self) void { 31 | self.socket.deinit(); 32 | } 33 | 34 | pub 
fn from(socket: Socket) Self { 35 | return Self{ .socket = socket }; 36 | } 37 | 38 | pub fn shutdown(self: *Self, how: os.ShutdownHow) !void { 39 | return self.socket.shutdown(how); 40 | } 41 | 42 | pub fn bind(self: *Self, address: net.Address) !void { 43 | return self.socket.bind(address); 44 | } 45 | 46 | pub fn listen(self: *Self, max_backlog_size: usize) !void { 47 | return self.socket.listen(max_backlog_size); 48 | } 49 | 50 | pub fn setReuseAddress(self: *Self, enabled: bool) !void { 51 | return self.socket.setReuseAddress(enabled); 52 | } 53 | 54 | pub fn setReusePort(self: *Self, enabled: bool) !void { 55 | return self.socket.setReusePort(enabled); 56 | } 57 | 58 | pub fn setNoDelay(self: *Self, enabled: bool) !void { 59 | return self.socket.setNoDelay(enabled); 60 | } 61 | 62 | pub fn setFastOpen(self: *Self, enabled: bool) !void { 63 | return self.socket.setFastOpen(enabled); 64 | } 65 | 66 | pub fn setQuickAck(self: *Self, enabled: bool) !void { 67 | return self.socket.setQuickAck(enabled); 68 | } 69 | 70 | pub fn getName(self: *Self) !net.Address { 71 | return self.socket.getName(); 72 | } 73 | 74 | pub fn getReadBufferSize(self: *Self) !u32 { 75 | return self.socket.getReadBufferSize(); 76 | } 77 | 78 | pub fn getWriteBufferSize(self: *Self) !u32 { 79 | return self.socket.getWriteBufferSize(); 80 | } 81 | 82 | pub fn setReadBufferSize(self: *Self, size: u32) !void { 83 | return self.socket.setReadBufferSize(size); 84 | } 85 | 86 | pub fn setWriteBufferSize(self: *Self, size: u32) !void { 87 | return self.socket.setWriteBufferSize(size); 88 | } 89 | 90 | pub fn setReadTimeout(self: *Self, milliseconds: usize) !void { 91 | return self.socket.setReadTimeout(milliseconds); 92 | } 93 | 94 | pub fn setWriteTimeout(self: *Self, milliseconds: usize) !void { 95 | return self.socket.setWriteTimeout(milliseconds); 96 | } 97 | 98 | pub fn tryRead(self: *Self, buf: []u8) os.ReadError!usize { 99 | return self.socket.read(buf); 100 | } 101 | 102 | pub fn tryRecv(self: *Self, buf: []u8, flags: u32) os.RecvFromError!usize { 103 | return self.socket.recv(buf, flags); 104 | } 105 | 106 | pub fn tryWrite(self: *Self, buf: []const u8) os.WriteError!usize { 107 | return self.socket.write(buf); 108 | } 109 | 110 | pub fn trySend(self: *Self, buf: []const u8, flags: u32) os.SendError!usize { 111 | return self.socket.send(buf, flags); 112 | } 113 | 114 | pub fn tryConnect(self: *Self, address: net.Address) os.ConnectError!void { 115 | return self.socket.connect(address); 116 | } 117 | 118 | pub fn tryAccept(self: *Self, flags: u32) !Socket.Connection { 119 | return self.socket.accept(flags); 120 | } 121 | 122 | pub const ReadError = os.ReadError || Error; 123 | 124 | pub fn read(self: *Self, buf: []u8) ReadError!usize { 125 | while (true) { 126 | const num_bytes = self.tryRead(buf) catch |err| switch (err) { 127 | error.WouldBlock => { 128 | if (self.readable.wait() == CANCELLED) { 129 | return error.Cancelled; 130 | } 131 | continue; 132 | }, 133 | else => return err, 134 | }; 135 | 136 | return num_bytes; 137 | } 138 | } 139 | 140 | pub const RecvFromError = os.RecvFromError || Error; 141 | 142 | pub fn recv(self: *Self, buf: []u8, flags: u32) RecvFromError!usize { 143 | while (true) { 144 | const num_bytes = self.tryRecv(buf, flags) catch |err| switch (err) { 145 | error.WouldBlock => { 146 | if (self.readable.wait() == CANCELLED) { 147 | return error.Cancelled; 148 | } 149 | continue; 150 | }, 151 | else => return err, 152 | }; 153 | 154 | return num_bytes; 155 | } 156 | } 157 | 158 | pub const 
WriteError = os.WriteError || Error; 159 | 160 | pub fn write(self: *Self, buf: []const u8) WriteError!usize { 161 | while (true) { 162 | const num_bytes = self.tryWrite(buf) catch |err| switch (err) { 163 | error.WouldBlock => { 164 | if (self.writable.wait() == CANCELLED) { 165 | return error.Cancelled; 166 | } 167 | continue; 168 | }, 169 | else => return err, 170 | }; 171 | 172 | return num_bytes; 173 | } 174 | } 175 | 176 | pub const SendError = os.SendError || Error; 177 | 178 | pub fn send(self: *Self, buf: []const u8, flags: u32) SendError!usize { 179 | while (true) { 180 | const num_bytes = self.trySend(buf, flags) catch |err| switch (err) { 181 | error.WouldBlock => { 182 | if (self.writable.wait() == CANCELLED) { 183 | return error.Cancelled; 184 | } 185 | continue; 186 | }, 187 | else => return err, 188 | }; 189 | 190 | return num_bytes; 191 | } 192 | } 193 | 194 | pub fn Sender(comptime flags: i32) type { 195 | return io.Writer(*Self, SendError, struct { 196 | pub fn call(self: *Self, buf: []const u8) SendError!usize { 197 | return self.send(buf, flags); 198 | } 199 | }.call); 200 | } 201 | 202 | pub fn sender(self: *Self, comptime flags: i32) Sender(flags) { 203 | return Sender(flags){ .context = self }; 204 | } 205 | 206 | pub const ConnectError = os.ConnectError || Error; 207 | 208 | pub fn connect(self: *Self, address: net.Address) ConnectError!void { 209 | while (true) { 210 | return self.tryConnect(address) catch |err| switch (err) { 211 | error.WouldBlock => { 212 | if (self.writable.wait() == CANCELLED) { 213 | return error.Cancelled; 214 | } 215 | continue; 216 | }, 217 | else => return err, 218 | }; 219 | } 220 | } 221 | 222 | pub const AcceptError = os.AcceptError || Error; 223 | 224 | pub fn accept(self: *Self, flags: u32) AcceptError!Socket.Connection { 225 | while (true) { 226 | const connection = self.tryAccept(flags) catch |err| switch (err) { 227 | error.WouldBlock => { 228 | if (self.readable.wait() == CANCELLED) { 229 | return error.Cancelled; 230 | } 231 | continue; 232 | }, 233 | else => return err, 234 | }; 235 | 236 | return connection; 237 | } 238 | } 239 | 240 | pub fn cancel(self: *Self, how: enum { read, write, connect, accept, all }) void { 241 | switch (how) { 242 | .read, .accept => { 243 | if (self.readable.set(CANCELLED)) |runnable| { 244 | hyperia.pool.schedule(.{}, runnable); 245 | } 246 | }, 247 | .write, .connect => { 248 | if (self.writable.set(CANCELLED)) |runnable| { 249 | hyperia.pool.schedule(.{}, runnable); 250 | } 251 | }, 252 | .all => { 253 | var batch: zap.Pool.Batch = .{}; 254 | if (self.writable.set(CANCELLED)) |runnable| batch.push(runnable); 255 | if (self.readable.set(CANCELLED)) |runnable| batch.push(runnable); 256 | hyperia.pool.schedule(.{}, batch); 257 | }, 258 | } 259 | } 260 | 261 | pub fn onEvent(handle: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void { 262 | const self = @fieldParentPtr(AsyncSocket, "handle", handle); 263 | 264 | if (event.is_readable) { 265 | if (self.readable.set(READY)) |runnable| { 266 | batch.push(runnable); 267 | } 268 | } 269 | 270 | if (event.is_writable) { 271 | if (self.writable.set(READY)) |runnable| { 272 | batch.push(runnable); 273 | } 274 | } 275 | 276 | if (event.is_hup) { 277 | if (self.readable.set(READY)) |runnable| batch.push(runnable); 278 | if (self.writable.set(READY)) |runnable| batch.push(runnable); 279 | } 280 | } 281 | }; 282 | 283 | test { 284 | testing.refAllDecls(@This()); 285 | } 286 | 287 | test "socket/async" { 288 | hyperia.init(); 289 | defer 
hyperia.deinit(); 290 | 291 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 292 | defer reactor.deinit(); 293 | 294 | var a = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 295 | defer a.deinit(); 296 | 297 | try reactor.add(a.socket.fd, &a.handle, .{ .readable = true }); 298 | try reactor.poll(1, struct { 299 | expected_data: usize, 300 | 301 | pub fn call(self: @This(), event: Reactor.Event) void { 302 | testing.expectEqual( 303 | Reactor.Event{ 304 | .data = self.expected_data, 305 | .is_error = false, 306 | .is_hup = true, 307 | .is_readable = false, 308 | .is_writable = false, 309 | }, 310 | event, 311 | ); 312 | } 313 | }{ .expected_data = @ptrToInt(&a.handle) }, null); 314 | 315 | try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0)); 316 | try a.listen(128); 317 | 318 | const bound_address = try a.getName(); 319 | 320 | var b = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 321 | defer b.deinit(); 322 | 323 | try reactor.add(b.socket.fd, &b.handle, .{ .readable = true, .writable = true }); 324 | try reactor.poll(1, struct { 325 | expected_data: usize, 326 | 327 | pub fn call(self: @This(), event: Reactor.Event) void { 328 | testing.expectEqual( 329 | Reactor.Event{ 330 | .data = self.expected_data, 331 | .is_error = false, 332 | .is_hup = true, 333 | .is_readable = false, 334 | .is_writable = true, 335 | }, 336 | event, 337 | ); 338 | 339 | var batch: zap.Pool.Batch = .{}; 340 | defer hyperia.pool.schedule(.{}, batch); 341 | 342 | const handle = @intToPtr(*Reactor.Handle, event.data); 343 | handle.call(&batch, event); 344 | } 345 | }{ .expected_data = @ptrToInt(&b.handle) }, null); 346 | 347 | var connect_frame = async b.connect(bound_address); 348 | 349 | try reactor.poll(1, struct { 350 | expected_data: usize, 351 | 352 | pub fn call(self: @This(), event: Reactor.Event) void { 353 | testing.expectEqual( 354 | Reactor.Event{ 355 | .data = self.expected_data, 356 | .is_error = false, 357 | .is_hup = false, 358 | .is_readable = false, 359 | .is_writable = true, 360 | }, 361 | event, 362 | ); 363 | 364 | var batch: zap.Pool.Batch = .{}; 365 | defer hyperia.pool.schedule(.{}, batch); 366 | 367 | const handle = @intToPtr(*Reactor.Handle, event.data); 368 | handle.call(&batch, event); 369 | } 370 | }{ .expected_data = @ptrToInt(&b.handle) }, null); 371 | 372 | try nosuspend await connect_frame; 373 | 374 | try reactor.poll(1, struct { 375 | expected_data: usize, 376 | 377 | pub fn call(self: @This(), event: Reactor.Event) void { 378 | testing.expectEqual( 379 | Reactor.Event{ 380 | .data = self.expected_data, 381 | .is_error = false, 382 | .is_hup = false, 383 | .is_readable = true, 384 | .is_writable = false, 385 | }, 386 | event, 387 | ); 388 | 389 | var batch: zap.Pool.Batch = .{}; 390 | defer hyperia.pool.schedule(.{}, batch); 391 | 392 | const handle = @intToPtr(*Reactor.Handle, event.data); 393 | handle.call(&batch, event); 394 | } 395 | }{ .expected_data = @ptrToInt(&a.handle) }, null); 396 | 397 | var ab = try nosuspend a.accept(os.SOCK_CLOEXEC | os.SOCK_NONBLOCK); 398 | defer ab.socket.deinit(); 399 | } 400 | -------------------------------------------------------------------------------- /async_wait_group.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const mpsc = hyperia.mpsc; 6 | const testing = std.testing; 7 | 8 | pub const AsyncWaitGroup 
= struct { 9 | const Self = @This(); 10 | 11 | const Node = struct { 12 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 13 | frame: anyframe, 14 | 15 | pub fn run(runnable: *zap.Pool.Runnable) void { 16 | const self = @fieldParentPtr(Node, "runnable", runnable); 17 | resume self.frame; 18 | } 19 | }; 20 | 21 | state: usize = 0, 22 | event: mpsc.AsyncAutoResetEvent(void) = .{}, 23 | 24 | pub fn add(self: *Self, delta: usize) void { 25 | if (delta == 0) return; 26 | 27 | _ = @atomicRmw(usize, &self.state, .Add, delta, .Monotonic); 28 | } 29 | 30 | pub fn sub(self: *Self, delta: usize) void { 31 | if (delta == 0) return; 32 | if (@atomicRmw(usize, &self.state, .Sub, delta, .Release) - delta != 0) return; 33 | 34 | if (self.event.set()) |runnable| { 35 | hyperia.pool.schedule(.{}, runnable); 36 | } 37 | } 38 | 39 | pub fn wait(self: *Self) void { 40 | while (@atomicLoad(usize, &self.state, .Monotonic) != 0) { 41 | self.event.wait(); 42 | } 43 | } 44 | }; 45 | 46 | test { 47 | testing.refAllDecls(@This()); 48 | } 49 | -------------------------------------------------------------------------------- /async_wait_group_allocator.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const hyperia = @import("hyperia.zig"); 3 | 4 | const mem = std.mem; 5 | const testing = std.testing; 6 | 7 | const AsyncWaitGroup = hyperia.AsyncWaitGroup; 8 | 9 | pub const AsyncWaitGroupAllocator = struct { 10 | const Self = @This(); 11 | 12 | backing_allocator: *mem.Allocator, 13 | allocator: mem.Allocator = .{ 14 | .allocFn = alloc, 15 | .resizeFn = resize, 16 | }, 17 | wg: AsyncWaitGroup = .{}, 18 | 19 | fn alloc(allocator: *mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) mem.Allocator.Error![]u8 { 20 | const self = @fieldParentPtr(Self, "allocator", allocator); 21 | const bytes = try self.backing_allocator.allocFn(self.backing_allocator, len, ptr_align, len_align, ret_addr); 22 | self.wg.add(bytes.len); 23 | 24 | return bytes; 25 | } 26 | 27 | fn resize(allocator: *mem.Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) mem.Allocator.Error!usize { 28 | const self = @fieldParentPtr(Self, "allocator", allocator); 29 | const bytes_len = try self.backing_allocator.resizeFn(self.backing_allocator, buf, buf_align, new_len, len_align, ret_addr); 30 | if (bytes_len < buf.len) { 31 | self.wg.sub(buf.len - bytes_len); 32 | } else { 33 | self.wg.add(bytes_len - buf.len); 34 | } 35 | return bytes_len; 36 | } 37 | 38 | pub fn wait(self: *Self) void { 39 | self.wg.wait(); 40 | } 41 | }; 42 | 43 | test { 44 | testing.refAllDecls(@This()); 45 | } 46 | 47 | test "async_wait_group_allocator: wait for all allocations to be freed" { 48 | hyperia.init(); 49 | defer hyperia.deinit(); 50 | 51 | var wga = AsyncWaitGroupAllocator{ .backing_allocator = testing.allocator }; 52 | const allocator = &wga.allocator; 53 | 54 | var a = try allocator.alloc(u8, 16); 55 | var b = async wga.wait(); 56 | var c = try allocator.alloc(u8, 128); 57 | 58 | allocator.free(a); 59 | allocator.free(c); 60 | nosuspend await b; 61 | } 62 | -------------------------------------------------------------------------------- /blake3/blake3.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const c = @cImport(@cInclude("blake3.h")); 3 | 4 | const fmt = std.fmt; 5 | const mem = std.mem; 6 | const testing = std.testing; 7 | 8 | pub fn addTo(step: 
*std.build.LibExeObjStep, comptime dir: []const u8) void { 9 | step.linkLibC(); 10 | 11 | step.addIncludeDir(dir ++ "/lib/c"); 12 | 13 | var defines: std.ArrayListUnmanaged([]const u8) = .{}; 14 | defer defines.deinit(step.builder.allocator); 15 | 16 | if (std.Target.x86.featureSetHas(step.target.getCpuFeatures(), .sse2)) { 17 | step.addAssemblyFile(dir ++ "/lib/c/blake3_sse2_x86-64_unix.S"); 18 | } else { 19 | defines.append(step.builder.allocator, "-DBLAKE3_NO_SSE2") catch unreachable; 20 | } 21 | 22 | if (std.Target.x86.featureSetHas(step.target.getCpuFeatures(), .sse4_1)) { 23 | step.addAssemblyFile(dir ++ "/lib/c/blake3_sse41_x86-64_unix.S"); 24 | } else { 25 | defines.append(step.builder.allocator, "-DBLAKE3_NO_SSE41") catch unreachable; 26 | } 27 | 28 | if (std.Target.x86.featureSetHas(step.target.getCpuFeatures(), .avx2)) { 29 | step.addAssemblyFile(dir ++ "/lib/c/blake3_avx2_x86-64_unix.S"); 30 | } else { 31 | defines.append(step.builder.allocator, "-DBLAKE3_NO_AVX2") catch unreachable; 32 | } 33 | 34 | if (std.Target.x86.featureSetHasAll(step.target.getCpuFeatures(), .{ .avx512f, .avx512vl })) { 35 | step.addAssemblyFile(dir ++ "/lib/c/blake3_avx512_x86-64_unix.S"); 36 | } else { 37 | defines.append(step.builder.allocator, "-DBLAKE3_NO_AVX512") catch unreachable; 38 | } 39 | 40 | step.addCSourceFile(dir ++ "/lib/c/blake3.c", defines.items); 41 | step.addCSourceFile(dir ++ "/lib/c/blake3_dispatch.c", defines.items); 42 | step.addCSourceFile(dir ++ "/lib/c/blake3_portable.c", defines.items); 43 | } 44 | 45 | pub const Hasher = struct { 46 | state: c.blake3_hasher = undefined, 47 | 48 | pub fn init() callconv(.Inline) Hasher { 49 | var state: c.blake3_hasher = undefined; 50 | c.blake3_hasher_init(&state); 51 | 52 | return Hasher{ .state = state }; 53 | } 54 | 55 | pub fn update(self: *Hasher, buf: []const u8) callconv(.Inline) void { 56 | c.blake3_hasher_update(&self.state, buf.ptr, buf.len); 57 | } 58 | 59 | pub fn final(self: *Hasher, dst: []u8) callconv(.Inline) void { 60 | c.blake3_hasher_finalize(&self.state, dst.ptr, dst.len); 61 | } 62 | }; 63 | 64 | pub fn hash(buf: []const u8) callconv(.Inline) [32]u8 { 65 | var hasher = Hasher.init(); 66 | hasher.update(buf); 67 | 68 | var dst: [32]u8 = undefined; 69 | hasher.final(&dst); 70 | 71 | return dst; 72 | } 73 | 74 | test "blake3: hash 'hello world'" { 75 | try testing.expectFmt("d74981efa70a0c880b8d8c1985d075dbcbf679b99a5f9914e5aaf96b831a9e24", "{s}", .{fmt.fmtSliceHexLower(&hash("hello world"))}); 76 | } 77 | -------------------------------------------------------------------------------- /blake3/build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Pkg = std.build.Pkg; 3 | const Builder = std.build.Builder; 4 | 5 | pub fn build(b: *Builder) void { 6 | const target = b.standardTargetOptions(.{}); 7 | const mode = b.standardReleaseOptions(); 8 | 9 | const test_step = b.step("test", "Run library tests."); 10 | const test_filter = b.option([]const u8, "test-filter", "Test filter"); 11 | 12 | const file = b.addTest("blake3.zig"); 13 | file.setTarget(target); 14 | file.setBuildMode(mode); 15 | 16 | @import("blake3.zig").addTo(file, "."); 17 | 18 | if (test_filter != null) { 19 | file.setFilter(test_filter.?); 20 | } 21 | 22 | test_step.dependOn(&file.step); 23 | } 24 | -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = 
@import("std"); 2 | const Pkg = std.build.Pkg; 3 | const Builder = std.build.Builder; 4 | 5 | pub const pkgs = struct { 6 | pub const zap = Pkg{ 7 | .name = "zap", 8 | .path = "zap/src/zap.zig", 9 | }; 10 | 11 | pub const clap = Pkg{ 12 | .name = "clap", 13 | .path = "clap/clap.zig", 14 | }; 15 | 16 | pub const hyperia = Pkg{ 17 | .name = "hyperia", 18 | .path = "hyperia.zig", 19 | .dependencies = &[_]Pkg{ 20 | zap, 21 | }, 22 | }; 23 | }; 24 | 25 | pub fn register(step: *std.build.LibExeObjStep) void { 26 | step.linkLibC(); 27 | 28 | step.addPackage(pkgs.zap); 29 | step.addPackage(pkgs.clap); 30 | step.addPackage(pkgs.hyperia); 31 | 32 | @import("picohttp/picohttp.zig").addTo(step, "picohttp"); 33 | @import("blake3/blake3.zig").addTo(step, "blake3"); 34 | } 35 | 36 | pub fn build(b: *Builder) void { 37 | const target = b.standardTargetOptions(.{}); 38 | const mode = b.standardReleaseOptions(); 39 | 40 | const test_step = b.step("test", "Run library tests."); 41 | const test_filter = b.option([]const u8, "test-filter", "Test filter"); 42 | const sanitize_thread = b.option(bool, "sanitize-thread", "Enable ThreadSanitizer") orelse false; 43 | 44 | const file = b.addTest("hyperia.zig"); 45 | file.sanitize_thread = sanitize_thread; 46 | file.setTarget(target); 47 | file.setBuildMode(mode); 48 | register(file); 49 | 50 | if (test_filter != null) { 51 | file.setFilter(test_filter.?); 52 | } 53 | 54 | test_step.dependOn(&file.step); 55 | 56 | inline for (.{ 57 | "example_tcp_client", 58 | "example_tcp_server", 59 | "example_http_client", 60 | "example_http_server", 61 | }) |example_name| { 62 | const example_step = b.step(example_name, "Example " ++ example_name ++ ".zig"); 63 | 64 | const exe = b.addExecutable(example_name, example_name ++ ".zig"); 65 | exe.sanitize_thread = sanitize_thread; 66 | exe.setTarget(target); 67 | exe.setBuildMode(mode); 68 | register(exe); 69 | exe.install(); 70 | 71 | const exe_run = exe.run(); 72 | if (b.args) |args| exe_run.addArgs(args); 73 | 74 | example_step.dependOn(&exe_run.step); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /circuit_breaker.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const hyperia = @import("hyperia.zig"); 3 | 4 | const SpinLock = hyperia.sync.SpinLock; 5 | 6 | const math = std.math; 7 | const time = std.time; 8 | const testing = std.testing; 9 | 10 | pub const Options = struct { 11 | // max_concurrent_attempts: usize, // TODO(kenta): use a cancellable semaphore to limit max number of concurrent attempts 12 | failure_threshold: usize, 13 | reset_timeout: usize, 14 | }; 15 | 16 | pub fn CircuitBreaker(comptime opts: Options) type { 17 | return struct { 18 | const Self = @This(); 19 | 20 | pub const State = enum { 21 | closed, 22 | open, 23 | half_open, 24 | }; 25 | 26 | lock: SpinLock = .{}, 27 | failure_count: usize = 0, 28 | last_failure_time: usize = 0, 29 | 30 | pub fn init(state: State) Self { 31 | return switch (state) { 32 | .closed => .{}, 33 | .half_open => .{ .failure_count = math.maxInt(usize) }, 34 | .open => .{ .failure_count = math.maxInt(usize), .last_failure_time = math.maxInt(usize) }, 35 | }; 36 | } 37 | 38 | pub fn run(self: *Self, current_time: usize, closure: anytype) !void { 39 | switch (self.query()) { 40 | .closed, .half_open => { 41 | closure.call() catch { 42 | self.reportFailure(current_time); 43 | return error.Failed; 44 | }; 45 | 46 | self.reportSuccess(); 47 | }, 48 | .open => return 
error.Broken, 49 | } 50 | } 51 | 52 | pub fn query(self: *Self, current_time: usize) State { 53 | const held = self.lock.acquire(); 54 | defer held.release(); 55 | 56 | if (self.failure_count >= opts.failure_threshold) { 57 | if (math.sub(usize, current_time, self.last_failure_time) catch 0 <= opts.reset_timeout) { 58 | return .open; 59 | } 60 | return .half_open; 61 | } 62 | 63 | return .closed; 64 | } 65 | 66 | pub fn reportFailure(self: *Self, current_time: usize) void { 67 | const held = self.lock.acquire(); 68 | defer held.release(); 69 | 70 | self.failure_count = math.add(usize, self.failure_count, 1) catch opts.failure_threshold; 71 | self.last_failure_time = current_time; 72 | } 73 | 74 | pub fn reportSuccess(self: *Self) void { 75 | const held = self.lock.acquire(); 76 | defer held.release(); 77 | 78 | self.failure_count = 0; 79 | self.last_failure_time = 0; 80 | } 81 | }; 82 | } 83 | 84 | test { 85 | testing.refAllDecls(CircuitBreaker(.{ .failure_threshold = 10, .reset_timeout = 1000 })); 86 | } 87 | 88 | test "circuit_breaker: init and query state" { 89 | const TestBreaker = CircuitBreaker(.{ .failure_threshold = 10, .reset_timeout = 1000 }); 90 | 91 | const current_time = @intCast(usize, time.milliTimestamp()); 92 | testing.expect(TestBreaker.init(.open).query(current_time) == .open); 93 | testing.expect(TestBreaker.init(.closed).query(current_time) == .closed); 94 | testing.expect(TestBreaker.init(.half_open).query(current_time) == .half_open); 95 | } 96 | -------------------------------------------------------------------------------- /ctrl_c.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const hyperia = @import("hyperia.zig"); 3 | 4 | const os = std.os; 5 | const mem = std.mem; 6 | const mpmc = hyperia.mpmc; 7 | 8 | var event: mpmc.AsyncAutoResetEvent = .{}; 9 | 10 | var last_sigaction: os.Sigaction = .{ 11 | .handler = .{ .handler = null }, 12 | .mask = mem.zeroes(os.sigset_t), 13 | .flags = 0, 14 | }; 15 | 16 | pub fn init() void { 17 | var mask = mem.zeroes(os.sigset_t); 18 | os.linux.sigaddset(&mask, os.SIGINT); 19 | 20 | const sigaction = os.Sigaction{ 21 | .handler = .{ .handler = handler }, 22 | .mask = mask, 23 | .flags = 0, 24 | }; 25 | 26 | os.sigaction(os.SIGINT, &sigaction, &last_sigaction); 27 | } 28 | 29 | pub fn deinit() void { 30 | os.sigaction(os.SIGINT, &last_sigaction, null); 31 | cancel(); 32 | } 33 | 34 | pub fn wait() void { 35 | event.wait(); 36 | } 37 | 38 | pub fn cancel() void { 39 | while (true) { 40 | var batch = event.set(); 41 | if (batch.isEmpty()) break; 42 | hyperia.pool.schedule(.{}, batch); 43 | } 44 | } 45 | 46 | fn handler(signum: c_int) callconv(.C) void { 47 | if (signum != os.SIGINT) return; 48 | 49 | while (true) { 50 | var batch = event.set(); 51 | if (batch.isEmpty()) break; 52 | hyperia.pool.schedule(.{}, batch); 53 | } 54 | } 55 | 56 | test "ctrl_c: manually raise ctrl+c event" { 57 | hyperia.init(); 58 | defer hyperia.deinit(); 59 | 60 | init(); 61 | defer deinit(); 62 | 63 | var frame = async wait(); 64 | try os.raise(os.SIGINT); 65 | nosuspend await frame; 66 | } 67 | -------------------------------------------------------------------------------- /ed25519_donna/build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Pkg = std.build.Pkg; 3 | const Builder = std.build.Builder; 4 | 5 | pub fn build(b: *Builder) void { 6 | const target = b.standardTargetOptions(.{}); 7 | const 
mode = b.standardReleaseOptions(); 8 | 9 | const test_step = b.step("test", "Run library tests."); 10 | const test_filter = b.option([]const u8, "test-filter", "Test filter"); 11 | 12 | const file = b.addTest("ed25519.zig"); 13 | file.setTarget(target); 14 | file.setBuildMode(mode); 15 | 16 | @import("ed25519.zig").addTo(file, "."); 17 | 18 | if (test_filter != null) { 19 | file.setFilter(test_filter.?); 20 | } 21 | 22 | test_step.dependOn(&file.step); 23 | } 24 | -------------------------------------------------------------------------------- /ed25519_donna/ed25519.c: -------------------------------------------------------------------------------- 1 | #include <stdint.h> 2 | #include <stddef.h> 3 | 4 | typedef struct ed25519_hash_context { 5 | uint8_t s[200]; 6 | size_t offset; 7 | size_t rate; 8 | } ed25519_hash_context; 9 | 10 | void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); 11 | void ed25519_hash_init(ed25519_hash_context *ctx); 12 | void ed25519_hash_update(ed25519_hash_context *ctx, const uint8_t *in, size_t inlen); 13 | void ed25519_hash_final(ed25519_hash_context *ctx, uint8_t *hash); 14 | void ed25519_hash(uint8_t *hash, const uint8_t *in, size_t inlen); 15 | 16 | #include "lib/ed25519.c" -------------------------------------------------------------------------------- /ed25519_donna/ed25519.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const c = @cImport(@cInclude("ed25519.h")); 3 | 4 | const crypto = std.crypto; 5 | const testing = std.testing; 6 | 7 | export fn @"ed25519_randombytes_unsafe"(ptr: *c_void, len: usize) callconv(.C) void { 8 | crypto.random.bytes(@ptrCast([*]u8, ptr)[0..len]); 9 | } 10 | 11 | export fn @"ed25519_hash_init"(ctx: *crypto.hash.sha3.Sha3_512) callconv(.C) void { 12 | ctx.* = crypto.hash.sha3.Sha3_512.init(.{}); 13 | } 14 | 15 | export fn @"ed25519_hash_update"(ctx: *crypto.hash.sha3.Sha3_512, ptr: [*]const u8, len: usize) callconv(.C) void { 16 | ctx.update(ptr[0..len]); 17 | } 18 | 19 | export fn @"ed25519_hash_final"(ctx: *crypto.hash.sha3.Sha3_512, hash: [*]u8) callconv(.C) void { 20 | ctx.final(@ptrCast(*[64]u8, hash)); 21 | } 22 | 23 | export fn @"ed25519_hash"(hash: [*]u8, ptr: [*]const u8, len: usize) callconv(.C) void { 24 | crypto.hash.sha3.Sha3_512.hash(ptr[0..len], @ptrCast(*[64]u8, hash), .{}); 25 | } 26 | 27 | pub fn addTo(step: *std.build.LibExeObjStep, comptime dir: []const u8) void { 28 | step.linkLibC(); 29 | 30 | step.addIncludeDir(dir ++ "/lib"); 31 | 32 | var defines: std.ArrayListUnmanaged([]const u8) = .{}; 33 | defer defines.deinit(step.builder.allocator); 34 | 35 | defines.append(step.builder.allocator, "-DED25519_CUSTOMRANDOM") catch unreachable; 36 | defines.append(step.builder.allocator, "-DED25519_CUSTOMHASH") catch unreachable; 37 | 38 | if (std.Target.x86.featureSetHas(step.target.getCpuFeatures(), .sse2)) { 39 | defines.append(step.builder.allocator, "-DED25519_SSE2") catch unreachable; 40 | } 41 | 42 | step.addCSourceFile(dir ++ "/ed25519.c", defines.items); 43 | } 44 | 45 | pub fn derivePublicKey(secret_key: [32]u8) [32]u8 { 46 | var public_key: [32]u8 = undefined; 47 | c.ed25519_publickey(&secret_key, &public_key); 48 | return public_key; 49 | } 50 | 51 | pub fn sign(msg: []const u8, secret_key: [32]u8, public_key: [32]u8) [64]u8 { 52 | var signature: [64]u8 = undefined; 53 | c.ed25519_sign(msg.ptr, msg.len, &secret_key, &public_key, &signature); 54 | return signature; 55 | } 56 | 57 | pub fn open(msg: []const u8, public_key: [32]u8, signature: 
[64]u8) bool { 58 | return c.ed25519_sign_open(msg.ptr, msg.len, &public_key, &signature) == 0; 59 | } 60 | 61 | test "ed25519_donna" { 62 | var secret_key: [32]u8 = undefined; 63 | crypto.random.bytes(&secret_key); 64 | 65 | var public_key: [32]u8 = derivePublicKey(secret_key); 66 | var buf: [1024]u8 = undefined; 67 | 68 | var i: usize = 0; 69 | while (i < 100) : (i += 1) { 70 | testing.expect(open(&buf, public_key, sign(&buf, secret_key, public_key))); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /example_http_client.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia"); 4 | const picohttp = @import("picohttp"); 5 | 6 | const Timer = hyperia.Timer; 7 | const Reactor = hyperia.Reactor; 8 | const SpinLock = hyperia.sync.SpinLock; 9 | 10 | const AsyncSocket = hyperia.AsyncSocket; 11 | const CircuitBreaker = hyperia.CircuitBreaker; 12 | const AsyncWaitGroupAllocator = hyperia.AsyncWaitGroupAllocator; 13 | 14 | const os = std.os; 15 | const mem = std.mem; 16 | const net = std.net; 17 | const meta = std.meta; 18 | const time = std.time; 19 | const mpsc = hyperia.mpsc; 20 | const mpmc = hyperia.mpmc; 21 | 22 | const log = std.log.scoped(.client); 23 | const assert = std.debug.assert; 24 | 25 | usingnamespace hyperia.select; 26 | 27 | pub const log_level = .debug; 28 | 29 | const ConnectCircuitBreaker = CircuitBreaker(.{ .failure_threshold = 10, .reset_timeout = 1000 }); 30 | 31 | var stopped: bool = false; 32 | var clock: time.Timer = undefined; 33 | 34 | var reactor_event: Reactor.AutoResetEvent = undefined; 35 | var timer: Timer = undefined; 36 | 37 | pub const Frame = struct { 38 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 39 | frame: anyframe, 40 | 41 | fn run(runnable: *zap.Pool.Runnable) void { 42 | const self = @fieldParentPtr(Frame, "runnable", runnable); 43 | resume self.frame; 44 | } 45 | 46 | pub fn yield() void { 47 | var frame: Frame = .{ .frame = @frame() }; 48 | suspend hyperia.pool.schedule(.{}, &frame.runnable); 49 | } 50 | }; 51 | 52 | pub const Item = struct { 53 | req: picohttp.Request, 54 | body: []const u8 = &[_]u8{}, 55 | 56 | res: []const u8 = undefined, 57 | event: mpsc.AsyncAutoResetEvent(void) = .{}, 58 | 59 | next: ?*Item = null, 60 | }; 61 | 62 | pub const Client = struct { 63 | pub const WriteQueue = mpmc.AsyncQueue(*Item, 4096); 64 | 65 | pub const capacity = 4; 66 | 67 | pub const Waiter = struct { 68 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 69 | frame: anyframe, 70 | 71 | result: Connection.Error!void = undefined, 72 | next: ?*Waiter = undefined, 73 | 74 | pub fn run(runnable: *zap.Pool.Runnable) void { 75 | const self = @fieldParentPtr(Waiter, "runnable", runnable); 76 | resume self.frame; 77 | } 78 | }; 79 | 80 | pub const Connection = struct { 81 | pub const Error = os.SocketError || AsyncSocket.ConnectError || os.EpollCtlError || os.SetSockOptError; 82 | 83 | client: *Client, 84 | socket: AsyncSocket, 85 | connected: bool = false, 86 | frame: @Frame(Connection.run), 87 | 88 | pub fn deinit(self: *Connection) void { 89 | self.client.wga.allocator.destroy(self); 90 | } 91 | 92 | pub fn run(self: *Connection) !void { 93 | defer { 94 | suspend self.deinit(); 95 | } 96 | 97 | var num_attempts: usize = 0; 98 | 99 | while (true) : (num_attempts += 1) { 100 | Frame.yield(); 101 | 102 | if (self.client.connect_circuit.query(@intCast(usize, time.milliTimestamp())) 
== .open) { 103 | assert(!self.client.reportConnectError(self, true, error.NetworkUnreachable)); 104 | return; 105 | } 106 | 107 | if (num_attempts > 0) { 108 | log.info("{*} reconnection attempt {}", .{ self, num_attempts }); 109 | } 110 | 111 | self.connect() catch |err| { 112 | if (!self.client.reportConnectError(self, false, err)) { 113 | return; 114 | } 115 | continue; 116 | }; 117 | defer self.socket.deinit(); 118 | 119 | if (!self.client.reportConnected(self)) { 120 | return; 121 | } 122 | 123 | num_attempts = 0; 124 | 125 | var popper = self.client.queue.popper(); 126 | 127 | var read_frame = async self.readLoop(); 128 | var write_frame = async self.writeLoop(&popper); 129 | 130 | _ = await read_frame; 131 | hyperia.pool.schedule(.{}, popper.cancel()); 132 | _ = await write_frame; 133 | 134 | if (self.client.reportDisconnected(self)) { 135 | return; 136 | } 137 | } 138 | } 139 | 140 | fn readLoop(self: *Connection) !void { 141 | defer log.info("{*} read loop ended", .{self}); 142 | 143 | var buf: [1024]u8 = undefined; 144 | while (true) { 145 | const num_bytes = try self.socket.read(&buf); 146 | if (num_bytes == 0) return; 147 | } 148 | } 149 | 150 | fn writeLoop(self: *Connection, popper: *WriteQueue.Popper) !void { 151 | defer log.info("{*} write loop ended", .{self}); 152 | 153 | while (true) { 154 | const item: *Item = popper.pop() orelse return; 155 | // errdefer ... 156 | 157 | const sender = self.socket.sender(os.MSG_NOSIGNAL); 158 | 159 | try sender.print("{s} {s} HTTP/1.{d}\r\n", .{ 160 | item.req.method, 161 | item.req.path, 162 | item.req.minor_version, 163 | }); 164 | 165 | for (item.req.headers) |header| { 166 | if (header.isMultiline()) { 167 | try sender.print(" {s}\r\n", .{header.value}); 168 | } else { 169 | try sender.print("{s}: {s}\r\n", .{ header.name, header.value }); 170 | } 171 | } 172 | 173 | try sender.print("\r\n{s}", .{item.body}); 174 | 175 | Frame.yield(); 176 | } 177 | } 178 | 179 | fn connect(self: *Connection) Error!void { 180 | self.socket = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 181 | errdefer self.socket.deinit(); 182 | 183 | try self.socket.setNoDelay(true); 184 | 185 | try self.client.reactor.add(self.socket.socket.fd, &self.socket.handle, Reactor.Interest{ 186 | .readable = true, 187 | .writable = true, 188 | }); 189 | 190 | try self.socket.connect(self.client.address); 191 | } 192 | }; 193 | 194 | lock: SpinLock = .{}, 195 | 196 | address: net.Address, 197 | reactor: Reactor, 198 | 199 | pool: [*]*Connection, 200 | len: usize = 0, 201 | 202 | queue: WriteQueue, 203 | wga: AsyncWaitGroupAllocator, 204 | waiters: ?*Waiter = null, 205 | 206 | closed: bool = false, 207 | connect_circuit: ConnectCircuitBreaker = ConnectCircuitBreaker.init(.half_open), 208 | 209 | pub fn init(allocator: *mem.Allocator, reactor: Reactor, address: net.Address) !Client { 210 | const pool = try allocator.create([capacity]*Connection); 211 | errdefer allocator.destroy(pool); 212 | 213 | const queue = try WriteQueue.init(allocator); 214 | errdefer queue.deinit(allocator); 215 | 216 | return Client{ 217 | .address = address, 218 | .reactor = reactor, 219 | .pool = pool, 220 | .queue = queue, 221 | .wga = .{ .backing_allocator = allocator }, 222 | }; 223 | } 224 | 225 | pub fn deinit(self: *Client, allocator: *mem.Allocator) void { 226 | self.wga.wait(); 227 | self.queue.deinit(allocator); 228 | allocator.destroy(@ptrCast(*const [capacity]*Connection, self.pool)); 229 | } 230 | 231 | pub fn close(self: *Client) void { 232 | 
self.queue.close(); 233 | 234 | const held = self.lock.acquire(); 235 | defer held.release(); 236 | 237 | self.closed = true; 238 | 239 | for (self.pool[0..self.len]) |conn| { 240 | if (conn.connected) { 241 | conn.socket.shutdown(.both) catch {}; 242 | log.info("{*} signalled to shutdown", .{conn}); 243 | } 244 | } 245 | } 246 | 247 | pub fn request(self: *Client, req: picohttp.Request, body: []const u8) !void { 248 | try self.ensureConnectionAvailable(); 249 | 250 | var item: Item = .{ .req = req, .body = body }; 251 | if (!self.queue.pusher().push(&item)) return error.Closed; 252 | item.event.wait(); 253 | 254 | // TODO(kenta): parse response here 255 | } 256 | 257 | /// Lock must be held. Allocates a new connection and registers it 258 | /// to this clients' pool of connections. 259 | fn spawn(self: *Client) !*Connection { 260 | const conn = try self.wga.allocator.create(Connection); 261 | errdefer self.wga.allocator.destroy(conn); 262 | 263 | conn.* = .{ 264 | .client = self, 265 | .socket = undefined, 266 | .frame = undefined, 267 | }; 268 | 269 | self.pool[self.len] = conn; 270 | self.len += 1; 271 | 272 | log.info("{*} got spawned", .{conn}); 273 | 274 | return conn; 275 | } 276 | 277 | pub const PoolResult = union(enum) { 278 | available: void, 279 | spawned_pending: *Connection, 280 | spawned_available: *Connection, 281 | pending: void, 282 | }; 283 | 284 | fn queryPool(self: *Client) !PoolResult { 285 | if (self.len == 0) { 286 | return PoolResult{ .spawned_pending = try self.spawn() }; 287 | } 288 | 289 | const pool = self.pool[0..self.len]; 290 | 291 | const any_connected = for (pool) |conn| { 292 | if (conn.connected) break true; 293 | } else false; 294 | 295 | if (self.queue.count() == 0 or pool.len == capacity) { 296 | return if (any_connected) PoolResult.available else PoolResult.pending; 297 | } 298 | 299 | if (any_connected) { 300 | return PoolResult{ .spawned_available = try self.spawn() }; 301 | } 302 | return PoolResult{ .spawned_pending = try self.spawn() }; 303 | } 304 | 305 | pub fn ensureConnectionAvailable(self: *Client) !void { 306 | const held = self.lock.acquire(); 307 | 308 | if (self.closed) { 309 | held.release(); 310 | return error.Closed; 311 | } 312 | 313 | const result = self.queryPool() catch |err| { 314 | held.release(); 315 | return err; 316 | }; 317 | 318 | if (result == .available) { 319 | held.release(); 320 | return; 321 | } 322 | 323 | if (result == .spawned_available) { 324 | held.release(); 325 | result.spawned_available.frame = async result.spawned_available.run(); 326 | return; 327 | } 328 | 329 | var waiter: Waiter = .{ .frame = @frame() }; 330 | 331 | suspend { 332 | waiter.next = self.waiters; 333 | self.waiters = &waiter; 334 | held.release(); 335 | 336 | if (result == .spawned_pending) { 337 | result.spawned_pending.frame = async result.spawned_pending.run(); 338 | } 339 | } 340 | 341 | return waiter.result; 342 | } 343 | 344 | /// Lock must be held, and connection must exist in the pool. 
345 | fn deregister(self: *Client, conn: *Connection) void { 346 | const pool = self.pool[0..self.len]; 347 | const i = mem.indexOfScalar(*Connection, pool, conn) orelse unreachable; 348 | 349 | log.info("connection {*} was released", .{conn}); 350 | 351 | if (i == self.len - 1) { 352 | pool[i] = undefined; 353 | self.len -= 1; 354 | return; 355 | } 356 | 357 | pool[i] = pool[self.len - 1]; 358 | self.len -= 1; 359 | } 360 | 361 | fn reportConnected(self: *Client, conn: *Connection) bool { 362 | log.info("{*} successfully connected", .{conn}); 363 | 364 | const batch = collected: { 365 | var batch: zap.Pool.Batch = .{}; 366 | 367 | const held = self.lock.acquire(); 368 | defer held.release(); 369 | 370 | assert(!conn.connected); 371 | 372 | if (self.closed) { 373 | self.deregister(conn); 374 | return false; 375 | } 376 | 377 | conn.connected = true; 378 | self.connect_circuit.reportSuccess(); 379 | 380 | while (self.waiters) |waiter| : (self.waiters = waiter.next) { 381 | waiter.result = {}; 382 | batch.push(&waiter.runnable); 383 | } 384 | 385 | break :collected batch; 386 | }; 387 | 388 | hyperia.pool.schedule(.{}, batch); 389 | return true; 390 | } 391 | 392 | fn reportConnectError( 393 | self: *Client, 394 | conn: *Connection, 395 | unrecoverable: bool, 396 | err: Connection.Error, 397 | ) bool { 398 | const batch = collect: { 399 | var batch: zap.Pool.Batch = .{}; 400 | 401 | const held = self.lock.acquire(); 402 | defer held.release(); 403 | 404 | assert(!conn.connected); 405 | 406 | if (!unrecoverable) { 407 | log.info("{*} got an error while connecting: {}", .{ conn, err }); 408 | } 409 | 410 | self.connect_circuit.reportFailure(@intCast(usize, time.milliTimestamp())); 411 | 412 | if (self.len > 1) { 413 | self.deregister(conn); 414 | return false; 415 | } 416 | 417 | if (unrecoverable) { 418 | self.deregister(conn); 419 | } 420 | 421 | while (self.waiters) |waiter| : (self.waiters = waiter.next) { 422 | waiter.result = err; 423 | batch.push(&waiter.runnable); 424 | } 425 | 426 | break :collect batch; 427 | }; 428 | 429 | hyperia.pool.schedule(.{}, batch); 430 | 431 | return !unrecoverable; 432 | } 433 | 434 | fn reportDisconnected(self: *Client, conn: *Connection) bool { 435 | const held = self.lock.acquire(); 436 | defer held.release(); 437 | 438 | assert(conn.connected); 439 | conn.connected = false; 440 | 441 | log.info("{*} disconnected", .{conn}); 442 | 443 | if (self.closed or self.len > 1) { 444 | self.deregister(conn); 445 | return true; 446 | } 447 | 448 | return false; 449 | } 450 | }; 451 | 452 | fn runBenchmark(client: *Client) !void { 453 | defer log.info("done", .{}); 454 | 455 | var i: usize = 0; 456 | while (true) : (i +%= 1) { 457 | // await async client.write("message\n") catch |err| switch (err) { 458 | // error.Closed => return, 459 | // else => return err, 460 | // }; 461 | 462 | if (i % 50 == 0) Frame.yield(); 463 | } 464 | } 465 | 466 | fn runApp(reactor: Reactor) !void { 467 | defer { 468 | @atomicStore(bool, &stopped, true, .Release); 469 | reactor_event.post(); 470 | } 471 | 472 | const address = net.Address.initIp4(.{ 0, 0, 0, 0 }, 9000); 473 | 474 | var client = try Client.init(hyperia.allocator, reactor, address); 475 | defer client.deinit(hyperia.allocator); 476 | 477 | const Cases = struct { 478 | benchmark: struct { 479 | run: Case(runBenchmark), 480 | cancel: Case(Client.close), 481 | }, 482 | ctrl_c: struct { 483 | run: Case(hyperia.ctrl_c.wait), 484 | cancel: Case(hyperia.ctrl_c.cancel), 485 | }, 486 | }; 487 | 488 | switch (select( 489 | Cases{ 
490 | .benchmark = .{ 491 | .run = call(runBenchmark, .{&client}), 492 | .cancel = call(Client.close, .{&client}), 493 | }, 494 | .ctrl_c = .{ 495 | .run = call(hyperia.ctrl_c.wait, .{}), 496 | .cancel = call(hyperia.ctrl_c.cancel, .{}), 497 | }, 498 | }, 499 | )) { 500 | .benchmark => |result| return result, 501 | .ctrl_c => return log.info("got ctrl+c", .{}), 502 | } 503 | } 504 | 505 | pub fn main() !void { 506 | hyperia.init(); 507 | defer hyperia.deinit(); 508 | 509 | hyperia.ctrl_c.init(); 510 | defer hyperia.ctrl_c.deinit(); 511 | 512 | clock = try time.Timer.start(); 513 | 514 | timer = Timer.init(hyperia.allocator); 515 | defer timer.deinit(hyperia.allocator); 516 | 517 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 518 | defer reactor.deinit(); 519 | 520 | reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 521 | defer reactor_event.deinit(); 522 | 523 | try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); 524 | 525 | var frame = async runApp(reactor); 526 | 527 | while (!@atomicLoad(bool, &stopped, .Acquire)) { 528 | var batch: zap.Pool.Batch = .{}; 529 | defer hyperia.pool.schedule(.{}, batch); 530 | 531 | try reactor.poll(128, struct { 532 | batch: *zap.Pool.Batch, 533 | 534 | pub fn call(self: @This(), event: Reactor.Event) void { 535 | const handle = @intToPtr(*Reactor.Handle, event.data); 536 | handle.call(self.batch, event); 537 | } 538 | }{ .batch = &batch }, if (timer.delay(clock.read())) |delay| @intCast(u64, delay) else null); 539 | 540 | timer.update(clock.read(), struct { 541 | batch: *zap.Pool.Batch, 542 | 543 | pub fn call(self: @This(), handle: *Timer.Handle) void { 544 | self.batch.push(handle.set()); 545 | } 546 | }{ .batch = &batch }); 547 | } 548 | 549 | try nosuspend await frame; 550 | 551 | log.info("good bye!", .{}); 552 | } 553 | -------------------------------------------------------------------------------- /example_http_server.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia"); 4 | const picohttp = @import("picohttp"); 5 | const Reactor = hyperia.Reactor; 6 | const AsyncSocket = hyperia.AsyncSocket; 7 | const AsyncWaitGroupAllocator = hyperia.AsyncWaitGroupAllocator; 8 | 9 | const os = std.os; 10 | const net = std.net; 11 | const mem = std.mem; 12 | const math = std.math; 13 | const mpsc = hyperia.mpsc; 14 | const log = std.log.scoped(.server); 15 | 16 | usingnamespace hyperia.select; 17 | 18 | pub const log_level = .debug; 19 | 20 | var stopped: bool = false; 21 | 22 | var pool: hyperia.ObjectPool(mpsc.Sink([]const u8).Node, 4096) = undefined; 23 | 24 | pub const Server = struct { 25 | pub const Connection = struct { 26 | server: *Server, 27 | socket: AsyncSocket, 28 | address: net.Address, 29 | frame: @Frame(Connection.start), 30 | queue: mpsc.AsyncSink([]const u8) = .{}, 31 | 32 | pub fn start(self: *Connection) !void { 33 | defer { 34 | // log.info("{} has disconnected", .{self.address}); 35 | 36 | if (self.server.deregister(self.address)) { 37 | suspend { 38 | self.cleanup(); 39 | self.socket.deinit(); 40 | self.server.wga.allocator.destroy(self); 41 | } 42 | } 43 | } 44 | 45 | const Cases = struct { 46 | write: struct { 47 | run: Case(Connection.writeLoop), 48 | cancel: Case(mpsc.AsyncSink([]const u8).close), 49 | }, 50 | read: struct { 51 | run: Case(Connection.readLoop), 52 | cancel: Case(AsyncSocket.cancel), 53 | }, 54 | }; 55 | 56 | switch (select( 57 | Cases{ 58 | 
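// Closing the connection's MPSC sink unblocks writeLoop's popBatch(), while
// cancelling the pending read unblocks readLoop's recv(): whichever loop
// finishes first cleanly tears the other one down.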
.write = .{ 59 | .run = call(Connection.writeLoop, .{self}), 60 | .cancel = call(mpsc.AsyncSink([]const u8).close, .{&self.queue}), 61 | }, 62 | .read = .{ 63 | .run = call(Connection.readLoop, .{self}), 64 | .cancel = call(AsyncSocket.cancel, .{ &self.socket, .read }), 65 | }, 66 | }, 67 | )) { 68 | .write => |result| return result, 69 | .read => |result| return result, 70 | } 71 | } 72 | 73 | pub fn cleanup(self: *Connection) void { 74 | var first: *mpsc.Sink([]const u8).Node = undefined; 75 | var last: *mpsc.Sink([]const u8).Node = undefined; 76 | 77 | while (true) { 78 | var num_items = self.queue.tryPopBatch(&first, &last); 79 | if (num_items == 0) break; 80 | 81 | while (num_items > 0) : (num_items -= 1) { 82 | const next = first.next; 83 | pool.release(hyperia.allocator, first); 84 | first = next orelse continue; 85 | } 86 | } 87 | } 88 | 89 | pub fn writeLoop(self: *Connection) !void { 90 | var first: *mpsc.Sink([]const u8).Node = undefined; 91 | var last: *mpsc.Sink([]const u8).Node = undefined; 92 | 93 | while (true) { 94 | const num_items = self.queue.popBatch(&first, &last); 95 | if (num_items == 0) return; 96 | 97 | var i: usize = 0; 98 | defer while (i < num_items) : (i += 1) { 99 | const next = first.next; 100 | pool.release(hyperia.allocator, first); 101 | first = next orelse continue; 102 | }; 103 | 104 | while (i < num_items) : (i += 1) { 105 | var index: usize = 0; 106 | while (index < first.value.len) { 107 | index += try self.socket.send(first.value[index..], os.MSG_NOSIGNAL); 108 | } 109 | 110 | const next = first.next; 111 | pool.release(hyperia.allocator, first); 112 | first = next orelse continue; 113 | } 114 | } 115 | } 116 | 117 | pub fn readLoop(self: *Connection) !void { 118 | var buf = std.ArrayList(u8).init(hyperia.allocator); 119 | defer buf.deinit(); 120 | 121 | var headers: [128]picohttp.Header = undefined; 122 | 123 | while (true) { 124 | const old_len = buf.items.len; 125 | try buf.ensureCapacity(old_len + 4096); // always keep at least 4096 bytes free, so requests larger than one buffer do not starve the read below 126 | buf.items.len = buf.capacity; 127 | 128 | const num_bytes = try self.socket.recv(buf.items[old_len..], os.MSG_NOSIGNAL); 129 | if (num_bytes == 0) return; 130 | 131 | buf.items.len = old_len + num_bytes; 132 | 133 | const search_start = old_len - math.min(old_len, 4); const end_idx = search_start + (mem.indexOf(u8, buf.items[search_start..], "\r\n\r\n") orelse continue); // end_idx must be absolute: indexOf returns an offset relative to search_start 134 | 135 | const req = try picohttp.Request.parse(buf.items[0 ..
end_idx + 4], &headers); 136 | 137 | // std.debug.print("Method: {s}\n", .{req.method}); 138 | // std.debug.print("Path: {s}\n", .{req.path}); 139 | // std.debug.print("Minor Version: {}\n", .{req.minor_version}); 140 | 141 | // for (req.headers) |header| { 142 | // std.debug.print("{}\n", .{header}); 143 | // } 144 | 145 | const RES = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nContent-Length: 12\r\n\r\nHello world!"; 146 | 147 | const node = try pool.acquire(hyperia.allocator); 148 | node.* = .{ .value = RES }; 149 | self.queue.push(node); 150 | 151 | buf.items.len = 0; 152 | } 153 | } 154 | }; 155 | 156 | listener: AsyncSocket, 157 | 158 | wga: AsyncWaitGroupAllocator, 159 | lock: std.Thread.Mutex = .{}, 160 | connections: std.AutoArrayHashMapUnmanaged(os.sockaddr, *Connection) = .{}, 161 | 162 | pub fn init(allocator: *mem.Allocator) Server { 163 | return Server{ 164 | .listener = undefined, 165 | .wga = .{ .backing_allocator = allocator }, 166 | }; 167 | } 168 | 169 | pub fn deinit(self: *Server, allocator: *mem.Allocator) void { 170 | { 171 | const held = self.lock.acquire(); 172 | defer held.release(); 173 | 174 | for (self.connections.items()) |entry| { 175 | log.info("closing {}", .{entry.value.address}); 176 | entry.value.socket.shutdown(.both) catch {}; 177 | } 178 | } 179 | 180 | self.wga.wait(); 181 | self.connections.deinit(allocator); 182 | } 183 | 184 | pub fn close(self: *Server) void { 185 | self.listener.shutdown(.recv) catch {}; 186 | } 187 | 188 | pub fn start(self: *Server, reactor: Reactor, address: net.Address) !void { 189 | self.listener = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 190 | errdefer self.listener.deinit(); 191 | 192 | try reactor.add(self.listener.socket.fd, &self.listener.handle, .{ .readable = true }); 193 | 194 | try self.listener.setReuseAddress(true); 195 | try self.listener.setReusePort(true); 196 | try self.listener.setNoDelay(true); 197 | try self.listener.setFastOpen(true); 198 | try self.listener.setQuickAck(true); 199 | 200 | try self.listener.bind(address); 201 | try self.listener.listen(128); 202 | 203 | log.info("listening for connections on: {}", .{try self.listener.getName()}); 204 | } 205 | 206 | fn accept(self: *Server, allocator: *mem.Allocator, reactor: Reactor) !void { 207 | while (true) { 208 | var conn = try self.listener.accept(os.SOCK_CLOEXEC | os.SOCK_NONBLOCK); 209 | errdefer conn.socket.deinit(); 210 | 211 | try conn.socket.setNoDelay(true); 212 | 213 | // log.info("got connection: {}", .{conn.address}); 214 | 215 | const wga_allocator = &self.wga.allocator; 216 | 217 | const connection = try wga_allocator.create(Connection); 218 | errdefer wga_allocator.destroy(connection); 219 | 220 | connection.server = self; 221 | connection.socket = AsyncSocket.from(conn.socket); 222 | connection.address = conn.address; 223 | connection.queue = .{}; 224 | 225 | try reactor.add(conn.socket.fd, &connection.socket.handle, .{ .readable = true, .writable = true }); 226 | 227 | { 228 | const held = self.lock.acquire(); 229 | defer held.release(); 230 | 231 | try self.connections.put(allocator, connection.address.any, connection); 232 | } 233 | 234 | connection.frame = async connection.start(); 235 | } 236 | } 237 | 238 | fn deregister(self: *Server, address: net.Address) bool { 239 | const held = self.lock.acquire(); 240 | defer held.release(); 241 | 242 | const entry = self.connections.swapRemove(address.any) orelse return false; 243 | return true; 244 | } 245 | }; 246 | 247 | pub 
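// runApp races the accept loop against Ctrl+C; its deferred block flips the
// global `stopped` flag and posts `reactor_event` so that the poll loop in
// main() notices the shutdown even when no I/O is pending.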
fn runApp(reactor: Reactor, reactor_event: *Reactor.AutoResetEvent) !void { 248 | defer { 249 | log.info("shutting down...", .{}); 250 | @atomicStore(bool, &stopped, true, .Release); 251 | reactor_event.post(); 252 | } 253 | 254 | var server = Server.init(hyperia.allocator); 255 | defer server.deinit(hyperia.allocator); 256 | 257 | const address = net.Address.initIp4(.{ 0, 0, 0, 0 }, 9000); 258 | try server.start(reactor, address); 259 | 260 | const Cases = struct { 261 | accept: struct { 262 | run: Case(Server.accept), 263 | cancel: Case(Server.close), 264 | }, 265 | ctrl_c: struct { 266 | run: Case(hyperia.ctrl_c.wait), 267 | cancel: Case(hyperia.ctrl_c.cancel), 268 | }, 269 | }; 270 | 271 | switch (select( 272 | Cases{ 273 | .accept = .{ 274 | .run = call(Server.accept, .{ &server, hyperia.allocator, reactor }), 275 | .cancel = call(Server.close, .{&server}), 276 | }, 277 | .ctrl_c = .{ 278 | .run = call(hyperia.ctrl_c.wait, .{}), 279 | .cancel = call(hyperia.ctrl_c.cancel, .{}), 280 | }, 281 | }, 282 | )) { 283 | .accept => |result| return result, 284 | .ctrl_c => |result| return result, 285 | } 286 | } 287 | 288 | pub fn main() !void { 289 | hyperia.init(); 290 | defer hyperia.deinit(); 291 | 292 | hyperia.ctrl_c.init(); 293 | defer hyperia.ctrl_c.deinit(); 294 | 295 | pool = try hyperia.ObjectPool(mpsc.Sink([]const u8).Node, 4096).init(hyperia.allocator); 296 | defer pool.deinit(hyperia.allocator); 297 | 298 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 299 | defer reactor.deinit(); 300 | 301 | var reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 302 | defer reactor_event.deinit(); 303 | 304 | try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); 305 | 306 | var frame = async runApp(reactor, &reactor_event); 307 | 308 | while (!@atomicLoad(bool, &stopped, .Acquire)) { 309 | const EventProcessor = struct { 310 | batch: zap.Pool.Batch = .{}, 311 | 312 | pub fn call(self: *@This(), event: Reactor.Event) void { 313 | const handle = @intToPtr(*Reactor.Handle, event.data); 314 | handle.call(&self.batch, event); 315 | } 316 | }; 317 | 318 | var processor: EventProcessor = .{}; 319 | defer hyperia.pool.schedule(.{}, processor.batch); 320 | 321 | try reactor.poll(128, &processor, null); 322 | } 323 | 324 | try nosuspend await frame; 325 | 326 | log.info("good bye!", .{}); 327 | } 328 | -------------------------------------------------------------------------------- /example_tcp_client.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia"); 4 | 5 | const Timer = hyperia.Timer; 6 | const Reactor = hyperia.Reactor; 7 | const SpinLock = hyperia.sync.SpinLock; 8 | 9 | const AsyncSocket = hyperia.AsyncSocket; 10 | const CircuitBreaker = hyperia.CircuitBreaker; 11 | const AsyncWaitGroupAllocator = hyperia.AsyncWaitGroupAllocator; 12 | 13 | const os = std.os; 14 | const mem = std.mem; 15 | const net = std.net; 16 | const meta = std.meta; 17 | const time = std.time; 18 | const mpmc = hyperia.mpmc; 19 | 20 | const log = std.log.scoped(.client); 21 | const assert = std.debug.assert; 22 | 23 | usingnamespace hyperia.select; 24 | 25 | pub const log_level = .debug; 26 | 27 | const ConnectCircuitBreaker = CircuitBreaker(.{ .failure_threshold = 10, .reset_timeout = 1000 }); 28 | 29 | var stopped: bool = false; 30 | var clock: time.Timer = undefined; 31 | 32 | var reactor_event: Reactor.AutoResetEvent = undefined; 33 | var timer: Timer 
= undefined; 34 | 35 | pub const Frame = struct { 36 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 37 | frame: anyframe, 38 | 39 | fn run(runnable: *zap.Pool.Runnable) void { 40 | const self = @fieldParentPtr(Frame, "runnable", runnable); 41 | resume self.frame; 42 | } 43 | 44 | pub fn yield() void { 45 | var frame: Frame = .{ .frame = @frame() }; 46 | suspend hyperia.pool.schedule(.{}, &frame.runnable); 47 | } 48 | }; 49 | 50 | pub const Client = struct { 51 | pub const WriteQueue = mpmc.AsyncQueue([]const u8, 4096); 52 | 53 | pub const capacity = 4; 54 | 55 | pub const Waiter = struct { 56 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 57 | frame: anyframe, 58 | 59 | result: Connection.Error!void = undefined, 60 | next: ?*Waiter = undefined, 61 | 62 | pub fn run(runnable: *zap.Pool.Runnable) void { 63 | const self = @fieldParentPtr(Waiter, "runnable", runnable); 64 | resume self.frame; 65 | } 66 | }; 67 | 68 | pub const Connection = struct { 69 | pub const Error = os.SocketError || AsyncSocket.ConnectError || os.EpollCtlError || os.SetSockOptError; 70 | 71 | client: *Client, 72 | socket: AsyncSocket, 73 | connected: bool = false, 74 | frame: @Frame(Connection.run), 75 | 76 | pub fn deinit(self: *Connection) void { 77 | self.client.wga.allocator.destroy(self); 78 | } 79 | 80 | pub fn run(self: *Connection) !void { 81 | defer { 82 | suspend self.deinit(); 83 | } 84 | 85 | var num_attempts: usize = 0; 86 | 87 | while (true) : (num_attempts += 1) { 88 | Frame.yield(); 89 | 90 | const connect_circuit_breaker_status = self.client.connect_circuit.query(@intCast(usize, time.milliTimestamp())); 91 | if (connect_circuit_breaker_status == .open) { 92 | assert(!self.client.reportConnectError(self, true, error.NetworkUnreachable)); 93 | return; 94 | } 95 | 96 | if (num_attempts > 0) { 97 | log.info("{*} reconnection attempt {}", .{ self, num_attempts }); 98 | } 99 | 100 | self.connect() catch |err| { 101 | if (!self.client.reportConnectError(self, false, err)) { 102 | return; 103 | } 104 | continue; 105 | }; 106 | defer self.socket.deinit(); 107 | 108 | if (!self.client.reportConnected(self)) { 109 | return; 110 | } 111 | 112 | num_attempts = 0; 113 | 114 | var popper = self.client.queue.popper(); 115 | 116 | var read_frame = async self.readLoop(); 117 | var write_frame = async self.writeLoop(&popper); 118 | 119 | _ = await read_frame; 120 | hyperia.pool.schedule(.{}, popper.cancel()); 121 | _ = await write_frame; 122 | 123 | if (self.client.reportDisconnected(self)) { 124 | return; 125 | } 126 | } 127 | } 128 | 129 | fn readLoop(self: *Connection) !void { 130 | defer log.info("{*} read loop ended", .{self}); 131 | 132 | var buf: [1024]u8 = undefined; 133 | while (true) { 134 | const num_bytes = try self.socket.read(&buf); 135 | if (num_bytes == 0) return; 136 | } 137 | } 138 | 139 | fn writeLoop(self: *Connection, popper: *WriteQueue.Popper) !void { 140 | defer log.info("{*} write loop ended", .{self}); 141 | 142 | const sender = self.socket.sender(os.MSG_NOSIGNAL); 143 | 144 | while (true) { 145 | const buf = popper.pop() orelse return; 146 | try sender.writeAll(buf); 147 | Frame.yield(); 148 | } 149 | } 150 | 151 | fn connect(self: *Connection) Error!void { 152 | self.socket = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 153 | errdefer self.socket.deinit(); 154 | 155 | try self.socket.setNoDelay(true); 156 | 157 | try self.client.reactor.add(self.socket.socket.fd, &self.socket.handle, Reactor.Interest{ 158 | .readable = true, 159 | .writable = true, 
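// Registering interest in writability up front is what lets the reactor
// observe the non-blocking connect() below completing (or failing).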
160 | }); 161 | 162 | try self.socket.connect(self.client.address); 163 | } 164 | }; 165 | 166 | lock: SpinLock = .{}, 167 | 168 | address: net.Address, 169 | reactor: Reactor, 170 | 171 | pool: [*]*Connection, 172 | len: usize = 0, 173 | 174 | queue: WriteQueue, 175 | wga: AsyncWaitGroupAllocator, 176 | waiters: ?*Waiter = null, 177 | 178 | closed: bool = false, 179 | connect_circuit: ConnectCircuitBreaker = ConnectCircuitBreaker.init(.half_open), 180 | 181 | pub fn init(allocator: *mem.Allocator, reactor: Reactor, address: net.Address) !Client { 182 | const pool = try allocator.create([capacity]*Connection); 183 | errdefer allocator.destroy(pool); 184 | 185 | const queue = try WriteQueue.init(allocator); 186 | errdefer queue.deinit(allocator); 187 | 188 | return Client{ 189 | .address = address, 190 | .reactor = reactor, 191 | .pool = pool, 192 | .queue = queue, 193 | .wga = .{ .backing_allocator = allocator }, 194 | }; 195 | } 196 | 197 | pub fn deinit(self: *Client, allocator: *mem.Allocator) void { 198 | self.wga.wait(); 199 | self.queue.deinit(allocator); 200 | allocator.destroy(@ptrCast(*const [capacity]*Connection, self.pool)); 201 | } 202 | 203 | pub fn close(self: *Client) void { 204 | self.queue.close(); 205 | 206 | const held = self.lock.acquire(); 207 | defer held.release(); 208 | 209 | self.closed = true; 210 | 211 | for (self.pool[0..self.len]) |conn| { 212 | if (conn.connected) { 213 | conn.socket.shutdown(.both) catch {}; 214 | log.info("{*} signalled to shutdown", .{conn}); 215 | } 216 | } 217 | } 218 | 219 | pub fn write(self: *Client, buf: []const u8) !void { 220 | try self.ensureConnectionAvailable(); 221 | if (!self.queue.pusher().push(buf)) return error.Closed; 222 | } 223 | 224 | /// Lock must be held. Allocates a new connection and registers it 225 | /// to this client's pool of connections.
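/// The spawned connection's frame is intentionally left undefined: the
/// caller starts it with `async conn.run()` only after releasing the lock
/// (see ensureConnectionAvailable below).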
226 | fn spawn(self: *Client) !*Connection { 227 | const conn = try self.wga.allocator.create(Connection); 228 | errdefer self.wga.allocator.destroy(conn); 229 | 230 | conn.* = .{ 231 | .client = self, 232 | .socket = undefined, 233 | .frame = undefined, 234 | }; 235 | 236 | self.pool[self.len] = conn; 237 | self.len += 1; 238 | 239 | log.info("{*} got spawned", .{conn}); 240 | 241 | return conn; 242 | } 243 | 244 | pub const PoolResult = union(enum) { 245 | available: void, 246 | spawned_pending: *Connection, 247 | spawned_available: *Connection, 248 | pending: void, 249 | }; 250 | 251 | fn queryPool(self: *Client) !PoolResult { 252 | if (self.len == 0) { 253 | return PoolResult{ .spawned_pending = try self.spawn() }; 254 | } 255 | 256 | const pool = self.pool[0..self.len]; 257 | 258 | const any_connected = for (pool) |conn| { 259 | if (conn.connected) break true; 260 | } else false; 261 | 262 | if (self.queue.count() == 0 or pool.len == capacity) { 263 | return if (any_connected) PoolResult.available else PoolResult.pending; 264 | } 265 | 266 | const connect_circuit_breaker_status = self.connect_circuit.query(@intCast(usize, time.milliTimestamp())); 267 | if (connect_circuit_breaker_status == .open) { 268 | return if (any_connected) PoolResult.available else PoolResult.pending; 269 | } 270 | 271 | if (!any_connected) { 272 | return PoolResult{ .spawned_pending = try self.spawn() }; 273 | } 274 | return PoolResult{ .spawned_available = try self.spawn() }; 275 | } 276 | 277 | pub fn ensureConnectionAvailable(self: *Client) !void { 278 | const held = self.lock.acquire(); 279 | 280 | if (self.closed) { 281 | held.release(); 282 | return error.Closed; 283 | } 284 | 285 | const result = self.queryPool() catch |err| { 286 | held.release(); 287 | return err; 288 | }; 289 | 290 | if (result == .available) { 291 | held.release(); 292 | return; 293 | } 294 | 295 | if (result == .spawned_available) { 296 | held.release(); 297 | result.spawned_available.frame = async result.spawned_available.run(); 298 | return; 299 | } 300 | 301 | var waiter: Waiter = .{ .frame = @frame() }; 302 | 303 | suspend { 304 | waiter.next = self.waiters; 305 | self.waiters = &waiter; 306 | held.release(); 307 | 308 | if (result == .spawned_pending) { 309 | result.spawned_pending.frame = async result.spawned_pending.run(); 310 | } 311 | } 312 | 313 | return waiter.result; 314 | } 315 | 316 | /// Lock must be held, and connection must exist in the pool. 
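/// Deregistering swap-removes the connection: the last pool entry is moved
/// into the vacated slot, so the pool stays dense but unordered.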
317 | fn deregister(self: *Client, conn: *Connection) void { 318 | const pool = self.pool[0..self.len]; 319 | const i = mem.indexOfScalar(*Connection, pool, conn) orelse unreachable; 320 | 321 | log.info("connection {*} was released", .{conn}); 322 | 323 | if (i == self.len - 1) { 324 | pool[i] = undefined; 325 | self.len -= 1; 326 | return; 327 | } 328 | 329 | pool[i] = pool[self.len - 1]; 330 | self.len -= 1; 331 | } 332 | 333 | fn reportConnected(self: *Client, conn: *Connection) bool { 334 | log.info("{*} successfully connected", .{conn}); 335 | 336 | const batch = collected: { 337 | var batch: zap.Pool.Batch = .{}; 338 | 339 | const held = self.lock.acquire(); 340 | defer held.release(); 341 | 342 | assert(!conn.connected); 343 | 344 | if (self.closed) { 345 | self.deregister(conn); 346 | return false; 347 | } 348 | 349 | conn.connected = true; 350 | self.connect_circuit.reportSuccess(); 351 | 352 | while (self.waiters) |waiter| : (self.waiters = waiter.next) { 353 | waiter.result = {}; 354 | batch.push(&waiter.runnable); 355 | } 356 | 357 | break :collected batch; 358 | }; 359 | 360 | hyperia.pool.schedule(.{}, batch); 361 | return true; 362 | } 363 | 364 | fn reportConnectError( 365 | self: *Client, 366 | conn: *Connection, 367 | unrecoverable: bool, 368 | err: Connection.Error, 369 | ) bool { 370 | const batch = collect: { 371 | var batch: zap.Pool.Batch = .{}; 372 | 373 | const held = self.lock.acquire(); 374 | defer held.release(); 375 | 376 | assert(!conn.connected); 377 | 378 | if (!unrecoverable) { 379 | log.info("{*} got an error while connecting: {}", .{ conn, err }); 380 | } 381 | 382 | self.connect_circuit.reportFailure(@intCast(usize, time.milliTimestamp())); 383 | 384 | if (self.len > 1) { 385 | self.deregister(conn); 386 | return false; 387 | } 388 | 389 | if (unrecoverable) { 390 | self.deregister(conn); 391 | } 392 | 393 | while (self.waiters) |waiter| : (self.waiters = waiter.next) { 394 | waiter.result = err; 395 | batch.push(&waiter.runnable); 396 | } 397 | 398 | break :collect batch; 399 | }; 400 | 401 | hyperia.pool.schedule(.{}, batch); 402 | 403 | return !unrecoverable; 404 | } 405 | 406 | fn reportDisconnected(self: *Client, conn: *Connection) bool { 407 | const held = self.lock.acquire(); 408 | defer held.release(); 409 | 410 | assert(conn.connected); 411 | conn.connected = false; 412 | 413 | log.info("{*} disconnected", .{conn}); 414 | 415 | if (self.closed or self.len > 1) { 416 | self.deregister(conn); 417 | return true; 418 | } 419 | 420 | return false; 421 | } 422 | }; 423 | 424 | fn runBenchmark(client: *Client) !void { 425 | defer log.info("done", .{}); 426 | 427 | var i: usize = 0; 428 | while (true) : (i +%= 1) { 429 | await async client.write("message\n") catch |err| switch (err) { 430 | error.Closed => return, 431 | else => return err, 432 | }; 433 | 434 | if (i % 50 == 0) Frame.yield(); 435 | } 436 | } 437 | 438 | fn runApp(reactor: Reactor) !void { 439 | defer { 440 | @atomicStore(bool, &stopped, true, .Release); 441 | reactor_event.post(); 442 | } 443 | 444 | const address = net.Address.initIp4(.{ 0, 0, 0, 0 }, 9000); 445 | 446 | var client = try Client.init(hyperia.allocator, reactor, address); 447 | defer client.deinit(hyperia.allocator); 448 | 449 | const Cases = struct { 450 | benchmark: struct { 451 | run: Case(runBenchmark), 452 | cancel: Case(Client.close), 453 | }, 454 | ctrl_c: struct { 455 | run: Case(hyperia.ctrl_c.wait), 456 | cancel: Case(hyperia.ctrl_c.cancel), 457 | }, 458 | }; 459 | 460 | switch (select( 461 | Cases{ 462 | 
.benchmark = .{ 463 | .run = call(runBenchmark, .{&client}), 464 | .cancel = call(Client.close, .{&client}), 465 | }, 466 | .ctrl_c = .{ 467 | .run = call(hyperia.ctrl_c.wait, .{}), 468 | .cancel = call(hyperia.ctrl_c.cancel, .{}), 469 | }, 470 | }, 471 | )) { 472 | .benchmark => |result| return result, 473 | .ctrl_c => return log.info("got ctrl+c", .{}), 474 | } 475 | } 476 | 477 | pub fn main() !void { 478 | hyperia.init(); 479 | defer hyperia.deinit(); 480 | 481 | hyperia.ctrl_c.init(); 482 | defer hyperia.ctrl_c.deinit(); 483 | 484 | clock = try time.Timer.start(); 485 | 486 | timer = Timer.init(hyperia.allocator); 487 | defer timer.deinit(hyperia.allocator); 488 | 489 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 490 | defer reactor.deinit(); 491 | 492 | reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 493 | defer reactor_event.deinit(); 494 | 495 | try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); 496 | 497 | var frame = async runApp(reactor); 498 | 499 | while (!@atomicLoad(bool, &stopped, .Acquire)) { 500 | var batch: zap.Pool.Batch = .{}; 501 | defer hyperia.pool.schedule(.{}, batch); 502 | 503 | try reactor.poll(128, struct { 504 | batch: *zap.Pool.Batch, 505 | 506 | pub fn call(self: @This(), event: Reactor.Event) void { 507 | const handle = @intToPtr(*Reactor.Handle, event.data); 508 | handle.call(self.batch, event); 509 | } 510 | }{ .batch = &batch }, if (timer.delay(clock.read())) |delay| @intCast(u64, delay) else null); 511 | 512 | timer.update(clock.read(), struct { 513 | batch: *zap.Pool.Batch, 514 | 515 | pub fn call(self: @This(), handle: *Timer.Handle) void { 516 | self.batch.push(handle.set()); 517 | } 518 | }{ .batch = &batch }); 519 | } 520 | 521 | try nosuspend await frame; 522 | 523 | log.info("good bye!", .{}); 524 | } 525 | -------------------------------------------------------------------------------- /example_tcp_server.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia"); 4 | const Reactor = hyperia.Reactor; 5 | const AsyncSocket = hyperia.AsyncSocket; 6 | const AsyncWaitGroupAllocator = hyperia.AsyncWaitGroupAllocator; 7 | 8 | const os = std.os; 9 | const net = std.net; 10 | const mem = std.mem; 11 | const builtin = std.builtin; 12 | const log = std.log.scoped(.server); 13 | 14 | usingnamespace hyperia.select; 15 | 16 | pub const log_level = .debug; 17 | 18 | var stopped: bool = false; 19 | 20 | pub const Server = struct { 21 | pub const Connection = struct { 22 | server: *Server, 23 | socket: AsyncSocket, 24 | address: net.Address, 25 | frame: @Frame(Connection.start), 26 | 27 | pub fn start(self: *Connection) !void { 28 | defer { 29 | log.info("{} has disconnected", .{self.address}); 30 | 31 | if (self.server.deregister(self.address)) { 32 | suspend { 33 | self.socket.deinit(); 34 | self.server.wga.allocator.destroy(self); 35 | } 36 | } 37 | } 38 | 39 | var buf: [4096]u8 = undefined; 40 | while (true) { 41 | const num_bytes = try self.socket.read(&buf); 42 | if (num_bytes == 0) return; 43 | 44 | const message = mem.trim(u8, buf[0..num_bytes], "\r\n"); 45 | log.info("got message from {}: '{s}'", .{ self.address, message }); 46 | } 47 | } 48 | }; 49 | 50 | listener: AsyncSocket, 51 | 52 | wga: AsyncWaitGroupAllocator, 53 | lock: std.Thread.Mutex = .{}, 54 | connections: std.AutoArrayHashMapUnmanaged(os.sockaddr, *Connection) = .{}, 55 | 56 | pub fn init(allocator: *mem.Allocator) 
Server { 57 | return Server{ 58 | .listener = undefined, 59 | .wga = .{ .backing_allocator = allocator }, 60 | }; 61 | } 62 | 63 | pub fn deinit(self: *Server, allocator: *mem.Allocator) void { 64 | { 65 | const held = self.lock.acquire(); 66 | defer held.release(); 67 | 68 | for (self.connections.items()) |entry| { 69 | entry.value.socket.shutdown(.both) catch {}; 70 | } 71 | } 72 | 73 | self.wga.wait(); 74 | self.connections.deinit(allocator); 75 | } 76 | 77 | pub fn close(self: *Server) void { 78 | self.listener.shutdown(.recv) catch {}; 79 | } 80 | 81 | pub fn start(self: *Server, reactor: Reactor, address: net.Address) !void { 82 | self.listener = try AsyncSocket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 83 | errdefer self.listener.deinit(); 84 | 85 | try reactor.add(self.listener.socket.fd, &self.listener.handle, .{ .readable = true }); 86 | 87 | try self.listener.setReuseAddress(true); 88 | try self.listener.setReusePort(true); 89 | try self.listener.setNoDelay(true); 90 | try self.listener.setFastOpen(true); 91 | try self.listener.setQuickAck(true); 92 | 93 | try self.listener.bind(address); 94 | try self.listener.listen(128); 95 | 96 | log.info("listening for connections on: {}", .{try self.listener.getName()}); 97 | } 98 | 99 | fn accept(self: *Server, allocator: *mem.Allocator, reactor: Reactor) callconv(.Async) !void { 100 | while (true) { 101 | var conn = try self.listener.accept(os.SOCK_CLOEXEC | os.SOCK_NONBLOCK); 102 | errdefer conn.socket.deinit(); 103 | 104 | try conn.socket.setNoDelay(true); 105 | 106 | log.info("got connection: {}", .{conn.address}); 107 | 108 | const wga_allocator = &self.wga.allocator; 109 | 110 | const connection = try wga_allocator.create(Connection); 111 | errdefer wga_allocator.destroy(connection); 112 | 113 | connection.server = self; 114 | connection.socket = AsyncSocket.from(conn.socket); 115 | connection.address = conn.address; 116 | 117 | try reactor.add(conn.socket.fd, &connection.socket.handle, .{ .readable = true, .writable = true }); 118 | 119 | { 120 | const held = self.lock.acquire(); 121 | defer held.release(); 122 | 123 | try self.connections.put(allocator, connection.address.any, connection); 124 | } 125 | 126 | connection.frame = async connection.start(); 127 | } 128 | } 129 | 130 | fn deregister(self: *Server, address: net.Address) bool { 131 | const held = self.lock.acquire(); 132 | defer held.release(); 133 | 134 | const entry = self.connections.swapRemove(address.any) orelse return false; 135 | return true; 136 | } 137 | }; 138 | 139 | pub fn runApp(reactor: Reactor, reactor_event: *Reactor.AutoResetEvent) !void { 140 | defer { 141 | log.info("shutting down...", .{}); 142 | @atomicStore(bool, &stopped, true, .Release); 143 | reactor_event.post(); 144 | } 145 | 146 | var server = Server.init(hyperia.allocator); 147 | defer server.deinit(hyperia.allocator); 148 | 149 | const address = net.Address.initIp4(.{ 0, 0, 0, 0 }, 9000); 150 | try server.start(reactor, address); 151 | 152 | const Cases = struct { 153 | accept: struct { 154 | run: Case(Server.accept), 155 | cancel: Case(Server.close), 156 | }, 157 | ctrl_c: struct { 158 | run: Case(hyperia.ctrl_c.wait), 159 | cancel: Case(hyperia.ctrl_c.cancel), 160 | }, 161 | }; 162 | 163 | switch (select( 164 | Cases{ 165 | .accept = .{ 166 | .run = call(Server.accept, .{ &server, hyperia.allocator, reactor }), 167 | .cancel = call(Server.close, .{&server}), 168 | }, 169 | .ctrl_c = .{ 170 | .run = call(hyperia.ctrl_c.wait, .{}), 171 | .cancel = 
call(hyperia.ctrl_c.cancel, .{}), 172 | }, 173 | }, 174 | )) { 175 | .accept => |result| return result, 176 | .ctrl_c => |result| return result, 177 | } 178 | } 179 | 180 | pub fn main() !void { 181 | hyperia.init(); 182 | defer hyperia.deinit(); 183 | 184 | hyperia.ctrl_c.init(); 185 | defer hyperia.ctrl_c.deinit(); 186 | 187 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 188 | defer reactor.deinit(); 189 | 190 | var reactor_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 191 | defer reactor_event.deinit(); 192 | 193 | try reactor.add(reactor_event.fd, &reactor_event.handle, .{}); 194 | 195 | var frame = async runApp(reactor, &reactor_event); 196 | 197 | while (!@atomicLoad(bool, &stopped, .Acquire)) { 198 | const EventProcessor = struct { 199 | batch: zap.Pool.Batch = .{}, 200 | 201 | pub fn call(self: *@This(), event: Reactor.Event) void { 202 | const handle = @intToPtr(*Reactor.Handle, event.data); 203 | handle.call(&self.batch, event); 204 | } 205 | }; 206 | 207 | var processor: EventProcessor = .{}; 208 | defer hyperia.pool.schedule(.{}, processor.batch); 209 | 210 | try reactor.poll(128, &processor, null); 211 | } 212 | 213 | try nosuspend await frame; 214 | 215 | log.info("good bye!", .{}); 216 | } 217 | -------------------------------------------------------------------------------- /hyperia.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | 4 | const mem = std.mem; 5 | const heap = std.heap; 6 | const builtin = std.builtin; 7 | const testing = std.testing; 8 | 9 | const assert = std.debug.assert; 10 | 11 | pub const net = @import("net.zig"); 12 | pub const sync = @import("sync.zig"); 13 | pub const spsc = @import("spsc.zig"); 14 | pub const mpsc = @import("mpsc.zig"); 15 | pub const mpmc = @import("mpmc.zig"); 16 | pub const oneshot = @import("oneshot.zig"); 17 | 18 | pub const ctrl_c = @import("ctrl_c.zig"); 19 | pub const select = @import("select.zig"); 20 | 21 | pub const ObjectPool = @import("object_pool.zig").ObjectPool; 22 | 23 | pub const Timer = @import("Timer.zig"); 24 | pub const Timer2 = @import("Timer2.zig"); 25 | pub const Reactor = @import("reactor.zig").Reactor; 26 | pub const Socket = @import("socket.zig").Socket; 27 | pub const AsyncSocket = @import("async_socket.zig").AsyncSocket; 28 | pub const CircuitBreaker = @import("circuit_breaker.zig").CircuitBreaker; 29 | pub const AsyncWaitGroup = @import("async_wait_group.zig").AsyncWaitGroup; 30 | pub const AsyncWaitGroupAllocator = @import("async_wait_group_allocator.zig").AsyncWaitGroupAllocator; 31 | 32 | pub var gpa: heap.GeneralPurposeAllocator(.{}) = undefined; 33 | pub var allocator: *mem.Allocator = undefined; 34 | 35 | pub var pool: zap.Pool = undefined; 36 | 37 | pub fn init() void { 38 | gpa = .{}; 39 | if (builtin.link_libc) { 40 | gpa.backing_allocator = heap.c_allocator; 41 | } 42 | allocator = &gpa.allocator; 43 | 44 | pool = zap.Pool.init(.{ .max_threads = 1 }); 45 | } 46 | 47 | pub fn deinit() void { 48 | pool.deinit(); 49 | assert(!gpa.deinit()); 50 | } 51 | 52 | test { 53 | testing.refAllDecls(@This()); 54 | } 55 | -------------------------------------------------------------------------------- /mpmc.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const os = std.os; 6 | const mem = std.mem; 7 | const math = std.math; 8 | 
const builtin = std.builtin; 9 | const testing = std.testing; 10 | 11 | const assert = std.debug.assert; 12 | 13 | pub const cache_line_length = switch (builtin.cpu.arch) { 14 | .x86_64, .aarch64, .powerpc64 => 128, 15 | .arm, .mips, .mips64, .riscv64 => 32, 16 | .s390x => 256, 17 | else => 64, 18 | }; 19 | 20 | pub const AsyncAutoResetEvent = struct { 21 | const Self = @This(); 22 | 23 | const Waiter = struct { 24 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 25 | frame: anyframe, 26 | 27 | next: ?*Waiter = null, 28 | refs: u32 = 2, 29 | 30 | pub fn run(runnable: *zap.Pool.Runnable) void { 31 | const self = @fieldParentPtr(Waiter, "runnable", runnable); 32 | resume self.frame; 33 | } 34 | }; 35 | 36 | const setter_increment: u64 = 1; 37 | const waiter_increment: u64 = @as(u64, 1) << 32; 38 | 39 | state: u64 = 0, 40 | waiters: ?*Waiter = null, 41 | new_waiters: ?*Waiter = null, 42 | 43 | fn getSetterCount(state: u64) callconv(.Inline) u32 { 44 | return @truncate(u32, state); 45 | } 46 | 47 | fn getWaiterCount(state: u64) callconv(.Inline) u32 { 48 | return @intCast(u32, state >> 32); 49 | } 50 | 51 | pub fn wait(self: *Self) void { 52 | var state = @atomicLoad(u64, &self.state, .Monotonic); 53 | if (getSetterCount(state) > getWaiterCount(state)) { 54 | if (@cmpxchgStrong( 55 | u64, 56 | &self.state, 57 | state, 58 | state - setter_increment, 59 | .Acquire, 60 | .Monotonic, 61 | ) == null) return; 62 | } 63 | self.park(); 64 | } 65 | 66 | /// Cancels a waiter so that the waiter may be manually resumed. 67 | /// Returns true if the waiter may be manually resumed, and false 68 | /// otherwise because a setter appears to have already resumed 69 | /// the waiter. 70 | pub fn cancel(self: *Self) bool { 71 | var state = @atomicLoad(u64, &self.state, .Monotonic); 72 | while (true) { 73 | if (getSetterCount(state) >= getWaiterCount(state)) return false; 74 | 75 | state = @cmpxchgWeak( 76 | u64, 77 | &self.state, 78 | state, 79 | state - waiter_increment, 80 | .AcqRel, 81 | .Acquire, 82 | ) orelse return true; 83 | } 84 | } 85 | 86 | pub fn set(self: *Self) zap.Pool.Batch { 87 | var state = @atomicLoad(u64, &self.state, .Monotonic); 88 | while (true) { 89 | if (getSetterCount(state) > getWaiterCount(state)) return .{}; 90 | 91 | state = @cmpxchgWeak( 92 | u64, 93 | &self.state, 94 | state, 95 | state + setter_increment, 96 | .AcqRel, 97 | .Acquire, 98 | ) orelse break; 99 | } 100 | 101 | if (getSetterCount(state) == 0 and getWaiterCount(state) > 0) { 102 | return self.unpark(state + setter_increment); 103 | } 104 | 105 | return .{}; 106 | } 107 | 108 | fn park(self: *Self) void { 109 | var waiter: Waiter = .{ .frame = @frame() }; 110 | 111 | suspend { 112 | var head = @atomicLoad(?*Waiter, &self.new_waiters, .Monotonic); 113 | while (true) { 114 | waiter.next = head; 115 | 116 | head = @cmpxchgWeak( 117 | ?*Waiter, 118 | &self.new_waiters, 119 | head, 120 | &waiter, 121 | .Release, 122 | .Monotonic, 123 | ) orelse break; 124 | } 125 | 126 | var batch: zap.Pool.Batch = .{}; 127 | defer hyperia.pool.schedule(.{}, batch); 128 | 129 | var state = @atomicRmw(u64, &self.state, .Add, waiter_increment, .AcqRel); 130 | if (getSetterCount(state) > 0 and getWaiterCount(state) == 0) { 131 | batch.push(self.unpark(state + waiter_increment)); 132 | } 133 | 134 | if (@atomicRmw(u32, &waiter.refs, .Sub, 1, .Acquire) == 1) { 135 | batch.push(&waiter.runnable); 136 | } 137 | } 138 | } 139 | 140 | fn unpark(self: *Self, state: u64) zap.Pool.Batch { 141 | var batch: zap.Pool.Batch = .{}; 142 | var 
waiters_to_resume: ?*Waiter = null; 143 | var waiters_to_resume_tail: *?*Waiter = &waiters_to_resume; 144 | 145 | var num_waiters_to_resume: u64 = math.min(getWaiterCount(state), getSetterCount(state)); 146 | assert(num_waiters_to_resume > 0); 147 | 148 | while (num_waiters_to_resume != 0) { 149 | var i: usize = 0; 150 | while (i < num_waiters_to_resume) : (i += 1) { 151 | if (self.waiters == null) { 152 | var new_waiters = @atomicRmw(?*Waiter, &self.new_waiters, .Xchg, null, .Acquire); 153 | assert(new_waiters != null); 154 | 155 | while (new_waiters) |new_waiter| { 156 | const next = new_waiter.next; 157 | new_waiter.next = self.waiters; 158 | self.waiters = new_waiter; 159 | new_waiters = next; 160 | } 161 | } 162 | 163 | const waiter_to_resume = self.waiters orelse unreachable; 164 | self.waiters = waiter_to_resume.next; 165 | 166 | waiter_to_resume.next = null; 167 | waiters_to_resume_tail.* = waiter_to_resume; 168 | waiters_to_resume_tail = &waiter_to_resume.next; 169 | } 170 | 171 | const delta = num_waiters_to_resume | (num_waiters_to_resume << 32); 172 | const new_state = @atomicRmw(u64, &self.state, .Sub, delta, .AcqRel) - delta; 173 | num_waiters_to_resume = math.min(getWaiterCount(new_state), getSetterCount(new_state)); 174 | } 175 | 176 | assert(waiters_to_resume != null); 177 | 178 | while (waiters_to_resume) |waiter| { 179 | const next = waiter.next; 180 | if (@atomicRmw(u32, &waiter.refs, .Sub, 1, .Release) == 1) { 181 | batch.push(&waiter.runnable); 182 | } 183 | waiters_to_resume = next; 184 | } 185 | 186 | return batch; 187 | } 188 | }; 189 | 190 | pub const Semaphore = struct { 191 | const Self = @This(); 192 | 193 | pub const Waiter = struct { 194 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 195 | frame: anyframe, 196 | 197 | parent: *Self, 198 | prev: ?*Waiter = null, 199 | next: ?*Waiter = null, 200 | cancelled: bool = false, 201 | }; 202 | 203 | tokens: usize = 0, 204 | 205 | lock: hyperia.sync.SpinLock = .{}, 206 | waiters: ?*Waiter = null, 207 | 208 | pub fn init(tokens: usize) Self { 209 | return .{ .tokens = tokens }; 210 | } 211 | 212 | pub fn signal(self: *Self) void { 213 | var tokens = @atomicLoad(usize, &self.tokens, .Monotonic); 214 | while (true) { 215 | while (tokens == 0) { 216 | if (self.signalSlow()) return; 217 | tokens = @atomicLoad(usize, &self.tokens, .Acquire); 218 | } 219 | 220 | tokens = @cmpxchgWeak( 221 | usize, 222 | &self.tokens, 223 | tokens, 224 | tokens + 1, 225 | .Release, 226 | .Acquire, 227 | ) orelse return; 228 | } 229 | } 230 | 231 | fn signalSlow(self: *Self) bool { 232 | var waiter: *Waiter = wake: { 233 | const held = self.lock.acquire(); 234 | defer held.release(); 235 | 236 | const tokens = @atomicLoad(usize, &self.tokens, .Acquire); 237 | if (tokens != 0) return false; 238 | 239 | const waiter = self.waiters orelse { 240 | assert(@cmpxchgStrong( 241 | usize, 242 | &self.tokens, 243 | tokens, 244 | tokens + 1, 245 | .Monotonic, 246 | .Monotonic, 247 | ) == null); 248 | return true; 249 | }; 250 | 251 | if (waiter.next) |next| { 252 | next.prev = null; 253 | } 254 | self.waiters = waiter.next; 255 | 256 | break :wake waiter; 257 | }; 258 | 259 | hyperia.pool.schedule(.{}, &waiter.runnable); 260 | return true; 261 | } 262 | 263 | pub fn wait(self: *Self, waiter: *Waiter) void { 264 | var tokens = @atomicLoad(usize, &self.tokens, .Acquire); 265 | while (true) { 266 | while (tokens == 0) { 267 | suspend { 268 | waiter.frame = @frame(); 269 | if (!self.waitSlow(waiter)) { 270 | hyperia.pool.schedule(.{}, &waiter.runnable); 
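// waitSlow() returning false means tokens became available before the
// waiter could be enqueued, so the frame is rescheduled immediately to
// retry rather than being left parked.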
271 | } 272 | } 273 | tokens = @atomicLoad(usize, &self.tokens, .Acquire); 274 | } 275 | 276 | tokens = @cmpxchgWeak( 277 | usize, 278 | &self.tokens, 279 | tokens, 280 | tokens - 1, 281 | .Release, 282 | .Acquire, 283 | ) orelse return; 284 | } 285 | } 286 | 287 | fn waitSlow(self: *Self, waiter: *Waiter) bool { 288 | const held = self.lock.acquire(); 289 | defer held.release(); 290 | 291 | const tokens = @atomicLoad(usize, &self.tokens, .Acquire); 292 | if (tokens != 0) return false; 293 | 294 | if (self.waiters) |head| { 295 | head.prev = waiter; 296 | } 297 | waiter.next = self.waiters; 298 | self.waiters = waiter; 299 | 300 | return true; 301 | } 302 | }; 303 | 304 | pub const Event = struct { 305 | const Self = @This(); 306 | 307 | pub const State = enum { 308 | unset, 309 | set, 310 | }; 311 | 312 | pub const Waiter = struct { 313 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 314 | frame: anyframe, 315 | 316 | parent: *Event, 317 | prev: ?*Waiter = null, 318 | next: ?*Waiter = null, 319 | cancelled: bool = false, 320 | 321 | fn run(runnable: *zap.Pool.Runnable) void { 322 | const self = @fieldParentPtr(Waiter, "runnable", runnable); 323 | resume self.frame; 324 | } 325 | 326 | pub fn cancel(self: *Waiter) ?*zap.Pool.Runnable { 327 | const runnable = collect: { 328 | const held = self.parent.lock.acquire(); 329 | defer held.release(); 330 | 331 | if (self.cancelled) return null; 332 | 333 | self.cancelled = true; 334 | 335 | if (self.prev == null and self.next == null) return null; 336 | if (self.parent.waiters == self) return null; 337 | 338 | if (self.prev) |prev| { 339 | prev.next = self.next; 340 | self.prev = null; 341 | } else { 342 | self.parent.waiters = self.next; 343 | } 344 | 345 | if (self.next) |next| { 346 | next.prev = self.prev; 347 | self.next = null; 348 | } 349 | 350 | break :collect &self.runnable; 351 | }; 352 | 353 | return runnable; 354 | } 355 | 356 | /// Waits until the parenting event is set. It returns true 357 | /// if this waiter was not cancelled, and false otherwise. 
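/// If the event is already set when wait() is called, the set state is
/// consumed and the waiter returns true immediately without suspending.
///
/// A minimal (hypothetical) round-trip with this API:
///
///     var event: Event = .{};
///     var waiter = event.createWaiter();
///     // producer side: if (event.set()) |runnable| hyperia.pool.schedule(.{}, runnable);
///     if (waiter.wait()) {
///         // the event was observed exactly once
///     }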
358 | pub fn wait(self: *Waiter) bool { 359 | const held = self.parent.lock.acquire(); 360 | 361 | if (self.cancelled) { 362 | held.release(); 363 | return false; 364 | } 365 | 366 | if (self.parent.state == .set) { 367 | self.parent.state = .unset; 368 | held.release(); 369 | return true; 370 | } 371 | 372 | suspend { 373 | self.frame = @frame(); 374 | if (self.parent.waiters) |waiter| { 375 | waiter.prev = self; 376 | } 377 | self.next = self.parent.waiters; 378 | self.parent.waiters = self; 379 | held.release(); 380 | } 381 | 382 | assert(self.prev == null); 383 | assert(self.next == null); 384 | 385 | return !self.cancelled; 386 | } 387 | }; 388 | 389 | lock: hyperia.sync.SpinLock = .{}, 390 | state: State = .unset, 391 | waiters: ?*Waiter = null, 392 | 393 | pub fn set(self: *Self) ?*zap.Pool.Runnable { 394 | const runnable: ?*zap.Pool.Runnable = collect: { 395 | const held = self.lock.acquire(); 396 | defer held.release(); 397 | 398 | if (self.state == .set) { 399 | break :collect null; 400 | } 401 | 402 | if (self.waiters) |waiter| { 403 | self.waiters = waiter.next; 404 | waiter.next = null; 405 | waiter.prev = null; 406 | break :collect &waiter.runnable; 407 | } else { 408 | self.state = .set; 409 | break :collect null; 410 | } 411 | }; 412 | 413 | return runnable; 414 | } 415 | 416 | pub fn createWaiter(self: *Self) Waiter { 417 | return Waiter{ .parent = self, .frame = undefined }; 418 | } 419 | }; 420 | 421 | pub fn AsyncQueue(comptime T: type, comptime capacity: comptime_int) type { 422 | return struct { 423 | const Self = @This(); 424 | 425 | queue: Queue(T, capacity), 426 | closed: bool = false, 427 | producer_event: Event = .{}, 428 | consumer_event: Event = .{}, 429 | 430 | pub fn init(allocator: *mem.Allocator) !Self { 431 | return Self{ .queue = try Queue(T, capacity).init(allocator) }; 432 | } 433 | 434 | pub fn deinit(self: *Self, allocator: *mem.Allocator) void { 435 | self.queue.deinit(allocator); 436 | } 437 | 438 | pub fn tryPush(self: *Self, item: T) bool { 439 | return self.queue.tryPush(item); 440 | } 441 | 442 | pub fn tryPop(self: *Self) ?T { 443 | return self.queue.tryPop(); 444 | } 445 | 446 | pub fn count(self: *Self) usize { 447 | return self.queue.count(); 448 | } 449 | 450 | pub fn close(self: *Self) void { 451 | @atomicStore(bool, &self.closed, true, .Monotonic); 452 | while (true) { 453 | var batch: zap.Pool.Batch = .{}; 454 | batch.push(self.producer_event.set()); 455 | batch.push(self.consumer_event.set()); 456 | if (batch.isEmpty()) break; 457 | hyperia.pool.schedule(.{}, batch); 458 | } 459 | } 460 | 461 | pub const Pusher = struct { 462 | parent: *Self, 463 | waiter: Event.Waiter, 464 | 465 | pub fn cancel(self: *Pusher) ?*zap.Pool.Runnable { 466 | return self.waiter.cancel(); 467 | } 468 | 469 | pub fn push(self: *Pusher, item: T) bool { 470 | while (!@atomicLoad(bool, &self.parent.closed, .Monotonic)) { 471 | if (self.parent.tryPush(item)) { 472 | hyperia.pool.schedule(.{}, self.parent.consumer_event.set()); 473 | return true; 474 | } 475 | if (!self.waiter.wait()) return false; 476 | } 477 | return false; 478 | } 479 | }; 480 | 481 | pub const Popper = struct { 482 | parent: *Self, 483 | waiter: Event.Waiter, 484 | 485 | pub fn cancel(self: *Popper) ?*zap.Pool.Runnable { 486 | return self.waiter.cancel(); 487 | } 488 | 489 | pub fn pop(self: *Popper) ?T { 490 | while (!@atomicLoad(bool, &self.parent.closed, .Monotonic)) { 491 | if (self.parent.tryPop()) |item| { 492 | hyperia.pool.schedule(.{}, self.parent.producer_event.set()); 493 | return 
item; 494 | } 495 | if (!self.waiter.wait()) return null; 496 | } 497 | return null; 498 | } 499 | }; 500 | 501 | pub fn pusher(self: *Self) Pusher { 502 | return Pusher{ .parent = self, .waiter = self.producer_event.createWaiter() }; 503 | } 504 | 505 | pub fn popper(self: *Self) Popper { 506 | return Popper{ .parent = self, .waiter = self.consumer_event.createWaiter() }; 507 | } 508 | }; 509 | } 510 | 511 | pub fn Queue(comptime T: type, comptime capacity: comptime_int) type { 512 | return struct { 513 | const Self = @This(); 514 | 515 | pub const Entry = struct { 516 | sequence: usize align(cache_line_length), 517 | item: T, 518 | }; 519 | 520 | entries: [*]Entry align(cache_line_length), 521 | enqueue_pos: usize align(cache_line_length), 522 | dequeue_pos: usize align(cache_line_length), 523 | 524 | pub fn init(allocator: *mem.Allocator) !Self { 525 | const entries = try allocator.create([capacity]Entry); 526 | for (entries) |*entry, i| entry.sequence = i; 527 | 528 | return Self{ 529 | .entries = entries, 530 | .enqueue_pos = 0, 531 | .dequeue_pos = 0, 532 | }; 533 | } 534 | 535 | pub fn deinit(self: *Self, allocator: *mem.Allocator) void { 536 | allocator.destroy(@ptrCast(*const [capacity]Entry, self.entries)); 537 | } 538 | 539 | pub fn count(self: *Self) usize { 540 | const tail = @atomicLoad(usize, &self.dequeue_pos, .Monotonic); 541 | const head = @atomicLoad(usize, &self.enqueue_pos, .Monotonic); 542 | return head -% tail; // items queued: the enqueue position always leads the dequeue position by at most `capacity` 543 | } 544 | 545 | pub fn tryPush(self: *Self, item: T) bool { 546 | var entry: *Entry = undefined; 547 | var pos = @atomicLoad(usize, &self.enqueue_pos, .Monotonic); 548 | while (true) : (os.sched_yield() catch {}) { 549 | entry = &self.entries[pos & (capacity - 1)]; 550 | 551 | const seq = @atomicLoad(usize, &entry.sequence, .Acquire); 552 | const diff = @intCast(isize, seq) -% @intCast(isize, pos); 553 | if (diff == 0) { 554 | pos = @cmpxchgWeak(usize, &self.enqueue_pos, pos, pos +% 1, .Monotonic, .Monotonic) orelse { 555 | break; 556 | }; 557 | } else if (diff < 0) { 558 | return false; 559 | } else { 560 | pos = @atomicLoad(usize, &self.enqueue_pos, .Monotonic); 561 | } 562 | } 563 | entry.item = item; 564 | @atomicStore(usize, &entry.sequence, pos +% 1, .Release); 565 | return true; 566 | } 567 | 568 | pub fn tryPop(self: *Self) ?T { 569 | var entry: *Entry = undefined; 570 | var pos = @atomicLoad(usize, &self.dequeue_pos, .Monotonic); 571 | while (true) : (os.sched_yield() catch {}) { 572 | entry = &self.entries[pos & (capacity - 1)]; 573 | 574 | const seq = @atomicLoad(usize, &entry.sequence, .Acquire); 575 | const diff = @intCast(isize, seq) -% @intCast(isize, pos +% 1); 576 | if (diff == 0) { 577 | pos = @cmpxchgWeak(usize, &self.dequeue_pos, pos, pos +% 1, .Monotonic, .Monotonic) orelse { 578 | break; 579 | }; 580 | } else if (diff < 0) { 581 | return null; 582 | } else { 583 | pos = @atomicLoad(usize, &self.dequeue_pos, .Monotonic); 584 | } 585 | } 586 | const item = entry.item; 587 | @atomicStore(usize, &entry.sequence, pos +% (capacity - 1) +% 1, .Release); 588 | return item; 589 | } 590 | }; 591 | } 592 | 593 | test { 594 | testing.refAllDecls(@This()); 595 | testing.refAllDecls(Event); 596 | testing.refAllDecls(Semaphore); 597 | testing.refAllDecls(Queue(u64, 128)); 598 | testing.refAllDecls(AsyncQueue(u64, 128)); 599 | } 600 | 601 | test "mpmc/auto_reset_event: set and wait" { 602 | hyperia.init(); 603 | defer hyperia.deinit(); 604 | 605 | var event: AsyncAutoResetEvent = .{}; 606 | 607 |
testing.expect(event.set().isEmpty()); 608 | testing.expect(event.set().isEmpty()); 609 | testing.expect(event.set().isEmpty()); 610 | testing.expect(AsyncAutoResetEvent.getSetterCount(event.state) == 1); 611 | nosuspend event.wait(); 612 | testing.expect(AsyncAutoResetEvent.getSetterCount(event.state) == 0); 613 | 614 | var a = async event.wait(); 615 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 1); 616 | var b = async event.wait(); 617 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 2); 618 | var c = async event.wait(); 619 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 3); 620 | 621 | event.set().pop().?.run(); 622 | testing.expect(AsyncAutoResetEvent.getSetterCount(event.state) == 0); 623 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 2); 624 | nosuspend await a; 625 | 626 | event.set().pop().?.run(); 627 | testing.expect(AsyncAutoResetEvent.getSetterCount(event.state) == 0); 628 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 1); 629 | nosuspend await b; 630 | 631 | event.set().pop().?.run(); 632 | testing.expect(AsyncAutoResetEvent.getSetterCount(event.state) == 0); 633 | testing.expect(AsyncAutoResetEvent.getWaiterCount(event.state) == 0); 634 | nosuspend await c; 635 | } 636 | 637 | test "mpmc/queue: push and pop 60,000 u64s with 4 producers and 4 consumers" { 638 | const NUM_ITEMS = 60_000; 639 | const NUM_PRODUCERS = 4; 640 | const NUM_CONSUMERS = 4; 641 | 642 | const TestQueue = Queue(u64, 2048); 643 | 644 | const Context = struct { 645 | queue: *TestQueue, 646 | 647 | fn runProducer(self: @This()) !void { 648 | var i: usize = 0; 649 | while (i < NUM_ITEMS / NUM_PRODUCERS) : (i += 1) { 650 | while (true) { 651 | if (self.queue.tryPush(@intCast(u64, i))) { 652 | break; 653 | } 654 | } 655 | } 656 | } 657 | 658 | fn runConsumer(self: @This()) !void { 659 | var i: usize = 0; 660 | while (i < NUM_ITEMS / NUM_CONSUMERS) : (i += 1) { 661 | while (true) { 662 | if (self.queue.tryPop() != null) { 663 | break; 664 | } 665 | } 666 | } 667 | } 668 | }; 669 | 670 | const allocator = testing.allocator; 671 | 672 | var queue = try TestQueue.init(allocator); 673 | defer queue.deinit(allocator); 674 | 675 | var producers: [NUM_PRODUCERS]*std.Thread = undefined; 676 | defer for (producers) |producer| producer.wait(); 677 | 678 | var consumers: [NUM_CONSUMERS]*std.Thread = undefined; 679 | defer for (consumers) |consumer| consumer.wait(); 680 | 681 | for (consumers) |*consumer| consumer.* = try std.Thread.spawn(Context.runConsumer, Context{ .queue = &queue }); 682 | for (producers) |*producer| producer.* = try std.Thread.spawn(Context.runProducer, Context{ .queue = &queue }); 683 | } 684 | -------------------------------------------------------------------------------- /mpsc.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const os = std.os; 6 | const mem = std.mem; 7 | const builtin = std.builtin; 8 | const testing = std.testing; 9 | 10 | const assert = std.debug.assert; 11 | 12 | pub const cache_line_length = switch (builtin.cpu.arch) { 13 | .x86_64, .aarch64, .powerpc64 => 128, 14 | .arm, .mips, .mips64, .riscv64 => 32, 15 | .s390x => 256, 16 | else => 64, 17 | }; 18 | 19 | pub fn AsyncAutoResetEvent(comptime T: type) type { 20 | return struct { 21 | const Self = @This(); 22 | 23 | const Node = struct { 24 | runnable: zap.Pool.Runnable = .{ 
.runFn = run }, 25 | token: T = undefined, 26 | frame: anyframe, 27 | 28 | pub fn run(runnable: *zap.Pool.Runnable) void { 29 | const self = @fieldParentPtr(Node, "runnable", runnable); 30 | resume self.frame; 31 | } 32 | }; 33 | 34 | const EMPTY = 0; 35 | const NOTIFIED = 1; 36 | 37 | state: usize = EMPTY, 38 | 39 | pub usingnamespace if (@sizeOf(T) == 0) struct { 40 | pub fn set(self: *Self) ?*zap.Pool.Runnable { 41 | var state = @atomicLoad(usize, &self.state, .Monotonic); 42 | while (state != NOTIFIED) { 43 | if (state != EMPTY) { 44 | state = @cmpxchgWeak(usize, &self.state, state, NOTIFIED, .Acquire, .Monotonic) orelse { 45 | const node = @intToPtr(*Node, state); 46 | return &node.runnable; 47 | }; 48 | } else { 49 | state = @cmpxchgWeak(usize, &self.state, state, NOTIFIED, .Monotonic, .Monotonic) orelse { 50 | return null; 51 | }; 52 | } 53 | } 54 | return null; 55 | } 56 | 57 | pub fn wait(self: *Self) void { 58 | var state = @atomicLoad(usize, &self.state, .Monotonic); 59 | defer @atomicStore(usize, &self.state, EMPTY, .Monotonic); 60 | 61 | if (state != NOTIFIED) { 62 | var node: Node = .{ .frame = @frame() }; 63 | suspend { // This CMPXCHG can only fail if state is NOTIFIED. 64 | if (@cmpxchgStrong(usize, &self.state, state, @ptrToInt(&node), .Release, .Monotonic) != null) { 65 | hyperia.pool.schedule(.{}, &node.runnable); 66 | } 67 | } 68 | } 69 | } 70 | } else struct { 71 | pub fn set(self: *Self, token: T) ?*zap.Pool.Runnable { 72 | var state = @atomicLoad(usize, &self.state, .Monotonic); 73 | while (state != NOTIFIED) { 74 | if (state != EMPTY) { 75 | state = @cmpxchgWeak(usize, &self.state, state, NOTIFIED, .Acquire, .Monotonic) orelse { 76 | const node = @intToPtr(*Node, state); 77 | node.token = token; 78 | @fence(.Release); 79 | return &node.runnable; 80 | }; 81 | } else { 82 | state = @cmpxchgWeak(usize, &self.state, state, NOTIFIED, .Monotonic, .Monotonic) orelse { 83 | return null; 84 | }; 85 | } 86 | } 87 | return null; 88 | } 89 | 90 | pub fn wait(self: *Self) T { 91 | var state = @atomicLoad(usize, &self.state, .Monotonic); 92 | defer @atomicStore(usize, &self.state, EMPTY, .Monotonic); 93 | 94 | if (state != NOTIFIED) { 95 | var node: Node = .{ .token = mem.zeroes(T), .frame = @frame() }; 96 | suspend { // This CMPXCHG can only fail if state is NOTIFIED. 
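// If the CMPXCHG below fails, a setter has already stored NOTIFIED, so the
// frame is handed straight back to the pool instead of being parked in
// `state` as a node pointer.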
97 | if (@cmpxchgStrong(usize, &self.state, state, @ptrToInt(&node), .Release, .Monotonic) != null) { 98 | hyperia.pool.schedule(.{}, &node.runnable); 99 | } 100 | } 101 | @fence(.Acquire); 102 | return node.token; 103 | } 104 | 105 | return mem.zeroes(T); 106 | } 107 | }; 108 | }; 109 | } 110 | 111 | pub fn AsyncQueue(comptime T: type) type { 112 | return struct { 113 | const Self = @This(); 114 | 115 | queue: Queue(T) = .{}, 116 | closed: bool = false, 117 | event: AsyncAutoResetEvent(void) = .{}, 118 | 119 | pub fn close(self: *Self) void { 120 | @atomicStore(bool, &self.closed, true, .Monotonic); 121 | 122 | while (true) { 123 | const runnable = self.event.set() orelse break; 124 | hyperia.pool.schedule(.{}, runnable); 125 | } 126 | } 127 | 128 | pub fn peek(self: *const Self) usize { 129 | return self.queue.peek(); 130 | } 131 | 132 | pub fn push(self: *Self, src: *Queue(T).Node) void { 133 | self.queue.tryPush(src); 134 | 135 | if (self.event.set()) |runnable| { 136 | hyperia.pool.schedule(.{}, runnable); 137 | } 138 | } 139 | 140 | pub fn pushBatch(self: *Self, first: *Queue(T).Node, last: *Queue(T).Node, count: usize) void { 141 | self.queue.tryPushBatch(first, last, count); 142 | 143 | if (self.event.set()) |runnable| { 144 | hyperia.pool.schedule(.{}, runnable); 145 | } 146 | } 147 | 148 | pub fn tryPop(self: *Self) ?*Queue(T).Node { 149 | return self.queue.tryPop(); 150 | } 151 | 152 | pub fn tryPopBatch(self: *Self, b_first: **Queue(T).Node, b_last: **Queue(T).Node) usize { 153 | return self.queue.tryPopBatch(b_first, b_last); 154 | } 155 | 156 | pub fn pop(self: *Self) ?*Queue(T).Node { 157 | while (!@atomicLoad(bool, &self.closed, .Monotonic)) { 158 | return self.tryPop() orelse { 159 | self.event.wait(); 160 | continue; 161 | }; 162 | } 163 | return null; 164 | } 165 | 166 | pub fn popBatch(self: *Self, b_first: **Queue(T).Node, b_last: **Queue(T).Node) callconv(.Async) usize { 167 | while (!@atomicLoad(bool, &self.closed, .Monotonic)) { 168 | const num_items = self.tryPopBatch(b_first, b_last); 169 | if (num_items == 0) { 170 | self.event.wait(); 171 | continue; 172 | } 173 | return num_items; 174 | } 175 | return 0; 176 | } 177 | }; 178 | } 179 | 180 | pub fn AsyncSink(comptime T: type) type { 181 | return struct { 182 | const Self = @This(); 183 | 184 | sink: Sink(T) = .{}, 185 | closed: bool = false, 186 | event: AsyncAutoResetEvent(void) = .{}, 187 | 188 | pub fn close(self: *Self) void { 189 | @atomicStore(bool, &self.closed, true, .Monotonic); 190 | 191 | while (true) { 192 | const runnable = self.event.set() orelse break; 193 | hyperia.pool.schedule(.{}, runnable); 194 | } 195 | } 196 | 197 | pub fn push(self: *Self, src: *Sink(T).Node) void { 198 | self.sink.tryPush(src); 199 | 200 | if (self.event.set()) |runnable| { 201 | hyperia.pool.schedule(.{}, runnable); 202 | } 203 | } 204 | 205 | pub fn pushBatch(self: *Self, first: *Sink(T).Node, last: *Sink(T).Node) void { 206 | self.sink.tryPushBatch(first, last); 207 | 208 | if (self.event.set()) |runnable| { 209 | hyperia.pool.schedule(.{}, runnable); 210 | } 211 | } 212 | 213 | pub fn tryPop(self: *Self) ?*Sink(T).Node { 214 | return self.sink.tryPop(); 215 | } 216 | 217 | pub fn tryPopBatch(self: *Self, b_first: **Sink(T).Node, b_last: **Sink(T).Node) usize { 218 | return self.sink.tryPopBatch(b_first, b_last); 219 | } 220 | 221 | pub fn pop(self: *Self) ?*Sink(T).Node { 222 | while (!@atomicLoad(bool, &self.closed, .Monotonic)) { 223 | return self.tryPop() orelse { 224 | self.event.wait(); 225 | continue; 226 | }; 
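// pop() suspends on the auto-reset event whenever the sink is drained and
// re-checks `closed` after every wake-up; close() flips the flag and then
// drains event.set(), so the pending consumer wakes, observes the flag, and
// falls through to the `return null` below.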
227 | } 228 | return null; 229 | } 230 | 231 | pub fn popBatch(self: *Self, b_first: **Sink(T).Node, b_last: **Sink(T).Node) usize { 232 | while (!@atomicLoad(bool, &self.closed, .Monotonic)) { 233 | const num_items = self.tryPopBatch(b_first, b_last); 234 | if (num_items == 0) { 235 | self.event.wait(); 236 | continue; 237 | } 238 | return num_items; 239 | } 240 | return 0; 241 | } 242 | }; 243 | } 244 | 245 | /// Unbounded MPSC queue supporting batching operations that keeps track of the number of items queued. 246 | pub fn Queue(comptime T: type) type { 247 | return struct { 248 | pub const Node = struct { 249 | next: ?*Node = null, 250 | value: T, 251 | }; 252 | 253 | const Self = @This(); 254 | 255 | front: Node align(cache_line_length) = .{ .value = undefined }, 256 | count: usize align(cache_line_length) = 0, 257 | back: ?*Node align(cache_line_length) = null, 258 | 259 | pub fn peek(self: *const Self) usize { 260 | const count = @atomicLoad(usize, &self.count, .Monotonic); 261 | assert(count >= 0); 262 | return count; 263 | } 264 | 265 | pub fn tryPush(self: *Self, src: *Node) void { 266 | assert(@atomicRmw(usize, &self.count, .Add, 1, .Monotonic) >= 0); 267 | 268 | src.next = null; 269 | const old_back = @atomicRmw(?*Node, &self.back, .Xchg, src, .AcqRel) orelse &self.front; 270 | @atomicStore(?*Node, &old_back.next, src, .Release); 271 | } 272 | 273 | pub fn tryPushBatch(self: *Self, first: *Node, last: *Node, count: usize) void { 274 | assert(@atomicRmw(usize, &self.count, .Add, count, .Monotonic) >= 0); 275 | 276 | last.next = null; 277 | const old_back = @atomicRmw(?*Node, &self.back, .Xchg, last, .AcqRel) orelse &self.front; 278 | @atomicStore(?*Node, &old_back.next, first, .Release); 279 | } 280 | 281 | pub fn tryPop(self: *Self) ?*Node { 282 | var first = @atomicLoad(?*Node, &self.front.next, .Acquire) orelse return null; 283 | 284 | if (@atomicLoad(?*Node, &first.next, .Acquire)) |next| { 285 | self.front.next = next; 286 | 287 | assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); 288 | return first; 289 | } 290 | 291 | var last = @atomicLoad(?*Node, &self.back, .Acquire) orelse &self.front; 292 | if (first != last) return null; 293 | 294 | self.front.next = null; 295 | if (@cmpxchgStrong(?*Node, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { 296 | assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); 297 | return first; 298 | } 299 | 300 | var maybe_next = @atomicLoad(?*Node, &first.next, .Acquire); 301 | while (maybe_next == null) : (os.sched_yield() catch {}) { 302 | maybe_next = @atomicLoad(?*Node, &first.next, .Acquire); 303 | } 304 | 305 | self.front.next = maybe_next; 306 | 307 | assert(@atomicRmw(usize, &self.count, .Sub, 1, .Monotonic) >= 1); 308 | return first; 309 | } 310 | 311 | pub fn tryPopBatch(self: *Self, b_first: **Node, b_last: **Node) usize { 312 | var front = @atomicLoad(?*Node, &self.front.next, .Acquire) orelse return 0; 313 | b_first.* = front; 314 | 315 | var maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 316 | var count: usize = 0; 317 | 318 | while (maybe_next) |next| { 319 | count += 1; 320 | b_last.* = front; 321 | front = next; 322 | maybe_next = @atomicLoad(?*Node, &next.next, .Acquire); 323 | } 324 | 325 | var last = @atomicLoad(?*Node, &self.back, .Acquire) orelse &self.front; 326 | if (front != last) { 327 | self.front.next = front; 328 | 329 | assert(@atomicRmw(usize, &self.count, .Sub, count, .Monotonic) >= count); 330 | return count; 331 | } 332 | 333 | self.front.next = null; 334 | if 
(@cmpxchgStrong(?*Node, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { 335 | count += 1; 336 | b_last.* = front; 337 | 338 | assert(@atomicRmw(usize, &self.count, .Sub, count, .Monotonic) >= count); 339 | return count; 340 | } 341 | 342 | maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 343 | while (maybe_next == null) : (os.sched_yield() catch {}) { 344 | maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 345 | } 346 | 347 | count += 1; 348 | self.front.next = maybe_next; 349 | b_last.* = front; 350 | 351 | assert(@atomicRmw(usize, &self.count, .Sub, count, .Monotonic) >= count); 352 | return count; 353 | } 354 | }; 355 | } 356 | 357 | /// Unbounded MPSC queue supporting batching operations. 358 | pub fn Sink(comptime T: type) type { 359 | return struct { 360 | pub const Node = struct { 361 | next: ?*Node = null, 362 | value: T, 363 | }; 364 | 365 | const Self = @This(); 366 | 367 | front: Node align(cache_line_length) = .{ .value = undefined }, 368 | back: ?*Node align(cache_line_length) = null, 369 | 370 | pub fn tryPush(self: *Self, src: *Node) void { 371 | src.next = null; 372 | const old_back = @atomicRmw(?*Node, &self.back, .Xchg, src, .AcqRel) orelse &self.front; 373 | @atomicStore(?*Node, &old_back.next, src, .Release); 374 | } 375 | 376 | pub fn tryPushBatch(self: *Self, first: *Node, last: *Node) void { 377 | last.next = null; 378 | const old_back = @atomicRmw(?*Node, &self.back, .Xchg, last, .AcqRel) orelse &self.front; 379 | @atomicStore(?*Node, &old_back.next, first, .Release); 380 | } 381 | 382 | pub fn tryPop(self: *Self) ?*Node { 383 | var first = @atomicLoad(?*Node, &self.front.next, .Acquire) orelse return null; 384 | 385 | if (@atomicLoad(?*Node, &first.next, .Acquire)) |next| { 386 | self.front.next = next; 387 | return first; 388 | } 389 | 390 | var last = @atomicLoad(?*Node, &self.back, .Acquire) orelse &self.front; 391 | if (first != last) return null; 392 | 393 | self.front.next = null; 394 | if (@cmpxchgStrong(?*Node, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { 395 | return first; 396 | } 397 | 398 | var maybe_next = @atomicLoad(?*Node, &first.next, .Acquire); 399 | while (maybe_next == null) : (os.sched_yield() catch {}) { 400 | maybe_next = @atomicLoad(?*Node, &first.next, .Acquire); 401 | } 402 | 403 | self.front.next = maybe_next; 404 | 405 | return first; 406 | } 407 | 408 | pub fn tryPopBatch(self: *Self, b_first: **Node, b_last: **Node) usize { 409 | var front = @atomicLoad(?*Node, &self.front.next, .Acquire) orelse return 0; 410 | b_first.* = front; 411 | 412 | var maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 413 | var count: usize = 0; 414 | 415 | while (maybe_next) |next| { 416 | count += 1; 417 | b_last.* = front; 418 | front = next; 419 | maybe_next = @atomicLoad(?*Node, &next.next, .Acquire); 420 | } 421 | 422 | var last = @atomicLoad(?*Node, &self.back, .Acquire) orelse &self.front; 423 | if (front != last) { 424 | self.front.next = front; 425 | return count; 426 | } 427 | 428 | self.front.next = null; 429 | if (@cmpxchgStrong(?*Node, &self.back, last, &self.front, .AcqRel, .Acquire) == null) { 430 | count += 1; 431 | b_last.* = front; 432 | return count; 433 | } 434 | 435 | maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 436 | while (maybe_next == null) : (os.sched_yield() catch {}) { 437 | maybe_next = @atomicLoad(?*Node, &front.next, .Acquire); 438 | } 439 | 440 | count += 1; 441 | self.front.next = maybe_next; 442 | b_last.* = front; 443 | 444 | return count; 445 | } 446 | }; 447 | 
} 448 | 449 | test { 450 | testing.refAllDecls(Queue(u64)); 451 | testing.refAllDecls(AsyncQueue(u64)); 452 | testing.refAllDecls(Sink(u64)); 453 | testing.refAllDecls(AsyncSink(u64)); 454 | testing.refAllDecls(AsyncAutoResetEvent(void)); 455 | testing.refAllDecls(AsyncAutoResetEvent(usize)); 456 | } 457 | 458 | test "mpsc/sink: push and pop 60,000 u64s with 15 producers" { 459 | const NUM_ITEMS = 60_000; 460 | const NUM_PRODUCERS = 15; 461 | 462 | const TestSink = Sink(u64); 463 | 464 | const Context = struct { 465 | allocator: *mem.Allocator, 466 | sink: *TestSink, 467 | 468 | fn runProducer(self: @This()) !void { 469 | var i: usize = 0; 470 | while (i < NUM_ITEMS / NUM_PRODUCERS) : (i += 1) { 471 | const node = try self.allocator.create(TestSink.Node); 472 | node.* = .{ .value = @intCast(u64, i) }; 473 | self.sink.tryPush(node); 474 | } 475 | } 476 | 477 | fn runConsumer(self: @This()) !void { 478 | var i: usize = 0; 479 | while (i < NUM_ITEMS) : (i += 1) { 480 | self.allocator.destroy(while (true) { 481 | if (self.sink.tryPop()) |node| { 482 | break node; 483 | } 484 | } else unreachable); 485 | } 486 | } 487 | }; 488 | 489 | const allocator = testing.allocator; 490 | 491 | var sink: TestSink = .{}; 492 | 493 | const consumer = try std.Thread.spawn(Context.runConsumer, Context{ 494 | .allocator = allocator, 495 | .sink = &sink, 496 | }); 497 | defer consumer.wait(); 498 | 499 | var producers: [NUM_PRODUCERS]*std.Thread = undefined; 500 | defer for (producers) |producer| producer.wait(); 501 | 502 | for (producers) |*producer| { 503 | producer.* = try std.Thread.spawn(Context.runProducer, Context{ 504 | .allocator = allocator, 505 | .sink = &sink, 506 | }); 507 | } 508 | } 509 | 510 | test "mpsc/sink: batch push and pop 60,000 u64s with 15 producers" { 511 | const NUM_ITEMS = 60_000; 512 | const NUM_ITEMS_PER_BATCH = 100; 513 | const NUM_PRODUCERS = 15; 514 | 515 | const TestSink = Sink(u64); 516 | 517 | const Context = struct { 518 | allocator: *mem.Allocator, 519 | sink: *TestSink, 520 | 521 | fn runBatchProducer(self: @This()) !void { 522 | var i: usize = 0; 523 | while (i < NUM_ITEMS / NUM_PRODUCERS) : (i += NUM_ITEMS_PER_BATCH) { 524 | var first = try self.allocator.create(TestSink.Node); 525 | first.* = .{ .value = @intCast(u64, i) }; 526 | 527 | const last = first; 528 | 529 | var j: usize = 0; 530 | while (j < NUM_ITEMS_PER_BATCH - 1) : (j += 1) { 531 | const node = try self.allocator.create(TestSink.Node); 532 | node.* = .{ 533 | .next = first, 534 | .value = @intCast(u64, i) + 1 + @intCast(u64, j), 535 | }; 536 | first = node; 537 | } 538 | 539 | self.sink.tryPushBatch(first, last); 540 | } 541 | } 542 | 543 | fn runBatchConsumer(self: @This()) !void { 544 | var first: *TestSink.Node = undefined; 545 | var last: *TestSink.Node = undefined; 546 | 547 | var i: usize = 0; 548 | while (i < NUM_ITEMS) { 549 | var j = self.sink.tryPopBatch(&first, &last); 550 | i += j; 551 | 552 | while (j > 0) : (j -= 1) { 553 | const next = first.next; 554 | self.allocator.destroy(first); 555 | first = next orelse continue; 556 | } 557 | } 558 | } 559 | }; 560 | 561 | const allocator = testing.allocator; 562 | 563 | var sink: TestSink = .{}; 564 | 565 | const consumer = try std.Thread.spawn(Context.runBatchConsumer, Context{ 566 | .allocator = allocator, 567 | .sink = &sink, 568 | }); 569 | defer consumer.wait(); 570 | 571 | var producers: [NUM_PRODUCERS]*std.Thread = undefined; 572 | defer for (producers) |producer| producer.wait(); 573 | 574 | for (producers) |*producer| { 575 | producer.* = try 
std.Thread.spawn(Context.runBatchProducer, Context{ 576 | .allocator = allocator, 577 | .sink = &sink, 578 | }); 579 | } 580 | } 581 | 582 | test "mpsc/queue: push and pop 60,000 u64s with 15 producers" { 583 | const NUM_ITEMS = 60_000; 584 | const NUM_PRODUCERS = 15; 585 | 586 | const TestQueue = Queue(u64); 587 | 588 | const Context = struct { 589 | allocator: *mem.Allocator, 590 | queue: *TestQueue, 591 | 592 | fn runProducer(self: @This()) !void { 593 | var i: usize = 0; 594 | while (i < NUM_ITEMS / NUM_PRODUCERS) : (i += 1) { 595 | const node = try self.allocator.create(TestQueue.Node); 596 | node.* = .{ .value = @intCast(u64, i) }; 597 | self.queue.tryPush(node); 598 | } 599 | } 600 | 601 | fn runConsumer(self: @This()) !void { 602 | var i: usize = 0; 603 | while (i < NUM_ITEMS) : (i += 1) { 604 | self.allocator.destroy(while (true) { 605 | if (self.queue.tryPop()) |node| { 606 | break node; 607 | } 608 | } else unreachable); 609 | } 610 | } 611 | }; 612 | 613 | const allocator = testing.allocator; 614 | 615 | var queue: TestQueue = .{}; 616 | defer testing.expect(queue.peek() == 0); 617 | 618 | const consumer = try std.Thread.spawn(Context.runConsumer, Context{ 619 | .allocator = allocator, 620 | .queue = &queue, 621 | }); 622 | defer consumer.wait(); 623 | 624 | var producers: [NUM_PRODUCERS]*std.Thread = undefined; 625 | defer for (producers) |producer| producer.wait(); 626 | 627 | for (producers) |*producer| { 628 | producer.* = try std.Thread.spawn(Context.runProducer, Context{ 629 | .allocator = allocator, 630 | .queue = &queue, 631 | }); 632 | } 633 | } 634 | 635 | test "mpsc/queue: batch push and pop 60,000 u64s with 15 producers" { 636 | const NUM_ITEMS = 60_000; 637 | const NUM_ITEMS_PER_BATCH = 100; 638 | const NUM_PRODUCERS = 15; 639 | 640 | const TestQueue = Queue(u64); 641 | 642 | const Context = struct { 643 | allocator: *mem.Allocator, 644 | queue: *TestQueue, 645 | 646 | fn runBatchProducer(self: @This()) !void { 647 | var i: usize = 0; 648 | while (i < NUM_ITEMS / NUM_PRODUCERS) : (i += NUM_ITEMS_PER_BATCH) { 649 | var first = try self.allocator.create(TestQueue.Node); 650 | first.* = .{ .value = @intCast(u64, i) }; 651 | 652 | const last = first; 653 | 654 | var j: usize = 0; 655 | while (j < NUM_ITEMS_PER_BATCH - 1) : (j += 1) { 656 | const node = try self.allocator.create(TestQueue.Node); 657 | node.* = .{ 658 | .next = first, 659 | .value = @intCast(u64, i) + 1 + @intCast(u64, j), 660 | }; 661 | first = node; 662 | } 663 | 664 | self.queue.tryPushBatch(first, last, NUM_ITEMS_PER_BATCH); 665 | } 666 | } 667 | 668 | fn runBatchConsumer(self: @This()) !void { 669 | var first: *TestQueue.Node = undefined; 670 | var last: *TestQueue.Node = undefined; 671 | 672 | var i: usize = 0; 673 | while (i < NUM_ITEMS) { 674 | var j = self.queue.tryPopBatch(&first, &last); 675 | i += j; 676 | 677 | while (j > 0) : (j -= 1) { 678 | const next = first.next; 679 | self.allocator.destroy(first); 680 | first = next orelse continue; 681 | } 682 | } 683 | } 684 | }; 685 | 686 | const allocator = testing.allocator; 687 | 688 | var queue: TestQueue = .{}; 689 | defer testing.expect(queue.peek() == 0); 690 | 691 | const consumer = try std.Thread.spawn(Context.runBatchConsumer, Context{ 692 | .allocator = allocator, 693 | .queue = &queue, 694 | }); 695 | defer consumer.wait(); 696 | 697 | var producers: [NUM_PRODUCERS]*std.Thread = undefined; 698 | defer for (producers) |producer| producer.wait(); 699 | 700 | for (producers) |*producer| { 701 | producer.* = try 
std.Thread.spawn(Context.runBatchProducer, Context{ 702 | .allocator = allocator, 703 | .queue = &queue, 704 | }); 705 | } 706 | } 707 | -------------------------------------------------------------------------------- /net.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const mem = std.mem; 4 | const net = std.net; 5 | const fmt = std.fmt; 6 | const testing = std.testing; 7 | 8 | pub const ParseAddressError = error{ 9 | BadPort, 10 | MissingPort, 11 | MissingEndBracket, 12 | MissingRightBracket, 13 | TooManyColons, 14 | UnexpectedLeftBracket, 15 | UnexpectedRightBracket, 16 | InvalidIPAddressFormat, 17 | }; 18 | 19 | pub fn parseAddress(buf: []const u8) ParseAddressError!net.Address { 20 | var j: usize = 0; 21 | var k: usize = 0; 22 | 23 | const i = mem.lastIndexOfScalar(u8, buf, ':') orelse { 24 | const port = fmt.parseInt(u16, buf, 10) catch return error.MissingPort; 25 | return net.Address.initIp4(.{ 0, 0, 0, 0 }, port); 26 | }; 27 | 28 | const host = parse: { 29 | if (buf[0] == '[') { 30 | const end = mem.indexOfScalar(u8, buf, ']') orelse return error.MissingEndBracket; 31 | if (end + 1 == i) {} else if (end + 1 == buf.len) { 32 | return error.MissingRightBracket; 33 | } else { 34 | return error.MissingPort; 35 | } 36 | 37 | j = 1; 38 | k = end + 1; 39 | break :parse buf[1..end]; 40 | } 41 | 42 | if (mem.indexOfScalar(u8, buf[0..i], ':') != null) { 43 | return error.TooManyColons; 44 | } 45 | break :parse buf[0..i]; 46 | }; 47 | 48 | if (mem.indexOfScalar(u8, buf[j..], '[') != null) { 49 | return error.UnexpectedLeftBracket; 50 | } 51 | 52 | if (mem.indexOfScalar(u8, buf[k..], ']') != null) { 53 | return error.UnexpectedRightBracket; 54 | } 55 | 56 | const port = fmt.parseInt(u16, buf[i + 1 ..], 10) catch return error.BadPort; 57 | if (host.len == 0) return net.Address.initIp4(.{ 0, 0, 0, 0 }, port); 58 | 59 | return try net.Address.parseIp(host, port); 60 | } 61 | 62 | test { 63 | testing.refAllDecls(@This()); 64 | } 65 | -------------------------------------------------------------------------------- /object_pool.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const hyperia = @import("hyperia.zig"); 3 | 4 | const mem = std.mem; 5 | const mpmc = hyperia.mpmc; 6 | const testing = std.testing; 7 | 8 | pub fn ObjectPool(comptime T: type, comptime capacity: comptime_int) type { 9 | return struct { 10 | const Self = @This(); 11 | 12 | queue: mpmc.Queue(*T, capacity) align(mpmc.cache_line_length), 13 | head: [*]T align(mpmc.cache_line_length), 14 | 15 | pub fn init(allocator: *mem.Allocator) !Self { 16 | var queue = try mpmc.Queue(*T, capacity).init(allocator); 17 | errdefer queue.deinit(allocator); 18 | 19 | const items = try allocator.create([capacity]T); 20 | errdefer allocator.destroy(items); 21 | 22 | for (items) |*item| if (!queue.tryPush(item)) unreachable; 23 | 24 | return Self{ .queue = queue, .head = items }; 25 | } 26 | 27 | pub fn deinit(self: *Self, allocator: *mem.Allocator) void { 28 | allocator.destroy(@ptrCast(*const [capacity]T, self.head)); 29 | self.queue.deinit(allocator); 30 | } 31 | 32 | pub fn acquire(self: *Self, allocator: *mem.Allocator) !*T { 33 | if (self.queue.tryPop()) |item| { 34 | return item; 35 | } 36 | return try allocator.create(T); 37 | } 38 | 39 | pub fn release(self: *Self, allocator: *mem.Allocator, item: *T) void { 40 | if (@ptrToInt(item) >= @ptrToInt(self.head) and @ptrToInt(item) <= 
@ptrToInt(self.head + capacity - 1)) { 41 | while (true) { 42 | if (self.queue.tryPush(item)) { 43 | break; 44 | } 45 | } 46 | return; 47 | } 48 | allocator.destroy(item); 49 | } 50 | }; 51 | } 52 | 53 | test { 54 | testing.refAllDecls(ObjectPool(u8, 16)); 55 | } 56 | 57 | test "object_pool: test invariants" { 58 | const allocator = testing.allocator; 59 | 60 | var pool = try ObjectPool(u8, 2).init(allocator); 61 | defer pool.deinit(allocator); 62 | 63 | const a = try pool.acquire(allocator); 64 | const b = try pool.acquire(allocator); 65 | const c = try pool.acquire(allocator); 66 | pool.release(allocator, c); 67 | pool.release(allocator, b); 68 | pool.release(allocator, a); 69 | } 70 | -------------------------------------------------------------------------------- /oneshot.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const hyperia = @import("hyperia.zig"); 4 | 5 | const testing = std.testing; 6 | 7 | pub fn Channel(comptime T: type) type { 8 | return struct { 9 | const Self = @This(); 10 | 11 | const Node = struct { 12 | next: ?*Node = null, 13 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 14 | frame: anyframe, 15 | 16 | pub fn run(runnable: *zap.Pool.Runnable) void { 17 | const self = @fieldParentPtr(Node, "runnable", runnable); 18 | resume self.frame; 19 | } 20 | }; 21 | 22 | const EMPTY = 0; 23 | const NOTIFIED = 1; 24 | const COMMITTED = 2; 25 | 26 | state: usize = EMPTY, 27 | data: T = undefined, 28 | 29 | pub fn wait(self: *Self) T { 30 | var node: Node = .{ .frame = @frame() }; 31 | 32 | suspend { 33 | var state = @atomicLoad(usize, &self.state, .Acquire); 34 | 35 | while (true) { 36 | const new_state = switch (state & 0b11) { 37 | COMMITTED => { 38 | hyperia.pool.schedule(.{}, &node.runnable); 39 | break; 40 | }, 41 | else => update: { 42 | node.next = @intToPtr(?*Node, state & ~@as(usize, 0b11)); 43 | break :update @ptrToInt(&node) | (state & 0b11); 44 | }, 45 | }; 46 | 47 | state = @cmpxchgWeak( 48 | usize, 49 | &self.state, 50 | state, 51 | new_state, 52 | .Release, 53 | .Acquire, 54 | ) orelse break; 55 | } 56 | } 57 | 58 | return self.data; 59 | } 60 | 61 | pub fn get(self: *Self) ?T { 62 | if (@atomicLoad(usize, &self.state, .Acquire) != COMMITTED) { 63 | return null; 64 | } 65 | return self.data; 66 | } 67 | 68 | pub fn set(self: *Self) bool { 69 | var state = @atomicLoad(usize, &self.state, .Monotonic); 70 | 71 | const new_state = switch (state & 0b11) { 72 | NOTIFIED, COMMITTED => return false, 73 | else => state | NOTIFIED, 74 | }; 75 | 76 | return @cmpxchgStrong(usize, &self.state, state, new_state, .Acquire, .Monotonic) == null; 77 | } 78 | 79 | pub fn reset(self: *Self) void { 80 | @atomicStore(usize, &self.state, EMPTY, .Monotonic); 81 | } 82 | 83 | pub fn commit(self: *Self, data: T) void { 84 | self.data = data; 85 | 86 | const state = @atomicRmw(usize, &self.state, .Xchg, COMMITTED, .AcqRel); 87 | if (state & 0b11 != NOTIFIED) unreachable; 88 | 89 | var batch: zap.Pool.Batch = .{}; 90 | 91 | var it = @intToPtr(?*Node, state & ~@as(usize, 0b11)); 92 | while (it) |node| : (it = node.next) { 93 | batch.push(&node.runnable); 94 | } 95 | 96 | hyperia.pool.schedule(.{}, batch); 97 | } 98 | }; 99 | } 100 | 101 | test { 102 | testing.refAllDecls(@This()); 103 | } 104 | 105 | test "oneshot/channel: multiple waiters" { 106 | hyperia.init(); 107 | defer hyperia.deinit(); 108 | 109 | var channel: Channel(void) = .{}; 110 | 111 | var a = async channel.wait(); 112 
| var b = async channel.wait(); 113 | var c = async channel.wait(); 114 | var d = async channel.wait(); 115 | 116 | if (channel.set()) { 117 | channel.commit({}); 118 | } 119 | 120 | nosuspend await a; 121 | nosuspend await b; 122 | nosuspend await c; 123 | nosuspend await d; 124 | 125 | testing.expect(channel.state == Channel(void).COMMITTED); 126 | } 127 | 128 | test "oneshot/channel: stress test" { 129 | const Frame = struct { 130 | runnable: zap.Pool.Runnable = .{ .runFn = run }, 131 | frame: anyframe, 132 | 133 | fn run(runnable: *zap.Pool.Runnable) void { 134 | const self = @fieldParentPtr(@This(), "runnable", runnable); 135 | resume self.frame; 136 | } 137 | }; 138 | 139 | const Context = struct { 140 | channel: Channel(void) = .{}, 141 | 142 | event: std.Thread.StaticResetEvent = .{}, 143 | waiter_count: usize, 144 | setter_count: usize, 145 | 146 | fn runWaiter(self: *@This()) void { 147 | var frame: Frame = .{ .frame = @frame() }; 148 | suspend hyperia.pool.schedule(.{}, &frame.runnable); 149 | 150 | self.channel.wait(); 151 | } 152 | 153 | fn runSetter(self: *@This()) void { 154 | var frame: Frame = .{ .frame = @frame() }; 155 | suspend hyperia.pool.schedule(.{}, &frame.runnable); 156 | 157 | if (self.channel.set()) { 158 | self.channel.commit({}); 159 | } 160 | } 161 | 162 | pub fn run(self: *@This()) !void { 163 | var frame: Frame = .{ .frame = @frame() }; 164 | suspend hyperia.pool.schedule(.{}, &frame.runnable); 165 | 166 | var waiters = try testing.allocator.alloc(@Frame(@This().runWaiter), self.waiter_count); 167 | var setters = try testing.allocator.alloc(@Frame(@This().runSetter), self.setter_count); 168 | 169 | for (waiters) |*waiter| waiter.* = async self.runWaiter(); 170 | for (setters) |*setter| setter.* = async self.runSetter(); 171 | 172 | for (waiters) |*waiter| await waiter; 173 | for (setters) |*setter| await setter; 174 | 175 | suspend { 176 | testing.allocator.free(setters); 177 | testing.allocator.free(waiters); 178 | self.event.set(); 179 | } 180 | } 181 | }; 182 | 183 | hyperia.init(); 184 | defer hyperia.deinit(); 185 | 186 | var test_count: usize = 1000; 187 | var rand = std.rand.DefaultPrng.init(0); 188 | 189 | while (test_count > 0) : (test_count -= 1) { 190 | const waiter_count = rand.random.intRangeAtMost(usize, 4, 10); 191 | const setter_count = rand.random.intRangeAtMost(usize, 4, 10); 192 | 193 | var ctx: Context = .{ 194 | .waiter_count = waiter_count, 195 | .setter_count = setter_count, 196 | }; 197 | 198 | var frame = async ctx.run(); 199 | ctx.event.wait(); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /picohttp/picohttp.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const c = @cImport(@cInclude("picohttpparser.h")); 3 | 4 | const fmt = std.fmt; 5 | 6 | const assert = std.debug.assert; 7 | 8 | pub fn addTo(step: *std.build.LibExeObjStep, comptime dir: []const u8) void { 9 | step.addCSourceFile(dir ++ "/lib/picohttpparser.c", &[_][]const u8{}); 10 | step.addIncludeDir(dir ++ "/lib"); 11 | 12 | step.addPackage(.{ 13 | .name = "picohttp", 14 | .path = dir ++ "/picohttp.zig", 15 | }); 16 | } 17 | 18 | pub const Header = struct { 19 | name: []const u8, 20 | value: []const u8, 21 | 22 | pub fn isMultiline(self: Header) bool { 23 | return @ptrToInt(self.name.ptr) == 0; 24 | } 25 | 26 | pub fn format(self: Header, comptime layout: []const u8, opts: fmt.FormatOptions, writer: anytype) !void { 27 | if (self.isMultiline()) { 
28 | try fmt.format(writer, "{s}", .{self.value}); 29 | } else { 30 | try fmt.format(writer, "{s}: {s}", .{ self.name, self.value }); 31 | } 32 | } 33 | 34 | comptime { 35 | assert(@sizeOf(Header) == @sizeOf(c.phr_header)); 36 | assert(@alignOf(Header) == @alignOf(c.phr_header)); 37 | } 38 | }; 39 | 40 | pub const Request = struct { 41 | method: []const u8, 42 | path: []const u8, 43 | minor_version: usize, 44 | headers: []const Header, 45 | 46 | pub fn parse(buf: []const u8, src: []Header) !Request { 47 | var method: []const u8 = undefined; 48 | var path: []const u8 = undefined; 49 | var minor_version: c_int = undefined; 50 | var num_headers: usize = src.len; 51 | 52 | const rc = c.phr_parse_request( 53 | buf.ptr, 54 | buf.len, 55 | @ptrCast([*c][*c]const u8, &method.ptr), 56 | &method.len, 57 | @ptrCast([*c][*c]const u8, &path.ptr), 58 | &path.len, 59 | &minor_version, 60 | @ptrCast([*c]c.phr_header, src.ptr), 61 | &num_headers, 62 | 0, 63 | ); 64 | 65 | return switch (rc) { 66 | -1 => error.BadRequest, 67 | -2 => error.ShortRead, 68 | else => |bytes_read| Request{ 69 | .method = method, 70 | .path = path, 71 | .minor_version = @intCast(usize, minor_version), 72 | .headers = src[0..num_headers], 73 | }, 74 | }; 75 | } 76 | }; 77 | 78 | test "pico_http: parse request" { 79 | const REQ = "GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n" ++ 80 | "Host: www.kittyhell.com\r\n" ++ 81 | "User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 " ++ 82 | "Pathtraq/0.9\r\n" ++ 83 | "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" ++ 84 | "Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n" ++ 85 | "Accept-Encoding: gzip,deflate\r\n" ++ 86 | "Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n" ++ 87 | "Keep-Alive: 115\r\n" ++ 88 | "Connection: keep-alive\r\n" ++ 89 | "TestMultiline: Hello world\r\n" ++ 90 | " This is a second line in the header!\r\n" ++ 91 | "Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; " ++ 92 | "__utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; " ++ 93 | "__utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral\r\n" ++ 94 | "\r\n"; 95 | 96 | var headers: [32]Header = undefined; 97 | 98 | const req = try Request.parse(REQ, &headers); 99 | 100 | std.debug.print("Method: {s}\n", .{req.method}); 101 | std.debug.print("Path: {s}\n", .{req.path}); 102 | std.debug.print("Minor Version: {}\n", .{req.minor_version}); 103 | 104 | for (req.headers) |header| { 105 | std.debug.print("{}\n", .{header}); 106 | } 107 | } 108 | 109 | pub const Response = struct { 110 | minor_version: usize, 111 | status_code: usize, 112 | status: []const u8, 113 | headers: []const Header, 114 | 115 | pub fn parse(buf: []const u8, src: []Header) !Response { 116 | var minor_version: c_int = undefined; 117 | var status_code: c_int = undefined; 118 | var status: []const u8 = undefined; 119 | var num_headers: usize = src.len; 120 | 121 | const rc = c.phr_parse_response( 122 | buf.ptr, 123 | buf.len, 124 | &minor_version, 125 | &status_code, 126 | @ptrCast([*c][*c]const u8, &status.ptr), 127 | &status.len, 128 | @ptrCast([*c]c.phr_header, src.ptr), 129 | &num_headers, 130 | 0, 131 | ); 132 | 133 | return switch (rc) { 134 | -1 => error.BadResponse, 135 | -2 => error.ShortRead, 136 | else => |bytes_read| Response{ 137 | .minor_version = @intCast(usize, minor_version), 138 | .status_code = @intCast(usize, status_code), 139 | 
.status = status, 140 | .headers = src[0..num_headers], 141 | }, 142 | }; 143 | } 144 | }; 145 | 146 | test "pico_http: parse response" { 147 | const RES = "HTTP/1.1 200 OK\r\n" ++ 148 | "Date: Mon, 22 Mar 2021 08:15:54 GMT\r\n" ++ 149 | "Content-Type: text/html; charset=utf-8\r\n" ++ 150 | "Content-Length: 9593\r\n" ++ 151 | "Connection: keep-alive\r\n" ++ 152 | "Server: gunicorn/19.9.0\r\n" ++ 153 | "Access-Control-Allow-Origin: *\r\n" ++ 154 | "Access-Control-Allow-Credentials: true\r\n" ++ 155 | "\r\n"; 156 | 157 | var headers: [32]Header = undefined; 158 | 159 | const res = try Response.parse(RES, &headers); 160 | 161 | std.debug.print("Minor Version: {}\n", .{res.minor_version}); 162 | std.debug.print("Status Code: {}\n", .{res.status_code}); 163 | std.debug.print("Status: {s}\n", .{res.status}); 164 | 165 | for (res.headers) |header| { 166 | std.debug.print("{}\n", .{header}); 167 | } 168 | } 169 | 170 | pub const Headers = struct { 171 | headers: []const Header, 172 | 173 | pub fn parse(buf: []const u8, src: []Header) !Headers { 174 | var num_headers: usize = src.len; 175 | 176 | const rc = c.phr_parse_headers( 177 | buf.ptr, 178 | buf.len, 179 | @ptrCast([*c]c.phr_header, src.ptr), 180 | @ptrCast([*c]usize, &num_headers), 181 | 0, 182 | ); 183 | 184 | return switch (rc) { 185 | -1 => error.BadHeaders, 186 | -2 => error.ShortRead, 187 | else => |bytes_read| Headers{ 188 | .headers = src[0..num_headers], 189 | }, 190 | }; 191 | } 192 | }; 193 | 194 | test "pico_http: parse headers" { 195 | const HEADERS = "Date: Mon, 22 Mar 2021 08:15:54 GMT\r\n" ++ 196 | "Content-Type: text/html; charset=utf-8\r\n" ++ 197 | "Content-Length: 9593\r\n" ++ 198 | "Connection: keep-alive\r\n" ++ 199 | "Server: gunicorn/19.9.0\r\n" ++ 200 | "Access-Control-Allow-Origin: *\r\n" ++ 201 | "Access-Control-Allow-Credentials: true\r\n" ++ 202 | "\r\n"; 203 | 204 | var headers: [32]Header = undefined; 205 | 206 | const result = try Headers.parse(HEADERS, &headers); 207 | for (result.headers) |header| { 208 | std.debug.print("{}\n", .{header}); 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /reactor.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const zap = @import("zap"); 3 | const Socket = @import("socket.zig").Socket; 4 | 5 | const os = std.os; 6 | const net = std.net; 7 | const mem = std.mem; 8 | const testing = std.testing; 9 | 10 | const print = std.debug.print; 11 | const assert = std.debug.assert; 12 | 13 | pub const Reactor = struct { 14 | pub const Handle = struct { 15 | onEventFn: fn (handle: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void, 16 | 17 | pub fn call(self: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void { 18 | (self.onEventFn)(self, batch, event); 19 | } 20 | }; 21 | 22 | pub const Event = struct { 23 | data: usize, 24 | is_error: bool, 25 | is_hup: bool, 26 | is_readable: bool, 27 | is_writable: bool, 28 | }; 29 | 30 | pub const Interest = struct { 31 | oneshot: bool = false, 32 | readable: bool = false, 33 | writable: bool = false, 34 | 35 | pub fn flags(self: Interest) u32 { 36 | var set: u32 = os.EPOLLRDHUP; 37 | set |= if (self.oneshot) os.EPOLLONESHOT else os.EPOLLET; 38 | if (self.readable) set |= os.EPOLLIN; 39 | if (self.writable) set |= os.EPOLLOUT; 40 | return set; 41 | } 42 | }; 43 | 44 | pub const AutoResetEvent = struct { 45 | fd: os.fd_t, 46 | reactor: Reactor, 47 | notified: bool = true, 48 | handle: 
Reactor.Handle = .{ .onEventFn = onEvent }, 49 | 50 | pub fn init(flags: u32, reactor: Reactor) !AutoResetEvent { 51 | return AutoResetEvent{ .fd = try os.eventfd(0, flags | os.EFD_NONBLOCK), .reactor = reactor }; 52 | } 53 | 54 | pub fn deinit(self: *AutoResetEvent) void { 55 | os.close(self.fd); 56 | } 57 | 58 | pub fn post(self: *AutoResetEvent) void { 59 | if (!@atomicRmw(bool, &self.notified, .Xchg, false, .AcqRel)) return; 60 | 61 | os.epoll_ctl(self.reactor.fd, os.EPOLL_CTL_MOD, self.fd, &os.epoll_event{ 62 | .events = os.EPOLLONESHOT | os.EPOLLOUT, 63 | .data = .{ .ptr = @ptrToInt(&self.handle) }, 64 | }) catch {}; 65 | } 66 | 67 | pub fn onEvent(handle: *Reactor.Handle, batch: *zap.Pool.Batch, event: Reactor.Event) void { 68 | assert(event.is_writable); 69 | 70 | const self = @fieldParentPtr(AutoResetEvent, "handle", handle); 71 | @atomicStore(bool, &self.notified, true, .Release); 72 | } 73 | }; 74 | 75 | fd: os.fd_t, 76 | 77 | pub fn init(flags: u32) !Reactor { 78 | const fd = try os.epoll_create1(flags); 79 | return Reactor{ .fd = fd }; 80 | } 81 | 82 | pub fn deinit(self: Reactor) void { 83 | os.close(self.fd); 84 | } 85 | 86 | pub fn add(self: Reactor, fd: os.fd_t, data: anytype, interest: Interest) !void { 87 | try os.epoll_ctl(self.fd, os.EPOLL_CTL_ADD, fd, &os.epoll_event{ 88 | .events = interest.flags(), 89 | .data = .{ .ptr = if (@typeInfo(@TypeOf(data)) == .Pointer) @ptrToInt(data) else data }, 90 | }); 91 | } 92 | 93 | pub fn poll(self: Reactor, comptime max_num_events: comptime_int, closure: anytype, timeout_milliseconds: ?u64) !void { 94 | var events: [max_num_events]os.epoll_event = undefined; 95 | 96 | const num_events = os.epoll_wait(self.fd, &events, if (timeout_milliseconds) |ms| @intCast(i32, ms) else -1); 97 | for (events[0..num_events]) |ev| { 98 | const is_error = ev.events & os.EPOLLERR != 0; 99 | const is_hup = ev.events & (os.EPOLLHUP | os.EPOLLRDHUP) != 0; 100 | const is_readable = ev.events & os.EPOLLIN != 0; 101 | const is_writable = ev.events & os.EPOLLOUT != 0; 102 | 103 | closure.call(Event{ 104 | .data = ev.data.ptr, 105 | .is_error = is_error, 106 | .is_hup = is_hup, 107 | .is_readable = is_readable, 108 | .is_writable = is_writable, 109 | }); 110 | } 111 | } 112 | }; 113 | 114 | test { 115 | testing.refAllDecls(Reactor); 116 | } 117 | 118 | test "reactor: shutdown before accept async socket" { 119 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 120 | defer reactor.deinit(); 121 | 122 | const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 123 | defer a.deinit(); 124 | 125 | try reactor.add(a.fd, 0, .{ .readable = true }); 126 | try reactor.poll(1, struct { 127 | fn call(event: Reactor.Event) void { 128 | testing.expectEqual( 129 | Reactor.Event{ 130 | .data = 0, 131 | .is_error = false, 132 | .is_hup = true, 133 | .is_readable = false, 134 | .is_writable = false, 135 | }, 136 | event, 137 | ); 138 | } 139 | }, null); 140 | 141 | try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0)); 142 | try a.listen(128); 143 | 144 | const binded_address = try a.getName(); 145 | print("Binded to address: {}\n", .{binded_address}); 146 | 147 | const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 148 | defer b.deinit(); 149 | 150 | try reactor.add(b.fd, 1, .{ .readable = true, .writable = true }); 151 | try reactor.poll(1, struct { 152 | fn call(event: Reactor.Event) void { 153 | testing.expectEqual( 154 | Reactor.Event{ 155 | .data = 1, 156 | 
.is_error = false, 157 | .is_hup = true, 158 | .is_readable = false, 159 | .is_writable = true, 160 | }, 161 | event, 162 | ); 163 | } 164 | }, null); 165 | 166 | b.connect(binded_address) catch |err| switch (err) { 167 | error.WouldBlock => {}, 168 | else => return err, 169 | }; 170 | 171 | try reactor.poll(1, struct { 172 | fn call(event: Reactor.Event) void { 173 | testing.expectEqual( 174 | Reactor.Event{ 175 | .data = 1, 176 | .is_error = false, 177 | .is_hup = false, 178 | .is_readable = false, 179 | .is_writable = true, 180 | }, 181 | event, 182 | ); 183 | } 184 | }, null); 185 | 186 | try reactor.poll(1, struct { 187 | fn call(event: Reactor.Event) void { 188 | testing.expectEqual( 189 | Reactor.Event{ 190 | .data = 0, 191 | .is_error = false, 192 | .is_hup = false, 193 | .is_readable = true, 194 | .is_writable = false, 195 | }, 196 | event, 197 | ); 198 | } 199 | }, null); 200 | 201 | const ab = try a.accept(os.SOCK_CLOEXEC); 202 | defer ab.socket.deinit(); 203 | 204 | try os.shutdown(ab.socket.fd, .both); 205 | 206 | try reactor.add(ab.socket.fd, 2, .{ .readable = true, .writable = true }); 207 | 208 | try reactor.poll(1, struct { 209 | fn call(event: Reactor.Event) void { 210 | testing.expectEqual( 211 | Reactor.Event{ 212 | .data = 1, 213 | .is_error = false, 214 | .is_hup = true, 215 | .is_readable = true, 216 | .is_writable = true, 217 | }, 218 | event, 219 | ); 220 | } 221 | }, null); 222 | 223 | try reactor.poll(1, struct { 224 | fn call(event: Reactor.Event) void { 225 | testing.expectEqual( 226 | Reactor.Event{ 227 | .data = 2, 228 | .is_error = false, 229 | .is_hup = true, 230 | .is_readable = true, 231 | .is_writable = true, 232 | }, 233 | event, 234 | ); 235 | } 236 | }, null); 237 | } 238 | 239 | test "reactor: shutdown async socket" { 240 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 241 | defer reactor.deinit(); 242 | 243 | const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 244 | defer a.deinit(); 245 | 246 | try reactor.add(a.fd, 0, .{ .readable = true }); 247 | try reactor.poll(1, struct { 248 | fn call(event: Reactor.Event) void { 249 | testing.expectEqual( 250 | Reactor.Event{ 251 | .data = 0, 252 | .is_error = false, 253 | .is_hup = true, 254 | .is_readable = false, 255 | .is_writable = false, 256 | }, 257 | event, 258 | ); 259 | } 260 | }, null); 261 | 262 | try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0)); 263 | try a.listen(128); 264 | 265 | const binded_address = try a.getName(); 266 | print("Binded to address: {}\n", .{binded_address}); 267 | 268 | const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 269 | defer b.deinit(); 270 | 271 | try reactor.add(b.fd, 1, .{ .readable = true, .writable = true }); 272 | try reactor.poll(1, struct { 273 | fn call(event: Reactor.Event) void { 274 | testing.expectEqual( 275 | Reactor.Event{ 276 | .data = 1, 277 | .is_error = false, 278 | .is_hup = true, 279 | .is_readable = false, 280 | .is_writable = true, 281 | }, 282 | event, 283 | ); 284 | } 285 | }, null); 286 | 287 | b.connect(binded_address) catch |err| switch (err) { 288 | error.WouldBlock => {}, 289 | else => return err, 290 | }; 291 | 292 | try reactor.poll(1, struct { 293 | fn call(event: Reactor.Event) void { 294 | testing.expectEqual( 295 | Reactor.Event{ 296 | .data = 1, 297 | .is_error = false, 298 | .is_hup = false, 299 | .is_readable = false, 300 | .is_writable = true, 301 | }, 302 | event, 303 | ); 304 | } 305 | }, null); 306 | 
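// The connect() above has completed its handshake by this point, so the
// listening socket (data = 0) becomes readable: a pending connection is
// ready to be accept()-ed.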
307 | try reactor.poll(1, struct { 308 | fn call(event: Reactor.Event) void { 309 | testing.expectEqual( 310 | Reactor.Event{ 311 | .data = 0, 312 | .is_error = false, 313 | .is_hup = false, 314 | .is_readable = true, 315 | .is_writable = false, 316 | }, 317 | event, 318 | ); 319 | } 320 | }, null); 321 | 322 | const ab = try a.accept(os.SOCK_CLOEXEC); 323 | defer ab.socket.deinit(); 324 | 325 | try reactor.add(ab.socket.fd, 2, .{ .readable = true, .writable = true }); 326 | try reactor.poll(1, struct { 327 | fn call(event: Reactor.Event) void { 328 | testing.expectEqual( 329 | Reactor.Event{ 330 | .data = 2, 331 | .is_error = false, 332 | .is_hup = false, 333 | .is_readable = false, 334 | .is_writable = true, 335 | }, 336 | event, 337 | ); 338 | } 339 | }, null); 340 | 341 | try os.shutdown(b.fd, .both); 342 | 343 | try reactor.poll(1, struct { 344 | fn call(event: Reactor.Event) void { 345 | testing.expectEqual( 346 | Reactor.Event{ 347 | .data = 2, 348 | .is_error = false, 349 | .is_hup = true, 350 | .is_readable = true, 351 | .is_writable = true, 352 | }, 353 | event, 354 | ); 355 | } 356 | }, null); 357 | 358 | try reactor.poll(1, struct { 359 | fn call(event: Reactor.Event) void { 360 | testing.expectEqual( 361 | Reactor.Event{ 362 | .data = 1, 363 | .is_error = false, 364 | .is_hup = true, 365 | .is_readable = true, 366 | .is_writable = true, 367 | }, 368 | event, 369 | ); 370 | } 371 | }, null); 372 | } 373 | 374 | test "reactor: async socket" { 375 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 376 | defer reactor.deinit(); 377 | 378 | const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 379 | defer a.deinit(); 380 | 381 | try reactor.add(a.fd, 0, .{ .readable = true }); 382 | try reactor.poll(1, struct { 383 | fn call(event: Reactor.Event) void { 384 | testing.expectEqual( 385 | Reactor.Event{ 386 | .data = 0, 387 | .is_error = false, 388 | .is_hup = true, 389 | .is_readable = false, 390 | .is_writable = false, 391 | }, 392 | event, 393 | ); 394 | } 395 | }, null); 396 | 397 | try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0)); 398 | try a.listen(128); 399 | 400 | const binded_address = try a.getName(); 401 | print("Binded to address: {}\n", .{binded_address}); 402 | 403 | const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 404 | defer b.deinit(); 405 | 406 | try reactor.add(b.fd, 1, .{ .readable = true, .writable = true }); 407 | try reactor.poll(1, struct { 408 | fn call(event: Reactor.Event) void { 409 | testing.expectEqual( 410 | Reactor.Event{ 411 | .data = 1, 412 | .is_error = false, 413 | .is_hup = true, 414 | .is_readable = false, 415 | .is_writable = true, 416 | }, 417 | event, 418 | ); 419 | } 420 | }, null); 421 | 422 | b.connect(binded_address) catch |err| switch (err) { 423 | error.WouldBlock => {}, 424 | else => return err, 425 | }; 426 | 427 | try reactor.poll(1, struct { 428 | fn call(event: Reactor.Event) void { 429 | testing.expectEqual( 430 | Reactor.Event{ 431 | .data = 1, 432 | .is_error = false, 433 | .is_hup = false, 434 | .is_readable = false, 435 | .is_writable = true, 436 | }, 437 | event, 438 | ); 439 | } 440 | }, null); 441 | 442 | try reactor.poll(1, struct { 443 | fn call(event: Reactor.Event) void { 444 | testing.expectEqual( 445 | Reactor.Event{ 446 | .data = 0, 447 | .is_error = false, 448 | .is_hup = false, 449 | .is_readable = true, 450 | .is_writable = false, 451 | }, 452 | event, 453 | ); 454 | } 455 | }, null); 456 | } 
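// A minimal event-loop sketch, assuming `const hyperia = @import("hyperia.zig");`
// is in scope (reactor.zig itself does not import it) and that every `data`
// value registered with the reactor is a pointer to a Reactor.Handle — the
// same convention the auto-reset-event test below relies on. `running` is a
// hypothetical shutdown flag.
fn exampleEventLoop(reactor: Reactor, running: *const bool) !void {
    while (@atomicLoad(bool, running, .Monotonic)) {
        try reactor.poll(128, struct {
            pub fn call(event: Reactor.Event) void {
                // Recover the handle that was registered via reactor.add().
                const handle = @intToPtr(*Reactor.Handle, event.data);

                // Let the handle translate the raw epoll event into runnable
                // tasks, then hand the whole batch off to the thread pool.
                var batch: zap.Pool.Batch = .{};
                handle.call(&batch, event);
                hyperia.pool.schedule(.{}, batch);
            }
        }, null);
    }
}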
457 | 458 | test "reactor/auto_reset_event: post a notification 1024 times" { 459 | const reactor = try Reactor.init(os.EPOLL_CLOEXEC); 460 | defer reactor.deinit(); 461 | 462 | var test_event = try Reactor.AutoResetEvent.init(os.EFD_CLOEXEC, reactor); 463 | defer test_event.deinit(); 464 | 465 | try reactor.add(test_event.fd, &test_event.handle, .{}); 466 | 467 | try reactor.poll(1, struct { 468 | pub fn call(event: Reactor.Event) void { 469 | unreachable; 470 | } 471 | }, 0); 472 | 473 | // Registering an eventfd to epoll will not trigger a notification. 474 | // Deregistering an eventfd from epoll will not trigger a notification. 475 | // Attempt to post a notification to see if we achieve expected behavior. 476 | 477 | var i: usize = 0; 478 | while (i < 1024) : (i += 1) { 479 | test_event.post(); 480 | 481 | try reactor.poll(1, struct { 482 | pub fn call(event: Reactor.Event) void { 483 | const handle = @intToPtr(*Reactor.Handle, event.data); 484 | 485 | var batch: zap.Pool.Batch = .{}; 486 | defer testing.expect(batch.isEmpty()); 487 | 488 | handle.call(&batch, event); 489 | } 490 | }, null); 491 | } 492 | 493 | testing.expect(@atomicLoad(bool, &test_event.notified, .Monotonic)); 494 | } 495 | -------------------------------------------------------------------------------- /select.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const hyperia = @import("hyperia.zig"); 3 | 4 | const mem = std.mem; 5 | const meta = std.meta; 6 | const builtin = std.builtin; 7 | const oneshot = hyperia.oneshot; 8 | 9 | pub fn ResultUnionOf(comptime Cases: type) type { 10 | var union_fields: [@typeInfo(Cases).Struct.fields.len]builtin.TypeInfo.UnionField = undefined; 11 | var union_tag = meta.FieldEnum(Cases); 12 | 13 | inline for (@typeInfo(Cases).Struct.fields) |field, i| { 14 | const run = meta.fieldInfo(field.field_type, .run).field_type; 15 | const return_type = @typeInfo(@typeInfo(run).Struct.decls[0].data.Var).Fn.return_type.?; 16 | const field_alignment = if (@sizeOf(return_type) > 0) @alignOf(return_type) else 0; 17 | 18 | union_fields[i] = .{ 19 | .name = field.name, 20 | .field_type = return_type, 21 | .alignment = field_alignment, 22 | }; 23 | } 24 | 25 | return @Type(builtin.TypeInfo{ 26 | .Union = .{ 27 | .layout = .Auto, 28 | .tag_type = union_tag, 29 | .fields = &union_fields, 30 | .decls = &.{}, 31 | }, 32 | }); 33 | } 34 | 35 | pub fn select(cases: anytype) ResultUnionOf(@TypeOf(cases)) { 36 | const ResultUnion = ResultUnionOf(@TypeOf(cases)); 37 | const Channel = oneshot.Channel(ResultUnion); 38 | 39 | const Memoized = struct { 40 | pub fn Closure( 41 | comptime C: type, 42 | comptime case_name: []const u8, 43 | ) type { 44 | return struct { 45 | fn call(channel: *Channel, case: C) callconv(.Async) void { 46 | const result = @call(.{}, C.function, case.args); 47 | const result_union = @unionInit(ResultUnion, case_name, result); 48 | if (channel.set()) channel.commit(result_union); 49 | } 50 | }; 51 | } 52 | }; 53 | 54 | comptime var types: []const type = &[_]type{}; 55 | inline for (@typeInfo(@TypeOf(cases)).Struct.fields) |field| { 56 | const C = Memoized.Closure(@TypeOf(@field(@field(cases, field.name), "run")), field.name); 57 | types = types ++ [_]type{@Frame(C.call)}; 58 | } 59 | 60 | var frames: meta.Tuple(types) = undefined; 61 | var channel: Channel = .{}; 62 | 63 | inline for (@typeInfo(@TypeOf(cases)).Struct.fields) |field, i| { 64 | const C = Memoized.Closure(@TypeOf(@field(@field(cases, field.name), 
"run")), field.name); 65 | frames[i] = async C.call(&channel, @field(@field(cases, field.name), "run")); 66 | } 67 | 68 | const result = channel.wait(); 69 | const result_idx = @enumToInt(result); 70 | 71 | inline for (@typeInfo(@TypeOf(cases)).Struct.fields) |field, i| { 72 | if (i != result_idx) { 73 | if (comptime @hasField(@TypeOf(@field(cases, field.name)), "cancel")) { 74 | const cancel = @field(@field(cases, field.name), "cancel"); 75 | @call(comptime .{}, @TypeOf(cancel).function, cancel.args); 76 | } 77 | } 78 | await frames[i]; 79 | } 80 | 81 | return result; 82 | } 83 | 84 | pub fn Case(comptime Function: anytype) type { 85 | return struct { 86 | pub const function = Function; 87 | args: meta.ArgsTuple(@TypeOf(Function)), 88 | }; 89 | } 90 | 91 | pub fn call(comptime Function: anytype, arguments: anytype) Case(Function) { 92 | var args: meta.ArgsTuple(@TypeOf(Function)) = undefined; 93 | mem.copy(u8, mem.asBytes(&args), mem.asBytes(&arguments)); 94 | return .{ .args = args }; 95 | } 96 | -------------------------------------------------------------------------------- /socket.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const os = std.os; 4 | const mem = std.mem; 5 | const net = std.net; 6 | const time = std.time; 7 | const testing = std.testing; 8 | 9 | const print = std.debug.print; 10 | 11 | pub const Socket = struct { 12 | fd: os.socket_t, 13 | 14 | pub fn init(domain: u32, socket_type: u32, protocol: u32) !Socket { 15 | return Socket{ .fd = try os.socket(domain, socket_type, protocol) }; 16 | } 17 | 18 | pub fn deinit(self: Socket) void { 19 | os.close(self.fd); 20 | } 21 | 22 | pub fn shutdown(self: Socket, how: os.ShutdownHow) !void { 23 | try os.shutdown(self.fd, how); 24 | } 25 | 26 | pub fn bind(self: Socket, address: net.Address) !void { 27 | try os.bind(self.fd, &address.any, address.getOsSockLen()); 28 | } 29 | 30 | pub fn listen(self: Socket, max_backlog_size: usize) !void { 31 | try os.listen(self.fd, @truncate(u31, max_backlog_size)); 32 | } 33 | 34 | pub fn setReuseAddress(self: Socket, enabled: bool) !void { 35 | if (@hasDecl(os, "SO_REUSEADDR")) { 36 | try os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEADDR, mem.asBytes(&@as(usize, @boolToInt(enabled)))); 37 | } 38 | } 39 | 40 | pub fn setReusePort(self: Socket, enabled: bool) !void { 41 | if (@hasDecl(os, "SO_REUSEPORT")) { 42 | try os.setsockopt(self.fd, os.SOL_SOCKET, os.SO_REUSEPORT, mem.asBytes(&@as(usize, @boolToInt(enabled)))); 43 | } 44 | } 45 | 46 | pub fn setNoDelay(self: Socket, enabled: bool) !void { 47 | if (@hasDecl(os, "TCP_NODELAY")) { 48 | try os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_NODELAY, mem.asBytes(&@as(usize, @boolToInt(enabled)))); 49 | } 50 | } 51 | 52 | pub fn setFastOpen(self: Socket, enabled: bool) !void { 53 | if (@hasDecl(os, "TCP_FASTOPEN")) { 54 | try os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_FASTOPEN, mem.asBytes(&@as(usize, @boolToInt(enabled)))); 55 | } 56 | } 57 | 58 | pub fn setQuickAck(self: Socket, enabled: bool) !void { 59 | if (@hasDecl(os, "TCP_QUICKACK")) { 60 | try os.setsockopt(self.fd, os.IPPROTO_TCP, os.TCP_QUICKACK, mem.asBytes(&@as(usize, @boolToInt(enabled)))); 61 | } 62 | } 63 | 64 | pub fn getName(self: Socket) !net.Address { 65 | var binded_address: os.sockaddr = undefined; 66 | var binded_address_len: u32 = @sizeOf(os.sockaddr); 67 | try os.getsockname(self.fd, &binded_address, &binded_address_len); 68 | return net.Address.initPosix(@alignCast(4, &binded_address)); 
69 | } 70 | 71 | pub fn connect(self: Socket, address: net.Address) !void { 72 | try os.connect(self.fd, &address.any, address.getOsSockLen()); 73 | } 74 | 75 | pub fn getError(self: Socket) !void { 76 | try os.getsockoptError(self.fd); 77 | } 78 | 79 | pub fn getReadBufferSize(self: Socket) !u32 { 80 | var value: u32 = undefined; 81 | var value_len: u32 = @sizeOf(u32); 82 | 83 | const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&value), &value_len); 84 | return switch (os.errno(rc)) { 85 | 0 => value, 86 | os.EBADF => error.BadFileDescriptor, 87 | os.EFAULT => error.InvalidAddressSpace, 88 | os.EINVAL => error.InvalidSocketOption, 89 | os.ENOPROTOOPT => error.UnknownSocketOption, 90 | os.ENOTSOCK => error.NotASocket, 91 | else => |err| os.unexpectedErrno(err), 92 | }; 93 | } 94 | 95 | pub fn getWriteBufferSize(self: Socket) !u32 { 96 | var value: u32 = undefined; 97 | var value_len: u32 = @sizeOf(u32); 98 | 99 | const rc = os.system.getsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&value), &value_len); 100 | return switch (os.errno(rc)) { 101 | 0 => value, 102 | os.EBADF => error.BadFileDescriptor, 103 | os.EFAULT => error.InvalidAddressSpace, 104 | os.EINVAL => error.InvalidSocketOption, 105 | os.ENOPROTOOPT => error.UnknownSocketOption, 106 | os.ENOTSOCK => error.NotASocket, 107 | else => |err| os.unexpectedErrno(err), 108 | }; 109 | } 110 | 111 | pub fn setReadBufferSize(self: Socket, size: u32) !void { 112 | const rc = os.system.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVBUF, mem.asBytes(&size), @sizeOf(u32)); 113 | return switch (os.errno(rc)) { 114 | 0 => {}, 115 | os.EBADF => error.BadFileDescriptor, 116 | os.EFAULT => error.InvalidAddressSpace, 117 | os.EINVAL => error.InvalidSocketOption, 118 | os.ENOPROTOOPT => error.UnknownSocketOption, 119 | os.ENOTSOCK => error.NotASocket, 120 | else => |err| os.unexpectedErrno(err), 121 | }; 122 | } 123 | 124 | pub fn setWriteBufferSize(self: Socket, size: u32) !void { 125 | const rc = os.system.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDBUF, mem.asBytes(&size), @sizeOf(u32)); 126 | return switch (os.errno(rc)) { 127 | 0 => {}, 128 | os.EBADF => error.BadFileDescriptor, 129 | os.EFAULT => error.InvalidAddressSpace, 130 | os.EINVAL => error.InvalidSocketOption, 131 | os.ENOPROTOOPT => error.UnknownSocketOption, 132 | os.ENOTSOCK => error.NotASocket, 133 | else => |err| os.unexpectedErrno(err), 134 | }; 135 | } 136 | 137 | pub fn setReadTimeout(self: Socket, milliseconds: usize) !void { 138 | const timeout = os.timeval{ 139 | .tv_sec = @intCast(isize, milliseconds / time.ms_per_s), 140 | .tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms), 141 | }; 142 | 143 | const rc = os.system.setsockopt(self.fd, os.SOL_SOCKET, os.SO_RCVTIMEO, mem.asBytes(&timeout), @sizeOf(os.timeval)); 144 | return switch (os.errno(rc)) { 145 | 0 => {}, 146 | os.EBADF => error.BadFileDescriptor, 147 | os.EFAULT => error.InvalidAddressSpace, 148 | os.EINVAL => error.InvalidSocketOption, 149 | os.ENOPROTOOPT => error.UnknownSocketOption, 150 | os.ENOTSOCK => error.NotASocket, 151 | else => |err| os.unexpectedErrno(err), 152 | }; 153 | } 154 | 155 | pub fn setWriteTimeout(self: Socket, milliseconds: usize) !void { 156 | const timeout = os.timeval{ 157 | .tv_sec = @intCast(isize, milliseconds / time.ms_per_s), 158 | .tv_usec = @intCast(isize, (milliseconds % time.ms_per_s) * time.us_per_ms), 159 | }; 160 | 161 | const rc = os.system.setsockopt(self.fd, os.SOL_SOCKET, os.SO_SNDTIMEO, mem.asBytes(&timeout), 
@sizeOf(os.timeval)); 162 | return switch (os.errno(rc)) { 163 | 0 => {}, 164 | os.EBADF => error.BadFileDescriptor, 165 | os.EFAULT => error.InvalidAddressSpace, 166 | os.EINVAL => error.InvalidSocketOption, 167 | os.ENOPROTOOPT => error.UnknownSocketOption, 168 | os.ENOTSOCK => error.NotASocket, 169 | else => |err| os.unexpectedErrno(err), 170 | }; 171 | } 172 | 173 | pub const Connection = struct { 174 | socket: Socket, 175 | address: net.Address, 176 | }; 177 | 178 | pub fn accept(self: Socket, flags: u32) !Connection { 179 | var address: os.sockaddr = undefined; 180 | var address_len: u32 = @sizeOf(os.sockaddr); 181 | 182 | const fd = try os.accept(self.fd, &address, &address_len, flags); 183 | 184 | return Connection{ 185 | .socket = Socket{ .fd = fd }, 186 | .address = net.Address.initPosix(@alignCast(4, &address)), 187 | }; 188 | } 189 | 190 | pub fn read(self: Socket, buf: []u8) !usize { 191 | return try os.read(self.fd, buf); 192 | } 193 | 194 | pub fn recv(self: Socket, buf: []u8, flags: u32) !usize { 195 | return try os.recv(self.fd, buf, flags); 196 | } 197 | 198 | pub fn write(self: Socket, buf: []const u8) !usize { 199 | return try os.write(self.fd, buf); 200 | } 201 | 202 | pub fn send(self: Socket, buf: []const u8, flags: u32) !usize { 203 | return try os.send(self.fd, buf, flags); 204 | } 205 | }; 206 | 207 | test { 208 | testing.refAllDecls(Socket); 209 | } 210 | 211 | test "socket/linux: set write timeout" { 212 | const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 213 | defer a.deinit(); 214 | 215 | try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0)); 216 | try a.listen(128); 217 | 218 | const binded_address = try a.getName(); 219 | print("Binded to address: {}\n", .{binded_address}); 220 | 221 | const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP); 222 | defer b.deinit(); 223 | 224 | try b.connect(binded_address); 225 | 226 | const ab = try a.accept(os.SOCK_CLOEXEC); 227 | defer ab.socket.deinit(); 228 | 229 | // The minimum read buffer size is 128. 230 | // The minimum write buffer size is 1024. 231 | // All buffer sizes are doubled when they are passed in. 232 | // After taking into account book-keeping for buffer sizes, the minimum 233 | // buffer size before writes start to block the main thread is 65,483 bytes. 
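// Concretely: with the buffers floored as described above (128 -> 256 bytes
// of receive buffer and 1024 -> 2048 bytes of send buffer after the kernel
// doubles them), the write() below is expected to copy all but the final
// byte (65,482 of 65,483) before the 10 ms send timeout fires and the
// partial count is returned.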

    var buf: [65_483]u8 = undefined;

    try ab.socket.setReadBufferSize(128);
    try b.setWriteBufferSize(1024);
    try b.setWriteTimeout(10);

    testing.expectEqual(buf.len - 1, try b.write(&buf));
}

test "socket/linux: set read timeout" {
    const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer a.deinit();

    try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
    try a.listen(128);

    const bound_address = try a.getName();
    print("Bound to address: {}\n", .{bound_address});

    const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer b.deinit();

    try b.connect(bound_address);
    try b.setReadTimeout(10);

    const ab = try a.accept(os.SOCK_CLOEXEC);
    defer ab.socket.deinit();

    var buf: [1]u8 = undefined;
    testing.expectError(error.WouldBlock, b.read(&buf));
}

test "socket/linux: create socket pair" {
    const a = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer a.deinit();

    try a.bind(net.Address.initIp4([_]u8{ 0, 0, 0, 0 }, 0));
    try a.listen(128);

    const bound_address = try a.getName();
    print("Bound to address: {}\n", .{bound_address});

    const b = try Socket.init(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer b.deinit();

    testing.expectError(error.WouldBlock, b.connect(bound_address));
    try b.getError();

    const ab = try a.accept(os.SOCK_NONBLOCK | os.SOCK_CLOEXEC);
    defer ab.socket.deinit();
}

test "raw_socket/linux: create socket pair" {
    const empty_ip4_address = os.sockaddr_in{ .port = 0, .addr = 0 };

    const a = try os.socket(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer os.close(a);

    try os.bind(a, @ptrCast(*const os.sockaddr, &empty_ip4_address), @sizeOf(os.sockaddr_in));
    try os.listen(a, 128);

    var bound_address: os.sockaddr = undefined;
    var bound_address_len: u32 = @sizeOf(os.sockaddr);
    try os.getsockname(a, &bound_address, &bound_address_len);

    switch (bound_address.family) {
        os.AF_INET => print("Bound to IPv4 address: {}", .{@ptrCast(*align(1) os.sockaddr_in, &bound_address)}),
        os.AF_INET6 => print("Bound to IPv6 address: {}", .{@ptrCast(*align(1) os.sockaddr_in6, &bound_address)}),
        else => return error.UnexpectedAddressFamily,
    }

    const b = try os.socket(os.AF_INET, os.SOCK_STREAM | os.SOCK_NONBLOCK | os.SOCK_CLOEXEC, os.IPPROTO_TCP);
    defer os.close(b);

    testing.expectError(error.WouldBlock, os.connect(b, &bound_address, bound_address_len));
    try os.getsockoptError(b);
}
--------------------------------------------------------------------------------
/spsc.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const zap = @import("zap");
const hyperia = @import("hyperia.zig");

const mem = std.mem;
const mpsc = hyperia.mpsc;
const builtin = std.builtin;
const testing = std.testing;

const assert = std.debug.assert;

pub const cache_line_length = switch (builtin.cpu.arch) {
    .x86_64, .aarch64, .powerpc64 => 128,
    .arm, .mips, .mips64, .riscv64 => 32,
    .s390x => 256,
    else => 64,
};
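
// The per-architecture sizes above are used to align the hot fields of
// Queue(T, capacity) so that the producer-side and consumer-side cursors live
// on separate cache lines. Without that padding, a producer bumping
// enqueue_pos would repeatedly invalidate the cache line holding dequeue_pos
// (false sharing), even though the two threads never touch the same data.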

// A suspending layer over Queue: push parks the producer while the queue is
// full, pop parks the consumer while it is empty, and each side is woken back
// up through its own auto-reset event. close() marks the queue closed and
// keeps waking both sides until no parked task remains, so pending operations
// observe `closed` and bail out.
pub fn AsyncQueue(comptime T: type, comptime capacity: comptime_int) type {
    return struct {
        const Self = @This();

        queue: Queue(T, capacity) = .{},
        closed: bool = false,
        producer_event: mpsc.AsyncAutoResetEvent(void) = .{},
        consumer_event: mpsc.AsyncAutoResetEvent(void) = .{},

        pub fn close(self: *Self) void {
            @atomicStore(bool, &self.closed, true, .Monotonic);

            while (true) {
                var batch: zap.Pool.Batch = .{};
                batch.push(self.producer_event.set());
                batch.push(self.consumer_event.set());
                if (batch.isEmpty()) break;
                hyperia.pool.schedule(.{}, batch);
            }
        }

        pub fn push(self: *Self, item: T) bool {
            while (!@atomicLoad(bool, &self.closed, .Monotonic)) {
                if (self.queue.push(item)) {
                    if (self.consumer_event.set()) |runnable| {
                        hyperia.pool.schedule(.{}, runnable);
                    }
                    return true;
                }
                self.producer_event.wait();
            }
            return false;
        }

        pub fn pop(self: *Self) ?T {
            while (!@atomicLoad(bool, &self.closed, .Monotonic)) {
                if (self.queue.pop()) |item| {
                    if (self.producer_event.set()) |runnable| {
                        hyperia.pool.schedule(.{}, runnable);
                    }
                    return item;
                }
                self.consumer_event.wait();
            }
            return null;
        }
    };
}

// Bounded single-producer, single-consumer ring buffer. capacity is expected
// to be a power of two: the index masking with (capacity - 1) in push/pop
// relies on it.
pub fn Queue(comptime T: type, comptime capacity: comptime_int) type {
    return struct {
        const Self = @This();

        entries: [capacity]T align(cache_line_length) = undefined,
        enqueue_pos: usize align(cache_line_length) = 0,
        dequeue_pos: usize align(cache_line_length) = 0,

        pub fn push(self: *Self, item: T) bool {
            const head = self.enqueue_pos;
            const tail = @atomicLoad(usize, &self.dequeue_pos, .Acquire);
            if (head +% 1 -% tail > capacity) {
                return false;
            }
            self.entries[head & (capacity - 1)] = item;
            @atomicStore(usize, &self.enqueue_pos, head +% 1, .Release);
            return true;
        }

        pub fn pop(self: *Self) ?T {
            const tail = self.dequeue_pos;
            const head = @atomicLoad(usize, &self.enqueue_pos, .Acquire);
            if (tail -% head == 0) {
                return null;
            }
            const popped = self.entries[tail & (capacity - 1)];
            @atomicStore(usize, &self.dequeue_pos, tail +% 1, .Release);
            return popped;
        }
    };
}
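
// Worked example of the wrapping-cursor arithmetic in Queue, for capacity = 4:
// after four pushes, enqueue_pos = 4 and dequeue_pos = 0, so the fullness check
// computes 4 +% 1 -% 0 = 5 > 4 and push reports the queue as full; once all
// four items are popped, dequeue_pos catches up to enqueue_pos and pop returns
// null. Because +% and -% wrap modulo the width of usize, these comparisons
// remain correct even after the cursors overflow.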

test {
    testing.refAllDecls(Queue(u64, 4));
    testing.refAllDecls(AsyncQueue(u64, 4));
}

test "queue" {
    var queue: Queue(u64, 4) = .{};

    var i: usize = 0;
    while (i < 4) : (i += 1) testing.expect(queue.push(i));
    testing.expect(!queue.push(5));
    testing.expect(!queue.push(6));
    testing.expect(!queue.push(7));
    testing.expect(!queue.push(8));

    var j: usize = 0;
    while (j < 4) : (j += 1) testing.expect(queue.pop().? == j);
    testing.expect(queue.pop() == null);
    testing.expect(queue.pop() == null);
    testing.expect(queue.pop() == null);
    testing.expect(queue.pop() == null);
}
--------------------------------------------------------------------------------
/sync.zig:
--------------------------------------------------------------------------------
const std = @import("std");

const testing = std.testing;

// Minimal test-and-set spinlock: acquire spins on an atomic exchange with
// Acquire ordering until it observes the lock as previously unlocked, and
// release publishes the unlock with a Release store.
pub const SpinLock = struct {
    pub const Held = struct {
        self: *SpinLock,

        pub fn release(held: Held) void {
            @atomicStore(bool, &held.self.locked, false, .Release);
        }
    };

    locked: bool = false,

    pub fn acquire(self: *SpinLock) Held {
        while (@atomicRmw(bool, &self.locked, .Xchg, true, .Acquire)) {
            std.Thread.spinLoopHint();
        }
        return Held{ .self = self };
    }
};

test {
    testing.refAllDecls(@This());
}

test "sync/spin_lock: acquire and release" {
    var lock: SpinLock = .{};

    const held = lock.acquire();
    defer held.release();
}
--------------------------------------------------------------------------------