├── .gitignore ├── LICENSE ├── README.md ├── build.zig ├── client.zig ├── io.zig ├── server.zig ├── snow.zig ├── socket.zig ├── sync.zig ├── test.zig └── zig.mod /.gitignore: -------------------------------------------------------------------------------- 1 | zig-cache/ 2 | deps.zig -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Kenta Iwasaki 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # snow 2 | 3 | A small, fast, cross-platform, async Zig networking framework built on top of [lithdew/pike](https://github.com/lithdew/pike). 4 | 5 | It automatically handles: 6 | 1. 
buffering/framing data coming in and out of a socket, 7 | 2. managing the lifecycle of incoming / outgoing connections, and 8 | 3. representing a singular `Client` / `Server` as a bounded adaptive pool of outgoing / incoming connections. 9 | 10 | It also allows you to specify: 11 | 1. how messages are framed (`\n` suffixed to each message, message length prefixed to each message, etc.), 12 | 2. a sequence of steps to be performed before successfully establishing a connection (a handshake protocol), and 13 | 3. an upper bound to the maximum number of connections a `Client` / `Server` may pool in total. 14 | 15 | ## Usage 16 | 17 | In your `build.zig`: 18 | 19 | ```zig 20 | const std = @import("std"); 21 | 22 | const Builder = std.build.Builder; 23 | 24 | const pkgs = struct { 25 | const pike = std.build.Pkg{ 26 | .name = "pike", 27 | .path = "pike/pike.zig", 28 | }; 29 | 30 | const snow = std.build.Pkg{ 31 | .name = "snow", 32 | .path = "snow/snow.zig", 33 | .dependencies = &[_]std.build.Pkg{ 34 | pike, 35 | } 36 | }; 37 | }; 38 | 39 | pub fn build(b: *Builder) void { 40 | // Given a build step... 41 | step.addPackage(pkgs.pike); 42 | step.addPackage(pkgs.snow); 43 | } 44 | ``` 45 | 46 | ## Protocol 47 | 48 | Applications written with _snow_ provide a `Protocol` implementation which specifies how message are encoded / decoded into frames. 49 | 50 | Helpers (`io.Reader` / `io.Writer`) are provided to assist developers in specifying how frame encoding / decoding is to be performed. 51 | 52 | Given a `Protocol` implementation, a `Client` / `Server` may be instantiated. 
53 | 54 | Here is an example of a `Protocol` that frames messages based on an End-of-Line character ('\n') suffixed at the end of each message: 55 | 56 | ```zig 57 | const std = @import("std"); 58 | const snow = @import("snow"); 59 | 60 | const mem = std.mem; 61 | 62 | const Protocol = struct { 63 | const Self = @This(); 64 | 65 | // This gets called right before a connection is marked to be successfully established! 66 | // 67 | // Unlike the rest of the callbacks below, 'socket' is a raw 'pike.Socket'. Feel free to 68 | // read / write as much data as you wish, or to return an error to prevent a connection 69 | // from being marked as being successfully established. 70 | // 71 | // Rather than 'void', snow.Options.context_type may be set and returned from 'handshake' 72 | // to bootstrap a connection with additional fields and methods under 'socket.context'. 73 | pub fn handshake(self: *Self, comptime side: snow.Side, socket: anytype) !void { 74 | return {}; 75 | } 76 | 77 | // This gets called before a connection is closed! 78 | pub fn close(self: *Self, comptime side: snow.Side, socket: anytype) void { 79 | return {}; 80 | } 81 | 82 | // This gets called when a connection's resources is ready to be de-allocated! 83 | // 84 | // A slice of remaining items in the socket's write queue is passed to purge() to be 85 | // optionally deallocated. 86 | pub fn purge(self: *Self, comptime side: snow.Side, socket: anytype, items: []const []const u8) void { 87 | return {}; 88 | } 89 | 90 | // This gets called when data is ready to be read from a connection! 91 | pub fn read(self: *Self, comptime side: snow.Side, socket: anytype, reader: anytype) !void { 92 | while (true) { 93 | const line = try reader.readLine(); 94 | defer reader.shift(line.len); 95 | 96 | // Do something with the frame here! 97 | } 98 | } 99 | 100 | // This gets called when data is queued and ready to be encoded and written to 101 | // a connection! 
102 | // 103 | // Rather than '[]const u8', custom message types may be set to be queuable to the 104 | // connections write queue by setting snow.Options.message_type. 105 | pub fn write(self: *Self, comptime side: snow.Side, socket: anytype, writer: anytype, items: [][]const u8) !void { 106 | for (items) |message| { 107 | if (mem.indexOfScalar(u8, message, '\n') != null) { 108 | return error.UnexpectedDelimiter; 109 | } 110 | 111 | const frame = try writer.peek(message.len + 1); 112 | mem.copy(u8, frame[0..message.len], message); 113 | frame[message.len..][0] = '\n'; 114 | } 115 | 116 | try writer.flush(); 117 | } 118 | }; 119 | ``` 120 | 121 | ## Client 122 | 123 | A `Client` comprises of a bounded adaptive pool of outgoing connections that are to be connected to a single IPv4/IPv6 endpoint. 124 | 125 | When writing a message to an endpoint with a `Client`, a connection is initially grabbed from the pool. The message is then queued to be written to the connection's underlying socket. 126 | 127 | A policy is specified for selecting which connection to grab from a `Client`'s pool. The policy goes as follows: 128 | 129 | There exists a configurable maximum number of connections that may belong to a `Client`'s pool (defaulting to 16). If all existing connections in a `Client`'s pool contain queued messages that have yet to be flushed and written, a new connection is created and registered to the pool up to a maximum number of connections. 130 | 131 | If the pool's maximum number of connections limit is reached, the connection in the pool with the smallest number of queued messages is returned. Otherwise, a new connection is created and registered to the pool and returned. 132 | 133 | ## Server 134 | 135 | A `Server` comprises of a bounded adaptive pool of incoming connections accepted from a single bounded IPv4/IPv6 endpoint. There exists a configurable maximum number of connections that may belong to a `Server`'s pool (defaulting to 128). 
136 | 137 | Should an incoming connection be established and the pool underlying a `Server` appears to be full, the connection will be declined and de-initialized. 138 | 139 | ## Socket 140 | 141 | All sockets comprise of two coroutines: a reader coroutine, and a writer coroutine. The reader and writer coroutine are responsible for framing and buffering messages received from / written to a single socket instance, and for executing logic specified under a `Protocol` implementation. 142 | 143 | An interesting detail to note is that the writer coroutine is entirely lock-free. 144 | 145 | ## Performance 146 | 147 | _snow_ was written with performance in mind: zero heap allocations occur in all hot paths. Heap allocations only ever occur when establishing a new incoming / outgoing connection. 148 | 149 | All other underlying components are stack-allocated and recycled as much as possible. -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const deps = @import("deps.zig"); 3 | 4 | pub fn build(b: *std.build.Builder) void { 5 | const mode = b.standardReleaseOptions(); 6 | const target = b.standardTargetOptions(.{}); 7 | 8 | const test_runner = b.addTest("test.zig"); 9 | test_runner.setBuildMode(mode); 10 | test_runner.setTarget(target); 11 | deps.addAllTo(test_runner); 12 | 13 | const test_step = b.step("test", "Runs the test suite."); 14 | test_step.dependOn(&test_runner.step); 15 | } 16 | -------------------------------------------------------------------------------- /client.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const pike = @import("pike"); 3 | const sync = @import("sync.zig"); 4 | 5 | const os = std.os; 6 | const net = std.net; 7 | const mem = std.mem; 8 | const meta = std.meta; 9 | const testing = std.testing; 10 | 11 | 
usingnamespace @import("socket.zig"); 12 | 13 | pub fn Client(comptime opts: Options) type { 14 | return struct { 15 | const Self = @This(); 16 | 17 | const Node = struct { 18 | ptr: *Connection, 19 | next: ?*Node = null, 20 | }; 21 | 22 | const ClientSocket = Socket(.client, opts); 23 | const Protocol = opts.protocol_type; 24 | 25 | pub const Connection = struct { 26 | node: Node, 27 | socket: ClientSocket, 28 | frame: @Frame(Self.runConnection), 29 | }; 30 | 31 | protocol: Protocol, 32 | notifier: *const pike.Notifier, 33 | allocator: *mem.Allocator, 34 | address: net.Address, 35 | 36 | lock: sync.Mutex = .{}, 37 | done: bool = false, 38 | 39 | pool: [opts.max_connections_per_client]*Connection = undefined, 40 | pool_len: usize = 0, 41 | 42 | cleanup_counter: sync.Counter = .{}, 43 | cleanup_queue: ?*Node = null, 44 | 45 | pub fn init(protocol: Protocol, allocator: *mem.Allocator, notifier: *const pike.Notifier, address: net.Address) Self { 46 | return Self{ .protocol = protocol, .allocator = allocator, .notifier = notifier, .address = address }; 47 | } 48 | 49 | pub fn deinit(self: *Self) void { 50 | var pool: [opts.max_connections_per_client]*Connection = undefined; 51 | var pool_len: usize = 0; 52 | 53 | { 54 | const held = self.lock.acquire(); 55 | defer held.release(); 56 | 57 | if (self.done) { 58 | return; 59 | } else { 60 | self.done = true; 61 | } 62 | 63 | pool = self.pool; 64 | pool_len = self.pool_len; 65 | self.pool = undefined; 66 | self.pool_len = 0; 67 | } 68 | 69 | for (pool[0..pool_len]) |conn| { 70 | conn.socket.deinit(); 71 | 72 | if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) { 73 | self.protocol.close(.client, &conn.socket); 74 | } 75 | } 76 | 77 | self.cleanup_counter.wait(); 78 | self.purge(); 79 | } 80 | 81 | pub fn purge(self: *Self) void { 82 | const held = self.lock.acquire(); 83 | defer held.release(); 84 | 85 | while (self.cleanup_queue) |head| { 86 | await head.ptr.frame catch {}; 87 | self.cleanup_queue = head.next; 
88 | 89 | if (comptime meta.trait.hasFn("purge")(meta.Child(Protocol))) { 90 | var items: [opts.write_queue_size]opts.message_type = undefined; 91 | 92 | const queue = &head.ptr.socket.write_queue; 93 | const remaining = queue.tail -% queue.head; 94 | 95 | var i: usize = 0; 96 | while (i < remaining) : (i += 1) { 97 | items[i] = queue.items[(queue.head + i) % queue.items.len]; 98 | } 99 | 100 | queue.head = queue.tail; 101 | 102 | self.protocol.purge(.client, &head.ptr.socket, items[0..remaining]); 103 | } 104 | 105 | self.allocator.destroy(head.ptr); 106 | } 107 | } 108 | 109 | fn cleanup(self: *Self, node: *Node) void { 110 | const held = self.lock.acquire(); 111 | defer held.release(); 112 | 113 | node.next = self.cleanup_queue; 114 | self.cleanup_queue = node; 115 | } 116 | 117 | pub fn bootstrap(self: *Self) !void { 118 | _ = try self.getConnection(); 119 | } 120 | 121 | pub fn write(self: *Self, message: opts.message_type) !void { 122 | const conn = try self.getConnection(); 123 | try conn.socket.write(message); 124 | } 125 | 126 | pub fn getConnection(self: *Self) !*Connection { 127 | defer self.purge(); 128 | 129 | const held = self.lock.acquire(); 130 | defer held.release(); 131 | 132 | if (self.done) return error.OperationCancelled; 133 | 134 | var pool = self.pool[0..self.pool_len]; 135 | if (pool.len == 0) return self.initConnection(); 136 | 137 | var min_conn = pool[0]; 138 | var min_pending = min_conn.socket.write_queue.pending(); 139 | if (min_pending == 0) return min_conn; 140 | 141 | for (pool[1..]) |conn| { 142 | const pending = conn.socket.write_queue.pending(); 143 | if (pending == 0) return conn; 144 | if (pending < min_pending) { 145 | min_conn = conn; 146 | min_pending = pending; 147 | } 148 | } 149 | 150 | if (pool.len < opts.max_connections_per_client) { 151 | return self.initConnection(); 152 | } 153 | 154 | return min_conn; 155 | } 156 | 157 | fn initConnection(self: *Self) !*Connection { 158 | const conn = try 
self.allocator.create(Connection); 159 | errdefer self.allocator.destroy(conn); 160 | 161 | conn.node = .{ .ptr = conn }; 162 | 163 | conn.socket = ClientSocket.init( 164 | try pike.Socket.init(os.AF_INET, os.SOCK_STREAM, os.IPPROTO_TCP, 0), 165 | self.address, 166 | ); 167 | errdefer conn.socket.deinit(); 168 | 169 | try conn.socket.unwrap().registerTo(self.notifier); 170 | try conn.socket.unwrap().connect(conn.socket.address); 171 | 172 | if (comptime meta.trait.hasFn("handshake")(meta.Child(Protocol))) { 173 | conn.socket.context = try self.protocol.handshake(.client, &conn.socket.inner); 174 | } 175 | 176 | self.pool[self.pool_len] = conn; 177 | self.pool_len += 1; 178 | 179 | conn.frame = async self.runConnection(conn); 180 | 181 | return conn; 182 | } 183 | 184 | fn deleteConnection(self: *Self, conn: *Connection) bool { 185 | const held = self.lock.acquire(); 186 | defer held.release(); 187 | 188 | var pool = self.pool[0..self.pool_len]; 189 | 190 | if (mem.indexOfScalar(*Connection, pool, conn)) |i| { 191 | mem.copy(*Connection, pool[i..], pool[i + 1 ..]); 192 | self.pool_len -= 1; 193 | return true; 194 | } 195 | 196 | return false; 197 | } 198 | 199 | fn runConnection(self: *Self, conn: *Connection) !void { 200 | self.cleanup_counter.add(1); 201 | 202 | defer { 203 | if (self.deleteConnection(conn)) { 204 | conn.socket.deinit(); 205 | 206 | if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) { 207 | self.protocol.close(.client, &conn.socket); 208 | } 209 | } 210 | 211 | self.cleanup(&conn.node); 212 | self.cleanup_counter.add(-1); 213 | } 214 | 215 | yield(); 216 | 217 | try conn.socket.run(self.protocol); 218 | } 219 | }; 220 | } 221 | -------------------------------------------------------------------------------- /io.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const mem = std.mem; 4 | 5 | pub fn Reader(comptime Socket: type, comptime buffer_size: usize) type { 6 
| return struct { 7 | const Self = @This(); 8 | 9 | socket: *Socket, 10 | 11 | buf: [buffer_size]u8 = undefined, 12 | pos: usize = 0, 13 | 14 | pub inline fn init(socket: *Socket) Self { 15 | return Self{ .socket = socket }; 16 | } 17 | 18 | pub inline fn reset(self: *Self) void { 19 | self.pos = 0; 20 | } 21 | 22 | pub inline fn readUntil(self: *Self, delimiter: []const u8) ![]const u8 { 23 | while (true) { 24 | if (self.pos >= buffer_size) return error.BufferOverflow; 25 | 26 | const num_bytes = try self.socket.read(self.buf[self.pos..]); 27 | if (num_bytes == 0) return error.EndOfStream; 28 | self.pos += num_bytes; 29 | 30 | if (mem.indexOf(u8, self.buf[0..self.pos], delimiter)) |i| { 31 | return self.buf[0 .. i + delimiter.len]; // include the FULL delimiter (was `i + 1`, which truncated any multi-byte delimiter and desynced the follow-up shift()) 32 | } 33 | } 34 | } 35 | 36 | pub inline fn readLine(self: *Self) ![]const u8 { 37 | return self.readUntil("\n"); 38 | } 39 | 40 | pub inline fn peek(self: *Self, amount: usize) !void { 41 | if (self.pos >= amount) return; 42 | 43 | while (self.pos < amount) { 44 | const num_bytes = try self.socket.read(self.buf[self.pos..]); 45 | if (num_bytes == 0) return error.EndOfStream; 46 | self.pos += num_bytes; 47 | } 48 | } 49 | 50 | pub inline fn shift(self: *Self, amount: usize) void { 51 | mem.copy(u8, self.buf[0 ..
self.pos - amount], self.buf[amount..self.pos]); 52 | self.pos -= amount; 53 | } 54 | }; 55 | } 56 | 57 | pub fn Writer(comptime Socket: type, comptime buffer_size: usize) type { 58 | return struct { 59 | const Self = @This(); 60 | 61 | socket: *Socket, 62 | 63 | buf: [buffer_size]u8 = undefined, 64 | pos: usize = 0, 65 | 66 | pub inline fn init(socket: *Socket) Self { 67 | return Self{ .socket = socket }; 68 | } 69 | 70 | pub inline fn write(self: *Self, buf: []const u8) !void { 71 | mem.copy(u8, try self.peek(buf.len), buf); 72 | } 73 | 74 | pub inline fn peek(self: *Self, size: usize) ![]u8 { 75 | if (size > buffer_size) return error.RequestedSizeToolarge; 76 | if (self.pos + size > buffer_size) try self.shift(buffer_size - size); 77 | 78 | defer self.pos += size; 79 | return self.buf[self.pos..][0..size]; 80 | } 81 | 82 | pub inline fn flush(self: *Self) !void { 83 | return self.shift(null); 84 | } 85 | 86 | pub inline fn shift(self: *Self, amount: ?usize) !void { 87 | const required_leftover_space = amount orelse 0; 88 | 89 | while (self.pos > required_leftover_space) { 90 | const num_bytes = try self.socket.write(self.buf[0..self.pos]); 91 | if (num_bytes == 0) return error.EndOfStream; 92 | self.pos -= num_bytes; 93 | } 94 | } 95 | }; 96 | } 97 | -------------------------------------------------------------------------------- /server.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const pike = @import("pike"); 3 | const sync = @import("sync.zig"); 4 | 5 | const os = std.os; 6 | const net = std.net; 7 | const mem = std.mem; 8 | const meta = std.meta; 9 | const atomic = std.atomic; 10 | 11 | usingnamespace @import("socket.zig"); 12 | 13 | pub fn Server(comptime opts: Options) type { 14 | return struct { 15 | const Self = @This(); 16 | 17 | const Node = struct { 18 | ptr: *Connection, 19 | next: ?*Node = null, 20 | }; 21 | 22 | const ServerSocket = Socket(.server, opts); 23 | const 
Protocol = opts.protocol_type; 24 | 25 | pub const Connection = struct { 26 | node: Node, 27 | socket: ServerSocket, 28 | frame: @Frame(Self.runConnection), 29 | }; 30 | 31 | protocol: Protocol, 32 | allocator: *mem.Allocator, 33 | notifier: *const pike.Notifier, 34 | socket: pike.Socket, 35 | 36 | lock: sync.Mutex = .{}, 37 | done: atomic.Bool = atomic.Bool.init(false), 38 | 39 | pool: [opts.max_connections_per_server]*Connection = undefined, 40 | pool_len: usize = 0, 41 | 42 | cleanup_counter: sync.Counter = .{}, 43 | cleanup_queue: ?*Node = null, 44 | 45 | frame: @Frame(Self.run) = undefined, 46 | 47 | pub fn init(protocol: Protocol, allocator: *mem.Allocator, notifier: *const pike.Notifier, address: net.Address) !Self { 48 | var self = Self{ 49 | .protocol = protocol, 50 | .allocator = allocator, 51 | .notifier = notifier, 52 | .socket = try pike.Socket.init(os.AF_INET, os.SOCK_STREAM, os.IPPROTO_TCP, 0), 53 | }; 54 | errdefer self.socket.deinit(); 55 | 56 | try self.socket.set(.reuse_address, true); 57 | try self.socket.bind(address); 58 | try self.socket.listen(128); 59 | 60 | return self; 61 | } 62 | 63 | pub fn deinit(self: *Self) void { 64 | if (self.done.xchg(true, .SeqCst)) return; 65 | 66 | self.socket.deinit(); 67 | await self.frame catch {}; 68 | 69 | self.close(); 70 | 71 | self.cleanup_counter.wait(); 72 | self.purge(); 73 | } 74 | 75 | pub fn close(self: *Self) void { 76 | var pool: [opts.max_connections_per_server]*Connection = undefined; 77 | var pool_len: usize = 0; 78 | 79 | { 80 | const held = self.lock.acquire(); 81 | defer held.release(); 82 | 83 | pool = self.pool; 84 | pool_len = self.pool_len; 85 | self.pool = undefined; 86 | self.pool_len = 0; 87 | } 88 | 89 | for (pool[0..pool_len]) |conn| { 90 | conn.socket.deinit(); 91 | 92 | if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) { 93 | self.protocol.close(.server, &conn.socket); 94 | } 95 | } 96 | } 97 | 98 | pub fn purge(self: *Self) void { 99 | const held = 
self.lock.acquire(); 100 | defer held.release(); 101 | 102 | while (self.cleanup_queue) |head| { 103 | await head.ptr.frame catch {}; 104 | self.cleanup_queue = head.next; 105 | 106 | if (comptime meta.trait.hasFn("purge")(meta.Child(Protocol))) { 107 | var items: [opts.write_queue_size]opts.message_type = undefined; 108 | 109 | const queue = &head.ptr.socket.write_queue; 110 | const remaining = queue.tail -% queue.head; 111 | 112 | var i: usize = 0; 113 | while (i < remaining) : (i += 1) { 114 | items[i] = queue.items[(queue.head + i) % queue.items.len]; 115 | } 116 | 117 | queue.head = queue.tail; 118 | 119 | self.protocol.purge(.server, &head.ptr.socket, items[0..remaining]); 120 | } 121 | 122 | self.allocator.destroy(head.ptr); 123 | } 124 | } 125 | 126 | fn cleanup(self: *Self, node: *Node) void { 127 | const held = self.lock.acquire(); 128 | defer held.release(); 129 | 130 | node.next = self.cleanup_queue; 131 | self.cleanup_queue = node; 132 | } 133 | 134 | pub fn serve(self: *Self) !void { 135 | try self.socket.registerTo(self.notifier); 136 | self.frame = async self.run(); 137 | } 138 | 139 | fn run(self: *Self) !void { 140 | yield(); 141 | 142 | defer if (!self.done.xchg(true, .SeqCst)) { 143 | self.socket.deinit(); 144 | self.close(); 145 | }; 146 | 147 | while (true) { 148 | self.accept() catch |err| switch (err) { 149 | error.SocketNotListening, 150 | error.OperationCancelled, 151 | => return, 152 | else => { 153 | continue; 154 | }, 155 | }; 156 | 157 | self.purge(); 158 | } 159 | } 160 | 161 | fn accept(self: *Self) !void { 162 | self.cleanup_counter.add(1); 163 | errdefer self.cleanup_counter.add(-1); 164 | 165 | const conn = try self.allocator.create(Connection); 166 | errdefer self.allocator.destroy(conn); 167 | 168 | conn.node = .{ .ptr = conn }; 169 | 170 | const peer = try self.socket.accept(); 171 | 172 | conn.socket = ServerSocket.init(peer.socket, peer.address); 173 | errdefer conn.socket.deinit(); 174 | 175 | try 
conn.socket.unwrap().registerTo(self.notifier); 176 | 177 | { 178 | const held = self.lock.acquire(); 179 | defer held.release(); 180 | 181 | if (self.pool_len >= opts.max_connections_per_server) { // pool is full: reject BEFORE inserting (was `pool_len + 1 ==`, an off-by-one that capped the pool at max - 1) 182 | return error.MaxConnectionLimitExceeded; 183 | } 184 | 185 | self.pool[self.pool_len] = conn; 186 | self.pool_len += 1; 187 | } 188 | 189 | conn.frame = async self.runConnection(conn); 190 | } 191 | 192 | fn deleteConnection(self: *Self, conn: *Connection) bool { 193 | const held = self.lock.acquire(); 194 | defer held.release(); 195 | 196 | var pool = self.pool[0..self.pool_len]; 197 | 198 | if (mem.indexOfScalar(*Connection, pool, conn)) |i| { 199 | mem.copy(*Connection, pool[i..], pool[i + 1 ..]); 200 | self.pool_len -= 1; 201 | return true; 202 | } 203 | 204 | return false; 205 | } 206 | 207 | fn runConnection(self: *Self, conn: *Connection) !void { 208 | defer { 209 | if (self.deleteConnection(conn)) { 210 | conn.socket.deinit(); 211 | 212 | if (comptime meta.trait.hasFn("close")(meta.Child(Protocol))) { 213 | self.protocol.close(.server, &conn.socket); 214 | } 215 | } 216 | 217 | self.cleanup(&conn.node); 218 | self.cleanup_counter.add(-1); 219 | } 220 | 221 | yield(); 222 | 223 | if (comptime meta.trait.hasFn("handshake")(meta.Child(Protocol))) { 224 | conn.socket.context = try self.protocol.handshake(.server, &conn.socket.inner); 225 | } 226 | 227 | try conn.socket.run(self.protocol); 228 | } 229 | }; 230 | } 231 | -------------------------------------------------------------------------------- /snow.zig: -------------------------------------------------------------------------------- 1 | pub const io = @import("io.zig"); 2 | pub const sync = @import("sync.zig"); 3 | 4 | pub usingnamespace @import("client.zig"); 5 | pub usingnamespace @import("server.zig"); 6 | pub usingnamespace @import("socket.zig"); 7 | -------------------------------------------------------------------------------- /socket.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const pike = @import("pike");
const io = @import("io.zig");
const sync = @import("sync.zig");

const net = std.net;
const meta = std.meta;

// Which end of a connection a socket represents.
pub const Side = packed enum(u1) {
    client,
    server,
};

// Compile-time configuration shared by Client / Server / Socket instantiations.
pub const Options = struct {
    max_connections_per_client: usize = 16,
    max_connections_per_server: usize = 128,

    // Protocol implementation type (see README); void until supplied by the user.
    protocol_type: type = void,
    // Element type queued by write(); defaults to raw byte slices.
    message_type: type = []const u8,
    // Optional per-socket user data type stored in the `context` field.
    context_type: type = void,

    write_queue_size: usize = 128,
    read_buffer_size: usize = 4 * 1024 * 1024,
    write_buffer_size: usize = 4 * 1024 * 1024,
};

// Cooperatively yields: suspends the current async frame and immediately
// re-dispatches it to the pike scheduler (LIFO slot).
pub fn yield() void {
    suspend {
        var task = pike.Task.init(@frame());
        pike.dispatch(&task, .{ .use_lifo = true });
    }
}

// Wraps a pike.Socket with buffered framing I/O and a bounded async write queue.
pub fn Socket(comptime side: Side, comptime opts: Options) type {
    return struct {
        const Self = @This();

        pub const Reader = io.Reader(pike.Socket, opts.read_buffer_size);
        pub const Writer = io.Writer(pike.Socket, opts.write_buffer_size);

        const WriteQueue = sync.Queue(opts.message_type, opts.write_queue_size);
        const Protocol = opts.protocol_type;
        const Context = opts.context_type;

        inner: pike.Socket,
        address: net.Address,

        // User data slot; left undefined until the application assigns it.
        context: Context = undefined,
        write_queue: WriteQueue = .{},

        pub fn init(inner: pike.Socket, address: net.Address) Self {
            return Self{ .inner = inner, .address = address };
        }

        pub fn deinit(self: *Self) void {
            self.inner.deinit();
        }

        pub inline fn unwrap(self: *Self) *pike.Socket {
            return &self.inner;
        }

        // Enqueues a message for the writer loop; suspends while the queue is
        // full and errors once the queue has been closed.
        pub fn write(self: *Self, message: opts.message_type) !void {
            try self.write_queue.push(message);
        }

        // Drives the socket: spawns the writer loop as an async frame, then
        // blocks in the protocol's read loop. On exit (error or EOF) the
        // write queue is closed so the writer frame unblocks and is awaited.
        pub fn run(self: *Self, protocol: Protocol) !void {
            var reader = Reader.init(self.unwrap());

            var writer = async self.runWriter(protocol);
            defer {
                self.write_queue.close();
                // Best-effort teardown: shutdown errors are deliberately ignored.
                await writer catch {};
            }

            yield();

            try protocol.read(side, self, &reader);
        }

        // Writer loop: drains batches of queued messages and hands them to the
        // protocol until the queue is closed (pop then returns an error).
        fn runWriter(self: *Self, protocol: Protocol) !void {
            var writer = Writer.init(self.unwrap());
            var queue: @TypeOf(self.write_queue.items) = undefined;

            while (true) {
                const num_items = try self.write_queue.pop(queue[0..]);
                try protocol.write(side, self, &writer, queue[0..num_items]);
            }
        }
    };
}
-------------------------------------------------------------------------------- /sync.zig: --------------------------------------------------------------------------------
const std = @import("std");
const pike = @import("pike");

const mem = std.mem;

// Counts in-flight operations; wait() blocks until the count returns to zero.
pub const Counter = struct {
    const Self = @This();

    state: isize = 0,
    event: Event = .{},

    // Atomically adds `delta`; notifies waiters when the count hits zero.
    pub fn add(self: *Self, delta: isize) void {
        var state = @atomicLoad(isize, &self.state, .Monotonic);
        var new_state: isize = undefined;

        while (true) {
            new_state = state + delta;

            state = @cmpxchgWeak(
                isize,
                &self.state,
                state,
                new_state,
                .Monotonic,
                .Monotonic,
            ) orelse break;
        }

        if (new_state == 0) {
            self.event.notify();
        }
    }

    pub fn wait(self: *Self) void {
        while (@atomicLoad(isize, &self.state, .Monotonic) != 0) {
            self.event.wait();
        }
    }
};

// Bounded queue of T with async suspension: one parked reader at a time,
// a linked list of parked writers.
pub fn Queue(comptime T: type, comptime capacity: comptime_int) type {
    return struct {
        const Self = @This();

        const Reader = struct {
            task: pike.Task,
            dead: bool = false,
        };

        const Writer = struct {
            next: ?*Writer = null,
            tail: ?*Writer = null,
            task: pike.Task,
            dead: bool = false,
        };

        lock: std.Mutex = .{},
        items: [capacity]T = undefined,
        dead: bool = false,
        head:
usize = 0,
        tail: usize = 0,
        reader: ?*Reader = null,
        writers: ?*Writer = null,

        /// Marks the queue dead and wakes the parked reader and every parked
        /// writer so their frames can observe cancellation.
        pub fn close(self: *Self) void {
            const held = self.lock.acquire();
            if (self.dead) {
                held.release();
                return;
            }

            self.dead = true;

            // Detach parked frames while holding the lock...
            const maybe_reader = blk: {
                if (self.reader) |reader| {
                    self.reader = null;
                    break :blk reader;
                }
                break :blk null;
            };

            var maybe_writers = blk: {
                if (self.writers) |writers| {
                    self.writers = null;
                    break :blk writers;
                }
                break :blk null;
            };

            held.release();

            // ...then dispatch them outside the lock.
            if (maybe_reader) |reader| {
                reader.dead = true;
                pike.dispatch(&reader.task, .{});
            }

            while (maybe_writers) |writer| {
                writer.dead = true;
                maybe_writers = writer.next;
                pike.dispatch(&writer.task, .{});
            }
        }

        /// Number of items currently buffered.
        pub fn pending(self: *Self) usize {
            const held = self.lock.acquire();
            defer held.release();
            return self.tail -% self.head;
        }

        /// Pushes one item, suspending while the queue is full. Errors once
        /// the queue has been closed.
        pub fn push(self: *Self, item: T) !void {
            while (true) {
                const held = self.lock.acquire();
                if (self.dead) {
                    held.release();
                    return error.AlreadyShutdown;
                }

                if (self.tail -% self.head < capacity) {
                    self.items[self.tail % capacity] = item;
                    self.tail +%= 1;

                    const maybe_reader = blk: {
                        if (self.reader) |reader| {
                            self.reader = null;
                            break :blk reader;
                        }
                        break :blk null;
                    };

                    held.release();

                    if (maybe_reader) |reader| {
                        pike.dispatch(&reader.task, .{});
                    }

                    return;
                }

                // Queue full: park this frame on the writer list and retry once woken.
                var writer = Writer{ .task = pike.Task.init(@frame()) };

                suspend {
                    if (self.writers) |writers| {
                        writers.tail.?.next = &writer;
                    } else {
                        self.writers = &writer;
                    }
                    self.writers.?.tail = &writer;
                    held.release();
                }

                if (writer.dead) return error.OperationCancelled;
            }
        }

        /// Pops up to dst.len items into dst, suspending while the queue is
        /// empty. Returns the number of items copied; errors once the queue
        /// has been closed.
        pub fn pop(self: *Self, dst: []T) !usize {
            // Degenerate destination: nothing can ever be delivered.
            if (dst.len == 0) return 0;

            while (true) {
                const held = self.lock.acquire();

                // FIX: bound the batch by dst.len. The original used the full
                // backlog (tail -% head) and wrote past the end of dst whenever
                // the caller's buffer was smaller than the number of pending
                // items. In-repo callers pass a capacity-sized buffer, so their
                // behavior is unchanged.
                const count = std.math.min(self.tail -% self.head, dst.len);

                if (count != 0) {
                    var i: usize = 0;
                    while (i < count) : (i += 1) {
                        dst[i] = self.items[(self.head +% i) % capacity];
                    }

                    // Only consume what was actually copied out.
                    self.head +%= count;

                    var maybe_writers = blk: {
                        if (self.writers) |writers| {
                            self.writers = null;
                            break :blk writers;
                        }
                        break :blk null;
                    };

                    held.release();

                    while (maybe_writers) |writer| {
                        maybe_writers = writer.next;
                        pike.dispatch(&writer.task, .{});
                    }

                    return count;
                }

                if (self.dead) {
                    held.release();
                    return error.AlreadyShutdown;
                }

                // Queue empty: park this frame as the (single) reader.
                var reader = Reader{ .task = pike.Task.init(@frame()) };

                suspend {
                    self.reader = &reader;
                    held.release();
                }

                if (reader.dead) return error.OperationCancelled;
            }
        }
    };
}

// pub fn Queue(comptime T: type, comptime capacity: comptime_int) type {
//     return struct {
//         items: [capacity]T = undefined,
//         reader: Event = .{},
//         writer: Event = .{},
//         dead: bool = false,
//         head: usize = 0,
//         tail: usize = 0,

//         const Self = @This();

//         pub fn pending(self: *const Self) usize {
//             const head = @atomicLoad(usize, &self.head, .Acquire);
//             return self.tail -% head;
//         }

//         pub fn push(self: *Self, item: T) !void {
//             while (true) {
//                 if (@atomicLoad(bool, &self.dead, .Monotonic)) {
//                     return error.OperationCancelled;
//                 }

//                 const head = @atomicLoad(usize, &self.head, .Acquire);
//                 if (self.tail -% head < capacity) {
//                     self.items[self.tail % capacity] = item;
//                     @atomicStore(usize, &self.tail, self.tail +%
1, .Release);
//                     self.reader.notify();
//                     return;
//                 }

//                 self.writer.wait();
//             }
//         }

//         pub fn pop(self: *Self, dst: []T) !usize {
//             while (true) {
//                 const tail = @atomicLoad(usize, &self.tail, .Acquire);
//                 const popped = tail -% self.head;

//                 if (popped != 0) {
//                     var i: usize = 0;
//                     while (i < popped) : (i += 1) {
//                         dst[i] = self.items[(self.head +% i) % capacity];
//                     }

//                     @atomicStore(usize, &self.head, tail, .Release);
//                     self.writer.notify();

//                     return popped;
//                 }

//                 if (@atomicLoad(bool, &self.dead, .Monotonic)) {
//                     return error.OperationCancelled;
//                 }

//                 self.reader.wait();
//             }
//         }

//         pub fn close(self: *Self) void {
//             if (@atomicRmw(bool, &self.dead, .Xchg, true, .Monotonic)) {
//                 return;
//             }

//             self.reader.notify();
//             self.writer.notify();
//         }
//     };
// }

/// Edge-triggered event that parks and wakes a single task at a time.
/// `&notified` is used as a sentinel state meaning "notify() arrived
/// before wait()"; null means idle; any other pointer is the parked task.
pub const Event = struct {
    state: ?*pike.Task = null,

    // Sentinel; its contents are never read, only its address compared.
    var notified: pike.Task = undefined;

    /// Suspends the caller until notify() is called. If a notification is
    /// already pending, consumes it and resumes immediately.
    pub fn wait(self: *Event) void {
        var task = pike.Task.init(@frame());
        suspend {
            var state = @atomicLoad(?*pike.Task, &self.state, .Monotonic);
            while (true) {
                // FIX: `&notified` had been mangled to `¬ified` (U+00AC,
                // from the HTML entity `&not`) by a bad text conversion;
                // restored so the comparison targets the sentinel's address.
                // At most one waiter may be parked at a time, hence unreachable.
                const new_state = if (state == &notified) null else if (state == null) &task else unreachable;

                state = @cmpxchgWeak(
                    ?*pike.Task,
                    &self.state,
                    state,
                    new_state,
                    .Release,
                    .Monotonic,
                ) orelse {
                    // Pending notification consumed: resume ourselves at once.
                    if (new_state == null) pike.dispatch(&task, .{});
                    break;
                };
            }
        }
    }

    /// Wakes the parked waiter if any; otherwise records a pending
    /// notification. Notifications coalesce: at most one is remembered.
    pub fn notify(self: *Event) void {
        var state = @atomicLoad(?*pike.Task, &self.state, .Monotonic);
        while (true) {
            // FIX: same `&notified` -> `¬ified` mojibake repaired here
            // and in the sentinel assignment below.
            if (state == &notified)
                return;

            const new_state = if (state == null) &notified else null;
            state = @cmpxchgWeak(
                ?*pike.Task,
                &self.state,
                state,
                new_state,
                .Acquire,
                .Monotonic,
            ) orelse {
                if (state) |task| pike.dispatch(task, .{});
                break;
            };
        }
    }
};

/// Async-friendly Mutex ported from Zig's standard library to be compatible
/// with scheduling methods exposed by pike.
pub const Mutex = struct {
    mutex: std.Mutex = .{},
    head: usize = UNLOCKED,

    const UNLOCKED = 0;
    const LOCKED = 1;

    const Waiter = struct {
        // forced Waiter alignment to ensure it doesn't clash with LOCKED
        next: ?*Waiter align(2),
        tail: *Waiter,
        task: pike.Task,
    };

    pub fn initLocked() Mutex {
        return Mutex{ .head = LOCKED };
    }

    pub fn acquire(self: *Mutex) Held {
        const held = self.mutex.acquire();

        // self.head transitions from multiple stages depending on the value:
        // (NOTE: the <...> placeholders below were stripped by a bad HTML
        // conversion and have been reconstructed from Held.release() — verify.)
        // UNLOCKED -> LOCKED:
        //   acquire Mutex ownership when there are no waiters
        // LOCKED -> <waiter queue head>:
        //   Mutex is already owned, enqueue first Waiter
        // <waiter queue head> -> <waiter queue head>:
        //   Mutex is owned with pending waiters. Push our waiter to the queue.

        // Fast path: uncontended — take ownership without parking.
        if (self.head == UNLOCKED) {
            self.head = LOCKED;
            held.release();
            return Held{ .lock = self };
        }

        var waiter: Waiter = undefined;
        waiter.next = null;
        waiter.tail = &waiter;

        // Decode the current owner state: null means owned with no queue yet.
        const head = switch (self.head) {
            UNLOCKED => unreachable,
            LOCKED => null,
            else => @intToPtr(*Waiter, self.head),
        };

        if (head) |h| {
            h.tail.next = &waiter;
            h.tail = &waiter;
        } else {
            self.head = @ptrToInt(&waiter);
        }

        // Park: the task is initialized and the inner lock released inside
        // suspend so release() cannot dispatch us before we are suspended.
        suspend {
            waiter.task = pike.Task.init(@frame());
            held.release();
        }

        // Woken by release(): ownership was handed to us directly.
        return Held{ .lock = self };
    }

    // Proof of ownership; release() hands the lock to the next waiter, if any.
    pub const Held = struct {
        lock: *Mutex,

        pub fn release(self: Held) void {
            const waiter = blk: {
                const held = self.lock.mutex.acquire();
                defer held.release();

                // self.head goes through the reverse transition from acquire():
                // (NOTE: the <...> placeholders were stripped by a bad HTML
                // conversion and have been reconstructed from the code — verify.)
                // <waiter queue head> -> <next waiter>:
                //   pop a waiter from the queue to give Mutex ownership when there's still others pending
                // <last waiter> -> LOCKED:
                //   pop the last waiter from the queue, while also giving it lock ownership when awoken
                // LOCKED -> UNLOCKED:
                //   last lock owner releases lock while no one else is waiting for it

                switch (self.lock.head) {
                    UNLOCKED => unreachable, // Mutex unlocked while unlocking
                    LOCKED => {
                        self.lock.head = UNLOCKED;
                        break :blk null;
                    },
                    else => {
                        const waiter = @intToPtr(*Waiter, self.lock.head);
                        self.lock.head = if (waiter.next == null) LOCKED else @ptrToInt(waiter.next);
                        if (waiter.next) |next|
                            next.tail = waiter.tail;
                        break :blk waiter;
                    },
                }
            };

            // Dispatch outside the inner lock; the woken frame owns the Mutex.
            if (waiter) |w| {
                pike.dispatch(&w.task, .{});
            }
        }
    };
};
-------------------------------------------------------------------------------- /test.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const snow = @import("snow.zig");
const sync = @import("sync.zig");
const pike = @import("pike");

const net = std.net;
const mem = std.mem;
const testing = std.testing;

// End-to-end smoke test: starts a server on an ephemeral port, connects a
// client, writes four newline-delimited messages, and waits for each one to
// be observed by the read loop.
test "client / server" {
    const Protocol = struct {
        const Self = @This();

        // Signalled once per line successfully read.
        event: sync.Event = .{},

        pub fn handshake(self: *Self, comptime side: snow.Side, socket: anytype) !void {
            return {};
        }

        pub fn close(self: *Self, comptime side: snow.Side, socket: anytype) void {
            return {};
        }

        pub fn purge(self: *Self, comptime side: snow.Side, socket: anytype, items: []const []const u8) void {
            return {};
        }

        // Reads '\n'-delimited frames forever, notifying the test per line.
        pub fn read(self: *Self, comptime side: snow.Side, socket: anytype, reader: anytype) !void {
            while (true) {
                const line = try reader.readLine();
                defer reader.shift(line.len);

                self.event.notify();
            }
        }

        // Frames each outgoing message by appending '\n'; rejects messages
        // that already contain the delimiter.
        pub fn write(self: *Self, comptime side: snow.Side, socket: anytype, writer: anytype, items: [][]const u8) !void {
            for (items) |message| {
                if (mem.indexOfScalar(u8, message, '\n') != null) {
                    return error.UnexpectedDelimiter;
                }

                const frame = try writer.peek(message.len + 1);
                mem.copy(u8, frame[0..message.len], message);
                frame[message.len..][0] = '\n';
            }

            try writer.flush();
        }
    };

    const opts: snow.Options = .{ .protocol_type = *Protocol };

    const Test = struct {
        fn run(notifier: *const pike.Notifier, protocol: *Protocol, stopped: *bool) !void {
            defer stopped.* = true;

            var server = try snow.Server(opts).init(
                protocol,
                testing.allocator,
                notifier,
                net.Address.initIp4(.{ 0, 0, 0, 0 }, 0),
            );
            defer server.deinit();

            try server.serve();

            var client = snow.Client(opts).init(
                protocol,
                testing.allocator,
                notifier,
                try server.socket.getBindAddress(),
            );
            defer client.deinit();

            inline for (.{ "A", "B", "C", "D" }) |message| {
                try client.write(message);
                protocol.event.wait();
            }
        }
    };

    const notifier = try pike.Notifier.init();
    defer notifier.deinit();

    var protocol: Protocol = .{};

    var stopped = false;
    // FIX: `&notifier` had been mangled to `¬ifier` (U+00AC, from the HTML
    // entity `&not`) by a bad text conversion; restored the address-of.
    var frame = async Test.run(&notifier, &protocol, &stopped);

    // Drive the event loop until the async test frame completes.
    while (!stopped) {
        try notifier.poll(10_000);
    }

    try nosuspend await frame;
}
-------------------------------------------------------------------------------- /zig.mod: --------------------------------------------------------------------------------
name: snow
main: snow.zig
dependencies:
  - type: git
    path: https://github.com/lithdew/pike.git