├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── LICENSE
├── README.md
├── build.zig
├── build.zig.zon
└── stable_array.zig

/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
name: CI

on:
  push:
    branches:
      - main
  pull_request:

jobs:
  test-windows:
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v3
      - uses: mlugg/setup-zig@v1
        with:
          version: 0.14.0
      - run: |
          zig build test
          zig build -Dtarget=x86-windows-gnu test

  test-macos:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v3
      - uses: mlugg/setup-zig@v1
        with:
          version: 0.14.0
      # Note that there's no testing for 32-bit on macOS since official support has been dropped
      - run: |
          zig build test

  test-linux:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: mlugg/setup-zig@v1
        with:
          version: 0.14.0
      - run: |
          zig build test
          zig build -Dtarget=x86-linux-gnu test

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.zig-cache/

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The license for this software is below:

This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <https://unlicense.org>

=======================================================================

Substantial portions of Zig's ArrayList source code were used in making
the implementation of this software.
Accordingly, the Zig software license is:

The MIT License (Expat)

Copyright (c) 2015-2022, Zig contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# zig-stable-array
Address-stable array with a fixed maximum size that allocates directly from virtual memory. Memory is only committed when actually used, and virtual page-table mappings are relatively cheap, so you only pay for the memory you actually touch. Additionally, since all memory stays in place and new memory is committed incrementally at the end of the array, no data is ever recopied when the array grows.

Ideal use cases are large arrays that may grow over time: preallocating a dynamic array at a high upper bound would waste memory, while relying on dynamic resizing can incur large copy costs as the array gets big. Another good use case is when stable pointers or slices to the array contents are desired; since the memory is never moved, pointers into the array are not invalidated by growth. Not recommended for small arrays, since the minimum allocation is the platform's page size, nor for platforms without virtual memory, such as WASM.

Typical usage is to specify a maximum size up-front that the array is not expected to reach, such as 2GB+, and then use the array as usual. If freeing memory is desired, `shrinkAndFree()` will decommit memory at the end of the array. Total memory usage can be calculated with `calcTotalUsedBytes()`. The interface is very similar to ArrayList, except for the allocator semantics: since typical heap semantics don't apply to this array, the memory is managed directly with mmap/munmap and VirtualAlloc/VirtualFree on POSIX and Windows platforms, respectively.
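
Because committed pages are never remapped or moved, pointers and slices into `items` remain valid while the array grows. An illustrative sketch (the reservation size and element count below are arbitrary):
```zig
var array = StableArray(u32).init(64 * 1024 * 1024); // reserve 64 MB of address space
defer array.deinit();
try array.append(42);
const first = &array.items[0];      // pointer into the first committed page
try array.appendNTimes(0, 100_000); // grows by committing more pages in place
assert(first == &array.items[0]);   // no reallocation or move happened
assert(first.* == 42);
```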

Usage:
```zig
var array = StableArray(u8).init(1024 * 1024 * 1024 * 128); // virtual address reservation of 128 GB
try array.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
assert(array.calcTotalUsedBytes() == std.heap.pageSize());
for (array.items, 0..) |v, i| {
    assert(v == i);
}
array.shrinkAndFree(5);
assert(array.calcTotalUsedBytes() == std.heap.pageSize());
array.deinit();
```

--------------------------------------------------------------------------------
/build.zig:
--------------------------------------------------------------------------------
const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    _ = b.addModule("zig-stable-array", .{
        .root_source_file = b.path("stable_array.zig"),
    });

    const lib_unit_tests = b.addTest(.{
        .root_source_file = b.path("stable_array.zig"),
        .target = target,
        .optimize = optimize,
    });
    const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&run_lib_unit_tests.step);
}

--------------------------------------------------------------------------------
/build.zig.zon:
--------------------------------------------------------------------------------
.{
    .name = .stable_array,
    .version = "0.1.0",
    .fingerprint = 0xee760748bd6028de,
    .dependencies = .{},
    .paths = .{
        "build.zig",
        "build.zig.zon",
        "stable_array.zig",
        "LICENSE",
        "README.md",
    },
}

--------------------------------------------------------------------------------
/stable_array.zig:
--------------------------------------------------------------------------------
const std = @import("std");
const builtin = @import("builtin");
const os = std.os;
const posix = std.posix;
const mem = std.mem;
const heap = std.heap;
const assert = std.debug.assert;

const AllocError = std.mem.Allocator.Error;

const darwin = struct {
    extern "c" fn madvise(ptr: [*]align(heap.page_size_min) u8, length: usize, advice: c_int) c_int;
};

pub fn StableArray(comptime T: type) type {
    return StableArrayAligned(T, @alignOf(T));
}

pub fn StableArrayAligned(comptime T: type, comptime _alignment: u29) type {
    if (@sizeOf(T) == 0) {
        @compileError("StableArray does not support types of size 0. Use ArrayList instead.");
    }

    return struct {
        const Self = @This();

        pub const Slice = []align(alignment) T;
        pub const VariableSlice = [*]align(alignment) T;

        pub const k_sizeof: usize = if (alignment > @sizeOf(T)) alignment else @sizeOf(T);
        pub const page_size: usize = heap.pageSize();
        pub const alignment = _alignment;

        items: Slice,
        capacity: usize,
        max_virtual_alloc_bytes: usize,

        pub fn getPageSize(self: *Self) usize {
            _ = self;
            return Self.page_size;
        }

        pub fn getAlignment(self: *Self) usize {
            _ = self;
            return Self.alignment;
        }

        pub fn init(max_virtual_alloc_bytes: usize) Self {
            assert(@mod(max_virtual_alloc_bytes, page_size) == 0); // max_virtual_alloc_bytes must be a multiple of page_size
            return Self{
                .items = &[_]T{},
                .capacity = 0,
                .max_virtual_alloc_bytes = max_virtual_alloc_bytes,
            };
        }

        pub fn initCapacity(max_virtual_alloc_bytes: usize, capacity: usize) AllocError!Self {
            var self = Self.init(max_virtual_alloc_bytes);
            try self.ensureTotalCapacity(capacity);
            return self;
        }

        pub fn deinit(self: *Self) void {
            self.clearAndFree();
        }

        pub fn insert(self: *Self, n: usize, item: T) AllocError!void {
            try self.ensureUnusedCapacity(1);
            self.items.len += 1;

            mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
            self.items[n] = item;
        }

        pub fn insertSlice(self: *Self, i: usize, items: []const T) AllocError!void {
            try self.ensureUnusedCapacity(items.len);
            self.items.len += items.len;

            mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
            @memcpy(self.items[i .. i + items.len], items);
        }

        pub fn replaceRange(self: *Self, start: usize, len: usize, new_items: []const T) AllocError!void {
            const after_range = start + len;
            const range = self.items[start..after_range];

            if (range.len == new_items.len)
                @memcpy(range, new_items)
            else if (range.len < new_items.len) {
                const first = new_items[0..range.len];
                const rest = new_items[range.len..];

                @memcpy(range, first);
                try self.insertSlice(after_range, rest);
            } else {
                // Only the leading new_items.len elements of the range are overwritten;
                // @memcpy requires equal-length operands, so slice the destination.
                @memcpy(range[0..new_items.len], new_items);
                const after_subrange = start + new_items.len;

                for (self.items[after_range..], 0..) |item, i| {
                    self.items[after_subrange..][i] = item;
                }

                self.items.len -= len - new_items.len;
            }
        }

        pub fn append(self: *Self, item: T) AllocError!void {
            const new_item_ptr = try self.addOne();
            new_item_ptr.* = item;
        }

        pub fn appendAssumeCapacity(self: *Self, item: T) void {
            const new_item_ptr = self.addOneAssumeCapacity();
            new_item_ptr.* = item;
        }

        pub fn appendSlice(self: *Self, items: []const T) AllocError!void {
            try self.ensureUnusedCapacity(items.len);
            self.appendSliceAssumeCapacity(items);
        }

        pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
            const old_len = self.items.len;
            const new_len = old_len + items.len;
            assert(new_len <= self.capacity);
            self.items.len = new_len;
            @memcpy(self.items[old_len..], items);
        }

        pub fn appendNTimes(self: *Self, value: T, n: usize) AllocError!void {
            const old_len = self.items.len;
            try self.resize(self.items.len + n);
            @memset(self.items[old_len..self.items.len], value);
        }

        pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
            const new_len = self.items.len + n;
            assert(new_len <= self.capacity);
            @memset(self.items.ptr[self.items.len..new_len], value);
            self.items.len = new_len;
        }

        pub const Writer = if (T != u8)
            @compileError("The Writer interface is only defined for StableArray(u8) " ++
                "but the given type is StableArray(" ++ @typeName(T) ++ ")")
        else
            std.io.Writer(*Self, error{OutOfMemory}, appendWrite);

        pub fn writer(self: *Self) Writer {
            return .{ .context = self };
        }

        fn appendWrite(self: *Self, m: []const u8) AllocError!usize {
            try self.appendSlice(m);
            return m.len;
        }

        pub fn addOne(self: *Self) AllocError!*T {
            const newlen = self.items.len + 1;
            try self.ensureTotalCapacity(newlen);
            return self.addOneAssumeCapacity();
        }

        pub fn addOneAssumeCapacity(self: *Self) *T {
            assert(self.items.len < self.capacity);

            self.items.len += 1;
            return &self.items[self.items.len - 1];
        }

        pub fn addManyAsArray(self: *Self, comptime n: usize) AllocError!*[n]T {
            const prev_len = self.items.len;
            try self.resize(self.items.len + n);
            return self.items[prev_len..][0..n];
        }

        pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
            assert(self.items.len + n <= self.capacity);
            const prev_len = self.items.len;
            self.items.len += n;
            return self.items[prev_len..][0..n];
        }

        pub fn orderedRemove(self: *Self, i: usize) T {
            const newlen = self.items.len - 1;
            if (newlen == i) return self.pop();

            const old_item = self.items[i];
            for (self.items[i..newlen], 0..) |*b, j| b.* = self.items[i + 1 + j];
            self.items[newlen] = undefined;
            self.items.len = newlen;
            return old_item;
        }

        pub fn swapRemove(self: *Self, i: usize) T {
            if (self.items.len - 1 == i) return self.pop();

            const old_item = self.items[i];
            self.items[i] = self.pop();
            return old_item;
        }

        pub fn resize(self: *Self, new_len: usize) AllocError!void {
            try self.ensureTotalCapacity(new_len);
            self.items.len = new_len;
        }
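
        // Note on freeing: shrinkAndFree() below releases the physical pages past the
        // page-aligned byte size of the new length. On Windows that is
        // VirtualFree(MEM_DECOMMIT); on POSIX it is madvise(DONTNEED), which keeps the
        // range mapped and writable but lets the OS reclaim the backing pages until
        // they are touched again.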
        pub fn shrinkAndFree(self: *Self, new_len: usize) void {
            assert(new_len <= self.items.len);

            const new_capacity_bytes = calcBytesUsedForCapacity(new_len);
            const current_capacity_bytes: usize = calcBytesUsedForCapacity(self.capacity);

            if (new_capacity_bytes < current_capacity_bytes) {
                const bytes_to_free: usize = current_capacity_bytes - new_capacity_bytes;

                if (builtin.os.tag == .windows) {
                    const w = os.windows;
                    const addr: usize = @intFromPtr(self.items.ptr) + new_capacity_bytes;
                    w.VirtualFree(@as(w.PVOID, @ptrFromInt(addr)), bytes_to_free, w.MEM_DECOMMIT);
                } else {
                    const base_addr: usize = @intFromPtr(self.items.ptr);
                    const offset_addr: usize = base_addr + new_capacity_bytes;
                    const addr: [*]align(heap.page_size_min) u8 = @ptrFromInt(offset_addr);
                    if (comptime builtin.os.tag.isDarwin()) {
                        const MADV_DONTNEED = 4;
                        const err: c_int = darwin.madvise(addr, bytes_to_free, MADV_DONTNEED);
                        switch (@as(posix.E, @enumFromInt(err))) {
                            posix.E.INVAL => unreachable,
                            posix.E.NOMEM => unreachable,
                            else => {},
                        }
                    } else {
                        posix.madvise(addr, bytes_to_free, std.c.MADV.DONTNEED) catch unreachable;
                    }
                }

                self.capacity = new_capacity_bytes / k_sizeof;
            }

            self.items.len = new_len;
        }

        pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
            assert(new_len <= self.items.len);
            self.items.len = new_len;
        }

        pub fn clearRetainingCapacity(self: *Self) void {
            self.items.len = 0;
        }

        pub fn clearAndFree(self: *Self) void {
            if (self.capacity > 0) {
                if (builtin.os.tag == .windows) {
                    const w = os.windows;
                    w.VirtualFree(@as(*anyopaque, @ptrCast(self.items.ptr)), 0, w.MEM_RELEASE);
                } else {
                    var slice: []align(heap.page_size_min) const u8 = undefined;
                    slice.ptr = @alignCast(@as([*]u8, @ptrCast(self.items.ptr)));
                    slice.len = self.max_virtual_alloc_bytes;
                    posix.munmap(slice);
                }
            }

            self.capacity = 0;
            self.items = &[_]T{};
        }

        pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) AllocError!void {
            const new_capacity_bytes = calcBytesUsedForCapacity(new_capacity);
            const current_capacity_bytes: usize = calcBytesUsedForCapacity(self.capacity);

            if (current_capacity_bytes < new_capacity_bytes) {
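                // Growth happens in two steps. On the first allocation, the entire
                // max_virtual_alloc_bytes range is reserved up front (VirtualAlloc with
                // MEM_RESERVE on Windows, an anonymous PROT_NONE mapping on POSIX)
                // without committing any physical memory. The commit step further below
                // then makes just the required prefix of that reservation readable and
                // writable.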
                if (self.capacity == 0) {
                    if (builtin.os.tag == .windows) {
                        const w = os.windows;
                        const addr: w.PVOID = w.VirtualAlloc(null, self.max_virtual_alloc_bytes, w.MEM_RESERVE, w.PAGE_READWRITE) catch return AllocError.OutOfMemory;
                        self.items.ptr = @alignCast(@ptrCast(addr));
                        self.items.len = 0;
                    } else {
                        const prot: u32 = std.c.PROT.NONE;
                        const map: std.c.MAP = .{
                            .ANONYMOUS = true,
                            .TYPE = .PRIVATE,
                        };
                        const fd: posix.fd_t = -1;
                        const offset: usize = 0;
                        const slice = posix.mmap(null, self.max_virtual_alloc_bytes, prot, map, fd, offset) catch return AllocError.OutOfMemory;
                        self.items.ptr = @alignCast(@ptrCast(slice.ptr));
                        self.items.len = 0;
                    }
                } else if (current_capacity_bytes == self.max_virtual_alloc_bytes) {
                    // If you hit this, you likely either didn't reserve enough space up-front, or have a leak that is allocating too many elements
                    return AllocError.OutOfMemory;
                }

                if (builtin.os.tag == .windows) {
                    const w = std.os.windows;
                    _ = w.VirtualAlloc(@as(w.PVOID, @ptrCast(self.items.ptr)), new_capacity_bytes, w.MEM_COMMIT, w.PAGE_READWRITE) catch return AllocError.OutOfMemory;
                } else {
                    const resize_capacity = new_capacity_bytes - current_capacity_bytes;
                    const region_begin: [*]u8 = @ptrCast(self.items.ptr);
                    const remap_region_begin: [*]u8 = region_begin + current_capacity_bytes;

                    const prot: u32 = std.c.PROT.READ | std.c.PROT.WRITE;
                    const map: std.c.MAP = .{
                        .ANONYMOUS = true,
                        .TYPE = .PRIVATE,
                        .FIXED = true,
                    };
                    const fd: posix.fd_t = -1;
                    const offset: usize = 0;

                    _ = posix.mmap(@alignCast(remap_region_begin), resize_capacity, prot, map, fd, offset) catch return AllocError.OutOfMemory;
                }
            }

            self.capacity = new_capacity;
        }

        pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) AllocError!void {
            return self.ensureTotalCapacity(self.items.len + additional_count);
        }

        pub fn expandToCapacity(self: *Self) void {
            self.items.len = self.capacity;
        }

        pub fn pop(self: *Self) T {
            const val = self.items[self.items.len - 1];
            self.items.len -= 1;
            return val;
        }

        pub fn popOrNull(self: *Self) ?T {
            if (self.items.len == 0) return null;
            return self.pop();
        }

        pub fn allocatedSlice(self: Self) Slice {
            return self.items.ptr[0..self.capacity];
        }

        // Make sure to update self.items.len if you intend for any writes to this
        // to modify the length of the array.
        pub fn unusedCapacitySlice(self: Self) Slice {
            return self.allocatedSlice()[self.items.len..];
        }

        pub fn calcTotalUsedBytes(self: Self) usize {
            return calcBytesUsedForCapacity(self.capacity);
        }

        fn calcBytesUsedForCapacity(capacity: usize) usize {
            return mem.alignForward(usize, k_sizeof * capacity, page_size);
        }
    };
}

const TEST_VIRTUAL_ALLOC_SIZE = 1024 * 1024 * 2; // 2 MB

test "init" {
    var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
    assert(a.items.len == 0);
    assert(a.capacity == 0);
    assert(a.max_virtual_alloc_bytes == TEST_VIRTUAL_ALLOC_SIZE);
    a.deinit();

    var b = StableArrayAligned(u8, 16).init(TEST_VIRTUAL_ALLOC_SIZE);
    assert(b.getAlignment() == 16);
    assert(b.items.len == 0);
    assert(b.capacity == 0);
    assert(b.max_virtual_alloc_bytes == TEST_VIRTUAL_ALLOC_SIZE);
    b.deinit();

    assert(a.getPageSize() == b.getPageSize());
}
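
// A minimal sketch exercising the Writer interface declared above; assumes the
// formatting behavior of std.io.Writer's print(). Test name and contents are
// illustrative only.
test "writer" {
    var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
    defer a.deinit();

    try a.writer().print("{s} {d}", .{ "stable", 42 });
    assert(mem.eql(u8, a.items, "stable 42"));
}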

test "append" {
    var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
    try a.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    assert(a.calcTotalUsedBytes() == a.getPageSize());
    for (a.items, 0..) |v, i| {
        assert(v == i);
    }
    a.deinit();

    var b = StableArrayAligned(u8, heap.pageSize()).init(TEST_VIRTUAL_ALLOC_SIZE);
    try b.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    assert(b.calcTotalUsedBytes() == a.getPageSize() * 10);
    for (b.items, 0..) |v, i| {
        assert(v == i);
    }
    b.deinit();
}

test "shrinkAndFree" {
    const page_size = heap.pageSize();

    var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
    try a.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    a.shrinkAndFree(5);
    assert(a.calcTotalUsedBytes() == page_size); // still using only a page
    assert(a.items.len == 5);
    for (a.items, 0..) |v, i| {
        assert(v == i);
    }
    a.deinit();

    var b = StableArrayAligned(u8, heap.pageSize()).init(TEST_VIRTUAL_ALLOC_SIZE);
    try b.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    b.shrinkAndFree(5);
    assert(b.calcTotalUsedBytes() == page_size * 5); // alignment of each item is 1 page
    assert(b.items.len == 5);
    for (b.items, 0..) |v, i| {
        assert(v == i);
    }
    b.deinit();

    var c = StableArrayAligned(u8, page_size / 2).init(TEST_VIRTUAL_ALLOC_SIZE);
    assert(c.getAlignment() == page_size / 2);
    try c.appendSlice(&[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 });
    c.shrinkAndFree(5);
    assert(c.calcTotalUsedBytes() == page_size * 3);
    assert(c.capacity == 6);
    assert(c.items.len == 5);
    for (c.items, 0..) |v, i| {
        assert(v == i);
    }
    c.deinit();
}

test "resize" {
    const max: usize = 1024 * 1024 * 1;
    var a = StableArray(u8).init(max);
    defer a.deinit();

    var size: usize = 512;
    while (size <= max) {
        try a.resize(size);
        size *= 2;
    }
}

test "out of memory" {
    var a = StableArrayAligned(u8, heap.pageSize()).init(TEST_VIRTUAL_ALLOC_SIZE);
    defer a.deinit();

    const max_capacity: usize = TEST_VIRTUAL_ALLOC_SIZE / a.getPageSize();
    try a.appendNTimes(0xFF, max_capacity);
    for (a.items) |v| {
        assert(v == 0xFF);
    }
    assert(a.max_virtual_alloc_bytes == a.calcTotalUsedBytes());
    assert(a.capacity == max_capacity);
    assert(a.items.len == max_capacity);

    var didCatchError: bool = false;
    a.append(0) catch |err| {
        didCatchError = true;
        assert(err == error.OutOfMemory);
    };
    assert(didCatchError == true);
}

test "huge max size" {
    const KB = 1024;
    const MB = KB * 1024;
    const GB = MB * 1024;

    const MAX_MEMORY_32 = GB * 1;
    const MAX_MEMORY_64 = GB * 128;
    const MAX_MEMORY = if (@sizeOf(usize) < @sizeOf(u64)) MAX_MEMORY_32 else MAX_MEMORY_64;

    var a = StableArray(u8).init(MAX_MEMORY);
    defer a.deinit();

    try a.resize(MB * 4);
    try a.resize(MB * 8);
    try a.resize(MB * 16);
    a.items[MB * 16 - 1] = 0xFF;
}

test "growing retains values" {
    var a = StableArray(u8).init(TEST_VIRTUAL_ALLOC_SIZE);
    defer a.deinit();

    try a.resize(a.getPageSize());
    a.items[0] = 0xFF;
    try a.resize(a.getPageSize() * 2);
    assert(a.items[0] == 0xFF);
}

--------------------------------------------------------------------------------