From aa1742db639a88559a1e5522dc796d4cc639c477 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Mon, 24 Nov 2025 10:43:08 -0800 Subject: [PATCH 1/4] use SlabAllocator --- src/browser/Factory.zig | 96 +----- src/slab.zig | 651 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 661 insertions(+), 86 deletions(-) create mode 100644 src/slab.zig diff --git a/src/browser/Factory.zig b/src/browser/Factory.zig index 8c9c3b58..6013a2ff 100644 --- a/src/browser/Factory.zig +++ b/src/browser/Factory.zig @@ -24,6 +24,8 @@ const IS_DEBUG = builtin.mode == .Debug; const log = @import("../log.zig"); const String = @import("../string.zig").String; +const SlabAllocator = @import("../slab.zig").SlabAllocator(16); + const Page = @import("Page.zig"); const Node = @import("webapi/Node.zig"); const Event = @import("webapi/Event.zig"); @@ -46,48 +48,12 @@ const MemoryPoolAligned = std.heap.MemoryPoolAligned; // (and alignment) based pools. const Factory = @This(); _page: *Page, -_size_8_8: MemoryPoolAligned([8]u8, .@"8"), -_size_16_8: MemoryPoolAligned([16]u8, .@"8"), -_size_24_8: MemoryPoolAligned([24]u8, .@"8"), -_size_32_8: MemoryPoolAligned([32]u8, .@"8"), -_size_32_16: MemoryPoolAligned([32]u8, .@"16"), -_size_40_8: MemoryPoolAligned([40]u8, .@"8"), -_size_48_16: MemoryPoolAligned([48]u8, .@"16"), -_size_56_8: MemoryPoolAligned([56]u8, .@"8"), -_size_64_16: MemoryPoolAligned([64]u8, .@"16"), -_size_80_16: MemoryPoolAligned([80]u8, .@"16"), -_size_88_8: MemoryPoolAligned([88]u8, .@"8"), -_size_96_16: MemoryPoolAligned([96]u8, .@"16"), -_size_128_8: MemoryPoolAligned([128]u8, .@"8"), -_size_144_8: MemoryPoolAligned([144]u8, .@"8"), -_size_152_8: MemoryPoolAligned([152]u8, .@"8"), -_size_160_8: MemoryPoolAligned([160]u8, .@"8"), -_size_184_8: MemoryPoolAligned([184]u8, .@"8"), -_size_232_8: MemoryPoolAligned([232]u8, .@"8"), -_size_648_8: MemoryPoolAligned([648]u8, .@"8"), +_slab: SlabAllocator, pub fn init(page: *Page) Factory { return .{ ._page = page, - ._size_8_8 = MemoryPoolAligned([8]u8, .@"8").init(page.arena), - ._size_16_8 = MemoryPoolAligned([16]u8, .@"8").init(page.arena), - ._size_24_8 = MemoryPoolAligned([24]u8, .@"8").init(page.arena), - ._size_32_8 = MemoryPoolAligned([32]u8, .@"8").init(page.arena), - ._size_32_16 = MemoryPoolAligned([32]u8, .@"16").init(page.arena), - ._size_40_8 = MemoryPoolAligned([40]u8, .@"8").init(page.arena), - ._size_48_16 = MemoryPoolAligned([48]u8, .@"16").init(page.arena), - ._size_56_8 = MemoryPoolAligned([56]u8, .@"8").init(page.arena), - ._size_64_16 = MemoryPoolAligned([64]u8, .@"16").init(page.arena), - ._size_80_16 = MemoryPoolAligned([80]u8, .@"16").init(page.arena), - ._size_88_8 = MemoryPoolAligned([88]u8, .@"8").init(page.arena), - ._size_96_16 = MemoryPoolAligned([96]u8, .@"16").init(page.arena), - ._size_128_8 = MemoryPoolAligned([128]u8, .@"8").init(page.arena), - ._size_144_8 = MemoryPoolAligned([144]u8, .@"8").init(page.arena), - ._size_152_8 = MemoryPoolAligned([152]u8, .@"8").init(page.arena), - ._size_160_8 = MemoryPoolAligned([160]u8, .@"8").init(page.arena), - ._size_184_8 = MemoryPoolAligned([184]u8, .@"8").init(page.arena), - ._size_232_8 = MemoryPoolAligned([232]u8, .@"8").init(page.arena), - ._size_648_8 = MemoryPoolAligned([648]u8, .@"8").init(page.arena), + ._slab = SlabAllocator.init(page.arena), }; } @@ -246,28 +212,8 @@ pub fn create(self: *Factory, value: anytype) !*@TypeOf(value) { } pub fn createT(self: *Factory, comptime T: type) !*T { - const SO = @sizeOf(T); - if (comptime SO == 8) return @ptrCast(try 
self._size_8_8.create()); - if (comptime SO == 16) return @ptrCast(try self._size_16_8.create()); - if (comptime SO == 24) return @ptrCast(try self._size_24_8.create()); - if (comptime SO == 32) { - if (comptime @alignOf(T) == 8) return @ptrCast(try self._size_32_8.create()); - if (comptime @alignOf(T) == 16) return @ptrCast(try self._size_32_16.create()); - } - if (comptime SO == 40) return @ptrCast(try self._size_40_8.create()); - if (comptime SO == 48) return @ptrCast(try self._size_48_16.create()); - if (comptime SO == 56) return @ptrCast(try self._size_56_8.create()); - if (comptime SO == 64) return @ptrCast(try self._size_64_16.create()); - if (comptime SO == 80) return @ptrCast(try self._size_80_16.create()); - if (comptime SO == 88) return @ptrCast(try self._size_88_8.create()); - if (comptime SO == 96) return @ptrCast(try self._size_96_16.create()); - if (comptime SO == 128) return @ptrCast(try self._size_128_8.create()); - if (comptime SO == 152) return @ptrCast(try self._size_152_8.create()); - if (comptime SO == 160) return @ptrCast(try self._size_160_8.create()); - if (comptime SO == 184) return @ptrCast(try self._size_184_8.create()); - if (comptime SO == 232) return @ptrCast(try self._size_232_8.create()); - if (comptime SO == 648) return @ptrCast(try self._size_648_8.create()); - @compileError(std.fmt.comptimePrint("No pool configured for @sizeOf({d}), @alignOf({d}): ({s})", .{ SO, @alignOf(T), @typeName(T) })); + const allocator = self._slab.allocator(); + return try allocator.create(T); } pub fn destroy(self: *Factory, value: anytype) void { @@ -291,6 +237,8 @@ pub fn destroy(self: *Factory, value: anytype) void { fn destroyChain(self: *Factory, value: anytype, comptime first: bool) void { const S = reflect.Struct(@TypeOf(value)); + const allocator = self._slab.allocator(); + // This is initially called from a deinit. We don't want to call that // same deinit. So when this is the first time destroyChain is called // we don't call deinit (because we're in that deinit) @@ -311,7 +259,7 @@ fn destroyChain(self: *Factory, value: anytype, comptime first: bool) void { } else if (@hasDecl(S, "JsApi")) { // Doesn't have a _proto, but has a JsApi. if (self._page.js.removeTaggedMapping(@intFromPtr(value))) |tagged| { - self._size_24_8.destroy(@ptrCast(tagged)); + allocator.destroy(tagged); } } @@ -319,31 +267,7 @@ fn destroyChain(self: *Factory, value: anytype, comptime first: bool) void { // (which makes sense when the @sizeOf(Leaf) == 8). These don't need to // be (cannot be) freed. But we'll still free the chain. 
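+    // allocator.destroy routes the free through the slab keyed by the value's
+    // size and alignment, so no per-size bookkeeping is needed here.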
if (comptime wasAllocated(S)) {
-        switch (@sizeOf(S)) {
-            8 => self._size_8_8.destroy(@ptrCast(@alignCast(value))),
-            16 => self._size_16_8.destroy(@ptrCast(value)),
-            24 => self._size_24_8.destroy(@ptrCast(value)),
-            32 => {
-                if (comptime @alignOf(S) == 8) {
-                    self._size_32_8.destroy(@ptrCast(value));
-                } else if (comptime @alignOf(S) == 16) {
-                    self._size_32_16.destroy(@ptrCast(value));
-                }
-            },
-            40 => self._size_40_8.destroy(@ptrCast(value)),
-            48 => self._size_48_16.destroy(@ptrCast(@alignCast(value))),
-            56 => self._size_56_8.destroy(@ptrCast(value)),
-            64 => self._size_64_16.destroy(@ptrCast(@alignCast(value))),
-            80 => self._size_80_16.destroy(@ptrCast(@alignCast(value))),
-            88 => self._size_88_8.destroy(@ptrCast(@alignCast(value))),
-            96 => self._size_96_16.destroy(@ptrCast(@alignCast(value))),
-            128 => self._size_128_8.destroy(@ptrCast(value)),
-            144 => self._size_144_8.destroy(@ptrCast(value)),
-            152 => self._size_152_8.destroy(@ptrCast(value)),
-            160 => self._size_160_8.destroy(@ptrCast(value)),
-            648 => self._size_648_8.destroy(@ptrCast(value)),
-            else => |SO| @compileError(std.fmt.comptimePrint("Don't know what I'm being asked to destroy @sizeOf({d}), @alignOf({d}): ({s})", .{ SO, @alignOf(S), @typeName(S) })),
-        }
+        allocator.destroy(value);
    }
}
diff --git a/src/slab.zig b/src/slab.zig
new file mode 100644
index 00000000..0af4f616
--- /dev/null
+++ b/src/slab.zig
@@ -0,0 +1,651 @@
+const std = @import("std");
+const assert = std.debug.assert;
+
+const Allocator = std.mem.Allocator;
+const Alignment = std.mem.Alignment;
+
+pub fn SlabAllocator(comptime slot_count: usize) type {
+    comptime assert(std.math.isPowerOfTwo(slot_count));
+
+    const Slab = struct {
+        const Slab = @This();
+        const chunk_shift = std.math.log2_int(usize, slot_count);
+        const chunk_mask = slot_count - 1;
+
+        alignment: Alignment,
+        item_size: usize,
+
+        // A set bit marks a free slot; a cleared bit marks a slot in use.
+        bitset: std.bit_set.DynamicBitSetUnmanaged,
+        chunks: std.ArrayListUnmanaged([]u8),
+
+        pub fn init(
+            allocator: Allocator,
+            alignment: Alignment,
+            item_size: usize,
+        ) !Slab {
+            return .{
+                .alignment = alignment,
+                .item_size = item_size,
+                .bitset = try .initFull(allocator, 0),
+                .chunks = .empty,
+            };
+        }
+
+        pub fn deinit(self: *Slab, allocator: Allocator) void {
+            self.bitset.deinit(allocator);
+
+            for (self.chunks.items) |chunk| {
+                allocator.rawFree(chunk, self.alignment, @returnAddress());
+            }
+
+            self.chunks.deinit(allocator);
+        }
+
+        inline fn toBitsetIndex(chunk_index: usize, slot_index: usize) usize {
+            return chunk_index * slot_count + slot_index;
+        }
+
+        inline fn chunkIndex(bitset_index: usize) usize {
+            return bitset_index >> chunk_shift;
+        }
+
+        inline fn slotIndex(bitset_index: usize) usize {
+            return bitset_index & chunk_mask;
+        }
+
+        fn alloc(self: *Slab, allocator: Allocator) ![]u8 {
+            if (self.bitset.findFirstSet()) |index| {
+                // if we have a free slot
+                const chunk_index = chunkIndex(index);
+                const slot_index = slotIndex(index);
+                self.bitset.unset(index);
+
+                const chunk = self.chunks.items[chunk_index];
+                const offset = slot_index * self.item_size;
+                return chunk.ptr[offset..][0..self.item_size];
+            } else {
+                const old_capacity = self.bitset.bit_length;
+
+                // if we don't have a free slot
+                try self.allocateChunk(allocator);
+
+                const first_slot_index = old_capacity;
+                self.bitset.unset(first_slot_index);
+
+                const new_chunk = self.chunks.items[self.chunks.items.len - 1];
+                return new_chunk.ptr[0..self.item_size];
+            }
+        }
+
+        fn free(self: *Slab, ptr: [*]u8) void {
+            const addr = @intFromPtr(ptr);
+
+            for (self.chunks.items, 0..) |chunk, i| {
+                const chunk_start = @intFromPtr(chunk.ptr);
+                const chunk_end = chunk_start + (slot_count * self.item_size);
+
+                if (addr >= chunk_start and addr < chunk_end) {
+                    const offset = addr - chunk_start;
+                    const slot_index = offset / self.item_size;
+
+                    const bitset_index = toBitsetIndex(i, slot_index);
+                    assert(!self.bitset.isSet(bitset_index));
+
+                    self.bitset.set(bitset_index);
+                    return;
+                }
+            }
+
+            unreachable;
+        }
+
+        fn allocateChunk(self: *Slab, allocator: Allocator) !void {
+            const chunk_len = self.item_size * slot_count;
+
+            const chunk_ptr = allocator.rawAlloc(
+                chunk_len,
+                self.alignment,
+                @returnAddress(),
+            ) orelse return error.FailedChildAllocation;
+
+            const chunk = chunk_ptr[0..chunk_len];
+            try self.chunks.append(allocator, chunk);
+
+            const new_capacity = self.chunks.items.len * slot_count;
+            try self.bitset.resize(allocator, new_capacity, true);
+        }
+    };
+
+    const SlabKey = struct {
+        size: usize,
+        alignment: Alignment,
+    };
+
+    return struct {
+        const Self = @This();
+
+        child_allocator: Allocator,
+        slabs: std.ArrayHashMapUnmanaged(SlabKey, Slab, struct {
+            const Context = @This();
+
+            pub fn hash(_: Context, key: SlabKey) u32 {
+                var hasher = std.hash.Wyhash.init(0);
+                std.hash.autoHash(&hasher, key.size);
+                std.hash.autoHash(&hasher, key.alignment);
+                return @truncate(hasher.final());
+            }
+
+            pub fn eql(_: Context, a: SlabKey, b: SlabKey, _: usize) bool {
+                return a.size == b.size and a.alignment == b.alignment;
+            }
+        }, false) = .empty,
+
+        pub fn init(child_allocator: Allocator) Self {
+            return .{
+                .child_allocator = child_allocator,
+                .slabs = .empty,
+            };
+        }
+
+        pub fn deinit(self: *Self) void {
+            for (self.slabs.values()) |*slab| {
+                slab.deinit(self.child_allocator);
+            }
+
+            self.slabs.deinit(self.child_allocator);
+        }
+
+        pub const ResetKind = enum {
+            /// Free all chunks and release all memory.
+            clear,
+            /// Keep all chunks; mark every slot free so the memory can be reused.
+            retain_capacity,
+        };
+
+        /// Resets the allocator. `.clear` frees every chunk back to the child
+        /// allocator; `.retain_capacity` keeps the chunks and only marks their slots free.
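+        ///
+        /// For example:
+        ///     slab.reset(.retain_capacity); // keep chunks, reuse their slots
+        ///     slab.reset(.clear); // give all memory back to the child allocator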
+ pub fn reset(self: *Self, kind: ResetKind) void { + switch (kind) { + .clear => { + for (self.slabs.values()) |*slab| { + for (slab.chunks.items) |chunk| { + self.child_allocator.free(chunk); + } + + slab.chunks.clearAndFree(self.child_allocator); + slab.bitset.deinit(self.child_allocator); + } + + self.slabs.clearAndFree(self.child_allocator); + }, + .retain_capacity => { + for (self.slabs.values()) |*slab| { + slab.bitset.setAll(); + } + }, + } + } + + pub const vtable = Allocator.VTable{ + .alloc = alloc, + .free = free, + .remap = Allocator.noRemap, + .resize = Allocator.noResize, + }; + + pub fn allocator(self: *Self) Allocator { + return .{ + .ptr = self, + .vtable = &vtable, + }; + } + + fn alloc(ctx: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 { + const self: *Self = @ptrCast(@alignCast(ctx)); + _ = ret_addr; + + const list_gop = self.slabs.getOrPut( + self.child_allocator, + SlabKey{ .size = len, .alignment = alignment }, + ) catch return null; + + if (!list_gop.found_existing) { + list_gop.value_ptr.* = Slab.init( + self.child_allocator, + alignment, + len, + ) catch return null; + } + + const list = list_gop.value_ptr; + const buf = list.alloc(self.child_allocator) catch return null; + return buf.ptr; + } + + fn free(ctx: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void { + const self: *Self = @ptrCast(@alignCast(ctx)); + _ = ret_addr; + + const ptr = memory.ptr; + const len = memory.len; + + const list = self.slabs.getPtr(.{ .size = len, .alignment = alignment }).?; + list.free(ptr); + } + }; +} + +const testing = std.testing; + +const TestSlabAllocator = SlabAllocator(32); + +test "slab allocator - basic allocation and free" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate some memory + const ptr1 = try allocator.alloc(u8, 100); + try testing.expect(ptr1.len == 100); + + // Write to it to ensure it's valid + @memset(ptr1, 42); + try testing.expectEqual(@as(u8, 42), ptr1[50]); + + // Free it + allocator.free(ptr1); +} + +test "slab allocator - multiple allocations" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + const ptr1 = try allocator.alloc(u8, 64); + const ptr2 = try allocator.alloc(u8, 128); + const ptr3 = try allocator.alloc(u8, 256); + + // Ensure they don't overlap + const addr1 = @intFromPtr(ptr1.ptr); + const addr2 = @intFromPtr(ptr2.ptr); + const addr3 = @intFromPtr(ptr3.ptr); + + try testing.expect(addr1 + 64 <= addr2 or addr2 + 128 <= addr1); + try testing.expect(addr2 + 128 <= addr3 or addr3 + 256 <= addr2); + + allocator.free(ptr1); + allocator.free(ptr2); + allocator.free(ptr3); +} + +test "slab allocator - no coalescing (different size classes)" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate two blocks of same size + const ptr1 = try allocator.alloc(u8, 128); + const ptr2 = try allocator.alloc(u8, 128); + + // Free them (no coalescing in slab allocator) + allocator.free(ptr1); + allocator.free(ptr2); + + // Can't allocate larger block from these freed 128-byte blocks + const ptr3 = try allocator.alloc(u8, 256); + + // ptr3 will be from a different size class, not coalesced from ptr1+ptr2 + const addr1 = @intFromPtr(ptr1.ptr); + const addr3 = @intFromPtr(ptr3.ptr); + + // They should NOT be adjacent (different size classes) + try testing.expect(addr3 < addr1 or addr3 >= 
addr1 + 256); + + allocator.free(ptr3); +} + +test "slab allocator - reuse freed memory" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + const ptr1 = try allocator.alloc(u8, 64); + const addr1 = @intFromPtr(ptr1.ptr); + allocator.free(ptr1); + + // Allocate same size, should reuse from same slab + const ptr2 = try allocator.alloc(u8, 64); + const addr2 = @intFromPtr(ptr2.ptr); + + try testing.expectEqual(addr1, addr2); + allocator.free(ptr2); +} + +test "slab allocator - multiple size classes" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate various sizes - each creates a new slab + var ptrs: [10][]u8 = undefined; + const sizes = [_]usize{ 24, 40, 64, 88, 128, 144, 200, 256, 512, 1000 }; + + for (&ptrs, sizes) |*ptr, size| { + ptr.* = try allocator.alloc(u8, size); + @memset(ptr.*, 0xFF); + } + + // Should have created multiple slabs + try testing.expect(seg.slabs.count() >= 10); + + // Free all + for (ptrs) |ptr| { + allocator.free(ptr); + } +} + +test "slab allocator - various sizes" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Test different sizes (not limited to powers of 2!) + const sizes = [_]usize{ 8, 16, 24, 32, 40, 64, 88, 128, 144, 256 }; + + for (sizes) |size| { + const ptr = try allocator.alloc(u8, size); + try testing.expect(ptr.len == size); + @memset(ptr, @intCast(size & 0xFF)); + allocator.free(ptr); + } +} + +test "slab allocator - exact sizes (no rounding)" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Odd sizes stay exact (unlike buddy which rounds to power of 2) + const ptr1 = try allocator.alloc(u8, 100); + const ptr2 = try allocator.alloc(u8, 200); + const ptr3 = try allocator.alloc(u8, 50); + + // Exact sizes! 
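+    // Each distinct (size, alignment) pair gets its own slab in the map, so the
+    // requested length comes back as-is rather than rounded to a size class.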
+ try testing.expect(ptr1.len == 100); + try testing.expect(ptr2.len == 200); + try testing.expect(ptr3.len == 50); + + allocator.free(ptr1); + allocator.free(ptr2); + allocator.free(ptr3); +} + +test "slab allocator - chunk allocation" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate many items of same size to force multiple chunks + var ptrs: [100][]u8 = undefined; + for (&ptrs) |*ptr| { + ptr.* = try allocator.alloc(u8, 64); + } + + // Should have allocated multiple chunks (32 items per chunk) + const slab = seg.slabs.getPtr(.{ .size = 64, .alignment = Alignment.@"1" }).?; + try testing.expect(slab.chunks.items.len > 1); + + // Free all + for (ptrs) |ptr| { + allocator.free(ptr); + } +} + +test "slab allocator - reset with retain_capacity" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate some memory + const ptr1 = try allocator.alloc(u8, 128); + const ptr2 = try allocator.alloc(u8, 256); + _ = ptr1; + _ = ptr2; + + const slabs_before = seg.slabs.count(); + const slab_128 = seg.slabs.getPtr(.{ .size = 128, .alignment = Alignment.@"1" }).?; + const chunks_before = slab_128.chunks.items.len; + + // Reset but keep chunks + seg.reset(.retain_capacity); + + try testing.expectEqual(slabs_before, seg.slabs.count()); + try testing.expectEqual(chunks_before, slab_128.chunks.items.len); + + // Should be able to allocate again + const ptr3 = try allocator.alloc(u8, 512); + allocator.free(ptr3); +} + +test "slab allocator - reset with clear" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate some memory + const ptr1 = try allocator.alloc(u8, 128); + _ = ptr1; + + try testing.expect(seg.slabs.count() > 0); + + // Reset and free everything + seg.reset(.clear); + + try testing.expectEqual(@as(usize, 0), seg.slabs.count()); + + // Should still work after reset + const ptr2 = try allocator.alloc(u8, 256); + allocator.free(ptr2); +} + +test "slab allocator - stress test" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + var prng = std.Random.DefaultPrng.init(0); + const random = prng.random(); + + var ptrs: std.ArrayList([]u8) = .empty; + + defer { + for (ptrs.items) |ptr| { + allocator.free(ptr); + } + ptrs.deinit(allocator); + } + + // Random allocations and frees + var i: usize = 0; + while (i < 100) : (i += 1) { + if (random.boolean() and ptrs.items.len > 0) { + // Free a random allocation + const index = random.uintLessThan(usize, ptrs.items.len); + allocator.free(ptrs.swapRemove(index)); + } else { + // Allocate random size (8 to 512) + const size = random.uintAtMost(usize, 504) + 8; + const ptr = try allocator.alloc(u8, size); + try ptrs.append(allocator, ptr); + + // Write to ensure it's valid + @memset(ptr, @intCast(i & 0xFF)); + } + } +} + +test "slab allocator - alignment" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + const ptr1 = try allocator.create(u64); + const ptr2 = try allocator.create(u32); + const ptr3 = try allocator.create([100]u8); + + allocator.destroy(ptr1); + allocator.destroy(ptr2); + allocator.destroy(ptr3); +} + +test "slab allocator - no resize support" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + const 
slice = try allocator.alloc(u8, 100); + @memset(slice, 42); + + // Resize should fail (not supported) + try testing.expect(!allocator.resize(slice, 90)); + try testing.expect(!allocator.resize(slice, 200)); + + allocator.free(slice); +} + +test "slab allocator - fragmentation pattern" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate 10 items + var items: [10][]u8 = undefined; + for (&items) |*item| { + item.* = try allocator.alloc(u8, 64); + @memset(item.*, 0xFF); + } + + // Free every other one + allocator.free(items[0]); + allocator.free(items[2]); + allocator.free(items[4]); + allocator.free(items[6]); + allocator.free(items[8]); + + // Allocate new items - should reuse freed slots + const new1 = try allocator.alloc(u8, 64); + const new2 = try allocator.alloc(u8, 64); + const new3 = try allocator.alloc(u8, 64); + + // Should get some of the freed slots back + const addrs = [_]usize{ + @intFromPtr(items[0].ptr), + @intFromPtr(items[2].ptr), + @intFromPtr(items[4].ptr), + @intFromPtr(items[6].ptr), + @intFromPtr(items[8].ptr), + }; + + const new1_addr = @intFromPtr(new1.ptr); + var found = false; + for (addrs) |addr| { + if (new1_addr == addr) found = true; + } + try testing.expect(found); + + // Cleanup + allocator.free(items[1]); + allocator.free(items[3]); + allocator.free(items[5]); + allocator.free(items[7]); + allocator.free(items[9]); + allocator.free(new1); + allocator.free(new2); + allocator.free(new3); +} + +test "slab allocator - many small allocations" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate 1000 small items + var ptrs: std.ArrayList([]u8) = .empty; + defer { + for (ptrs.items) |ptr| { + allocator.free(ptr); + } + ptrs.deinit(allocator); + } + + var i: usize = 0; + while (i < 1000) : (i += 1) { + const ptr = try allocator.alloc(u8, 24); + try ptrs.append(allocator, ptr); + } + + // Should have created multiple chunks + const slab = seg.slabs.getPtr(.{ .size = 24, .alignment = Alignment.@"1" }).?; + try testing.expect(slab.chunks.items.len > 10); +} + +test "slab allocator - zero waste for exact sizes" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // These sizes have zero internal fragmentation (unlike buddy) + const sizes = [_]usize{ 24, 40, 56, 88, 144, 152, 184, 232, 648 }; + + for (sizes) |size| { + const ptr = try allocator.alloc(u8, size); + + // Exact size returned! 
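+        // These sizes match the struct sizes Factory.zig previously served with
+        // dedicated MemoryPoolAligned pools.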
+ try testing.expectEqual(size, ptr.len); + + @memset(ptr, 0xFF); + allocator.free(ptr); + } +} + +test "slab allocator - different size classes don't interfere" { + var seg = TestSlabAllocator.init(testing.allocator); + defer seg.deinit(); + + const allocator = seg.allocator(); + + // Allocate size 64 + const ptr_64 = try allocator.alloc(u8, 64); + const addr_64 = @intFromPtr(ptr_64.ptr); + allocator.free(ptr_64); + + // Allocate size 128 - should NOT reuse size-64 slot + const ptr_128 = try allocator.alloc(u8, 128); + const addr_128 = @intFromPtr(ptr_128.ptr); + + try testing.expect(addr_64 != addr_128); + + // Allocate size 64 again - SHOULD reuse original slot + const ptr_64_again = try allocator.alloc(u8, 64); + const addr_64_again = @intFromPtr(ptr_64_again.ptr); + + try testing.expectEqual(addr_64, addr_64_again); + + allocator.free(ptr_128); + allocator.free(ptr_64_again); +} From 219245be9534f71043a79d7f352d74711c25bd6e Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Mon, 24 Nov 2025 20:36:15 -0800 Subject: [PATCH 2/4] standardize slab testing names --- src/slab.zig | 122 +++++++++++++++++++++++++-------------------------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/src/slab.zig b/src/slab.zig index 0af4f616..52d63c82 100644 --- a/src/slab.zig +++ b/src/slab.zig @@ -239,10 +239,10 @@ const testing = std.testing; const TestSlabAllocator = SlabAllocator(32); test "slab allocator - basic allocation and free" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate some memory const ptr1 = try allocator.alloc(u8, 100); @@ -257,10 +257,10 @@ test "slab allocator - basic allocation and free" { } test "slab allocator - multiple allocations" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); const ptr1 = try allocator.alloc(u8, 64); const ptr2 = try allocator.alloc(u8, 128); @@ -280,10 +280,10 @@ test "slab allocator - multiple allocations" { } test "slab allocator - no coalescing (different size classes)" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate two blocks of same size const ptr1 = try allocator.alloc(u8, 128); @@ -307,10 +307,10 @@ test "slab allocator - no coalescing (different size classes)" { } test "slab allocator - reuse freed memory" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); const ptr1 = try allocator.alloc(u8, 64); const addr1 = @intFromPtr(ptr1.ptr); @@ -325,10 +325,10 @@ test "slab allocator - reuse freed memory" { } test "slab allocator - multiple size classes" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate various sizes - 
each creates a new slab var ptrs: [10][]u8 = undefined; @@ -340,7 +340,7 @@ test "slab allocator - multiple size classes" { } // Should have created multiple slabs - try testing.expect(seg.slabs.count() >= 10); + try testing.expect(slab_alloc.slabs.count() >= 10); // Free all for (ptrs) |ptr| { @@ -349,10 +349,10 @@ test "slab allocator - multiple size classes" { } test "slab allocator - various sizes" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Test different sizes (not limited to powers of 2!) const sizes = [_]usize{ 8, 16, 24, 32, 40, 64, 88, 128, 144, 256 }; @@ -366,10 +366,10 @@ test "slab allocator - various sizes" { } test "slab allocator - exact sizes (no rounding)" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Odd sizes stay exact (unlike buddy which rounds to power of 2) const ptr1 = try allocator.alloc(u8, 100); @@ -387,10 +387,10 @@ test "slab allocator - exact sizes (no rounding)" { } test "slab allocator - chunk allocation" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate many items of same size to force multiple chunks var ptrs: [100][]u8 = undefined; @@ -399,7 +399,7 @@ test "slab allocator - chunk allocation" { } // Should have allocated multiple chunks (32 items per chunk) - const slab = seg.slabs.getPtr(.{ .size = 64, .alignment = Alignment.@"1" }).?; + const slab = slab_alloc.slabs.getPtr(.{ .size = 64, .alignment = Alignment.@"1" }).?; try testing.expect(slab.chunks.items.len > 1); // Free all @@ -409,10 +409,10 @@ test "slab allocator - chunk allocation" { } test "slab allocator - reset with retain_capacity" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate some memory const ptr1 = try allocator.alloc(u8, 128); @@ -420,14 +420,14 @@ test "slab allocator - reset with retain_capacity" { _ = ptr1; _ = ptr2; - const slabs_before = seg.slabs.count(); - const slab_128 = seg.slabs.getPtr(.{ .size = 128, .alignment = Alignment.@"1" }).?; + const slabs_before = slab_alloc.slabs.count(); + const slab_128 = slab_alloc.slabs.getPtr(.{ .size = 128, .alignment = Alignment.@"1" }).?; const chunks_before = slab_128.chunks.items.len; // Reset but keep chunks - seg.reset(.retain_capacity); + slab_alloc.reset(.retain_capacity); - try testing.expectEqual(slabs_before, seg.slabs.count()); + try testing.expectEqual(slabs_before, slab_alloc.slabs.count()); try testing.expectEqual(chunks_before, slab_128.chunks.items.len); // Should be able to allocate again @@ -436,21 +436,21 @@ test "slab allocator - reset with retain_capacity" { } test "slab allocator - reset with clear" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = 
seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate some memory const ptr1 = try allocator.alloc(u8, 128); _ = ptr1; - try testing.expect(seg.slabs.count() > 0); + try testing.expect(slab_alloc.slabs.count() > 0); // Reset and free everything - seg.reset(.clear); + slab_alloc.reset(.clear); - try testing.expectEqual(@as(usize, 0), seg.slabs.count()); + try testing.expectEqual(@as(usize, 0), slab_alloc.slabs.count()); // Should still work after reset const ptr2 = try allocator.alloc(u8, 256); @@ -458,10 +458,10 @@ test "slab allocator - reset with clear" { } test "slab allocator - stress test" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); var prng = std.Random.DefaultPrng.init(0); const random = prng.random(); @@ -495,10 +495,10 @@ test "slab allocator - stress test" { } test "slab allocator - alignment" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); const ptr1 = try allocator.create(u64); const ptr2 = try allocator.create(u32); @@ -510,10 +510,10 @@ test "slab allocator - alignment" { } test "slab allocator - no resize support" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); const slice = try allocator.alloc(u8, 100); @memset(slice, 42); @@ -526,10 +526,10 @@ test "slab allocator - no resize support" { } test "slab allocator - fragmentation pattern" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate 10 items var items: [10][]u8 = undefined; @@ -578,10 +578,10 @@ test "slab allocator - fragmentation pattern" { } test "slab allocator - many small allocations" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate 1000 small items var ptrs: std.ArrayList([]u8) = .empty; @@ -599,15 +599,15 @@ test "slab allocator - many small allocations" { } // Should have created multiple chunks - const slab = seg.slabs.getPtr(.{ .size = 24, .alignment = Alignment.@"1" }).?; + const slab = slab_alloc.slabs.getPtr(.{ .size = 24, .alignment = Alignment.@"1" }).?; try testing.expect(slab.chunks.items.len > 10); } test "slab allocator - zero waste for exact sizes" { - var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // These sizes have zero internal fragmentation (unlike buddy) const sizes = [_]usize{ 24, 40, 56, 88, 144, 152, 184, 232, 648 }; @@ -624,10 +624,10 @@ test "slab allocator - zero waste for exact sizes" { } test "slab allocator - different size classes don't interfere" { - 
var seg = TestSlabAllocator.init(testing.allocator); - defer seg.deinit(); + var slab_alloc = TestSlabAllocator.init(testing.allocator); + defer slab_alloc.deinit(); - const allocator = seg.allocator(); + const allocator = slab_alloc.allocator(); // Allocate size 64 const ptr_64 = try allocator.alloc(u8, 64); From 0da87e1d5ea79fc4b00ae3f1d9c1d237474975c3 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Tue, 25 Nov 2025 12:13:13 -0800 Subject: [PATCH 3/4] add slab statistics --- src/browser/Page.zig | 5 ++ src/slab.zig | 160 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 161 insertions(+), 4 deletions(-) diff --git a/src/browser/Page.zig b/src/browser/Page.zig index b93a9f94..b988f2ee 100644 --- a/src/browser/Page.zig +++ b/src/browser/Page.zig @@ -174,7 +174,12 @@ pub fn init(arena: Allocator, call_arena: Allocator, session: *Session) !*Page { pub fn deinit(self: *Page) void { if (comptime IS_DEBUG) { log.debug(.page, "page.deinit", .{ .url = self.url }); + + // Uncomment if you want slab statistics to print. + // const stats = self._factory._slab.getStats(self.arena) catch unreachable; + // stats.print() catch unreachable; } + self.js.deinit(); self._script_manager.shutdown = true; self._session.browser.http_client.abort(); diff --git a/src/slab.zig b/src/slab.zig index 52d63c82..509a1879 100644 --- a/src/slab.zig +++ b/src/slab.zig @@ -7,6 +7,11 @@ const Alignment = std.mem.Alignment; pub fn SlabAllocator(comptime slot_count: usize) type { comptime assert(std.math.isPowerOfTwo(slot_count)); + const SlabKey = struct { + size: usize, + alignment: Alignment, + }; + const Slab = struct { const Slab = @This(); const chunk_shift = std.math.log2_int(usize, slot_count); @@ -114,11 +119,45 @@ pub fn SlabAllocator(comptime slot_count: usize) type { const new_capacity = self.chunks.items.len * slot_count; try self.bitset.resize(allocator, new_capacity, true); } - }; - const SlabKey = struct { - size: usize, - alignment: Alignment, + const Stats = struct { + key: SlabKey, + item_size: usize, + chunk_count: usize, + total_slots: usize, + slots_in_use: usize, + slots_free: usize, + bytes_allocated: usize, + bytes_in_use: usize, + bytes_free: usize, + utilization_ratio: f64, + }; + + fn getStats(self: *const Slab, key: SlabKey) Stats { + const total_slots = self.bitset.bit_length; + const free_slots = self.bitset.count(); + const used_slots = total_slots - free_slots; + const bytes_allocated = self.chunks.items.len * slot_count * self.item_size; + const bytes_in_use = used_slots * self.item_size; + + const utilization_ratio = if (bytes_allocated > 0) + @as(f64, @floatFromInt(bytes_in_use)) / @as(f64, @floatFromInt(bytes_allocated)) + else + 0.0; + + return .{ + .key = key, + .item_size = self.item_size, + .chunk_count = self.chunks.items.len, + .total_slots = total_slots, + .slots_in_use = used_slots, + .slots_free = free_slots, + .bytes_allocated = bytes_allocated, + .bytes_in_use = bytes_in_use, + .bytes_free = free_slots * self.item_size, + .utilization_ratio = utilization_ratio, + }; + } }; return struct { @@ -185,6 +224,119 @@ pub fn SlabAllocator(comptime slot_count: usize) type { } } + const Stats = struct { + total_allocated_bytes: usize, + bytes_in_use: usize, + bytes_free: usize, + slab_count: usize, + total_chunks: usize, + total_slots: usize, + slots_in_use: usize, + slots_free: usize, + fragmentation_ratio: f64, + utilization_ratio: f64, + slabs: []const Slab.Stats, + + pub fn print(self: *const Stats) !void { + std.debug.print("\n", .{}); + std.debug.print("\n=== Slab 
Allocator Statistics ===\n", .{}); + std.debug.print("Overall Memory:\n", .{}); + std.debug.print(" Total allocated: {} bytes ({d:.2} MB)\n", .{ + self.total_allocated_bytes, + @as(f64, @floatFromInt(self.total_allocated_bytes)) / 1_048_576.0, + }); + std.debug.print(" In use: {} bytes ({d:.2} MB)\n", .{ + self.bytes_in_use, + @as(f64, @floatFromInt(self.bytes_in_use)) / 1_048_576.0, + }); + std.debug.print(" Free: {} bytes ({d:.2} MB)\n", .{ + self.bytes_free, + @as(f64, @floatFromInt(self.bytes_free)) / 1_048_576.0, + }); + + std.debug.print("\nOverall Structure:\n", .{}); + std.debug.print(" Slab Count: {}\n", .{self.slab_count}); + std.debug.print(" Total chunks: {}\n", .{self.total_chunks}); + std.debug.print(" Total slots: {}\n", .{self.total_slots}); + std.debug.print(" Slots in use: {}\n", .{self.slots_in_use}); + std.debug.print(" Slots free: {}\n", .{self.slots_free}); + + std.debug.print("\nOverall Efficiency:\n", .{}); + std.debug.print(" Utilization: {d:.1}%\n", .{self.utilization_ratio * 100.0}); + std.debug.print(" Fragmentation: {d:.1}%\n", .{self.fragmentation_ratio * 100.0}); + + if (self.slabs.len > 0) { + std.debug.print("\nPer-Slab Breakdown:\n", .{}); + std.debug.print( + " {s:>5} | {s:>4} | {s:>6} | {s:>6} | {s:>6} | {s:>10} | {s:>6}\n", + .{ "Size", "Algn", "Chunks", "Slots", "InUse", "Bytes", "Util%" }, + ); + std.debug.print( + " {s:-<5}-+-{s:-<4}-+-{s:-<6}-+-{s:-<6}-+-{s:-<6}-+-{s:-<10}-+-{s:-<6}\n", + .{ "", "", "", "", "", "", "" }, + ); + + for (self.slabs) |slab| { + std.debug.print(" {d:5} | {d:4} | {d:6} | {d:6} | {d:6} | {d:10} | {d:5.1}%\n", .{ + slab.key.size, + @intFromEnum(slab.key.alignment), + slab.chunk_count, + slab.total_slots, + slab.slots_in_use, + slab.bytes_allocated, + slab.utilization_ratio * 100.0, + }); + } + } + } + }; + + pub fn getStats(self: *Self, a: std.mem.Allocator) !Stats { + var slab_stats: std.ArrayList(Slab.Stats) = try .initCapacity(a, self.slabs.entries.len); + errdefer slab_stats.deinit(a); + + var stats = Stats{ + .total_allocated_bytes = 0, + .bytes_in_use = 0, + .bytes_free = 0, + .slab_count = self.slabs.count(), + .total_chunks = 0, + .total_slots = 0, + .slots_in_use = 0, + .slots_free = 0, + .fragmentation_ratio = 0.0, + .utilization_ratio = 0.0, + .slabs = &.{}, + }; + + var it = self.slabs.iterator(); + while (it.next()) |entry| { + const key = entry.key_ptr.*; + const slab = entry.value_ptr; + const slab_stat = slab.getStats(key); + + slab_stats.appendAssumeCapacity(slab_stat); + + stats.total_allocated_bytes += slab_stat.bytes_allocated; + stats.bytes_in_use += slab_stat.bytes_in_use; + stats.bytes_free += slab_stat.bytes_free; + stats.total_chunks += slab_stat.chunk_count; + stats.total_slots += slab_stat.total_slots; + stats.slots_in_use += slab_stat.slots_in_use; + stats.slots_free += slab_stat.slots_free; + } + + if (stats.total_allocated_bytes > 0) { + stats.fragmentation_ratio = @as(f64, @floatFromInt(stats.bytes_free)) / + @as(f64, @floatFromInt(stats.total_allocated_bytes)); + stats.utilization_ratio = @as(f64, @floatFromInt(stats.bytes_in_use)) / + @as(f64, @floatFromInt(stats.total_allocated_bytes)); + } + + stats.slabs = try slab_stats.toOwnedSlice(a); + return stats; + } + pub const vtable = Allocator.VTable{ .alloc = alloc, .free = free, From 058f86ec5f45371ec9a2bdce00a5ca4d30056b35 Mon Sep 17 00:00:00 2001 From: Muki Kiboigo Date: Tue, 25 Nov 2025 13:40:51 -0800 Subject: [PATCH 4/4] new exponential SlabAllocator --- src/browser/Factory.zig | 4 +- src/slab.zig | 719 
+++++++++++++++++++++------------------- 2 files changed, 372 insertions(+), 351 deletions(-) diff --git a/src/browser/Factory.zig b/src/browser/Factory.zig index 6013a2ff..336924b6 100644 --- a/src/browser/Factory.zig +++ b/src/browser/Factory.zig @@ -24,7 +24,7 @@ const IS_DEBUG = builtin.mode == .Debug; const log = @import("../log.zig"); const String = @import("../string.zig").String; -const SlabAllocator = @import("../slab.zig").SlabAllocator(16); +const SlabAllocator = @import("../slab.zig").SlabAllocator; const Page = @import("Page.zig"); const Node = @import("webapi/Node.zig"); @@ -53,7 +53,7 @@ _slab: SlabAllocator, pub fn init(page: *Page) Factory { return .{ ._page = page, - ._slab = SlabAllocator.init(page.arena), + ._slab = SlabAllocator.init(page.arena, 128), }; } diff --git a/src/slab.zig b/src/slab.zig index 509a1879..02d10aa7 100644 --- a/src/slab.zig +++ b/src/slab.zig @@ -4,394 +4,415 @@ const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Alignment = std.mem.Alignment; -pub fn SlabAllocator(comptime slot_count: usize) type { - comptime assert(std.math.isPowerOfTwo(slot_count)); +const Slab = struct { + alignment: Alignment, + item_size: usize, + max_slot_count: usize, - const SlabKey = struct { - size: usize, - alignment: Alignment, - }; - - const Slab = struct { - const Slab = @This(); - const chunk_shift = std.math.log2_int(usize, slot_count); - const chunk_mask = slot_count - 1; + bitset: std.bit_set.DynamicBitSetUnmanaged, + chunks: std.ArrayListUnmanaged([]u8), + pub fn init( + allocator: Allocator, alignment: Alignment, item_size: usize, + max_slot_count: usize, + ) !Slab { + return .{ + .alignment = alignment, + .item_size = item_size, + .bitset = try .initFull(allocator, 0), + .chunks = .empty, + .max_slot_count = max_slot_count, + }; + } - bitset: std.bit_set.DynamicBitSetUnmanaged, - chunks: std.ArrayListUnmanaged([]u8), + pub fn deinit(self: *Slab, allocator: Allocator) void { + self.bitset.deinit(allocator); - pub fn init( - allocator: Allocator, - alignment: Alignment, - item_size: usize, - ) !Slab { - return .{ - .alignment = alignment, - .item_size = item_size, - .bitset = try .initFull(allocator, 0), - .chunks = .empty, - }; + for (self.chunks.items) |chunk| { + allocator.rawFree(chunk, self.alignment, @returnAddress()); } - pub fn deinit(self: *Slab, allocator: Allocator) void { - self.bitset.deinit(allocator); + self.chunks.deinit(allocator); + } - for (self.chunks.items) |chunk| { - allocator.rawFree(chunk, self.alignment, @returnAddress()); + inline fn calculateChunkSize(self: *Slab, chunk_index: usize) usize { + const safe_index: u6 = @intCast(@min(std.math.maxInt(u6), chunk_index)); + const exponential = @as(usize, 1) << safe_index; + return @min(exponential, self.max_slot_count); + } + + inline fn toBitsetIndex(self: *Slab, chunk_index: usize, slot_index: usize) usize { + var offset: usize = 0; + for (0..chunk_index) |i| { + const chunk_size = self.calculateChunkSize(i); + offset += chunk_size; + } + return offset + slot_index; + } + + inline fn toChunkAndSlotIndices(self: *Slab, bitset_index: usize) struct { usize, usize } { + var offset: usize = 0; + var chunk_index: usize = 0; + + while (chunk_index < self.chunks.items.len) : (chunk_index += 1) { + const chunk_size = self.calculateChunkSize(chunk_index); + if (bitset_index < offset + chunk_size) { + return .{ chunk_index, bitset_index - offset }; } - self.chunks.deinit(allocator); + offset += chunk_size; } - inline fn toBitsetIndex(chunk_index: usize, slot_index: usize) 
usize {
-            return chunk_index * slot_count + slot_index;
+        var offset: usize = 0;
+        for (0..chunk_index) |i| {
+            const chunk_size = self.calculateChunkSize(i);
+            offset += chunk_size;
+        }
+        return offset + slot_index;
+    }
+
+    inline fn toChunkAndSlotIndices(self: *Slab, bitset_index: usize) struct { usize, usize } {
+        var offset: usize = 0;
+        var chunk_index: usize = 0;
+
+        while (chunk_index < self.chunks.items.len) : (chunk_index += 1) {
+            const chunk_size = self.calculateChunkSize(chunk_index);
+            if (bitset_index < offset + chunk_size) {
+                return .{ chunk_index, bitset_index - offset };
            }
-        inline fn chunkIndex(bitset_index: usize) usize {
-            return bitset_index >> chunk_shift;
-        }
+            offset += chunk_size;
        }
-        inline fn slotIndex(bitset_index: usize) usize {
-            return bitset_index & chunk_mask;
-        }
+        unreachable;
+    }
-        fn alloc(self: *Slab, allocator: Allocator) ![]u8 {
-            if (self.bitset.findFirstSet()) |index| {
-                // if we have a free slot
-                const chunk_index = chunkIndex(index);
-                const slot_index = slotIndex(index);
-                self.bitset.unset(index);
+    fn alloc(self: *Slab, allocator: Allocator) ![]u8 {
+        if (self.bitset.findFirstSet()) |index| {
+            const chunk_index, const slot_index = self.toChunkAndSlotIndices(index);
-            const chunk = self.chunks.items[chunk_index];
-            const offset = slot_index * self.item_size;
-            return chunk.ptr[offset..][0..self.item_size];
-        } else {
-            const old_capacity = self.bitset.bit_length;
+            // if we have a free slot
+            self.bitset.unset(index);
-            // if we don't have a free slot
-            try self.allocateChunk(allocator);
-
-            const first_slot_index = old_capacity;
-            self.bitset.unset(first_slot_index);
-
-            const new_chunk = self.chunks.items[self.chunks.items.len - 1];
-            return new_chunk.ptr[0..self.item_size];
+            const chunk = self.chunks.items[chunk_index];
+            const offset = slot_index * self.item_size;
+            return chunk.ptr[offset..][0..self.item_size];
+        } else {
+            const old_capacity = self.bitset.bit_length;
+
+            // if we don't have a free slot
+            try self.allocateChunk(allocator);
+
+            const first_slot_index = old_capacity;
+            self.bitset.unset(first_slot_index);
+
+            const new_chunk = self.chunks.items[self.chunks.items.len - 1];
+            return new_chunk.ptr[0..self.item_size];
        }
    }
-        fn free(self: *Slab, ptr: [*]u8) void {
-            const addr = @intFromPtr(ptr);
+    fn free(self: *Slab, ptr: [*]u8) void {
+        const addr = @intFromPtr(ptr);
-            for (self.chunks.items, 0..) 
|chunk, i| { - const chunk_start = @intFromPtr(chunk.ptr); - const chunk_end = chunk_start + (slot_count * self.item_size); + fn allocateChunk(self: *Slab, allocator: Allocator) !void { + const next_chunk_size = self.calculateChunkSize(self.chunks.items.len); + const chunk_len = self.item_size * next_chunk_size; - if (addr >= chunk_start and addr < chunk_end) { - const offset = addr - chunk_start; - const slot_index = offset / self.item_size; + const chunk_ptr = allocator.rawAlloc( + chunk_len, + self.alignment, + @returnAddress(), + ) orelse return error.FailedChildAllocation; - const bitset_index = toBitsetIndex(i, slot_index); - assert(!self.bitset.isSet(bitset_index)); + const chunk = chunk_ptr[0..chunk_len]; + try self.chunks.append(allocator, chunk); - self.bitset.set(bitset_index); - return; + const new_capacity = self.bitset.bit_length + next_chunk_size; + try self.bitset.resize(allocator, new_capacity, true); + } + + const Stats = struct { + key: SlabKey, + item_size: usize, + chunk_count: usize, + total_slots: usize, + slots_in_use: usize, + slots_free: usize, + bytes_allocated: usize, + bytes_in_use: usize, + bytes_free: usize, + utilization_ratio: f64, + }; + + fn getStats(self: *const Slab, key: SlabKey) Stats { + const total_slots = self.bitset.bit_length; + const free_slots = self.bitset.count(); + const used_slots = total_slots - free_slots; + const bytes_allocated = total_slots * self.item_size; + const bytes_in_use = used_slots * self.item_size; + + const utilization_ratio = if (bytes_allocated > 0) + @as(f64, @floatFromInt(bytes_in_use)) / @as(f64, @floatFromInt(bytes_allocated)) + else + 0.0; + + return .{ + .key = key, + .item_size = self.item_size, + .chunk_count = self.chunks.items.len, + .total_slots = total_slots, + .slots_in_use = used_slots, + .slots_free = free_slots, + .bytes_allocated = bytes_allocated, + .bytes_in_use = bytes_in_use, + .bytes_free = free_slots * self.item_size, + .utilization_ratio = utilization_ratio, + }; + } +}; + +const SlabKey = struct { + size: usize, + alignment: Alignment, +}; + +pub const SlabAllocator = struct { + const Self = @This(); + + child_allocator: Allocator, + max_slot_count: usize, + + slabs: std.ArrayHashMapUnmanaged(SlabKey, Slab, struct { + const Context = @This(); + + pub fn hash(_: Context, key: SlabKey) u32 { + var hasher = std.hash.Wyhash.init(0); + std.hash.autoHash(&hasher, key.size); + std.hash.autoHash(&hasher, key.alignment); + return @truncate(hasher.final()); + } + + pub fn eql(_: Context, a: SlabKey, b: SlabKey, _: usize) bool { + return a.size == b.size and a.alignment == b.alignment; + } + }, false) = .empty, + + pub fn init(child_allocator: Allocator, max_slot_count: usize) Self { + assert(std.math.isPowerOfTwo(max_slot_count)); + + return .{ + .child_allocator = child_allocator, + .slabs = .empty, + .max_slot_count = max_slot_count, + }; + } + + pub fn deinit(self: *Self) void { + for (self.slabs.values()) |*slab| { + slab.deinit(self.child_allocator); + } + + self.slabs.deinit(self.child_allocator); + } + + pub const ResetKind = enum { + /// Free all chunks and release all memory. + clear, + /// Keep all chunks, reset trees to reuse memory. + retain_capacity, + }; + + /// This clears all of the stored memory, freeing the currently used chunks. 
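+    /// More precisely: `.clear` frees every chunk back to the child allocator,
+    /// while `.retain_capacity` keeps the chunks and only marks all slots free.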
+ pub fn reset(self: *Self, kind: ResetKind) void { + switch (kind) { + .clear => { + for (self.slabs.values()) |*slab| { + for (slab.chunks.items) |chunk| { + self.child_allocator.free(chunk); + } + + slab.chunks.clearAndFree(self.child_allocator); + slab.bitset.deinit(self.child_allocator); + } + + self.slabs.clearAndFree(self.child_allocator); + }, + .retain_capacity => { + for (self.slabs.values()) |*slab| { + slab.bitset.setAll(); + } + }, + } + } + + const Stats = struct { + total_allocated_bytes: usize, + bytes_in_use: usize, + bytes_free: usize, + slab_count: usize, + total_chunks: usize, + total_slots: usize, + slots_in_use: usize, + slots_free: usize, + fragmentation_ratio: f64, + utilization_ratio: f64, + slabs: []const Slab.Stats, + + pub fn print(self: *const Stats) !void { + std.debug.print("\n", .{}); + std.debug.print("\n=== Slab Allocator Statistics ===\n", .{}); + std.debug.print("Overall Memory:\n", .{}); + std.debug.print(" Total allocated: {} bytes ({d:.2} MB)\n", .{ + self.total_allocated_bytes, + @as(f64, @floatFromInt(self.total_allocated_bytes)) / 1_048_576.0, + }); + std.debug.print(" In use: {} bytes ({d:.2} MB)\n", .{ + self.bytes_in_use, + @as(f64, @floatFromInt(self.bytes_in_use)) / 1_048_576.0, + }); + std.debug.print(" Free: {} bytes ({d:.2} MB)\n", .{ + self.bytes_free, + @as(f64, @floatFromInt(self.bytes_free)) / 1_048_576.0, + }); + + std.debug.print("\nOverall Structure:\n", .{}); + std.debug.print(" Slab Count: {}\n", .{self.slab_count}); + std.debug.print(" Total chunks: {}\n", .{self.total_chunks}); + std.debug.print(" Total slots: {}\n", .{self.total_slots}); + std.debug.print(" Slots in use: {}\n", .{self.slots_in_use}); + std.debug.print(" Slots free: {}\n", .{self.slots_free}); + + std.debug.print("\nOverall Efficiency:\n", .{}); + std.debug.print(" Utilization: {d:.1}%\n", .{self.utilization_ratio * 100.0}); + std.debug.print(" Fragmentation: {d:.1}%\n", .{self.fragmentation_ratio * 100.0}); + + if (self.slabs.len > 0) { + std.debug.print("\nPer-Slab Breakdown:\n", .{}); + std.debug.print( + " {s:>5} | {s:>4} | {s:>6} | {s:>6} | {s:>6} | {s:>10} | {s:>6}\n", + .{ "Size", "Algn", "Chunks", "Slots", "InUse", "Bytes", "Util%" }, + ); + std.debug.print( + " {s:-<5}-+-{s:-<4}-+-{s:-<6}-+-{s:-<6}-+-{s:-<6}-+-{s:-<10}-+-{s:-<6}\n", + .{ "", "", "", "", "", "", "" }, + ); + + for (self.slabs) |slab| { + std.debug.print(" {d:5} | {d:4} | {d:6} | {d:6} | {d:6} | {d:10} | {d:5.1}%\n", .{ + slab.key.size, + @intFromEnum(slab.key.alignment), + slab.chunk_count, + slab.total_slots, + slab.slots_in_use, + slab.bytes_allocated, + slab.utilization_ratio * 100.0, + }); } } - - unreachable; - } - - fn allocateChunk(self: *Slab, allocator: Allocator) !void { - const chunk_len = self.item_size * slot_count; - - const chunk_ptr = allocator.rawAlloc( - chunk_len, - self.alignment, - @returnAddress(), - ) orelse return error.FailedChildAllocation; - - const chunk = chunk_ptr[0..chunk_len]; - try self.chunks.append(allocator, chunk); - - const new_capacity = self.chunks.items.len * slot_count; - try self.bitset.resize(allocator, new_capacity, true); - } - - const Stats = struct { - key: SlabKey, - item_size: usize, - chunk_count: usize, - total_slots: usize, - slots_in_use: usize, - slots_free: usize, - bytes_allocated: usize, - bytes_in_use: usize, - bytes_free: usize, - utilization_ratio: f64, - }; - - fn getStats(self: *const Slab, key: SlabKey) Stats { - const total_slots = self.bitset.bit_length; - const free_slots = self.bitset.count(); - const used_slots = 
total_slots - free_slots; - const bytes_allocated = self.chunks.items.len * slot_count * self.item_size; - const bytes_in_use = used_slots * self.item_size; - - const utilization_ratio = if (bytes_allocated > 0) - @as(f64, @floatFromInt(bytes_in_use)) / @as(f64, @floatFromInt(bytes_allocated)) - else - 0.0; - - return .{ - .key = key, - .item_size = self.item_size, - .chunk_count = self.chunks.items.len, - .total_slots = total_slots, - .slots_in_use = used_slots, - .slots_free = free_slots, - .bytes_allocated = bytes_allocated, - .bytes_in_use = bytes_in_use, - .bytes_free = free_slots * self.item_size, - .utilization_ratio = utilization_ratio, - }; } }; - return struct { - const Self = @This(); + pub fn getStats(self: *Self, a: std.mem.Allocator) !Stats { + var slab_stats: std.ArrayList(Slab.Stats) = try .initCapacity(a, self.slabs.entries.len); + errdefer slab_stats.deinit(a); - child_allocator: Allocator, - slabs: std.ArrayHashMapUnmanaged(SlabKey, Slab, struct { - const Context = @This(); - - pub fn hash(_: Context, key: SlabKey) u32 { - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, key.size); - std.hash.autoHash(&hasher, key.alignment); - return @truncate(hasher.final()); - } - - pub fn eql(_: Context, a: SlabKey, b: SlabKey, _: usize) bool { - return a.size == b.size and a.alignment == b.alignment; - } - }, false) = .empty, - - pub fn init(child_allocator: Allocator) Self { - return .{ - .child_allocator = child_allocator, - .slabs = .empty, - }; - } - - pub fn deinit(self: *Self) void { - for (self.slabs.values()) |*slab| { - slab.deinit(self.child_allocator); - } - - self.slabs.deinit(self.child_allocator); - } - - pub const ResetKind = enum { - /// Free all chunks and release all memory. - clear, - /// Keep all chunks, reset trees to reuse memory. - retain_capacity, + var stats = Stats{ + .total_allocated_bytes = 0, + .bytes_in_use = 0, + .bytes_free = 0, + .slab_count = self.slabs.count(), + .total_chunks = 0, + .total_slots = 0, + .slots_in_use = 0, + .slots_free = 0, + .fragmentation_ratio = 0.0, + .utilization_ratio = 0.0, + .slabs = &.{}, }; - /// This clears all of the stored memory, freeing the currently used chunks. 
- pub fn reset(self: *Self, kind: ResetKind) void { - switch (kind) { - .clear => { - for (self.slabs.values()) |*slab| { - for (slab.chunks.items) |chunk| { - self.child_allocator.free(chunk); - } + var it = self.slabs.iterator(); + while (it.next()) |entry| { + const key = entry.key_ptr.*; + const slab = entry.value_ptr; + const slab_stat = slab.getStats(key); - slab.chunks.clearAndFree(self.child_allocator); - slab.bitset.deinit(self.child_allocator); - } + slab_stats.appendAssumeCapacity(slab_stat); - self.slabs.clearAndFree(self.child_allocator); - }, - .retain_capacity => { - for (self.slabs.values()) |*slab| { - slab.bitset.setAll(); - } - }, - } + stats.total_allocated_bytes += slab_stat.bytes_allocated; + stats.bytes_in_use += slab_stat.bytes_in_use; + stats.bytes_free += slab_stat.bytes_free; + stats.total_chunks += slab_stat.chunk_count; + stats.total_slots += slab_stat.total_slots; + stats.slots_in_use += slab_stat.slots_in_use; + stats.slots_free += slab_stat.slots_free; } - const Stats = struct { - total_allocated_bytes: usize, - bytes_in_use: usize, - bytes_free: usize, - slab_count: usize, - total_chunks: usize, - total_slots: usize, - slots_in_use: usize, - slots_free: usize, - fragmentation_ratio: f64, - utilization_ratio: f64, - slabs: []const Slab.Stats, + if (stats.total_allocated_bytes > 0) { + stats.fragmentation_ratio = @as(f64, @floatFromInt(stats.bytes_free)) / + @as(f64, @floatFromInt(stats.total_allocated_bytes)); + stats.utilization_ratio = @as(f64, @floatFromInt(stats.bytes_in_use)) / + @as(f64, @floatFromInt(stats.total_allocated_bytes)); + } - pub fn print(self: *const Stats) !void { - std.debug.print("\n", .{}); - std.debug.print("\n=== Slab Allocator Statistics ===\n", .{}); - std.debug.print("Overall Memory:\n", .{}); - std.debug.print(" Total allocated: {} bytes ({d:.2} MB)\n", .{ - self.total_allocated_bytes, - @as(f64, @floatFromInt(self.total_allocated_bytes)) / 1_048_576.0, - }); - std.debug.print(" In use: {} bytes ({d:.2} MB)\n", .{ - self.bytes_in_use, - @as(f64, @floatFromInt(self.bytes_in_use)) / 1_048_576.0, - }); - std.debug.print(" Free: {} bytes ({d:.2} MB)\n", .{ - self.bytes_free, - @as(f64, @floatFromInt(self.bytes_free)) / 1_048_576.0, - }); + stats.slabs = try slab_stats.toOwnedSlice(a); + return stats; + } - std.debug.print("\nOverall Structure:\n", .{}); - std.debug.print(" Slab Count: {}\n", .{self.slab_count}); - std.debug.print(" Total chunks: {}\n", .{self.total_chunks}); - std.debug.print(" Total slots: {}\n", .{self.total_slots}); - std.debug.print(" Slots in use: {}\n", .{self.slots_in_use}); - std.debug.print(" Slots free: {}\n", .{self.slots_free}); + pub const vtable = Allocator.VTable{ + .alloc = alloc, + .free = free, + .remap = Allocator.noRemap, + .resize = Allocator.noResize, + }; - std.debug.print("\nOverall Efficiency:\n", .{}); - std.debug.print(" Utilization: {d:.1}%\n", .{self.utilization_ratio * 100.0}); - std.debug.print(" Fragmentation: {d:.1}%\n", .{self.fragmentation_ratio * 100.0}); - - if (self.slabs.len > 0) { - std.debug.print("\nPer-Slab Breakdown:\n", .{}); - std.debug.print( - " {s:>5} | {s:>4} | {s:>6} | {s:>6} | {s:>6} | {s:>10} | {s:>6}\n", - .{ "Size", "Algn", "Chunks", "Slots", "InUse", "Bytes", "Util%" }, - ); - std.debug.print( - " {s:-<5}-+-{s:-<4}-+-{s:-<6}-+-{s:-<6}-+-{s:-<6}-+-{s:-<10}-+-{s:-<6}\n", - .{ "", "", "", "", "", "", "" }, - ); - - for (self.slabs) |slab| { - std.debug.print(" {d:5} | {d:4} | {d:6} | {d:6} | {d:6} | {d:10} | {d:5.1}%\n", .{ - slab.key.size, - 
@intFromEnum(slab.key.alignment), - slab.chunk_count, - slab.total_slots, - slab.slots_in_use, - slab.bytes_allocated, - slab.utilization_ratio * 100.0, - }); - } - } - } + pub fn allocator(self: *Self) Allocator { + return .{ + .ptr = self, + .vtable = &vtable, }; + } - pub fn getStats(self: *Self, a: std.mem.Allocator) !Stats { - var slab_stats: std.ArrayList(Slab.Stats) = try .initCapacity(a, self.slabs.entries.len); - errdefer slab_stats.deinit(a); + fn alloc(ctx: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 { + const self: *Self = @ptrCast(@alignCast(ctx)); + _ = ret_addr; - var stats = Stats{ - .total_allocated_bytes = 0, - .bytes_in_use = 0, - .bytes_free = 0, - .slab_count = self.slabs.count(), - .total_chunks = 0, - .total_slots = 0, - .slots_in_use = 0, - .slots_free = 0, - .fragmentation_ratio = 0.0, - .utilization_ratio = 0.0, - .slabs = &.{}, - }; + const list_gop = self.slabs.getOrPut( + self.child_allocator, + SlabKey{ .size = len, .alignment = alignment }, + ) catch return null; - var it = self.slabs.iterator(); - while (it.next()) |entry| { - const key = entry.key_ptr.*; - const slab = entry.value_ptr; - const slab_stat = slab.getStats(key); - - slab_stats.appendAssumeCapacity(slab_stat); - - stats.total_allocated_bytes += slab_stat.bytes_allocated; - stats.bytes_in_use += slab_stat.bytes_in_use; - stats.bytes_free += slab_stat.bytes_free; - stats.total_chunks += slab_stat.chunk_count; - stats.total_slots += slab_stat.total_slots; - stats.slots_in_use += slab_stat.slots_in_use; - stats.slots_free += slab_stat.slots_free; - } - - if (stats.total_allocated_bytes > 0) { - stats.fragmentation_ratio = @as(f64, @floatFromInt(stats.bytes_free)) / - @as(f64, @floatFromInt(stats.total_allocated_bytes)); - stats.utilization_ratio = @as(f64, @floatFromInt(stats.bytes_in_use)) / - @as(f64, @floatFromInt(stats.total_allocated_bytes)); - } - - stats.slabs = try slab_stats.toOwnedSlice(a); - return stats; - } - - pub const vtable = Allocator.VTable{ - .alloc = alloc, - .free = free, - .remap = Allocator.noRemap, - .resize = Allocator.noResize, - }; - - pub fn allocator(self: *Self) Allocator { - return .{ - .ptr = self, - .vtable = &vtable, - }; - } - - fn alloc(ctx: *anyopaque, len: usize, alignment: Alignment, ret_addr: usize) ?[*]u8 { - const self: *Self = @ptrCast(@alignCast(ctx)); - _ = ret_addr; - - const list_gop = self.slabs.getOrPut( + if (!list_gop.found_existing) { + list_gop.value_ptr.* = Slab.init( self.child_allocator, - SlabKey{ .size = len, .alignment = alignment }, + alignment, + len, + self.max_slot_count, ) catch return null; - - if (!list_gop.found_existing) { - list_gop.value_ptr.* = Slab.init( - self.child_allocator, - alignment, - len, - ) catch return null; - } - - const list = list_gop.value_ptr; - const buf = list.alloc(self.child_allocator) catch return null; - return buf.ptr; } - fn free(ctx: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void { - const self: *Self = @ptrCast(@alignCast(ctx)); - _ = ret_addr; + const list = list_gop.value_ptr; + const buf = list.alloc(self.child_allocator) catch return null; + return buf.ptr; + } - const ptr = memory.ptr; - const len = memory.len; + fn free(ctx: *anyopaque, memory: []u8, alignment: Alignment, ret_addr: usize) void { + const self: *Self = @ptrCast(@alignCast(ctx)); + _ = ret_addr; - const list = self.slabs.getPtr(.{ .size = len, .alignment = alignment }).?; - list.free(ptr); - } - }; -} + const ptr = memory.ptr; + const len = memory.len; + + const list = 
self.slabs.getPtr(.{ .size = len, .alignment = alignment }).?; + list.free(ptr); + } +}; const testing = std.testing; -const TestSlabAllocator = SlabAllocator(32); +const TestSlabAllocator = SlabAllocator; test "slab allocator - basic allocation and free" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -409,7 +430,7 @@ test "slab allocator - basic allocation and free" { } test "slab allocator - multiple allocations" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -432,7 +453,7 @@ test "slab allocator - multiple allocations" { } test "slab allocator - no coalescing (different size classes)" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -459,7 +480,7 @@ test "slab allocator - no coalescing (different size classes)" { } test "slab allocator - reuse freed memory" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -477,7 +498,7 @@ test "slab allocator - reuse freed memory" { } test "slab allocator - multiple size classes" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -501,7 +522,7 @@ test "slab allocator - multiple size classes" { } test "slab allocator - various sizes" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -518,7 +539,7 @@ test "slab allocator - various sizes" { } test "slab allocator - exact sizes (no rounding)" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -539,7 +560,7 @@ test "slab allocator - exact sizes (no rounding)" { } test "slab allocator - chunk allocation" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -561,7 +582,7 @@ test "slab allocator - chunk allocation" { } test "slab allocator - reset with retain_capacity" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -588,7 +609,7 @@ test "slab allocator - reset with retain_capacity" { } test "slab allocator - reset with clear" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -610,7 +631,7 @@ test "slab allocator - reset with clear" { } test "slab allocator - stress test" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer 
slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -647,7 +668,7 @@ test "slab allocator - stress test" { } test "slab allocator - alignment" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -662,7 +683,7 @@ test "slab allocator - alignment" { } test "slab allocator - no resize support" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -678,7 +699,7 @@ test "slab allocator - no resize support" { } test "slab allocator - fragmentation pattern" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -730,7 +751,7 @@ test "slab allocator - fragmentation pattern" { } test "slab allocator - many small allocations" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -752,11 +773,11 @@ test "slab allocator - many small allocations" { // Should have created multiple chunks const slab = slab_alloc.slabs.getPtr(.{ .size = 24, .alignment = Alignment.@"1" }).?; - try testing.expect(slab.chunks.items.len > 10); + try testing.expect(slab.chunks.items.len > 1); } test "slab allocator - zero waste for exact sizes" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator(); @@ -776,7 +797,7 @@ test "slab allocator - zero waste for exact sizes" { } test "slab allocator - different size classes don't interfere" { - var slab_alloc = TestSlabAllocator.init(testing.allocator); + var slab_alloc = TestSlabAllocator.init(testing.allocator, 16); defer slab_alloc.deinit(); const allocator = slab_alloc.allocator();
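
A minimal usage sketch of the post-patch API, mirroring the tests above. The
slot-count argument of 16 matches what the tests pass; reading it as a
per-chunk maximum slot count is an assumption based on the new Slab.init
parameter, not something this hunk states directly:

    const std = @import("std");
    const SlabAllocator = @import("slab.zig").SlabAllocator;

    test "usage sketch" {
        // init now takes the child allocator plus a runtime slot count,
        // replacing the old comptime SlabAllocator(N) parameterization.
        var slab_alloc = SlabAllocator.init(std.testing.allocator, 16);
        defer slab_alloc.deinit();

        const allocator = slab_alloc.allocator();

        // Each distinct (size, alignment) pair is served by its own slab.
        const buf = try allocator.alloc(u8, 24);
        defer allocator.free(buf);

        try std.testing.expect(buf.len == 24);
    }

The runtime parameter keeps one SlabAllocator type across the codebase, so
call sites like Factory only choose a slot budget at init time instead of
instantiating a distinct generic per configuration.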
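A second sketch, for the getStats/Stats reporting added in this patch. Two
assumptions here: the caller owns stats.slabs (getStats hands the per-slab
slice out via toOwnedSlice), and Stats.print only writes to stderr through
std.debug.print, so the try never actually fails:

    test "stats sketch" {
        var slab_alloc = SlabAllocator.init(std.testing.allocator, 16);
        defer slab_alloc.deinit();

        const allocator = slab_alloc.allocator();
        const buf = try allocator.alloc(u8, 32);
        defer allocator.free(buf);

        // getStats takes an allocator for the per-slab stats slice;
        // the caller is responsible for freeing it.
        const stats = try slab_alloc.getStats(std.testing.allocator);
        defer std.testing.allocator.free(stats.slabs);

        // Aggregate utilization/fragmentation plus a per-slab table.
        try stats.print();
    }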