Mirror of https://github.com/lightpanda-io/browser.git
Remove the loop
Previously, the IO loop was doing three things:

1 - Managing timeouts (either from scripts or for our own needs)
2 - Handling browser IO events (page/script/xhr)
3 - Handling CDP events (accept, read, write, timeout)

With the libcurl merge, 1 was moved to an in-process scheduler and 2 was moved to libcurl's own event loop. That means the entire loop code, including the dependency on tigerbeetle-io, existed only to handle a single TCP client. Not only is that a lot of code, there was also friction between the two loops (the libcurl one and our IO loop), which resulted in latency: while one loop is waiting for events, any events on the other loop go unprocessed.

This PR removes our IO loop. To accomplish this:

1 - The main accept loop is now blocking. This is simpler and works perfectly well, given that we only allow 1 active connection.
2 - The client socket is passed to libcurl - yes, libcurl's loop can take arbitrary FDs and poll them along with its own sockets (see the sketch below).

In addition to having one less dependency, the CDP code is quite a bit simpler, especially around shutdowns and writes. This also removes _some_ of the latency caused by the friction between page processing and CDP processing. Specifically, when CDP blocks for input, http page events (script loading, xhr, ...) are still processed.

There's still friction, though. For one, the reverse isn't true: when the page is waiting for events, CDP events aren't processed. But page.wait already has some sensitivity to this (e.g. the page.request_intercepted flag). Also, when CDP waits, network events are processed but page timeouts still aren't. Because of both these remaining issues, we still need to jump between the two loops - but being able to block on CDP (even for a short time) WITHOUT stopping the page's network I/O should reduce some latency.
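The second point above, handing the CDP client socket to libcurl so that a single poll covers both libcurl's transfer sockets and our own fd, relies on libcurl's curl_multi_poll() / curl_multi_wait() extra_fds parameter. Below is a minimal, hypothetical Zig sketch of that shape; it is not lightpanda's actual server code, and the port, the @cImport setup, and the message handling are illustrative assumptions.

```zig
const std = @import("std");
const c = @cImport(@cInclude("curl/curl.h"));

pub fn main() !void {
    _ = c.curl_global_init(c.CURL_GLOBAL_DEFAULT);
    defer c.curl_global_cleanup();

    // 1 - Blocking accept: only one active CDP connection is allowed, so a
    //     plain blocking accept is enough (port 9222 is just an example).
    const address = try std.net.Address.parseIp("127.0.0.1", 9222);
    var server = try address.listen(.{ .reuse_address = true });
    defer server.deinit();

    const conn = try server.accept();
    defer conn.stream.close();

    // 2 - Hand the client socket to libcurl: curl_multi_poll() accepts extra
    //     fds and polls them together with its own transfer sockets.
    const multi = c.curl_multi_init();
    if (multi == null) return error.CurlMultiInit;
    defer _ = c.curl_multi_cleanup(multi);

    while (true) {
        var extra = [_]c.struct_curl_waitfd{.{
            .fd = conn.stream.handle,
            .events = c.CURL_WAIT_POLLIN,
            .revents = 0,
        }};
        var num_fds: c_int = 0;
        _ = c.curl_multi_poll(multi, &extra, 1, 100, &num_fds);

        // Drive in-flight page transfers (scripts, xhr, ...) no matter which
        // side caused the wakeup.
        var running: c_int = 0;
        _ = c.curl_multi_perform(multi, &running);

        if ((extra[0].revents & c.CURL_WAIT_POLLIN) != 0) {
            // CDP data is ready: read and dispatch one message here.
        }
    }
}
```

curl_multi_poll() returns as soon as any of libcurl's own sockets or the extra fds are ready (or after timeout_ms), which is what lets the server block on CDP input without stalling the page's network I/O.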
@@ -114,12 +114,9 @@ pub fn CDPT(comptime TypeProvider: type) type {
         }

         // @newhttp
-        // A bit hacky right now. The main server loop blocks only for CDP
-        // messages. It no longer blocks for page timeouts of page HTTP
-        // transfers. So we need to call this more ourselves.
-        // This is called after every message and [very hackily] from the server
-        // loop.
-        // This is hopefully temporary.
+        // A bit hacky right now. The main server loop doesn't unblock for
+        // scheduled task. So we run this directly in order to process any
+        // timeouts (or http events) which are ready to be processed.
         pub fn pageWait(self: *Self) void {
             const session = &(self.browser.session orelse return);
             // exits early if there's nothing to do, so a large value like
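The replacement comment describes pageWait() now being called directly, after every CDP message, because the blocking server loop no longer wakes up on its own for scheduled page work. A minimal and purely hypothetical sketch of that calling pattern follows; the Server type and its readMessage/handleMessage methods are stand-ins, and only pageWait corresponds to the function in the diff.

```zig
const Server = struct {
    fn readMessage(_: *Server) ?[]const u8 {
        return null; // stand-in: would block reading the next CDP message
    }
    fn handleMessage(_: *Server, _: []const u8) void {}
    fn pageWait(_: *Server) void {} // drains ready page timeouts / http events
};

pub fn main() void {
    var server = Server{};
    while (server.readMessage()) |msg| {
        server.handleMessage(msg);
        // Run page work that became ready while we were blocked on the read;
        // nothing else will process it until the next message arrives.
        server.pageWait();
    }
}
```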
@@ -592,8 +589,7 @@ pub fn BrowserContext(comptime CDP_T: type) type {
             };

             const cdp = self.cdp;
-            var arena = std.heap.ArenaAllocator.init(cdp.allocator);
-            errdefer arena.deinit();
+            const allocator = cdp.client.send_arena.allocator();

             const field = ",\"sessionId\":\"";

@@ -602,7 +598,7 @@ pub fn BrowserContext(comptime CDP_T: type) type {
             const message_len = msg.len + session_id.len + 1 + field.len + 10;

             var buf: std.ArrayListUnmanaged(u8) = .{};
-            buf.ensureTotalCapacity(arena.allocator(), message_len) catch |err| {
+            buf.ensureTotalCapacity(allocator, message_len) catch |err| {
                 log.err(.cdp, "inspector buffer", .{ .err = err });
                 return;
             };
@@ -617,7 +613,7 @@ pub fn BrowserContext(comptime CDP_T: type) type {
             buf.appendSliceAssumeCapacity("\"}");
             std.debug.assert(buf.items.len == message_len);

-            try cdp.client.sendJSONRaw(arena, buf);
+            try cdp.client.sendJSONRaw(buf);
         }
     };
}

@@ -39,12 +39,14 @@ pub const Document = @import("../testing.zig").Document;

 const Client = struct {
+    allocator: Allocator,
     send_arena: ArenaAllocator,
     sent: std.ArrayListUnmanaged(json.Value) = .{},
     serialized: std.ArrayListUnmanaged([]const u8) = .{},

     fn init(alloc: Allocator) Client {
         return .{
+            .allocator = alloc,
             .send_arena = ArenaAllocator.init(alloc),
         };
     }

@@ -58,7 +60,7 @@ const Client = struct {
         try self.sent.append(self.allocator, value);
     }

-    pub fn sendJSONRaw(self: *Client, _: ArenaAllocator, buf: std.ArrayListUnmanaged(u8)) !void {
+    pub fn sendJSONRaw(self: *Client, buf: std.ArrayListUnmanaged(u8)) !void {
         const value = try json.parseFromSliceLeaky(json.Value, self.allocator, buf.items, .{});
         try self.sent.append(self.allocator, value);
     }