Properly drain event loop when navigating between pages
@@ -72,7 +72,7 @@ pub const Session = struct {
     pub fn deinit(self: *Session) void {
         if (self.page != null) {
-            self.removePage();
+            self.removePage() catch {};
         }
         self.cookie_jar.deinit();
         self.storage_shed.deinit();
@@ -104,14 +104,35 @@ pub const Session = struct {
         return page;
     }

-    pub fn removePage(self: *Session) void {
+    pub fn removePage(self: *Session) !void {
         // Inform CDP the page is going to be removed, allowing other worlds to remove themselves before the main one
         self.browser.notification.dispatch(.page_remove, .{});

         std.debug.assert(self.page != null);
-        // Reset all existing callbacks.
-        self.browser.app.loop.reset();
+        // Cleanup is a bit sensitive. We could still have inflight I/O. For
+        // example, we could have an XHR request which is still in the connect
+        // phase. It's important that we clean these up, as they're holding onto
+        // limited resources (like our fixed-size http state pool).
+        //
+        // The first thing we do is endScope(), which executes the destructor
+        // of any type that registered one (e.g. XMLHttpRequest). This shuts
+        // down any pending sockets, which begins our cleanup process.
        self.executor.endScope();

+        // The second thing we do is reset the loop. This increments the loop
+        // ctx_id so that any "stale" timeouts we process will get ignored. We
+        // need to do this BEFORE running the loop because, at this point,
+        // things like window.setTimeout and pending microtasks should be ignored.
+        self.browser.app.loop.reset();
+
+        // Finally, we run the loop. Because of the reset just above, this will
+        // ignore any timeouts. And, because of the endScope() just before it,
+        // it should ensure that the http requests detect the shutdown socket
+        // and release their resources.
+        try self.browser.app.loop.run();
+
         self.page = null;

         // clear netsurf memory arena.
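
This teardown ordering is the heart of the commit: destructors first (to shut down in-flight sockets), then the ctx_id bump (so stale timeouts are ignored), then the drain. A minimal stand-alone sketch of that sequence, with stubbed Executor and Loop types in place of the real ones:

// Stubbed stand-ins for the real Executor and Loop types, to pin down
// the call order removePage() relies on (a sketch, not the real API).
const Executor = struct {
    // endScope() runs registered destructors (e.g. XMLHttpRequest),
    // shutting down any sockets those objects still hold.
    fn endScope(_: *Executor) void {}
};

const Loop = struct {
    ctx_id: u32 = 0,

    // reset() bumps ctx_id so callbacks scheduled for the old page are
    // recognized as stale and ignored.
    fn reset(self: *Loop) void {
        self.ctx_id += 1;
    }

    // run() ticks the I/O loop until pending network events drain.
    fn run(_: *Loop) !void {}
};

fn removePageOrder(executor: *Executor, loop: *Loop) !void {
    executor.endScope(); // 1. destructors shut down in-flight sockets
    loop.reset(); // 2. BEFORE running: stale timeouts and microtasks must be ignored
    try loop.run(); // 3. drain, so requests observe the shutdown and release pool slots
}
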
@@ -143,7 +164,7 @@ pub const Session = struct {
         // the final URL, possibly following redirects)
         const url = try self.page.?.url.resolve(self.transfer_arena, url_string);

-        self.removePage();
+        try self.removePage();
         var page = try self.createPage();
         return page.navigate(url, opts);
     }
@@ -90,7 +90,7 @@ fn clickNavigate(cmd: anytype, uri: std.Uri) !void {
         .disposition = "currentTab",
     }, .{ .session_id = bc.session_id.? });

-    bc.session.removePage();
+    try bc.session.removePage();
    _ = try bc.session.createPage(null);

    try @import("page.zig").navigateToUrl(cmd, url, false);
@@ -220,7 +220,7 @@ fn closeTarget(cmd: anytype) !void {
         bc.session_id = null;
     }

-    bc.session.removePage();
+    try bc.session.removePage();
    if (bc.isolated_world) |*world| {
        world.deinit();
        bc.isolated_world = null;
@@ -34,9 +34,11 @@ pub const Loop = struct {
     alloc: std.mem.Allocator, // TODO: unmanaged version ?
     io: IO,

-    // Used to track how many callbacks are to be called and wait until all
-    // event are finished.
-    events_nb: usize,
+    // number of pending network events we have
+    pending_network_count: usize,
+
+    // number of pending timeout events we have
+    pending_timeout_count: usize,

     // Used to stop repeating timeouts when loop.run is called.
     stopping: bool,
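
The single atomic events_nb splits into two plain counters. The discipline the rest of the diff follows is: increment before submitting work to the kernel, decrement as the first statement of the completion callback. A self-contained toy model of that bookkeeping (the actual I/O is elided):

const std = @import("std");

const ToyLoop = struct {
    pending_network_count: usize = 0,

    fn submit(self: *ToyLoop) void {
        // Increment BEFORE handing the completion to the kernel, so the
        // drain loop can never observe zero while work is in flight.
        self.pending_network_count += 1;
    }

    fn onComplete(self: *ToyLoop) void {
        // Decrement first thing in the completion callback.
        self.pending_network_count -= 1;
    }
};

test "counter reaches zero exactly when every completion has fired" {
    var loop: ToyLoop = .{};
    loop.submit();
    loop.submit();
    loop.onComplete();
    try std.testing.expect(loop.pending_network_count > 0);
    loop.onComplete();
    try std.testing.expectEqual(@as(usize, 0), loop.pending_network_count);
}

Note that the new counters are plain usize fields rather than atomics: the loop is driven from a single thread, so the @atomicRmw/@atomicLoad machinery that guarded events_nb (removed further down) is no longer needed.
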
@@ -66,8 +68,9 @@ pub const Loop = struct {
             .alloc = alloc,
             .cancelled = .{},
             .io = try IO.init(32, 0),
-            .events_nb = 0,
             .stopping = false,
+            .pending_network_count = 0,
+            .pending_timeout_count = 0,
             .timeout_pool = MemoryPool(ContextTimeout).init(alloc),
             .event_callback_pool = MemoryPool(EventCallbackContext).init(alloc),
         };
@@ -78,7 +81,7 @@ pub const Loop = struct {

         // run tail events. We do run the tail events to ensure all the
         // contexts are correctly freed.
-        while (self.eventsNb() > 0) {
+        while (self.hasPendinEvents()) {
            self.io.run_for_ns(10 * std.time.ns_per_ms) catch |err| {
                log.err(.loop, "deinit", .{ .err = err });
                break;
@@ -93,6 +96,21 @@ pub const Loop = struct {
         self.cancelled.deinit(self.alloc);
     }

+    // We can shut down once all the pending network I/O is complete.
+    // In Debug mode we also wait until all the pending timeouts are complete,
+    // but we only do this so that the `timeoutCallback` can free all allocated
+    // memory and we won't report a leak.
+    fn hasPendinEvents(self: *const Self) bool {
+        if (self.pending_network_count > 0) {
+            return true;
+        }
+
+        if (builtin.mode != .Debug) {
+            return false;
+        }
+        return self.pending_timeout_count > 0;
+    }
+
     // Retrieve all registered I/O events completed by the OS kernel,
     // and execute their callbacks sequentially.
     // Stops when there are no more I/O events registered on the loop.
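
The Debug-only branch in hasPendinEvents deserves a note: pending network I/O pins real resources (sockets, http state-pool slots) and must always drain, while pending timeouts merely pin allocations that timeoutCallback frees on completion. Waiting for them in Debug builds keeps the leak detector quiet; release builds can shut down immediately. The predicate, restated as a free function for illustration (a hypothetical helper, not part of the diff):

const builtin = @import("builtin");

fn hasPendingEvents(pending_network: usize, pending_timeout: usize) bool {
    if (pending_network > 0) return true; // network I/O always blocks shutdown
    if (builtin.mode != .Debug) return false; // release builds ignore timeouts
    return pending_timeout > 0; // Debug: wait so timeoutCallback can free memory
}
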
@@ -103,25 +121,12 @@ pub const Loop = struct {
         self.stopping = true;
         defer self.stopping = false;

-        while (self.eventsNb() > 0) {
+        while (self.pending_network_count > 0) {
             try self.io.run_for_ns(10 * std.time.ns_per_ms);
             // at each iteration we might have new events registered by previous callbacks
         }
     }

-    // Register events atomically
-    // - add 1 event and return previous value
-    fn addEvent(self: *Self) void {
-        _ = @atomicRmw(usize, &self.events_nb, .Add, 1, .acq_rel);
-    }
-    // - remove 1 event and return previous value
-    fn removeEvent(self: *Self) void {
-        _ = @atomicRmw(usize, &self.events_nb, .Sub, 1, .acq_rel);
-    }
-    // - get the number of current events
-    fn eventsNb(self: *Self) usize {
-        return @atomicLoad(usize, &self.events_nb, .seq_cst);
-    }
-
     // JS callbacks APIs
     // -----------------
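
run() now only waits on network I/O: a page that leaves a long window.setTimeout behind no longer stalls navigation, since reset() has already marked those timeouts stale. A toy model of the narrowed stop condition (assumed semantics, condensed from the diff):

const std = @import("std");

test "run() drains network completions and ignores timeout bookkeeping" {
    var pending_network_count: usize = 3;
    const pending_timeout_count: usize = 1; // a stale setTimeout left behind

    while (pending_network_count > 0) {
        // Stand-in for io.run_for_ns(): deliver one completion per tick.
        pending_network_count -= 1;
    }

    try std.testing.expectEqual(@as(usize, 0), pending_network_count);
    try std.testing.expectEqual(@as(usize, 1), pending_timeout_count);
}
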
@@ -152,7 +157,7 @@ pub const Loop = struct {
         const loop = ctx.loop;

         if (ctx.initial) {
-            loop.removeEvent();
+            loop.pending_timeout_count -= 1;
         }

         defer {
@@ -207,7 +212,7 @@ pub const Loop = struct {
             .callback_node = callback_node,
         };

-        self.addEvent();
+        self.pending_timeout_count += 1;
         self.scheduleTimeout(nanoseconds, ctx, completion);
         return @intFromPtr(completion);
     }
@@ -244,17 +249,18 @@ pub const Loop = struct {
     ) !void {
         const onConnect = struct {
             fn onConnect(callback: *EventCallbackContext, completion_: *Completion, res: ConnectError!void) void {
+                callback.loop.pending_network_count -= 1;
+
                 defer callback.loop.event_callback_pool.destroy(callback);
-                callback.loop.removeEvent();
                 cbk(@alignCast(@ptrCast(callback.ctx)), completion_, res);
             }
         }.onConnect;

         const callback = try self.event_callback_pool.create();
         errdefer self.event_callback_pool.destroy(callback);
         callback.* = .{ .loop = self, .ctx = ctx };

-        self.addEvent();
+        self.pending_network_count += 1;
         self.io.connect(*EventCallbackContext, callback, onConnect, completion, socket, address);
     }

@@ -271,8 +277,8 @@ pub const Loop = struct {
     ) !void {
         const onSend = struct {
             fn onSend(callback: *EventCallbackContext, completion_: *Completion, res: SendError!usize) void {
+                callback.loop.pending_network_count -= 1;
                 defer callback.loop.event_callback_pool.destroy(callback);
-                callback.loop.removeEvent();
                 cbk(@alignCast(@ptrCast(callback.ctx)), completion_, res);
             }
         }.onSend;
@@ -281,7 +287,7 @@ pub const Loop = struct {
         errdefer self.event_callback_pool.destroy(callback);
         callback.* = .{ .loop = self, .ctx = ctx };

-        self.addEvent();
+        self.pending_network_count += 1;
         self.io.send(*EventCallbackContext, callback, onSend, completion, socket, buf);
     }

@@ -298,8 +304,8 @@ pub const Loop = struct {
     ) !void {
         const onRecv = struct {
             fn onRecv(callback: *EventCallbackContext, completion_: *Completion, res: RecvError!usize) void {
+                callback.loop.pending_network_count -= 1;
                 defer callback.loop.event_callback_pool.destroy(callback);
-                callback.loop.removeEvent();
                 cbk(@alignCast(@ptrCast(callback.ctx)), completion_, res);
             }
         }.onRecv;
@@ -307,8 +313,7 @@ pub const Loop = struct {
         const callback = try self.event_callback_pool.create();
         errdefer self.event_callback_pool.destroy(callback);
         callback.* = .{ .loop = self, .ctx = ctx };
-
-        self.addEvent();
+        self.pending_network_count += 1;
         self.io.recv(*EventCallbackContext, callback, onRecv, completion, socket, buf);
     }
 };