mirror of
https://github.com/lightpanda-io/browser.git
synced 2025-10-29 23:23:28 +00:00
Previously, the IO loop was doing three things: 1 - Managing timeouts (either from scripts or for our own needs) 2 - Handling browser IO events (page/script/xhr) 3 - Handling CDP events (accept, read, write, timeout) With the libcurl merge, 1 was moved to an in-process scheduler and 2 was moved to libcurl's own event loop. That means the entire loop code, including the dependency on tigerbeetle-io, existed for handling a single TCP client. Not only is that a lot of code, there was also friction between the two loops (the libcurl one and our IO loop), which would result in latency - while one loop is waiting for events, any events on the other loop go unprocessed. This PR removes our IO loop. To accomplish this: 1 - The main accept loop is blocking. This is simpler and works perfectly well, given we only allow 1 active connection. 2 - The client socket is passed to libcurl - yes, libcurl's loop can take arbitrary FDs and poll them along with its own. In addition to having one less dependency, the CDP code is quite a bit simpler, especially around shutdowns and writes. This also removes _some_ of the latency caused by the friction between page processing and CDP processing. Specifically, when CDP now blocks for input, http page events (script loading, xhr, ...) will still be processed. There's still friction. For one, the reverse isn't true: when the page is waiting for events, CDP events aren't going to be processed. But the page.wait already has some sensitivity to this (e.g. the page.request_intercepted flag). Also, when CDP waits, while we will process network events, page timeouts are still not processed. Because of both these remaining issues, we still need to jump between the two loops - but being able to block on CDP (even for a short time) WITHOUT stopping the page's network I/O should reduce some latency.
111 lines
3.3 KiB
Zig
111 lines
3.3 KiB
Zig
const std = @import("std");
|
|
|
|
const Allocator = std.mem.Allocator;
|
|
|
|
const log = @import("log.zig");
|
|
const Http = @import("http/Http.zig");
|
|
const Platform = @import("runtime/js.zig").Platform;
|
|
|
|
const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
|
|
const Notification = @import("notification.zig").Notification;
|
|
|
|
// Container for global state / objects that various parts of the system
|
|
// might need.
|
|
pub const App = struct {
    http: Http,
    config: Config,
    platform: ?*const Platform,
    allocator: Allocator,
    telemetry: Telemetry,
    // Per-user application data directory, or null when it could not be
    // determined/created. Owned by `allocator` when non-null; freed in deinit.
    app_dir_path: ?[]const u8,
    notification: *Notification,

    pub const RunMode = enum {
        help,
        fetch,
        serve,
        version,
    };

    // Startup configuration. Null optionals fall back to the defaults
    // applied in `init` (see the Http.init call below).
    pub const Config = struct {
        run_mode: RunMode,
        platform: ?*const Platform = null,
        tls_verify_host: bool = true,
        http_proxy: ?[:0]const u8 = null,
        proxy_bearer_token: ?[:0]const u8 = null,
        http_timeout_ms: ?u31 = null,
        http_connect_timeout_ms: ?u31 = null,
        http_max_host_open: ?u8 = null,
        http_max_concurrent: ?u8 = null,
    };

    /// Allocates the App and initializes all of its owned sub-systems
    /// (notification hub, HTTP client, telemetry). On any failure, everything
    /// created so far is torn down via the errdefer chain. Caller owns the
    /// returned pointer and must release it with `deinit`.
    pub fn init(allocator: Allocator, config: Config) !*App {
        const app = try allocator.create(App);
        errdefer allocator.destroy(app);

        const notification = try Notification.init(allocator, null);
        errdefer notification.deinit();

        var http = try Http.init(allocator, .{
            .max_host_open = config.http_max_host_open orelse 4,
            .max_concurrent = config.http_max_concurrent orelse 10,
            .timeout_ms = config.http_timeout_ms orelse 5000,
            .connect_timeout_ms = config.http_connect_timeout_ms orelse 0,
            .http_proxy = config.http_proxy,
            .tls_verify_host = config.tls_verify_host,
            .proxy_bearer_token = config.proxy_bearer_token,
        });
        errdefer http.deinit();

        // Best-effort: null simply disables features that need the directory.
        const app_dir_path = getAndMakeAppDir(allocator);
        // Fix: this allocation previously leaked when Telemetry.init or
        // telemetry.register below failed, since deinit never ran.
        errdefer if (app_dir_path) |p| allocator.free(p);

        app.* = .{
            .http = http,
            .allocator = allocator,
            // Telemetry needs a stable *App, so it is filled in right below.
            .telemetry = undefined,
            .platform = config.platform,
            .app_dir_path = app_dir_path,
            .notification = notification,
            .config = config,
        };

        app.telemetry = try Telemetry.init(app, config.run_mode);
        errdefer app.telemetry.deinit();

        try app.telemetry.register(app.notification);

        return app;
    }

    /// Releases everything created by `init`, including the App allocation
    /// itself. `self` is invalid after this returns.
    pub fn deinit(self: *App) void {
        const allocator = self.allocator;
        if (self.app_dir_path) |app_dir_path| {
            allocator.free(app_dir_path);
        }
        self.telemetry.deinit();
        self.notification.deinit();
        self.http.deinit();
        allocator.destroy(self);
    }
};
|
|
|
|
// Resolves the per-user "lightpanda" data directory and ensures it exists on
// disk. Failures are logged and reported as null rather than propagated, since
// the directory is optional. Caller owns (and must free) the returned slice.
fn getAndMakeAppDir(allocator: Allocator) ?[]const u8 {
    if (@import("builtin").is_test) {
        // Test builds always use /tmp; dupe so ownership matches the real path.
        return allocator.dupe(u8, "/tmp") catch unreachable;
    }

    const dir_path = std.fs.getAppDataDir(allocator, "lightpanda") catch |err| {
        log.warn(.app, "get data dir", .{ .err = err });
        return null;
    };

    if (std.fs.cwd().makePath(dir_path)) |_| {
        return dir_path;
    } else |err| {
        // An existing directory is success as far as we're concerned.
        if (err == error.PathAlreadyExists) return dir_path;
        allocator.free(dir_path);
        log.warn(.app, "create data dir", .{ .err = err, .path = dir_path });
        return null;
    }
}
|