mirror of
https://github.com/lightpanda-io/browser.git
synced 2025-10-30 15:41:48 +00:00
The HTTP client has a state pool, and it blocks when we've exceeded max_concurrency — which can stall processing forever. A simple way to reproduce this: open the demo cdp.js and execute the XHR request 5 times (loading json/product.json). To some degree, this is a result of a weird / non-intuitive execution flow: if you execute a JS script with 100 XHR requests, each one calls our XHR _send function, but none of them actually executes until the loop runs (after the script has finished executing). This can result in poor utilization of our connection and state pools. For an async request, getting the *Request object is itself now asynchronous: if no state is available, we use the Loop's timeout (at 20ms) to keep checking for an available state.
101 lines
3.0 KiB
Zig
101 lines
3.0 KiB
Zig
const std = @import("std");
|
|
const Allocator = std.mem.Allocator;
|
|
|
|
const log = @import("log.zig");
|
|
const Loop = @import("runtime/loop.zig").Loop;
|
|
const HttpClient = @import("http/client.zig").Client;
|
|
const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
|
|
const Notification = @import("notification.zig").Notification;
|
|
|
|
// Container for global state / objects that various parts of the system
|
|
// might need.
|
|
pub const App = struct {
    // Event loop shared by the whole process; heap-allocated so its address
    // stays stable for the lifetime of the App.
    loop: *Loop,
    config: Config,
    allocator: Allocator,
    telemetry: Telemetry,
    http_client: HttpClient,
    // Per-user data directory, or null if it could not be determined or
    // created. Owned by `allocator` when non-null; freed in `deinit`.
    app_dir_path: ?[]const u8,
    notification: *Notification,

    // How the process was invoked on the command line.
    pub const RunMode = enum {
        help,
        fetch,
        serve,
        version,
    };

    pub const Config = struct {
        run_mode: RunMode,
        tls_verify_host: bool = true,
        http_proxy: ?std.Uri = null,
    };

    /// Allocates and wires up the global application state.
    /// Caller owns the returned pointer and must release it with `deinit`.
    /// Every acquisition below is paired with an `errdefer` so a failure at
    /// any point unwinds everything acquired so far.
    pub fn init(allocator: Allocator, config: Config) !*App {
        const app = try allocator.create(App);
        errdefer allocator.destroy(app);

        const loop = try allocator.create(Loop);
        errdefer allocator.destroy(loop);

        loop.* = try Loop.init(allocator);
        errdefer loop.deinit();

        const notification = try Notification.init(allocator, null);
        errdefer notification.deinit();

        // Initialize the HTTP client before building the struct literal so
        // a later failure (e.g. telemetry.register) can clean it up. The
        // previous code created it inside the literal with no errdefer,
        // leaking it — and app_dir_path — on that error path.
        var http_client = try HttpClient.init(allocator, .{
            .max_concurrent = 3,
            .http_proxy = config.http_proxy,
            .tls_verify_host = config.tls_verify_host,
        });
        errdefer http_client.deinit();

        const app_dir_path = getAndMakeAppDir(allocator);
        errdefer if (app_dir_path) |p| allocator.free(p);

        app.* = .{
            .loop = loop,
            .config = config,
            .allocator = allocator,
            // Telemetry needs a stable *App pointer, so it is filled in
            // right after the struct is in place.
            .telemetry = undefined,
            .http_client = http_client,
            .app_dir_path = app_dir_path,
            .notification = notification,
        };
        app.telemetry = Telemetry.init(app, config.run_mode);
        try app.telemetry.register(app.notification);

        return app;
    }

    /// Releases everything `init` acquired, including `self` itself.
    /// `self` is invalid after this returns.
    pub fn deinit(self: *App) void {
        const allocator = self.allocator;
        if (self.app_dir_path) |app_dir_path| {
            allocator.free(app_dir_path);
        }
        self.telemetry.deinit();
        // NOTE(review): the loop is torn down before the HTTP client; if the
        // client still has work scheduled on the loop this order may matter —
        // order preserved from the original, confirm before changing.
        self.loop.deinit();
        allocator.destroy(self.loop);
        self.http_client.deinit();
        self.notification.deinit();
        allocator.destroy(self);
    }
};
|
|
|
|
/// Resolves the per-user "lightpanda" data directory and ensures it exists.
/// Returns null (after logging a warning) on any failure; a non-null return
/// is owned by `allocator` and must be freed by the caller.
fn getAndMakeAppDir(allocator: Allocator) ?[]const u8 {
    // Tests always use /tmp so they never touch the real user data dir.
    if (@import("builtin").is_test) {
        return allocator.dupe(u8, "/tmp") catch unreachable;
    }
    const app_dir_path = std.fs.getAppDataDir(allocator, "lightpanda") catch |err| {
        log.warn(.app, "get data dir", .{ .err = err });
        return null;
    };

    std.fs.cwd().makePath(app_dir_path) catch |err| switch (err) {
        error.PathAlreadyExists => return app_dir_path,
        else => {
            // Log BEFORE freeing: the original freed app_dir_path first and
            // then passed the freed slice to log.warn (use-after-free).
            log.warn(.app, "create data dir", .{ .err = err, .path = app_dir_path });
            allocator.free(app_dir_path);
            return null;
        },
    };
    return app_dir_path;
}
|