Re-enable telemetry

Start work on supporting navigation events (clicks, form submission).
Karl Seguin
2025-08-01 21:58:24 +08:00
parent 94e8964f69
commit f65a39a3e3
18 changed files with 818 additions and 588 deletions

View File

@@ -3,9 +3,9 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const log = @import("log.zig");
const Http = @import("http/Http.zig");
const Loop = @import("runtime/loop.zig").Loop;
const Platform = @import("runtime/js.zig").Platform;
const http = @import("http/client.zig");
const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
const Notification = @import("notification.zig").Notification;
@@ -13,12 +13,12 @@ const Notification = @import("notification.zig").Notification;
// Container for global state / objects that various parts of the system
// might need.
pub const App = struct {
http: Http,
loop: *Loop,
config: Config,
platform: ?*const Platform,
allocator: Allocator,
telemetry: Telemetry,
http_client: *http.Client,
app_dir_path: ?[]const u8,
notification: *Notification,
@@ -34,8 +34,8 @@ pub const App = struct {
platform: ?*const Platform = null,
tls_verify_host: bool = true,
http_proxy: ?std.Uri = null,
proxy_type: ?http.ProxyType = null,
proxy_auth: ?http.ProxyAuth = null,
proxy_type: ?Http.ProxyType = null,
proxy_auth: ?Http.ProxyAuth = null,
};
pub fn init(allocator: Allocator, config: Config) !*App {
@@ -51,21 +51,27 @@ pub const App = struct {
const notification = try Notification.init(allocator, null);
errdefer notification.deinit();
var http = try Http.init(allocator, .{
.max_concurrent_transfers = 3,
});
errdefer http.deinit();
const app_dir_path = getAndMakeAppDir(allocator);
app.* = .{
.loop = loop,
.http = http,
.allocator = allocator,
.telemetry = undefined,
.platform = config.platform,
.app_dir_path = app_dir_path,
.notification = notification,
.http_client = try http.Client.init(allocator, .{
.max_concurrent_transfers = 3,
}),
.config = config,
};
app.telemetry = Telemetry.init(app, config.run_mode);
app.telemetry = try Telemetry.init(app, config.run_mode);
errdefer app.telemetry.deinit();
try app.telemetry.register(app.notification);
return app;
@@ -79,8 +85,8 @@ pub const App = struct {
self.telemetry.deinit();
self.loop.deinit();
allocator.destroy(self.loop);
self.http_client.deinit();
self.notification.deinit();
self.http.deinit();
allocator.destroy(self);
}
};

View File

@@ -37,6 +37,11 @@ pub fn init(allocator: Allocator) Scheduler {
};
}
pub fn reset(self: *Scheduler) void {
self.primary.clearRetainingCapacity();
self.secondary.clearRetainingCapacity();
}
const AddOpts = struct {
name: []const u8 = "",
};

View File

@@ -20,11 +20,11 @@ const std = @import("std");
const log = @import("../log.zig");
const parser = @import("netsurf.zig");
const http = @import("../http/client.zig");
const App = @import("../app.zig").App;
const Env = @import("env.zig").Env;
const Page = @import("page.zig").Page;
const Browser = @import("browser.zig").Browser;
const HttpClient = @import("../http/Client.zig");
const URL = @import("../url.zig").URL;
const Allocator = std.mem.Allocator;
@@ -48,22 +48,23 @@ scripts: OrderList,
// dom_loaded == true,
deferred: OrderList,
client: *http.Client,
client: *HttpClient,
allocator: Allocator,
buffer_pool: BufferPool,
script_pool: std.heap.MemoryPool(PendingScript),
const OrderList = std.DoublyLinkedList(*PendingScript);
pub fn init(app: *App, page: *Page) ScriptManager {
const allocator = app.allocator;
pub fn init(browser: *Browser, page: *Page) ScriptManager {
// page isn't fully initialized; we can set up our reference, but that's it.
const allocator = browser.allocator;
return .{
.page = page,
.scripts = .{},
.deferred = .{},
.async_count = 0,
.allocator = allocator,
.client = app.http_client,
.client = browser.http_client,
.static_scripts_done = false,
.buffer_pool = BufferPool.init(allocator, 5),
.script_pool = std.heap.MemoryPool(PendingScript).init(allocator),
@@ -247,13 +248,16 @@ fn evaluate(self: *ScriptManager) void {
}
}
fn asyncDone(self: *ScriptManager) void {
self.async_count -= 1;
if (self.async_count == 0 and // there are no more async scripts
pub fn isDone(self: *const ScriptManager) bool {
return self.async_count == 0 and // there are no more async scripts
self.static_scripts_done and // and we've finished parsing the HTML to queue all <scripts>
self.scripts.first == null and // and there are no more <script src=> to wait for
self.deferred.first == null // and there are no more <script defer src=> to wait for
) {
self.deferred.first == null; // and there are no more <script defer src=> to wait for
}
fn asyncDone(self: *ScriptManager) void {
self.async_count -= 1;
if (self.isDone()) {
// then the document is considered complete
self.page.documentIsComplete();
}
@@ -272,7 +276,7 @@ fn getList(self: *ScriptManager, script: *const Script) ?*OrderList {
return &self.scripts;
}
fn startCallback(transfer: *http.Transfer) !void {
fn startCallback(transfer: *HttpClient.Transfer) !void {
const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
script.startCallback(transfer) catch |err| {
log.err(.http, "SM.startCallback", .{ .err = err, .transfer = transfer });
@@ -280,7 +284,7 @@ fn startCallback(transfer: *http.Transfer) !void {
};
}
fn headerCallback(transfer: *http.Transfer) !void {
fn headerCallback(transfer: *HttpClient.Transfer) !void {
const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
script.headerCallback(transfer) catch |err| {
log.err(.http, "SM.headerCallback", .{ .err = err, .transfer = transfer });
@@ -288,7 +292,7 @@ fn headerCallback(transfer: *http.Transfer) !void {
};
}
fn dataCallback(transfer: *http.Transfer, data: []const u8) !void {
fn dataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
script.dataCallback(data) catch |err| {
log.err(.http, "SM.dataCallback", .{ .err = err, .transfer = transfer, .len = data.len });
@@ -296,14 +300,14 @@ fn dataCallback(transfer: *http.Transfer, data: []const u8) !void {
};
}
fn doneCallback(transfer: *http.Transfer) !void {
const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
script.doneCallback(transfer);
fn doneCallback(ctx: *anyopaque) !void {
const script: *PendingScript = @alignCast(@ptrCast(ctx));
script.doneCallback();
}
fn errorCallback(transfer: *http.Transfer, err: anyerror) void {
const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
script.errorCallback(transfer, err);
fn errorCallback(ctx: *anyopaque, err: anyerror) void {
const script: *PendingScript = @alignCast(@ptrCast(ctx));
script.errorCallback(err);
}
// A script which is pending execution.
@@ -326,7 +330,7 @@ const PendingScript = struct {
}
}
fn startCallback(self: *PendingScript, transfer: *http.Transfer) !void {
fn startCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
if (self.manager.getList(&self.script)) |list| {
self.node.data = self;
list.append(&self.node);
@@ -337,7 +341,7 @@ const PendingScript = struct {
log.debug(.http, "script fetch start", .{ .req = transfer });
}
fn headerCallback(self: *PendingScript, transfer: *http.Transfer) !void {
fn headerCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
const header = &transfer.response_header.?;
if (header.status != 200) {
return error.InvalidStatusCode;
@@ -359,8 +363,8 @@ const PendingScript = struct {
try self.script.source.remote.appendSlice(self.manager.allocator, data);
}
fn doneCallback(self: *PendingScript, transfer: *http.Transfer) void {
log.debug(.http, "script fetch complete", .{ .req = transfer });
fn doneCallback(self: *PendingScript) void {
log.debug(.http, "script fetch complete", .{ .req = self.script.url });
const manager = self.manager;
if (self.script.is_async) {
@@ -374,8 +378,8 @@ const PendingScript = struct {
}
}
fn errorCallback(self: *PendingScript, transfer: *http.Transfer, err: anyerror) void {
log.warn(.http, "script fetch error", .{ .req = transfer, .err = err });
fn errorCallback(self: *PendingScript, err: anyerror) void {
log.warn(.http, "script fetch error", .{ .req = self.script.url, .err = err });
self.deinit();
}
};

View File

@@ -28,7 +28,7 @@ const Session = @import("session.zig").Session;
const Notification = @import("../notification.zig").Notification;
const log = @import("../log.zig");
const http = @import("../http/client.zig");
const HttpClient = @import("../http/Client.zig");
// Browser is an instance of the browser.
// You can create multiple browser instances.
@@ -38,7 +38,7 @@ pub const Browser = struct {
app: *App,
session: ?Session,
allocator: Allocator,
http_client: *http.Client,
http_client: *HttpClient,
page_arena: ArenaAllocator,
session_arena: ArenaAllocator,
transfer_arena: ArenaAllocator,
@@ -60,7 +60,7 @@ pub const Browser = struct {
.session = null,
.allocator = allocator,
.notification = notification,
.http_client = app.http_client,
.http_client = app.http.client,
.page_arena = ArenaAllocator.init(allocator),
.session_arena = ArenaAllocator.init(allocator),
.transfer_arena = ArenaAllocator.init(allocator),

View File

@@ -30,8 +30,8 @@ const Session = @import("session.zig").Session;
const Renderer = @import("renderer.zig").Renderer;
const Window = @import("html/window.zig").Window;
const Walker = @import("dom/walker.zig").WalkerDepthFirst;
const Loop = @import("../runtime/loop.zig").Loop;
const Scheduler = @import("Scheduler.zig");
const HttpClient = @import("../http/Client.zig");
const ScriptManager = @import("ScriptManager.zig");
const HTMLDocument = @import("html/document.zig").HTMLDocument;
@@ -39,7 +39,6 @@ const URL = @import("../url.zig").URL;
const log = @import("../log.zig");
const parser = @import("netsurf.zig");
const http = @import("../http/client.zig");
const storage = @import("storage/storage.zig");
const polyfill = @import("polyfill/polyfill.zig");
@@ -50,9 +49,6 @@ const polyfill = @import("polyfill/polyfill.zig");
// The page handles all its memory in an arena allocator. The arena is reset
// when end() is called.
pub const Page = struct {
// Our event loop
loop: *Loop,
cookie_jar: *storage.CookieJar,
// Pre-configured http/client.zig used to make HTTP requests.
@@ -91,11 +87,11 @@ pub const Page = struct {
polyfill_loader: polyfill.Loader = .{},
scheduler: Scheduler,
http_client: *http.Client,
http_client: *HttpClient,
script_manager: ScriptManager,
mode: Mode,
loaded: bool = false,
document_state: DocumentState = .parsing,
const Mode = union(enum) {
pre: void,
@@ -106,9 +102,15 @@ pub const Page = struct {
raw_done: []const u8,
};
const DocumentState = enum {
parsing,
load,
complete,
};
pub fn init(self: *Page, arena: Allocator, session: *Session) !void {
const browser = session.browser;
const script_manager = ScriptManager.init(browser.app, self);
const script_manager = ScriptManager.init(browser, self);
self.* = .{
.url = URL.empty,
@@ -117,7 +119,6 @@ pub const Page = struct {
.arena = arena,
.session = session,
.call_arena = undefined,
.loop = browser.app.loop,
.renderer = Renderer.init(arena),
.state_pool = &browser.state_pool,
.cookie_jar = &session.cookie_jar,
@@ -143,9 +144,18 @@ pub const Page = struct {
}
pub fn deinit(self: *Page) void {
self.http_client.abort();
self.script_manager.deinit();
}
fn reset(self: *Page) void {
_ = self.session.browser.page_arena.reset(.{ .retain_with_limit = 1 * 1024 * 1024 });
self.http_client.abort();
self.scheduler.reset();
self.document_state = .parsing;
self.mode = .{ .pre = {} };
}
fn runMicrotasks(ctx: *anyopaque) ?u32 {
const self: *Page = @alignCast(@ptrCast(ctx));
self.session.browser.runMicrotasks();
@@ -226,7 +236,21 @@ pub const Page = struct {
// return self.fetchData("module", src);
}
pub fn wait(self: *Page, wait_sec: usize) !void {
pub fn wait(self: *Page, wait_sec: usize) void {
self._wait(wait_sec) catch |err| switch (err) {
error.JsError => {}, // already logged (with hopefully more context)
else => {
// There may be errors from the http/client or ScriptManager
// that we should not treat as an error like this. Will need
// to run this through more real-world sites and see if we need
// to expand the switch (err) to have more customized logs for
// specific messages.
log.err(.browser, "page wait", .{ .err = err });
},
};
}
fn _wait(self: *Page, wait_sec: usize) !void {
switch (self.mode) {
.pre, .html, .raw, .parsed => {
// The HTML page was parsed. We now either have JS scripts to
@@ -243,17 +267,21 @@ pub const Page = struct {
var timer = try std.time.Timer.start();
while (true) {
const has_active_http = http_client.active > 0;
const ms_to_next_task = try scheduler.run(has_active_http);
// If we have active http transfers, we might as well run
// any "secondary" task, since we won't be exiting this loop
// anyway.
// scheduler.run could trigger new http transfers, so do not
// store http_client.active BEFORE this call and then use
// it AFTER.
const ms_to_next_task = try scheduler.run(http_client.active > 0);
if (try_catch.hasCaught()) {
const msg = (try try_catch.err(self.arena)) orelse "unknown";
log.err(.browser, "page wait error", .{ .err = msg });
log.warn(.user_script, "page wait", .{ .err = msg, .src = "scheduler" });
return error.JsError;
}
if (has_active_http == false) {
if (http_client.active == 0) {
if (ms_to_next_task) |ms| {
// There are no HTTP transfers, so there's no point calling
// http_client.tick.
@@ -283,7 +311,7 @@ pub const Page = struct {
if (try_catch.hasCaught()) {
const msg = (try try_catch.err(self.arena)) orelse "unknown";
log.err(.browser, "page wait error", .{ .err = msg });
log.warn(.user_script, "page wait", .{ .err = msg, .src = "data" });
return error.JsError;
}
@@ -307,7 +335,13 @@ pub const Page = struct {
// spec reference: https://html.spec.whatwg.org/#document-lifecycle
pub fn navigate(self: *Page, request_url: []const u8, opts: NavigateOpts) !void {
log.debug(.http, "navigate", .{
if (self.mode != .pre) {
// it's possible for navigate to be called multiple times on the
// same page (via CDP). We want to reset the page between each call.
self.reset();
}
log.info(.http, "navigate", .{
.url = request_url,
.method = opts.method,
.reason = opts.reason,
@@ -331,7 +365,8 @@ pub const Page = struct {
.ctx = self,
.url = owned_url,
.method = opts.method,
.header_done_callback = pageHeaderCallback,
.body = opts.body,
.header_done_callback = pageHeaderDoneCallback,
.data_callback = pageDataCallback,
.done_callback = pageDoneCallback,
.error_callback = pageErrorCallback,
@@ -345,15 +380,23 @@ pub const Page = struct {
}
pub fn documentIsLoaded(self: *Page) void {
std.debug.assert(self.document_state == .parsing);
self.document_state = .load;
HTMLDocument.documentIsLoaded(self.window.document, self) catch |err| {
log.err(.browser, "document is loaded", .{ .err = err });
};
}
pub fn documentIsComplete(self: *Page) void {
std.debug.assert(self.loaded == false);
std.debug.assert(self.document_state != .complete);
self.loaded = true;
// documentIsComplete could be called directly, without first calling
// documentIsLoaded, if there were _only_ async scripts
if (self.document_state == .parsing) {
self.documentIsLoaded();
}
self.document_state = .complete;
self._documentIsComplete() catch |err| {
log.err(.browser, "document is complete", .{ .err = err });
};
@@ -378,7 +421,7 @@ pub const Page = struct {
);
}
fn pageHeaderCallback(transfer: *http.Transfer) !void {
fn pageHeaderDoneCallback(transfer: *HttpClient.Transfer) !void {
var self: *Page = @alignCast(@ptrCast(transfer.ctx));
// would be different than self.url in the case of a redirect
@@ -393,7 +436,7 @@ pub const Page = struct {
});
}
fn pageDataCallback(transfer: *http.Transfer, data: []const u8) !void {
fn pageDataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
var self: *Page = @alignCast(@ptrCast(transfer.ctx));
if (self.mode == .pre) {
@@ -426,10 +469,11 @@ pub const Page = struct {
}
}
fn pageDoneCallback(transfer: *http.Transfer) !void {
fn pageDoneCallback(ctx: *anyopaque) !void {
log.debug(.http, "navigate done", .{});
var self: *Page = @alignCast(@ptrCast(transfer.ctx));
var self: *Page = @alignCast(@ptrCast(ctx));
self.clearTransferArena();
switch (self.mode) {
.raw => |buf| self.mode = .{ .raw_done = buf.items },
@@ -475,14 +519,23 @@ pub const Page = struct {
}
self.script_manager.staticScriptsDone();
if (self.script_manager.isDone()) {
// No scripts, or just inline scripts that were already processed
// we need to trigger this ourselves
self.documentIsComplete();
}
},
else => unreachable,
}
}
fn pageErrorCallback(transfer: *http.Transfer, err: anyerror) void {
fn pageErrorCallback(ctx: *anyopaque, err: anyerror) void {
log.err(.http, "navigate failed", .{ .err = err });
var self: *Page = @alignCast(@ptrCast(transfer.ctx));
var self: *Page = @alignCast(@ptrCast(ctx));
self.clearTransferArena();
switch (self.mode) {
.html => |*p| p.deinit(), // don't need the parser anymore
else => {},
@@ -490,6 +543,23 @@ pub const Page = struct {
self.mode = .{ .err = err };
}
// The transfer arena is useful and interesting, but has a weird lifetime.
// When we're transferring from one page to another (via delayed navigation)
// we need things in memory: like the URL that we're navigating to and
// optionally the body to POST. That cannot exist in the page.arena, because
// the page that we have is going to be destroyed and a new page is going
// to be created. If we used the page.arena, we wouldn't be able to reset
// it between navigations.
// So the transfer arena is meant to live across a navigation event. It's
// freed when the main html navigation is complete, either in pageDoneCallback
// or pageErrorCallback. It needs to exist for this long because, if we set
// a body, CURLOPT_POSTFIELDS does not copy the body (it optionally can, but
// why would we want it to) and requires the body to live until the transfer
// is complete.
fn clearTransferArena(self: *Page) void {
_ = self.session.browser.transfer_arena.reset(.{ .retain_with_limit = 4 * 1024 });
}
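A minimal sketch of what that means for callers, assuming a delayed POST navigation; target_url and form_body are illustrative names, not part of this diff:
// Anything that must survive the page swap is allocated from the
// browser-level transfer_arena, not from page.arena.
const arena = page.session.browser.transfer_arena.allocator();
const owned_url = try arena.dupe(u8, target_url);
const owned_body = try arena.dupe(u8, form_body);
// Both stay valid until pageDoneCallback/pageErrorCallback call
// clearTransferArena(), which is long enough for CURLOPT_POSTFIELDS.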
// extracted because this is called from tests to set things up.
pub fn setDocument(self: *Page, html_doc: *parser.DocumentHTML) !void {
const doc = parser.documentHTMLToDocument(html_doc);
@@ -671,12 +741,14 @@ pub const Page = struct {
navi.* = .{
.opts = opts,
.session = session,
.url = try self.url.resolve(arena, url),
.url = try URL.stitch(arena, url, self.url.raw, .{ .alloc = .always }),
};
self.http_client.abort();
// In v8, this throws an exception which JS code cannot catch.
session.executor.terminateExecution();
_ = try self.loop.timeout(0, &navi.navigate_node);
_ = try self.scheduler.add(navi, DelayedNavigation.run, 0, .{ .name = "delayed navigation" });
}
pub fn getOrCreateNodeState(self: *Page, node: *parser.Node) !*State {
@@ -762,11 +834,9 @@ pub const Page = struct {
};
const DelayedNavigation = struct {
url: URL,
url: []const u8,
session: *Session,
opts: NavigateOpts,
initial: bool = true,
navigate_node: Loop.CallbackNode = .{ .func = delayNavigate },
// Navigation is blocking, which is a problem because it can seize up
// the loop and deadlock. We can only safely try to navigate to a
@@ -783,66 +853,31 @@ const DelayedNavigation = struct {
// navigate definitely won't block (which could deadlock the system if there
// are still pending async requests, which we've seen happen, even after
// an abort).
fn delayNavigate(node: *Loop.CallbackNode, repeat_delay: *?u63) void {
_ = node;
_ = repeat_delay;
// @newhttp
// const self: *DelayedNavigation = @fieldParentPtr("navigate_node", node);
fn run(ctx: *anyopaque) ?u32 {
const self: *DelayedNavigation = @alignCast(@ptrCast(ctx));
const session = self.session;
// const session = self.session;
// const initial = self.initial;
// abort any pending requests or active transfers;
session.browser.http_client.abort();
// if (initial) {
// // Prior to schedule this task, we terminated excution to stop
// // the running script. If we don't resume it before doing a shutdown
// // we'll get an error.
// session.executor.resumeExecution();
// Prior to scheduling this task, we terminated execution to stop
// the running script. If we don't resume it before doing a shutdown
// we'll get an error.
session.executor.resumeExecution();
session.removePage();
const page = session.createPage() catch |err| {
log.err(.browser, "delayed navigation page error", .{
.err = err,
.url = self.url,
});
return null;
};
// session.removePage();
// _ = session.createPage() catch |err| {
// log.err(.browser, "delayed navigation page error", .{
// .err = err,
// .url = self.url,
// });
// return;
// };
// self.initial = false;
// }
page.navigate(self.url, self.opts) catch |err| {
log.err(.browser, "delayed navigation error", .{ .err = err, .url = self.url });
};
// if (session.browser.http_client.freeSlotCount() == 0) {
// log.debug(.browser, "delayed navigate waiting", .{});
// const delay = 0 * std.time.ns_per_ms;
// // If this isn't the initial check, we can safely re-use the timer
// // to check again.
// if (initial == false) {
// repeat_delay.* = delay;
// return;
// }
// // However, if this _is_ the initial check, we called
// // session.removePage above, and that reset the loop ctx_id.
// // We can't re-use this timer, because it has the previous ctx_id.
// // We can create a new timeout though, and that'll get the new ctx_id.
// //
// // Page has to be not-null here because we called createPage above.
// _ = session.page.?.loop.timeout(delay, &self.navigate_node) catch |err| {
// log.err(.browser, "delayed navigation loop err", .{ .err = err });
// };
// return;
// }
// const page = session.currentPage() orelse return;
// defer if (!page.delayed_navigation) {
// // If, while loading the page, we intend to navigate to another
// // page, then we need to keep the transfer_arena around, as this
// // sub-navigation is probably using it.
// _ = session.browser.transfer_arena.reset(.{ .retain_with_limit = 64 * 1024 });
// };
// return page.navigate(self.url, self.opts) catch |err| {
// log.err(.browser, "delayed navigation error", .{ .err = err, .url = self.url });
// };
return null;
}
};
@@ -856,7 +891,7 @@ pub const NavigateReason = enum {
pub const NavigateOpts = struct {
cdp_id: ?i64 = null,
reason: NavigateReason = .address_bar,
method: http.Method = .GET,
method: HttpClient.Method = .GET,
body: ?[]const u8 = null,
};
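Given the new body field on NavigateOpts, a form submission (one of the navigation events this commit starts on) could presumably be expressed as a plain POST navigation. A hedged sketch, where action_url and encoded_form are illustrative names and should be allocated from transfer_arena as described above:
try page.navigate(action_url, .{
    .method = .POST,
    .body = encoded_form,
});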

View File

@@ -116,18 +116,10 @@ pub const Session = struct {
// phase. It's important that we clean these up, as they're holding onto
// limited resources (like our fixed-sized http state pool).
//
// First thing we do, is removeJsContext() which will execute the destructor
// of any type that registered a destructor (e.g. XMLHttpRequest).
// This will shutdown any pending sockets, which begins our cleaning
// processed
// removeJsContext() will execute the destructor of any type that
// registered a destructor (e.g. XMLHttpRequest).
self.executor.removeJsContext();
// Second thing we do is reset the loop. This increments the loop ctx_id
// so that any "stale" timeouts we process will get ignored. We need to
// do this BEFORE running the loop because, at this point, things like
// window.setTimeout and running microtasks should be ignored
self.browser.app.loop.reset();
self.page.?.deinit();
self.page = null;

View File

@@ -30,7 +30,7 @@ const URL = @import("../../url.zig").URL;
const Mime = @import("../mime.zig").Mime;
const parser = @import("../netsurf.zig");
const Page = @import("../page.zig").Page;
const http = @import("../../http/client.zig");
const HttpClient = @import("../../http/Client.zig");
const CookieJar = @import("../storage/storage.zig").CookieJar;
// XHR interfaces
@@ -80,13 +80,13 @@ const XMLHttpRequestBodyInit = union(enum) {
pub const XMLHttpRequest = struct {
proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
arena: Allocator,
transfer: ?*http.Transfer = null,
transfer: ?*HttpClient.Transfer = null,
cookie_jar: *CookieJar,
err: ?anyerror = null,
last_dispatch: i64 = 0,
send_flag: bool = false,
method: http.Method,
method: HttpClient.Method,
state: State,
url: ?[:0]const u8 = null,
@@ -173,12 +173,12 @@ pub const XMLHttpRequest = struct {
};
}
pub fn destructor(self: *XMLHttpRequest) void {
if (self.transfer) |transfer| {
transfer.abort();
self.transfer = null;
}
}
// pub fn destructor(self: *XMLHttpRequest) void {
// if (self.transfer) |transfer| {
// transfer.abort();
// self.transfer = null;
// }
// }
pub fn reset(self: *XMLHttpRequest) void {
self.url = null;
@@ -322,7 +322,7 @@ pub const XMLHttpRequest = struct {
}
const methods = [_]struct {
tag: http.Method,
tag: HttpClient.Method,
name: []const u8,
}{
.{ .tag = .DELETE, .name = "DELETE" },
@@ -332,7 +332,7 @@ pub const XMLHttpRequest = struct {
.{ .tag = .POST, .name = "POST" },
.{ .tag = .PUT, .name = "PUT" },
};
pub fn validMethod(m: []const u8) DOMError!http.Method {
pub fn validMethod(m: []const u8) DOMError!HttpClient.Method {
for (methods) |method| {
if (std.ascii.eqlIgnoreCase(method.name, m)) {
return method.tag;
@@ -367,13 +367,17 @@ pub const XMLHttpRequest = struct {
self.send_flag = true;
if (body) |b| {
if (self.method != .GET and self.method != .HEAD) {
self.request_body = try self.arena.dupe(u8, b);
}
}
try page.http_client.request(.{
.ctx = self,
.url = self.url.?,
.method = self.method,
.body = self.request_body,
.content_type = "Content-Type: text/plain; charset=UTF-8", // @newhttp TODO
.start_callback = httpStartCallback,
.header_callback = httpHeaderCallback,
.header_done_callback = httpHeaderDoneCallback,
@@ -383,7 +387,7 @@ pub const XMLHttpRequest = struct {
});
}
fn httpStartCallback(transfer: *http.Transfer) !void {
fn httpStartCallback(transfer: *HttpClient.Transfer) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
for (self.headers.items) |hdr| {
@@ -403,22 +407,15 @@ pub const XMLHttpRequest = struct {
// try request.addHeader("Cookie", arr.items, .{});
// }
// }
if (self.request_body) |b| {
if (self.method != .GET and self.method != .HEAD) {
try transfer.setBody(b);
try transfer.addHeader("Content-Type: text/plain; charset=UTF-8");
}
}
self.transfer = transfer;
}
fn httpHeaderCallback(transfer: *http.Transfer, header: []const u8) !void {
fn httpHeaderCallback(transfer: *HttpClient.Transfer, header: []const u8) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
try self.response_headers.append(self.arena, try self.arena.dupe(u8, header));
}
fn httpHeaderDoneCallback(transfer: *http.Transfer) !void {
fn httpHeaderDoneCallback(transfer: *HttpClient.Transfer) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
const header = &transfer.response_header.?;
@@ -451,7 +448,7 @@ pub const XMLHttpRequest = struct {
// try self.cookie_jar.populateFromResponse(self.request.?.request_uri, &header);
}
fn httpDataCallback(transfer: *http.Transfer, data: []const u8) !void {
fn httpDataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
try self.response_bytes.appendSlice(self.arena, data);
@@ -469,8 +466,8 @@ pub const XMLHttpRequest = struct {
self.last_dispatch = now;
}
fn httpDoneCallback(transfer: *http.Transfer) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
fn httpDoneCallback(ctx: *anyopaque) !void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
log.info(.http, "request complete", .{
.source = "xhr",
@@ -494,8 +491,8 @@ pub const XMLHttpRequest = struct {
self.dispatchProgressEvent("loadend", .{ .loaded = loaded, .total = loaded });
}
fn httpErrorCallback(transfer: *http.Transfer, err: anyerror) void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
fn httpErrorCallback(ctx: *anyopaque, err: anyerror) void {
const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
// the http client will close it after an error; it isn't safe to keep around
self.transfer = null;
self.onErr(err);
@@ -503,7 +500,9 @@ pub const XMLHttpRequest = struct {
pub fn _abort(self: *XMLHttpRequest) void {
self.onErr(DOMError.Abort);
self.destructor();
if (self.transfer) |transfer| {
transfer.abort();
}
}
fn onErr(self: *XMLHttpRequest, err: anyerror) void {

View File

@@ -104,6 +104,7 @@ pub fn CDPT(comptime TypeProvider: type) type {
pub fn handleMessage(self: *Self, msg: []const u8) bool {
// if there's an error, it's already been logged
self.processMessage(msg) catch return false;
self.pageWait();
return true;
}
@@ -113,6 +114,22 @@ pub fn CDPT(comptime TypeProvider: type) type {
return self.dispatch(arena.allocator(), self, msg);
}
// @newhttp
// A bit hacky right now. The main server loop blocks only for CDP
// messages; it no longer blocks for page timeouts or page HTTP
// transfers, so we need to call this ourselves more often.
// This is called after every message and [very hackily] from the server
// loop.
// This is hopefully temporary.
pub fn pageWait(self: *Self) void {
const session = &(self.browser.session orelse return);
var page = session.currentPage() orelse return;
// exits early if there's nothing to do, so a large value like
// 5 seconds should be ok
page.wait(5);
}
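A hypothetical sketch of the "from the server loop" half of this hack, based on the Server.client back-reference added in server.zig below; the helper name and exact call site are assumptions, not part of this diff:
// Called on the server loop's poll timeout so the current page can make
// progress on scheduled tasks and HTTP transfers between CDP messages.
fn tickCurrentPage(server: *Server) void {
    const client = server.client orelse return;
    if (client.cdp) |*cdp| {
        cdp.pageWait();
    }
}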
// Called from above, in processMessage which handles client messages
// but can also be called internally. For example, Target.sendMessageToTarget
// calls back into dispatch to capture the response.

View File

@@ -155,7 +155,6 @@ fn navigate(cmd: anytype) !void {
.reason = .address_bar,
.cdp_id = cmd.input.id,
});
try page.wait(5);
}
pub fn pageNavigate(arena: Allocator, bc: anytype, event: *const Notification.PageNavigate) !void {

View File

@@ -16,32 +16,33 @@
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
pub const c = @cImport({
@cInclude("curl/curl.h");
});
const ENABLE_DEBUG = false;
const std = @import("std");
const log = @import("../log.zig");
const builtin = @import("builtin");
const errors = @import("errors.zig");
const Http = @import("Http.zig");
const c = Http.c;
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
pub fn init() !void {
try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL));
if (comptime ENABLE_DEBUG) {
std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
}
}
const errorCheck = Http.errorCheck;
const errorMCheck = Http.errorMCheck;
pub fn deinit() void {
c.curl_global_cleanup();
}
pub const Method = Http.Method;
// This is loosely tied to a browser Page. Loading all the <scripts>, doing
// XHR requests, and loading imports all happens through here. Since the app
// currently supports 1 browser and 1 page at a time, we only have 1 Client and
// re-use it from page to page. This gives us better re-use of the various
// buffers/caches (including keepalive connections) that libcurl has.
//
// The app has other secondary http needs, like telemetry. While we want to
// share some things (namely the ca blob, and maybe some configuration
// (TODO: ??? should proxy settings be global ???)), we're still able to call
// client.abort() to abort the transfers being made by a page, without impacting
// those other http requests.
pub const Client = @This();
pub const Client = struct {
active: usize,
multi: *c.CURLM,
handles: Handles,
@@ -54,13 +55,7 @@ pub const Client = struct {
const RequestQueue = std.DoublyLinkedList(Request);
const Opts = struct {
timeout_ms: u31 = 0,
max_redirects: u8 = 10,
connect_timeout_ms: u31 = 5000,
max_concurrent_transfers: u8 = 5,
};
pub fn init(allocator: Allocator, opts: Opts) !*Client {
pub fn init(allocator: Allocator, ca_blob: c.curl_blob, opts: Http.Opts) !*Client {
var transfer_pool = std.heap.MemoryPool(Transfer).init(allocator);
errdefer transfer_pool.deinit();
@@ -70,12 +65,12 @@ pub const Client = struct {
const client = try allocator.create(Client);
errdefer allocator.destroy(client);
var handles = try Handles.init(allocator, client, opts);
errdefer handles.deinit(allocator);
const multi = c.curl_multi_init() orelse return error.FailedToInitializeMulti;
errdefer _ = c.curl_multi_cleanup(multi);
var handles = try Handles.init(allocator, client, ca_blob, opts);
errdefer handles.deinit(allocator, multi);
client.* = .{
.queue = .{},
.active = 0,
@@ -85,11 +80,12 @@ pub const Client = struct {
.transfer_pool = transfer_pool,
.queue_node_pool = queue_node_pool,
};
return client;
}
pub fn deinit(self: *Client) void {
self.handles.deinit(self.allocator);
self.handles.deinit(self.allocator, self.multi);
_ = c.curl_multi_cleanup(self.multi);
self.transfer_pool.deinit();
@@ -97,6 +93,28 @@ pub const Client = struct {
self.allocator.destroy(self);
}
pub fn abort(self: *Client) void {
self.handles.abort(self.multi);
var n = self.queue.first;
while (n) |node| {
n = node.next;
self.queue_node_pool.destroy(node);
}
self.queue = .{};
self.active = 0;
// Maybe a bit of overkill.
// We can remove some (all?) of these once we're confident it's right.
std.debug.assert(self.handles.in_use.first == null);
std.debug.assert(self.handles.available.len == self.handles.handles.len);
if (builtin.mode == .Debug) {
var running: c_int = undefined;
std.debug.assert(c.curl_multi_perform(self.multi, &running) == c.CURLE_OK);
std.debug.assert(running == 0);
}
}
pub fn tick(self: *Client, timeout_ms: usize) !void {
var handles = &self.handles.available;
while (true) {
@@ -130,18 +148,20 @@ pub const Client = struct {
const header_list = blk: {
errdefer self.handles.release(handle);
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_URL, req.url.ptr));
switch (req.method) {
.GET => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))),
.POST => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPPOST, @as(c_long, 1))),
.PUT => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "put")),
.DELETE => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "delete")),
.HEAD => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "head")),
.OPTIONS => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "options")),
try Http.setMethod(easy, req.method);
if (req.body) |b| {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, b.ptr));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(b.len))));
}
const header_list = c.curl_slist_append(null, "User-Agent: Lightpanda/1.0");
var header_list = c.curl_slist_append(null, "User-Agent: Lightpanda/1.0");
errdefer c.curl_slist_free_all(header_list);
if (req.content_type) |ct| {
header_list = c.curl_slist_append(header_list, ct);
}
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list));
break :blk header_list;
@@ -188,22 +208,22 @@ pub const Client = struct {
var remaining: c_int = undefined;
const msg: *c.CURLMsg = c.curl_multi_info_read(multi, &remaining) orelse break;
if (msg.msg == c.CURLMSG_DONE) {
self.active -= 1;
const easy = msg.easy_handle.?;
const transfer = try Transfer.fromEasy(easy);
defer {
self.handles.release(transfer.handle);
transfer.deinit();
self.transfer_pool.destroy(transfer);
}
const ctx = transfer.ctx;
const done_callback = transfer.req.done_callback;
const error_callback = transfer.req.error_callback;
// release it ASAP so that it's available (since some done_callbacks
// will load more resources).
self.endTransfer(transfer);
if (errorCheck(msg.data.result)) {
transfer.req.done_callback(transfer) catch |err| transfer.onError(err);
done_callback(ctx) catch |err| error_callback(ctx, err);
} else |err| {
transfer.onError(err);
error_callback(ctx, err);
}
try errorMCheck(c.curl_multi_remove_handle(multi, easy));
}
if (remaining == 0) {
@@ -211,76 +231,92 @@ pub const Client = struct {
}
}
}
fn endTransfer(self: *Client, transfer: *Transfer) void {
const handle = transfer.handle;
transfer.deinit();
self.transfer_pool.destroy(transfer);
errorMCheck(c.curl_multi_remove_handle(self.multi, handle.easy)) catch |err| {
log.fatal(.http, "Failed to abort", .{ .err = err });
};
self.handles.release(handle);
self.active -= 1;
}
const Handles = struct {
handles: []Handle,
available: FreeList,
cert_arena: ArenaAllocator,
in_use: HandleList,
available: HandleList,
const FreeList = std.DoublyLinkedList(*Handle);
const HandleList = std.DoublyLinkedList(*Handle);
fn init(allocator: Allocator, client: *Client, opts: Client.Opts) !Handles {
fn init(allocator: Allocator, client: *Client, ca_blob: c.curl_blob, opts: Http.Opts) !Handles {
const count = opts.max_concurrent_transfers;
std.debug.assert(count > 0);
const handles = try allocator.alloc(Handle, count);
errdefer allocator.free(handles);
var initialized_count: usize = 0;
errdefer cleanup(allocator, handles[0..initialized_count]);
var cert_arena = ArenaAllocator.init(allocator);
errdefer cert_arena.deinit();
const ca_blob = try @import("ca_certs.zig").load(allocator, cert_arena.allocator());
var available: FreeList = .{};
var available: HandleList = .{};
for (0..count) |i| {
const node = try allocator.create(FreeList.Node);
errdefer allocator.destroy(node);
const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
errdefer _ = c.curl_easy_cleanup(easy);
handles[i] = .{
.node = node,
.easy = easy,
.client = client,
.easy = undefined,
.node = undefined,
};
try handles[i].init(ca_blob, opts);
initialized_count += 1;
try handles[i].configure(ca_blob, opts);
node.data = &handles[i];
available.append(node);
handles[i].node.data = &handles[i];
available.append(&handles[i].node);
}
return .{
.in_use = .{},
.handles = handles,
.available = available,
.cert_arena = cert_arena,
};
}
fn deinit(self: *Handles, allocator: Allocator) void {
cleanup(allocator, self.handles);
fn deinit(self: *Handles, allocator: Allocator, multi: *c.CURLM) void {
self.abort(multi);
for (self.handles) |*h| {
_ = c.curl_easy_cleanup(h.easy);
}
allocator.free(self.handles);
self.cert_arena.deinit();
}
// Done line this so that cleanup can be called from init with a partial state
fn cleanup(allocator: Allocator, handles: []Handle) void {
for (handles) |*h| {
_ = c.curl_easy_cleanup(h.easy);
allocator.destroy(h.node);
fn abort(self: *Handles, multi: *c.CURLM) void {
while (self.in_use.first) |node| {
const handle = node.data;
errorMCheck(c.curl_multi_remove_handle(multi, handle.easy)) catch |err| {
log.err(.http, "remove handle", .{ .err = err });
};
self.release(handle);
}
}
fn getFreeHandle(self: *Handles) ?*Handle {
if (self.available.popFirst()) |handle| {
return handle.data;
if (self.available.popFirst()) |node| {
node.prev = null;
node.next = null;
self.in_use.append(node);
return node.data;
}
return null;
}
fn release(self: *Handles, handle: *Handle) void {
self.available.append(handle.node);
const node = &handle.node;
self.in_use.remove(node);
node.prev = null;
node.next = null;
self.available.append(node);
}
};
@@ -289,17 +325,13 @@ const Handles = struct {
const Handle = struct {
easy: *c.CURL,
client: *Client,
node: *Handles.FreeList.Node,
node: Handles.HandleList.Node,
error_buffer: [c.CURL_ERROR_SIZE:0]u8 = undefined,
// Is called by Handles when already partially initialized. Done like this
// so that we have a stable pointer to error_buffer.
fn init(self: *Handle, ca_blob: c.curl_blob, opts: Client.Opts) !void {
const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
errdefer _ = c.curl_easy_cleanup(easy);
self.easy = easy;
fn configure(self: *Handle, ca_blob: c.curl_blob, opts: Http.Opts) !void {
const easy = self.easy;
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_ERRORBUFFER, &self.error_buffer));
// timeouts
@@ -323,7 +355,7 @@ const Handle = struct {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));
// debug
if (comptime ENABLE_DEBUG) {
if (comptime Http.ENABLE_DEBUG) {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
}
}
@@ -332,6 +364,9 @@ const Handle = struct {
pub const Request = struct {
method: Method,
url: [:0]const u8,
body: ?[]const u8 = null,
content_type: ?[:0]const u8 = null,
// arbitrary data that can be associated with this request
ctx: *anyopaque = undefined,
@@ -339,8 +374,8 @@ pub const Request = struct {
header_callback: ?*const fn (req: *Transfer, header: []const u8) anyerror!void = null,
header_done_callback: *const fn (req: *Transfer) anyerror!void,
data_callback: *const fn (req: *Transfer, data: []const u8) anyerror!void,
done_callback: *const fn (req: *Transfer) anyerror!void,
error_callback: *const fn (req: *Transfer, err: anyerror) void,
done_callback: *const fn (ctx: *anyopaque) anyerror!void,
error_callback: *const fn (ctx: *anyopaque, err: anyerror) void,
};
pub const Transfer = struct {
@@ -368,14 +403,10 @@ pub const Transfer = struct {
return writer.print("[{d}] {s} {s}", .{ self.id, @tagName(req.method), req.url });
}
fn onError(self: *Transfer, err: anyerror) void {
self.req.error_callback(self, err);
}
pub fn setBody(self: *Transfer, body: []const u8) !void {
const easy = self.handle.easy;
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
}
pub fn addHeader(self: *Transfer, value: [:0]const u8) !void {
@@ -383,12 +414,7 @@ pub const Transfer = struct {
}
pub fn abort(self: *Transfer) void {
var client = self.handle.client;
errorMCheck(c.curl_multi_remove_handle(client.multi, self.handle.easy)) catch |err| {
log.err(.http, "Failed to abort", .{ .err = err });
};
client.active -= 1;
self.deinit();
self.handle.client.endTransfer(self);
}
fn headerCallback(buffer: [*]const u8, header_count: usize, buf_len: usize, data: *anyopaque) callconv(.c) usize {
@@ -410,7 +436,7 @@ pub const Transfer = struct {
if (transfer._redirecting) {
return buf_len;
}
transfer.onError(error.InvalidResponseLine);
log.debug(.http, "invalid response line", .{ .line = header });
return 0;
}
const version_start: usize = if (header[5] == '2') 7 else 9;
@@ -421,7 +447,7 @@ pub const Transfer = struct {
std.debug.assert(version_end < 13);
const status = std.fmt.parseInt(u16, header[version_start..version_end], 10) catch {
transfer.onError(error.InvalidResponseStatus);
log.debug(.http, "invalid status code", .{ .line = header });
return 0;
};
@@ -433,7 +459,7 @@ pub const Transfer = struct {
var url: [*c]u8 = undefined;
errorCheck(c.curl_easy_getinfo(handle.easy, c.CURLINFO_EFFECTIVE_URL, &url)) catch |err| {
transfer.onError(err);
log.err(.http, "failed to get URL", .{ .err = err });
return 0;
};
@@ -511,41 +537,3 @@ pub const Header = struct {
return self._content_type[0..self._content_type_len];
}
};
fn errorCheck(code: c.CURLcode) errors.Error!void {
if (code == c.CURLE_OK) {
return;
}
return errors.fromCode(code);
}
fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
if (code == c.CURLM_OK) {
return;
}
if (code == c.CURLM_CALL_MULTI_PERFORM) {
// should we can client.perform() here?
// or just wait until the next time we naturally call it?
return;
}
return errors.fromMCode(code);
}
pub const Method = enum {
GET,
PUT,
POST,
DELETE,
HEAD,
OPTIONS,
};
pub const ProxyType = enum {
forward,
connect,
};
pub const ProxyAuth = union(enum) {
basic: struct { user_pass: []const u8 },
bearer: struct { token: []const u8 },
};

src/http/Http.zig (new file, +269 lines)
View File

@@ -0,0 +1,269 @@
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std");
pub const c = @cImport({
@cInclude("curl/curl.h");
});
const errors = @import("errors.zig");
const Client = @import("Client.zig");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
pub const ENABLE_DEBUG = false;
// Client.zig does the bulk of the work and is loosely tied to a browser Page.
// But we still need something above Client.zig for the "utility" http stuff
// we need to do, like telemetry. The most important thing we want from this
// is to be able to share the ca_blob, which can be quite large - loading it
// once for all http connections is a win.
const Http = @This();
opts: Opts,
client: *Client,
ca_blob: ?c.curl_blob,
cert_arena: ArenaAllocator,
pub fn init(allocator: Allocator, opts: Opts) !Http {
try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL));
errdefer c.curl_global_cleanup();
if (comptime ENABLE_DEBUG) {
std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
}
var cert_arena = ArenaAllocator.init(allocator);
errdefer cert_arena.deinit();
const ca_blob = try loadCerts(allocator, cert_arena.allocator());
var client = try Client.init(allocator, ca_blob, opts);
errdefer client.deinit();
return .{
.opts = opts,
.client = client,
.ca_blob = ca_blob,
.cert_arena = cert_arena,
};
}
pub fn deinit(self: *Http) void {
self.client.deinit();
c.curl_global_cleanup();
self.cert_arena.deinit();
}
pub fn newConnection(self: *Http) !Connection {
return Connection.init(self.ca_blob, self.opts);
}
pub const Connection = struct {
easy: *c.CURL,
// A standalone easy handle for one-off requests (e.g. telemetry). It shares
// the CA blob and Opts with the rest of the system, but isn't part of the
// multi/handle pool that Client.zig manages.
pub fn init(ca_blob_: ?c.curl_blob, opts: Opts) !Connection {
const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
errdefer _ = c.curl_easy_cleanup(easy);
// timeouts
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_TIMEOUT_MS, @as(c_long, @intCast(opts.timeout_ms))));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CONNECTTIMEOUT_MS, @as(c_long, @intCast(opts.connect_timeout_ms))));
// redirect behavior
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_MAXREDIRS, @as(c_long, @intCast(opts.max_redirects))));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_FOLLOWLOCATION, @as(c_long, 2)));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_REDIR_PROTOCOLS_STR, "HTTP,HTTPS")); // remove FTP and FTPS from the default
// tls
// try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 0)));
// try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 0)));
if (ca_blob_) |ca_blob| {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));
}
// debug
if (comptime Http.ENABLE_DEBUG) {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
}
return .{
.easy = easy,
};
}
pub fn deinit(self: *const Connection) void {
c.curl_easy_cleanup(self.easy);
}
pub fn setURL(self: *const Connection, url: [:0]const u8) !void {
try errorCheck(c.curl_easy_setopt(self.easy, c.CURLOPT_URL, url.ptr));
}
pub fn setMethod(self: *const Connection, method: Method) !void {
try Http.setMethod(self.easy, method);
}
pub fn setBody(self: *const Connection, body: []const u8) !void {
const easy = self.easy;
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
}
pub fn request(self: *const Connection) !u16 {
try errorCheck(c.curl_easy_perform(self.easy));
var http_code: c_long = undefined;
try errorCheck(c.curl_easy_getinfo(self.easy, c.CURLINFO_RESPONSE_CODE, &http_code));
if (http_code < 0 or http_code > std.math.maxInt(u16)) {
return 0;
}
return @intCast(http_code);
}
};
// used by Connection and Handle
pub fn setMethod(easy: *c.CURL, method: Method) !void {
switch (method) {
.GET => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))),
.POST => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPPOST, @as(c_long, 1))),
.PUT => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "put")),
.DELETE => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "delete")),
.HEAD => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "head")),
.OPTIONS => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "options")),
}
}
pub fn errorCheck(code: c.CURLcode) errors.Error!void {
if (code == c.CURLE_OK) {
return;
}
return errors.fromCode(code);
}
pub fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
if (code == c.CURLM_OK) {
return;
}
if (code == c.CURLM_CALL_MULTI_PERFORM) {
// should we call client.perform() here?
// or just wait until the next time we naturally call it?
return;
}
return errors.fromMCode(code);
}
pub const Opts = struct {
timeout_ms: u31 = 0,
max_redirects: u8 = 10,
connect_timeout_ms: u31 = 5000,
max_concurrent_transfers: u8 = 5,
};
pub const Method = enum {
GET,
PUT,
POST,
DELETE,
HEAD,
OPTIONS,
};
pub const ProxyType = enum {
forward,
connect,
};
pub const ProxyAuth = union(enum) {
basic: struct { user_pass: []const u8 },
bearer: struct { token: []const u8 },
};
// TODO: on BSD / Linux, we could just read the PEM file directly.
// This whole rescan + decode is really just needed for MacOS. On Linux
// bundle.rescan does find the .pem file(s) which could be in a few different
// places, so it's still useful, just not efficient.
fn loadCerts(allocator: Allocator, arena: Allocator) !c.curl_blob {
var bundle: std.crypto.Certificate.Bundle = .{};
try bundle.rescan(allocator);
defer bundle.deinit(allocator);
var it = bundle.map.valueIterator();
const bytes = bundle.bytes.items;
const encoder = std.base64.standard.Encoder;
var arr: std.ArrayListUnmanaged(u8) = .empty;
const encoded_size = encoder.calcSize(bytes.len);
const buffer_size = encoded_size +
(bundle.map.count() * 75) + // start / end per certificate + extra, just in case
(encoded_size / 64) // newline per 64 characters
;
try arr.ensureTotalCapacity(arena, buffer_size);
var writer = arr.writer(arena);
while (it.next()) |index| {
const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);
try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
var line_writer = LineWriter{ .inner = writer };
try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
try writer.writeAll("\n-----END CERTIFICATE-----\n");
}
// Final encoding should not be larger than our initial size estimate
std.debug.assert(buffer_size > arr.items.len);
return .{
.len = arr.items.len,
.data = arr.items.ptr,
.flags = 0,
};
}
// Wraps lines @ 64 columns. A PEM is basically a base64 encoded DER (which is
// what Zig has), with lines wrapped at 64 characters and with a basic header
// and footer
const LineWriter = struct {
col: usize = 0,
inner: std.ArrayListUnmanaged(u8).Writer,
pub fn writeAll(self: *LineWriter, data: []const u8) !void {
var writer = self.inner;
var col = self.col;
const len = 64 - col;
var remain = data;
if (remain.len > len) {
col = 0;
try writer.writeAll(data[0..len]);
try writer.writeByte('\n');
remain = data[len..];
}
while (remain.len > 64) {
try writer.writeAll(remain[0..64]);
try writer.writeByte('\n');
remain = remain[64..];
}
try writer.writeAll(remain);
self.col = col + remain.len;
}
};
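A short usage sketch of the intended split, matching the App wiring earlier in this diff: the page goes through http.client (queued, async, abortable), while utility code such as telemetry can open a one-off Connection that shares the CA blob and Opts. The function name, URL, and payload are illustrative:
fn telemetrySketch(allocator: std.mem.Allocator) !void {
    var http = try Http.init(allocator, .{ .max_concurrent_transfers = 3 });
    defer http.deinit();

    // One-off, blocking request; independent of whatever the page's
    // Client is doing, so client.abort() won't touch it.
    const conn = try http.newConnection();
    defer conn.deinit();
    try conn.setURL("https://telemetry.example.com/event");
    try conn.setMethod(.POST);
    try conn.setBody("{\"event\":\"run\"}");
    _ = try conn.request(); // returns the HTTP status code
}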

View File

@@ -1,93 +0,0 @@
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std");
const c = @import("client.zig").c;
const Allocator = std.mem.Allocator;
// TODO: on BSD / Linux, we could just read the PEM file directly.
// This whole rescan + decode is really just needed for MacOS. On Linux
// bundle.rescan does find the .pem file(s) which could be in a few different
// places, so it's still useful, just not efficient.
pub fn load(allocator: Allocator, arena: Allocator) !c.curl_blob {
var bundle: std.crypto.Certificate.Bundle = .{};
try bundle.rescan(allocator);
defer bundle.deinit(allocator);
var it = bundle.map.valueIterator();
const bytes = bundle.bytes.items;
const encoder = std.base64.standard.Encoder;
var arr: std.ArrayListUnmanaged(u8) = .empty;
const encoded_size = encoder.calcSize(bytes.len);
const buffer_size = encoded_size +
(bundle.map.count() * 75) + // start / end per certificate + extra, just in case
(encoded_size / 64) // newline per 64 characters
;
try arr.ensureTotalCapacity(arena, buffer_size);
var writer = arr.writer(arena);
while (it.next()) |index| {
const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);
try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
var line_writer = LineWriter{ .inner = writer };
try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
try writer.writeAll("\n-----END CERTIFICATE-----\n");
}
// Final encoding should not be larger than our initial size estimate
std.debug.assert(buffer_size > arr.items.len);
return .{
.len = arr.items.len,
.data = arr.items.ptr,
.flags = 0,
};
}
// Wraps lines @ 64 columns
const LineWriter = struct {
col: usize = 0,
inner: std.ArrayListUnmanaged(u8).Writer,
pub fn writeAll(self: *LineWriter, data: []const u8) !void {
var writer = self.inner;
var col = self.col;
const len = 64 - col;
var remain = data;
if (remain.len > len) {
col = 0;
try writer.writeAll(data[0..len]);
try writer.writeByte('\n');
remain = data[len..];
}
while (remain.len > 64) {
try writer.writeAll(remain[0..64]);
try writer.writeByte('\n');
remain = data[len..];
}
try writer.writeAll(remain);
self.col = col + remain.len;
}
};

View File

@@ -17,7 +17,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std");
const c = @import("client.zig").c;
const c = @import("Http.zig").c;
pub const Error = error{
UnsupportedProtocol,

View File

@@ -22,8 +22,8 @@ const Allocator = std.mem.Allocator;
const log = @import("log.zig");
const server = @import("server.zig");
const http = @import("http/client.zig");
const App = @import("app.zig").App;
const Http = @import("http/Http.zig");
const Platform = @import("runtime/js.zig").Platform;
const Browser = @import("browser/browser.zig").Browser;
@@ -130,7 +130,7 @@ fn run(alloc: Allocator) !void {
},
};
try page.wait(5); // 5 seconds
page.wait(5); // 5 seconds
// dump
if (opts.dump) {
@@ -163,14 +163,14 @@ const Command = struct {
};
}
fn proxyType(self: *const Command) ?http.ProxyType {
fn proxyType(self: *const Command) ?Http.ProxyType {
return switch (self.mode) {
inline .serve, .fetch => |opts| opts.common.proxy_type,
else => unreachable,
};
}
fn proxyAuth(self: *const Command) ?http.ProxyAuth {
fn proxyAuth(self: *const Command) ?Http.ProxyAuth {
return switch (self.mode) {
inline .serve, .fetch => |opts| opts.common.proxy_auth,
else => unreachable,
@@ -222,8 +222,8 @@ const Command = struct {
const Common = struct {
http_proxy: ?std.Uri = null,
proxy_type: ?http.ProxyType = null,
proxy_auth: ?http.ProxyAuth = null,
proxy_type: ?Http.ProxyType = null,
proxy_auth: ?Http.ProxyAuth = null,
tls_verify_host: bool = true,
log_level: ?log.Level = null,
log_format: ?log.Format = null,
@@ -534,7 +534,7 @@ fn parseCommonArg(
log.fatal(.app, "missing argument value", .{ .arg = "--proxy_type" });
return error.InvalidArgument;
};
common.proxy_type = std.meta.stringToEnum(http.ProxyType, str) orelse {
common.proxy_type = std.meta.stringToEnum(Http.ProxyType, str) orelse {
log.fatal(.app, "invalid option choice", .{ .arg = "--proxy_type", .value = str });
return error.InvalidArgument;
};

View File

@@ -48,8 +48,9 @@ const MAX_MESSAGE_SIZE = 512 * 1024 + 14;
const Server = struct {
app: *App,
allocator: Allocator,
loop: *Loop,
allocator: Allocator,
client: ?*Client = null,
// internal fields
listener: posix.socket_t,
@@ -96,6 +97,7 @@ const Server = struct {
const client = try self.allocator.create(Client);
client.* = Client.init(socket, self);
client.start();
self.client = client;
if (log.enabled(.app, .info)) {
var address: std.net.Address = undefined;
@@ -107,6 +109,7 @@ const Server = struct {
fn releaseClient(self: *Server, client: *Client) void {
self.allocator.destroy(client);
self.client = null;
}
};
@@ -163,9 +166,7 @@ pub const Client = struct {
const SendQueue = std.DoublyLinkedList(Outgoing);
const Self = @This();
fn init(socket: posix.socket_t, server: *Server) Self {
fn init(socket: posix.socket_t, server: *Server) Client {
return .{
.cdp = null,
.mode = .http,
@@ -185,7 +186,7 @@ pub const Client = struct {
};
}
fn maybeDeinit(self: *Self) void {
fn maybeDeinit(self: *Client) void {
if (self.read_pending or self.write_pending) {
// We cannot do anything as long as we still have these pending
// They should not be pending for long as we're only here after
@@ -222,7 +223,7 @@ pub const Client = struct {
}
}
fn close(self: *Self) void {
fn close(self: *Client) void {
log.info(.app, "client disconnected", .{});
self.connected = false;
// recv only, because we might have pending writes we'd like to get
@@ -231,14 +232,14 @@ pub const Client = struct {
self.maybeDeinit();
}
fn start(self: *Self) void {
fn start(self: *Client) void {
self.queueRead();
self.queueTimeout();
}
fn queueRead(self: *Self) void {
fn queueRead(self: *Client) void {
self.server.loop.io.recv(
*Self,
*Client,
self,
callbackRead,
&self.read_completion,
@@ -248,7 +249,7 @@ pub const Client = struct {
self.read_pending = true;
}
fn callbackRead(self: *Self, _: *Completion, result: RecvError!usize) void {
fn callbackRead(self: *Client, _: *Completion, result: RecvError!usize) void {
self.read_pending = false;
if (self.connected == false) {
self.maybeDeinit();
@@ -277,11 +278,11 @@ pub const Client = struct {
}
}
fn readBuf(self: *Self) []u8 {
fn readBuf(self: *Client) []u8 {
return self.reader.readBuf();
}
fn processData(self: *Self, len: usize) !bool {
fn processData(self: *Client, len: usize) !bool {
self.last_active = now();
self.reader.len += len;
@@ -294,7 +295,7 @@ pub const Client = struct {
}
}
fn processHTTPRequest(self: *Self) !void {
fn processHTTPRequest(self: *Client) !void {
std.debug.assert(self.reader.pos == 0);
const request = self.reader.buf[0..self.reader.len];
@@ -330,7 +331,7 @@ pub const Client = struct {
self.reader.len = 0;
}
fn handleHTTPRequest(self: *Self, request: []u8) !void {
fn handleHTTPRequest(self: *Client, request: []u8) !void {
if (request.len < 18) {
// 18 is [generously] the smallest acceptable HTTP request
return error.InvalidRequest;
@@ -365,7 +366,7 @@ pub const Client = struct {
return error.NotFound;
}
fn upgradeConnection(self: *Self, request: []u8) !void {
fn upgradeConnection(self: *Client, request: []u8) !void {
// our caller already confirmed that we have a trailing \r\n\r\n
const request_line_end = std.mem.indexOfScalar(u8, request, '\r') orelse unreachable;
const request_line = request[0..request_line_end];
@@ -462,7 +463,7 @@ pub const Client = struct {
return self.send(arena, response);
}
fn writeHTTPErrorResponse(self: *Self, comptime status: u16, comptime body: []const u8) void {
fn writeHTTPErrorResponse(self: *Client, comptime status: u16, comptime body: []const u8) void {
const response = std.fmt.comptimePrint(
"HTTP/1.1 {d} \r\nConnection: Close\r\nContent-Length: {d}\r\n\r\n{s}",
.{ status, body.len, body },
@@ -473,7 +474,7 @@ pub const Client = struct {
self.send(null, response) catch {};
}
fn processWebsocketMessage(self: *Self) !bool {
fn processWebsocketMessage(self: *Client) !bool {
errdefer self.close();
var reader = &self.reader;
@@ -517,7 +518,7 @@ pub const Client = struct {
return true;
}
fn sendPong(self: *Self, data: []const u8) !void {
fn sendPong(self: *Client, data: []const u8) !void {
if (data.len == 0) {
return self.send(null, &EMPTY_PONG);
}
@@ -539,7 +540,7 @@ pub const Client = struct {
// writev, so we need to get creative. We'll JSON serialize to a
// buffer, where the first 10 bytes are reserved. We can then backfill
// the header and send the slice.
pub fn sendJSON(self: *Self, message: anytype, opts: std.json.StringifyOptions) !void {
pub fn sendJSON(self: *Client, message: anytype, opts: std.json.StringifyOptions) !void {
var arena = ArenaAllocator.init(self.server.allocator);
errdefer arena.deinit();
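The reserved-prefix backfill described in the comment above might look roughly like this (a sketch only: frameMessage, the 10-byte reserve, and the text opcode are illustrative assumptions, not the actual helper in this file). The idea is to write the smallest valid unmasked server frame header directly in front of the already-serialized payload, so header and body go out as one contiguous slice:
// Sketch: buf is the full buffer; its first 10 bytes were left empty for the header.
fn frameMessage(buf: []u8) []const u8 {
    const payload_len = buf.len - 10;
    const header_len: usize = if (payload_len > 65535) 10 else if (payload_len > 125) 4 else 2;
    const start = 10 - header_len;
    const header = buf[start..10];

    header[0] = 0x81; // FIN + text opcode; server-to-client frames are unmasked
    if (header_len == 2) {
        header[1] = @intCast(payload_len);
    } else if (header_len == 4) {
        header[1] = 126;
        std.mem.writeInt(u16, header[2..4], @intCast(payload_len), .big);
    } else {
        header[1] = 127;
        std.mem.writeInt(u64, header[2..10], payload_len, .big);
    }
    return buf[start..]; // header + payload, ready for a single send
}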
@@ -557,7 +558,7 @@ pub const Client = struct {
}
pub fn sendJSONRaw(
self: *Self,
self: *Client,
arena: ArenaAllocator,
buf: std.ArrayListUnmanaged(u8),
) !void {
@@ -567,9 +568,9 @@ pub const Client = struct {
return self.send(arena, framed);
}
fn queueTimeout(self: *Self) void {
fn queueTimeout(self: *Client) void {
self.server.loop.io.timeout(
*Self,
*Client,
self,
callbackTimeout,
&self.timeout_completion,
@@ -578,7 +579,7 @@ pub const Client = struct {
self.timeout_pending = true;
}
fn callbackTimeout(self: *Self, _: *Completion, result: TimeoutError!void) void {
fn callbackTimeout(self: *Client, _: *Completion, result: TimeoutError!void) void {
self.timeout_pending = false;
if (self.connected == false) {
if (self.read_pending == false and self.write_pending == false) {
@@ -614,7 +615,7 @@ pub const Client = struct {
self.queueTimeout();
}
fn send(self: *Self, arena: ?ArenaAllocator, data: []const u8) !void {
fn send(self: *Client, arena: ?ArenaAllocator, data: []const u8) !void {
const node = try self.send_queue_node_pool.create();
errdefer self.send_queue_node_pool.destroy(node);
@@ -632,7 +633,7 @@ pub const Client = struct {
self.queueSend();
}
fn queueSend(self: *Self) void {
fn queueSend(self: *Client) void {
if (self.connected == false) {
return;
}
@@ -643,7 +644,7 @@ pub const Client = struct {
};
self.server.loop.io.send(
*Self,
*Client,
self,
sendCallback,
&self.write_completion,
@@ -653,7 +654,7 @@ pub const Client = struct {
self.write_pending = true;
}
fn sendCallback(self: *Self, _: *Completion, result: SendError!usize) void {
fn sendCallback(self: *Client, _: *Completion, result: SendError!usize) void {
self.write_pending = false;
if (self.connected == false) {
self.maybeDeinit();
@@ -1054,12 +1055,20 @@ pub fn run(
// - JS callback events from scripts
// var http_client = app.http_client;
while (true) {
// // @newhttp
// // This is a temporary hack for the newhttp work. The issue is that we
// // now have 2 event loops.
// if (http_client.active > 0) {
// _ = try http_client.tick(10);
// }
// @newhttp. This is a hack. We used to just have 1 loop, so we could
// sleep it "forever" and any activity (message to this server,
// JS callback, http data) would wake it up.
// Now we have 2 loops. If we block on one, the other won't get woken
// up. We don't block "forever" but even 10ms adds a bunch of latency
// since this is called in a loop.
// Hopefully this is temporary and we can remove the io loop and then
// only have 1 loop. But, until then, we need to check both loops and
// pay some blocking penalty.
if (server.client) |client| {
if (client.cdp) |*cdp| {
cdp.pageWait();
}
}
try loop.io.run_for_ns(10 * std.time.ns_per_ms);
}

View File

@@ -7,26 +7,31 @@ const Allocator = std.mem.Allocator;
const log = @import("../log.zig");
const App = @import("../app.zig").App;
const http = @import("../http/client.zig");
const Http = @import("../http/Http.zig");
const telemetry = @import("telemetry.zig");
const URL = "https://telemetry.lightpanda.io";
const MAX_BATCH_SIZE = 20;
pub const LightPanda = struct {
uri: std.Uri,
pending: List,
running: bool,
thread: ?std.Thread,
allocator: Allocator,
mutex: std.Thread.Mutex,
cond: Thread.Condition,
client: *http.Client,
connection: Http.Connection,
node_pool: std.heap.MemoryPool(List.Node),
const List = std.DoublyLinkedList(LightPandaEvent);
pub fn init(app: *App) LightPanda {
pub fn init(app: *App) !LightPanda {
const connection = try app.http.newConnection();
errdefer connection.deinit();
try connection.setURL(URL);
try connection.setMethod(.POST);
const allocator = app.allocator;
return .{
.cond = .{},
@@ -35,8 +40,7 @@ pub const LightPanda = struct {
.thread = null,
.running = true,
.allocator = allocator,
.client = app.http_client,
.uri = std.Uri.parse(URL) catch unreachable,
.connection = connection,
.node_pool = std.heap.MemoryPool(List.Node).init(allocator),
};
}
@@ -50,6 +54,7 @@ pub const LightPanda = struct {
thread.join();
}
self.node_pool.deinit();
self.connection.deinit();
}
pub fn send(self: *LightPanda, iid: ?[]const u8, run_mode: App.RunMode, raw_event: telemetry.Event) !void {
@@ -102,15 +107,11 @@ pub const LightPanda = struct {
try writer.writeByte('\n');
}
var req = try self.client.request(.POST, &self.uri);
defer req.deinit();
req.body = arr.items;
try self.connection.setBody(arr.items);
const status = try self.connection.request();
// drain the response
var res = try req.sendSync(.{});
while (try res.next()) |_| {}
if (res.header.status != 200) {
log.warn(.telemetry, "server error", .{ .status = res.header.status });
if (status != 200) {
log.warn(.telemetry, "server error", .{ .status = status });
}
}
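Pulled together, the connection lifecycle this provider now relies on looks roughly like this (a sketch assuming only the Http.Connection methods visible in this diff; app is the *App passed to init and payload stands in for the serialized batch):
// Created once, up front; reused for every batch.
const connection = try app.http.newConnection();
defer connection.deinit();
try connection.setURL("https://telemetry.lightpanda.io");
try connection.setMethod(.POST);

// Per batch: swap in the body and issue the request.
try connection.setBody(payload);
const status = try connection.request();
if (status != 200) {
    log.warn(.telemetry, "server error", .{ .status = status });
}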

View File

@@ -29,16 +29,19 @@ fn TelemetryT(comptime P: type) type {
const Self = @This();
pub fn init(app: *App, run_mode: App.RunMode) Self {
pub fn init(app: *App, run_mode: App.RunMode) !Self {
const disabled = std.process.hasEnvVarConstant("LIGHTPANDA_DISABLE_TELEMETRY");
if (builtin.mode != .Debug and builtin.is_test == false) {
log.info(.telemetry, "telemetry status", .{ .disabled = disabled });
}
const provider = try P.init(app);
errdefer provider.deinit();
return .{
.disabled = disabled,
.run_mode = run_mode,
.provider = P.init(app),
.provider = provider,
.iid = if (disabled) null else getOrCreateId(app.app_dir_path),
};
}
@@ -134,7 +137,7 @@ pub const Event = union(enum) {
};
const NoopProvider = struct {
fn init(_: *App) NoopProvider {
fn init(_: *App) !NoopProvider {
return .{};
}
fn deinit(_: NoopProvider) void {}
@@ -150,7 +153,7 @@ test "telemetry: disabled by environment" {
defer _ = unsetenv(@constCast("LIGHTPANDA_DISABLE_TELEMETRY"));
const FailingProvider = struct {
fn init(_: *App) @This() {
fn init(_: *App) !@This() {
return .{};
}
fn deinit(_: @This()) void {}
@@ -159,7 +162,7 @@ test "telemetry: disabled by environment" {
}
};
var telemetry = TelemetryT(FailingProvider).init(undefined, .serve);
var telemetry = try TelemetryT(FailingProvider).init(undefined, .serve);
defer telemetry.deinit();
telemetry.record(.{ .run = {} });
}
@@ -186,7 +189,7 @@ test "telemetry: sends event to provider" {
var app = testing.createApp(.{});
defer app.deinit();
var telemetry = TelemetryT(MockProvider).init(app, .serve);
var telemetry = try TelemetryT(MockProvider).init(app, .serve);
defer telemetry.deinit();
const mock = &telemetry.provider;
@@ -206,7 +209,7 @@ const MockProvider = struct {
allocator: Allocator,
events: std.ArrayListUnmanaged(Event),
fn init(app: *App) @This() {
fn init(app: *App) !@This() {
return .{
.iid = null,
.run_mode = null,

View File

@@ -409,10 +409,6 @@ pub const JsRunner = struct {
const html_doc = try parser.documentHTMLParseFromStr(opts.html);
try page.setDocument(html_doc);
// after the page is considered loaded, page.wait can exit early if
// there's no IO/timeouts. So setting this speeds up our tests
page.loaded = true;
return .{
.app = app,
.page = page,
@@ -445,7 +441,7 @@ pub const JsRunner = struct {
}
return err;
};
try self.page.wait(1);
self.page.wait(1);
@import("root").js_runner_duration += std.time.Instant.since(try std.time.Instant.now(), start);
if (case.@"1") |expected| {