Mirror of https://github.com/lightpanda-io/browser.git (synced 2025-10-29 15:13:28 +00:00)
Re-enable telemetry
Start work on supporting navigation events (clicks, form submission).
src/app.zig | 24
@@ -3,9 +3,9 @@ const std = @import("std");
 const Allocator = std.mem.Allocator;
 
 const log = @import("log.zig");
+const Http = @import("http/Http.zig");
 const Loop = @import("runtime/loop.zig").Loop;
 const Platform = @import("runtime/js.zig").Platform;
-const http = @import("http/client.zig");
 
 const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
 const Notification = @import("notification.zig").Notification;
@@ -13,12 +13,12 @@ const Notification = @import("notification.zig").Notification;
 // Container for global state / objects that various parts of the system
 // might need.
 pub const App = struct {
+    http: Http,
     loop: *Loop,
     config: Config,
     platform: ?*const Platform,
     allocator: Allocator,
     telemetry: Telemetry,
-    http_client: *http.Client,
     app_dir_path: ?[]const u8,
     notification: *Notification,
 
@@ -34,8 +34,8 @@ pub const App = struct {
         platform: ?*const Platform = null,
         tls_verify_host: bool = true,
         http_proxy: ?std.Uri = null,
-        proxy_type: ?http.ProxyType = null,
-        proxy_auth: ?http.ProxyAuth = null,
+        proxy_type: ?Http.ProxyType = null,
+        proxy_auth: ?Http.ProxyAuth = null,
     };
 
     pub fn init(allocator: Allocator, config: Config) !*App {
@@ -51,21 +51,27 @@ pub const App = struct {
         const notification = try Notification.init(allocator, null);
         errdefer notification.deinit();
 
+        var http = try Http.init(allocator, .{
+            .max_concurrent_transfers = 3,
+        });
+        errdefer http.deinit();
+
         const app_dir_path = getAndMakeAppDir(allocator);
 
         app.* = .{
             .loop = loop,
+            .http = http,
             .allocator = allocator,
             .telemetry = undefined,
             .platform = config.platform,
             .app_dir_path = app_dir_path,
             .notification = notification,
-            .http_client = try http.Client.init(allocator, .{
-                .max_concurrent_transfers = 3,
-            }),
             .config = config,
         };
-        app.telemetry = Telemetry.init(app, config.run_mode);
+        app.telemetry = try Telemetry.init(app, config.run_mode);
+        errdefer app.telemetry.deinit();
+
         try app.telemetry.register(app.notification);
 
         return app;
@@ -79,8 +85,8 @@ pub const App = struct {
         self.telemetry.deinit();
         self.loop.deinit();
         allocator.destroy(self.loop);
-        self.http_client.deinit();
         self.notification.deinit();
+        self.http.deinit();
         allocator.destroy(self);
     }
 };

@@ -37,6 +37,11 @@ pub fn init(allocator: Allocator) Scheduler {
     };
 }
 
+pub fn reset(self: *Scheduler) void {
+    self.primary.clearRetainingCapacity();
+    self.secondary.clearRetainingCapacity();
+}
+
 const AddOpts = struct {
     name: []const u8 = "",
 };

@@ -20,11 +20,11 @@ const std = @import("std");
 
 const log = @import("../log.zig");
 const parser = @import("netsurf.zig");
-const http = @import("../http/client.zig");
 
-const App = @import("../app.zig").App;
 const Env = @import("env.zig").Env;
 const Page = @import("page.zig").Page;
+const Browser = @import("browser.zig").Browser;
+const HttpClient = @import("../http/Client.zig");
 const URL = @import("../url.zig").URL;
 
 const Allocator = std.mem.Allocator;
@@ -48,22 +48,23 @@ scripts: OrderList,
 // dom_loaded == true,
 deferred: OrderList,
 
-client: *http.Client,
+client: *HttpClient,
 allocator: Allocator,
 buffer_pool: BufferPool,
 script_pool: std.heap.MemoryPool(PendingScript),
 
 const OrderList = std.DoublyLinkedList(*PendingScript);
 
-pub fn init(app: *App, page: *Page) ScriptManager {
-    const allocator = app.allocator;
+pub fn init(browser: *Browser, page: *Page) ScriptManager {
+    // page isn't fully initialized, we can setup our reference, but that's it.
+    const allocator = browser.allocator;
     return .{
         .page = page,
         .scripts = .{},
         .deferred = .{},
         .async_count = 0,
         .allocator = allocator,
-        .client = app.http_client,
+        .client = browser.http_client,
         .static_scripts_done = false,
         .buffer_pool = BufferPool.init(allocator, 5),
         .script_pool = std.heap.MemoryPool(PendingScript).init(allocator),
@@ -247,13 +248,16 @@ fn evaluate(self: *ScriptManager) void {
     }
 }
 
-fn asyncDone(self: *ScriptManager) void {
-    self.async_count -= 1;
-    if (self.async_count == 0 and // there are no more async scripts
+pub fn isDone(self: *const ScriptManager) bool {
+    return self.async_count == 0 and // there are no more async scripts
         self.static_scripts_done and // and we've finished parsing the HTML to queue all <scripts>
         self.scripts.first == null and // and there are no more <script src=> to wait for
-        self.deferred.first == null // and there are no more <script defer src=> to wait for
-    ) {
+        self.deferred.first == null; // and there are no more <script defer src=> to wait for
+}
+
+fn asyncDone(self: *ScriptManager) void {
+    self.async_count -= 1;
+    if (self.isDone()) {
         // then the document is considered complete
         self.page.documentIsComplete();
     }
@@ -272,7 +276,7 @@ fn getList(self: *ScriptManager, script: *const Script) ?*OrderList {
     return &self.scripts;
 }
 
-fn startCallback(transfer: *http.Transfer) !void {
+fn startCallback(transfer: *HttpClient.Transfer) !void {
     const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
     script.startCallback(transfer) catch |err| {
         log.err(.http, "SM.startCallback", .{ .err = err, .transfer = transfer });
@@ -280,7 +284,7 @@ fn startCallback(transfer: *http.Transfer) !void {
     };
 }
 
-fn headerCallback(transfer: *http.Transfer) !void {
+fn headerCallback(transfer: *HttpClient.Transfer) !void {
     const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
     script.headerCallback(transfer) catch |err| {
         log.err(.http, "SM.headerCallback", .{ .err = err, .transfer = transfer });
@@ -288,7 +292,7 @@ fn headerCallback(transfer: *http.Transfer) !void {
     };
 }
 
-fn dataCallback(transfer: *http.Transfer, data: []const u8) !void {
+fn dataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
     const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
     script.dataCallback(data) catch |err| {
         log.err(.http, "SM.dataCallback", .{ .err = err, .transfer = transfer, .len = data.len });
@@ -296,14 +300,14 @@ fn dataCallback(transfer: *http.Transfer, data: []const u8) !void {
     };
 }
 
-fn doneCallback(transfer: *http.Transfer) !void {
-    const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
-    script.doneCallback(transfer);
+fn doneCallback(ctx: *anyopaque) !void {
+    const script: *PendingScript = @alignCast(@ptrCast(ctx));
+    script.doneCallback();
 }
 
-fn errorCallback(transfer: *http.Transfer, err: anyerror) void {
-    const script: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
-    script.errorCallback(transfer, err);
+fn errorCallback(ctx: *anyopaque, err: anyerror) void {
+    const script: *PendingScript = @alignCast(@ptrCast(ctx));
+    script.errorCallback(err);
 }
 
 // A script which is pending execution.
@@ -326,7 +330,7 @@ const PendingScript = struct {
         }
     }
 
-    fn startCallback(self: *PendingScript, transfer: *http.Transfer) !void {
+    fn startCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
         if (self.manager.getList(&self.script)) |list| {
             self.node.data = self;
             list.append(&self.node);
@@ -337,7 +341,7 @@ const PendingScript = struct {
         log.debug(.http, "script fetch start", .{ .req = transfer });
     }
 
-    fn headerCallback(self: *PendingScript, transfer: *http.Transfer) !void {
+    fn headerCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
         const header = &transfer.response_header.?;
         if (header.status != 200) {
             return error.InvalidStatusCode;
@@ -359,8 +363,8 @@ const PendingScript = struct {
         try self.script.source.remote.appendSlice(self.manager.allocator, data);
     }
 
-    fn doneCallback(self: *PendingScript, transfer: *http.Transfer) void {
-        log.debug(.http, "script fetch complete", .{ .req = transfer });
+    fn doneCallback(self: *PendingScript) void {
+        log.debug(.http, "script fetch complete", .{ .req = self.script.url });
 
         const manager = self.manager;
         if (self.script.is_async) {
@@ -374,8 +378,8 @@ const PendingScript = struct {
         }
     }
 
-    fn errorCallback(self: *PendingScript, transfer: *http.Transfer, err: anyerror) void {
-        log.warn(.http, "script fetch error", .{ .req = transfer, .err = err });
+    fn errorCallback(self: *PendingScript, err: anyerror) void {
+        log.warn(.http, "script fetch error", .{ .req = self.script.url, .err = err });
         self.deinit();
     }
 };

@@ -28,7 +28,7 @@ const Session = @import("session.zig").Session;
 const Notification = @import("../notification.zig").Notification;
 
 const log = @import("../log.zig");
-const http = @import("../http/client.zig");
+const HttpClient = @import("../http/Client.zig");
 
 // Browser is an instance of the browser.
 // You can create multiple browser instances.
@@ -38,7 +38,7 @@ pub const Browser = struct {
     app: *App,
     session: ?Session,
     allocator: Allocator,
-    http_client: *http.Client,
+    http_client: *HttpClient,
     page_arena: ArenaAllocator,
     session_arena: ArenaAllocator,
     transfer_arena: ArenaAllocator,
@@ -60,7 +60,7 @@ pub const Browser = struct {
             .session = null,
             .allocator = allocator,
             .notification = notification,
-            .http_client = app.http_client,
+            .http_client = app.http.client,
            .page_arena = ArenaAllocator.init(allocator),
             .session_arena = ArenaAllocator.init(allocator),
             .transfer_arena = ArenaAllocator.init(allocator),

@@ -30,8 +30,8 @@ const Session = @import("session.zig").Session;
 const Renderer = @import("renderer.zig").Renderer;
 const Window = @import("html/window.zig").Window;
 const Walker = @import("dom/walker.zig").WalkerDepthFirst;
-const Loop = @import("../runtime/loop.zig").Loop;
 const Scheduler = @import("Scheduler.zig");
+const HttpClient = @import("../http/Client.zig");
 const ScriptManager = @import("ScriptManager.zig");
 const HTMLDocument = @import("html/document.zig").HTMLDocument;
 
@@ -39,7 +39,6 @@ const URL = @import("../url.zig").URL;
 
 const log = @import("../log.zig");
 const parser = @import("netsurf.zig");
-const http = @import("../http/client.zig");
 const storage = @import("storage/storage.zig");
 
 const polyfill = @import("polyfill/polyfill.zig");
@@ -50,9 +49,6 @@ const polyfill = @import("polyfill/polyfill.zig");
 // The page handle all its memory in an arena allocator. The arena is reseted
 // when end() is called.
 pub const Page = struct {
-    // Our event loop
-    loop: *Loop,
-
     cookie_jar: *storage.CookieJar,
 
     // Pre-configured http/cilent.zig used to make HTTP requests.
@@ -91,11 +87,11 @@ pub const Page = struct {
     polyfill_loader: polyfill.Loader = .{},
 
     scheduler: Scheduler,
-    http_client: *http.Client,
+    http_client: *HttpClient,
     script_manager: ScriptManager,
     mode: Mode,
 
-    loaded: bool = false,
+    document_state: DocumentState = .parsing,
 
     const Mode = union(enum) {
         pre: void,
@@ -106,9 +102,15 @@ pub const Page = struct {
         raw_done: []const u8,
     };
 
+    const DocumentState = enum {
+        parsing,
+        load,
+        complete,
+    };
+
     pub fn init(self: *Page, arena: Allocator, session: *Session) !void {
         const browser = session.browser;
-        const script_manager = ScriptManager.init(browser.app, self);
+        const script_manager = ScriptManager.init(browser, self);
 
         self.* = .{
             .url = URL.empty,
@@ -117,7 +119,6 @@ pub const Page = struct {
             .arena = arena,
             .session = session,
             .call_arena = undefined,
-            .loop = browser.app.loop,
             .renderer = Renderer.init(arena),
             .state_pool = &browser.state_pool,
             .cookie_jar = &session.cookie_jar,
@@ -143,9 +144,18 @@ pub const Page = struct {
     }
 
     pub fn deinit(self: *Page) void {
+        self.http_client.abort();
         self.script_manager.deinit();
     }
 
+    fn reset(self: *Page) void {
+        _ = self.session.browser.page_arena.reset(.{ .retain_with_limit = 1 * 1024 * 1024 });
+        self.http_client.abort();
+        self.scheduler.reset();
+        self.document_state = .parsing;
+        self.mode = .{ .pre = {} };
+    }
+
     fn runMicrotasks(ctx: *anyopaque) ?u32 {
         const self: *Page = @alignCast(@ptrCast(ctx));
         self.session.browser.runMicrotasks();
@@ -226,7 +236,21 @@ pub const Page = struct {
         // return self.fetchData("module", src);
     }
 
-    pub fn wait(self: *Page, wait_sec: usize) !void {
+    pub fn wait(self: *Page, wait_sec: usize) void {
+        self._wait(wait_sec) catch |err| switch (err) {
+            error.JsError => {}, // already logged (with hopefully more context)
+            else => {
+                // There may be errors from the http/client or ScriptManager
+                // that we should not treat as an error like this. Will need
+                // to run this through more real-world sites and see if we need
+                // to expand the switch (err) to have more customized logs for
+                // specific messages.
+                log.err(.browser, "page wait", .{ .err = err });
+            },
+        };
+    }
+
+    fn _wait(self: *Page, wait_sec: usize) !void {
         switch (self.mode) {
             .pre, .html, .raw, .parsed => {
                 // The HTML page was parsed. We now either have JS scripts to
@@ -243,17 +267,21 @@ pub const Page = struct {
                 var timer = try std.time.Timer.start();
 
                 while (true) {
-                    const has_active_http = http_client.active > 0;
-
-                    const ms_to_next_task = try scheduler.run(has_active_http);
+                    // If we have active http transfers, we might as well run
+                    // any "secondary" task, since we won't be exiting this loop
+                    // anyways.
+                    // scheduler.run could trigger new http transfers, so do not
+                    // store http_client.active BEFORE this call and then use
+                    // it AFTER.
+                    const ms_to_next_task = try scheduler.run(http_client.active > 0);
 
                     if (try_catch.hasCaught()) {
                         const msg = (try try_catch.err(self.arena)) orelse "unknown";
-                        log.err(.browser, "page wait error", .{ .err = msg });
+                        log.warn(.user_script, "page wait", .{ .err = msg, .src = "scheduler" });
                         return error.JsError;
                     }
 
-                    if (has_active_http == false) {
+                    if (http_client.active == 0) {
                         if (ms_to_next_task) |ms| {
                             // There are no HTTP transfers, so there's no point calling
                             // http_client.tick.
@@ -283,7 +311,7 @@ pub const Page = struct {
 
                 if (try_catch.hasCaught()) {
                     const msg = (try try_catch.err(self.arena)) orelse "unknown";
-                    log.err(.browser, "page wait error", .{ .err = msg });
+                    log.warn(.user_script, "page wait", .{ .err = msg, .src = "data" });
                     return error.JsError;
                 }
 
@@ -307,7 +335,13 @@ pub const Page = struct {
 
     // spec reference: https://html.spec.whatwg.org/#document-lifecycle
     pub fn navigate(self: *Page, request_url: []const u8, opts: NavigateOpts) !void {
-        log.debug(.http, "navigate", .{
+        if (self.mode != .pre) {
+            // it's possible for navigate to be called multiple times on the
+            // same page (via CDP). We want to reset the page between each call.
+            self.reset();
+        }
+
+        log.info(.http, "navigate", .{
             .url = request_url,
             .method = opts.method,
             .reason = opts.reason,
@@ -331,7 +365,8 @@ pub const Page = struct {
             .ctx = self,
             .url = owned_url,
             .method = opts.method,
-            .header_done_callback = pageHeaderCallback,
+            .body = opts.body,
+            .header_done_callback = pageHeaderDoneCallback,
             .data_callback = pageDataCallback,
             .done_callback = pageDoneCallback,
             .error_callback = pageErrorCallback,
@@ -345,15 +380,23 @@ pub const Page = struct {
     }
 
     pub fn documentIsLoaded(self: *Page) void {
+        std.debug.assert(self.document_state == .parsing);
+        self.document_state = .load;
         HTMLDocument.documentIsLoaded(self.window.document, self) catch |err| {
             log.err(.browser, "document is loaded", .{ .err = err });
         };
     }
 
     pub fn documentIsComplete(self: *Page) void {
-        std.debug.assert(self.loaded == false);
+        std.debug.assert(self.document_state != .complete);
 
-        self.loaded = true;
+        // documentIsComplete could be called directly, without first calling
+        // documentIsLoaded, if there were _only_ async scrypts
+        if (self.document_state == .parsing) {
+            self.documentIsLoaded();
+        }
+
+        self.document_state = .complete;
         self._documentIsComplete() catch |err| {
             log.err(.browser, "document is complete", .{ .err = err });
         };
@@ -378,7 +421,7 @@ pub const Page = struct {
         );
     }
 
-    fn pageHeaderCallback(transfer: *http.Transfer) !void {
+    fn pageHeaderDoneCallback(transfer: *HttpClient.Transfer) !void {
         var self: *Page = @alignCast(@ptrCast(transfer.ctx));
 
         // would be different than self.url in the case of a redirect
@@ -393,7 +436,7 @@ pub const Page = struct {
         });
     }
 
-    fn pageDataCallback(transfer: *http.Transfer, data: []const u8) !void {
+    fn pageDataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
         var self: *Page = @alignCast(@ptrCast(transfer.ctx));
 
         if (self.mode == .pre) {
@@ -426,10 +469,11 @@ pub const Page = struct {
         }
     }
 
-    fn pageDoneCallback(transfer: *http.Transfer) !void {
+    fn pageDoneCallback(ctx: *anyopaque) !void {
         log.debug(.http, "navigate done", .{});
 
-        var self: *Page = @alignCast(@ptrCast(transfer.ctx));
+        var self: *Page = @alignCast(@ptrCast(ctx));
+        self.clearTransferArena();
 
         switch (self.mode) {
             .raw => |buf| self.mode = .{ .raw_done = buf.items },
@@ -475,14 +519,23 @@ pub const Page = struct {
                 }
 
                 self.script_manager.staticScriptsDone();
 
+                if (self.script_manager.isDone()) {
+                    // No scripts, or just inline scripts that were already processed
+                    // we need to trigger this ourselves
+                    self.documentIsComplete();
+                }
             },
             else => unreachable,
         }
     }
 
-    fn pageErrorCallback(transfer: *http.Transfer, err: anyerror) void {
+    fn pageErrorCallback(ctx: *anyopaque, err: anyerror) void {
         log.err(.http, "navigate failed", .{ .err = err });
-        var self: *Page = @alignCast(@ptrCast(transfer.ctx));
+
+        var self: *Page = @alignCast(@ptrCast(ctx));
+        self.clearTransferArena();
 
         switch (self.mode) {
             .html => |*p| p.deinit(), // don't need the parser anymore
             else => {},
@@ -490,6 +543,23 @@ pub const Page = struct {
         self.mode = .{ .err = err };
     }
 
+    // The transfer arena is useful and interesting, but has a weird lifetime.
+    // When we're transfering from one page to another (via delayed navigation)
+    // we need things in memory: like the URL that we're navigating to and
+    // optionally the body to POST. That cannot exist in the page.arena, because
+    // the page that we have is going to be destroyed and a new page is going
+    // to be created. If we used the page.arena, we'd wouldn't be able to reset
+    // it between navigations.
+    // So the transfer arena is meant to exist between a navigation event. It's
+    // freed when the main html navigation is complete, either in pageDoneCallback
+    // or pageErrorCallback. It needs to exist for this long because, if we set
+    // a body, CURLOPT_POSTFIELDS does not copy the body (it optionally can, but
+    // why would we want to) and requires the body to live until the transfer
+    // is complete.
+    fn clearTransferArena(self: *Page) void {
+        _ = self.session.browser.transfer_arena.reset(.{ .retain_with_limit = 4 * 1024 });
+    }
+
     // extracted because this sis called from tests to set things up.
     pub fn setDocument(self: *Page, html_doc: *parser.DocumentHTML) !void {
         const doc = parser.documentHTMLToDocument(html_doc);
@@ -671,12 +741,14 @@ pub const Page = struct {
         navi.* = .{
             .opts = opts,
             .session = session,
-            .url = try self.url.resolve(arena, url),
+            .url = try URL.stitch(arena, url, self.url.raw, .{ .alloc = .always }),
         };
 
+        self.http_client.abort();
+
         // In v8, this throws an exception which JS code cannot catch.
         session.executor.terminateExecution();
-        _ = try self.loop.timeout(0, &navi.navigate_node);
+        _ = try self.scheduler.add(navi, DelayedNavigation.run, 0, .{ .name = "delayed navigation" });
     }
 
     pub fn getOrCreateNodeState(self: *Page, node: *parser.Node) !*State {
@@ -762,11 +834,9 @@ pub const Page = struct {
 };
 
 const DelayedNavigation = struct {
-    url: URL,
+    url: []const u8,
     session: *Session,
     opts: NavigateOpts,
-    initial: bool = true,
-    navigate_node: Loop.CallbackNode = .{ .func = delayNavigate },
 
     // Navigation is blocking, which is problem because it can seize up
     // the loop and deadlock. We can only safely try to navigate to a
@@ -783,66 +853,31 @@ const DelayedNavigation = struct {
     // navigate definetly won't block (which could deadlock the system if there
     // are still pending async requests, which we've seen happen, even after
    // an abort).
-    fn delayNavigate(node: *Loop.CallbackNode, repeat_delay: *?u63) void {
-        _ = node;
-        _ = repeat_delay;
-        // @newhttp
-        // const self: *DelayedNavigation = @fieldParentPtr("navigate_node", node);
-
-        // const session = self.session;
-        // const initial = self.initial;
-
-        // if (initial) {
-        // // Prior to schedule this task, we terminated excution to stop
-        // // the running script. If we don't resume it before doing a shutdown
-        // // we'll get an error.
-        // session.executor.resumeExecution();
-
-        // session.removePage();
-        // _ = session.createPage() catch |err| {
-        // log.err(.browser, "delayed navigation page error", .{
-        // .err = err,
-        // .url = self.url,
-        // });
-        // return;
-        // };
-        // self.initial = false;
-        // }
-
-        // if (session.browser.http_client.freeSlotCount() == 0) {
-        // log.debug(.browser, "delayed navigate waiting", .{});
-        // const delay = 0 * std.time.ns_per_ms;
-
-        // // If this isn't the initial check, we can safely re-use the timer
-        // // to check again.
-        // if (initial == false) {
-        // repeat_delay.* = delay;
-        // return;
-        // }
-
-        // // However, if this _is_ the initial check, we called
-        // // session.removePage above, and that reset the loop ctx_id.
-        // // We can't re-use this timer, because it has the previous ctx_id.
-        // // We can create a new timeout though, and that'll get the new ctx_id.
-        // //
-        // // Page has to be not-null here because we called createPage above.
-        // _ = session.page.?.loop.timeout(delay, &self.navigate_node) catch |err| {
-        // log.err(.browser, "delayed navigation loop err", .{ .err = err });
-        // };
-        // return;
-        // }
-
-        // const page = session.currentPage() orelse return;
-        // defer if (!page.delayed_navigation) {
-        // // If, while loading the page, we intend to navigate to another
-        // // page, then we need to keep the transfer_arena around, as this
-        // // sub-navigation is probably using it.
-        // _ = session.browser.transfer_arena.reset(.{ .retain_with_limit = 64 * 1024 });
-        // };
-
-        // return page.navigate(self.url, self.opts) catch |err| {
-        // log.err(.browser, "delayed navigation error", .{ .err = err, .url = self.url });
-        // };
+    fn run(ctx: *anyopaque) ?u32 {
+        const self: *DelayedNavigation = @alignCast(@ptrCast(ctx));
+        const session = self.session;
+
+        // abort any pending requests or active tranfers;
+        session.browser.http_client.abort();
+
+        // Prior to schedule this task, we terminated excution to stop
+        // the running script. If we don't resume it before doing a shutdown
+        // we'll get an error.
+        session.executor.resumeExecution();
+        session.removePage();
+        const page = session.createPage() catch |err| {
+            log.err(.browser, "delayed navigation page error", .{
+                .err = err,
+                .url = self.url,
+            });
+            return null;
+        };
+
+        page.navigate(self.url, self.opts) catch |err| {
+            log.err(.browser, "delayed navigation error", .{ .err = err, .url = self.url });
+        };
+
+        return null;
     }
 };
 
@@ -856,7 +891,7 @@ pub const NavigateReason = enum {
 pub const NavigateOpts = struct {
     cdp_id: ?i64 = null,
     reason: NavigateReason = .address_bar,
-    method: http.Method = .GET,
+    method: HttpClient.Method = .GET,
     body: ?[]const u8 = null,
 };

@@ -116,18 +116,10 @@ pub const Session = struct {
         // phase. It's important that we clean these up, as they're holding onto
         // limited resources (like our fixed-sized http state pool).
         //
-        // First thing we do, is removeJsContext() which will execute the destructor
-        // of any type that registered a destructor (e.g. XMLHttpRequest).
-        // This will shutdown any pending sockets, which begins our cleaning
-        // processed
+        // RemoveJsContext() will execute the destructor of any type that
+        // registered a destructor (e.g. XMLHttpRequest).
         self.executor.removeJsContext();
 
-        // Second thing we do is reset the loop. This increments the loop ctx_id
-        // so that any "stale" timeouts we process will get ignored. We need to
-        // do this BEFORE running the loop because, at this point, things like
-        // window.setTimeout and running microtasks should be ignored
-        self.browser.app.loop.reset();
-
         self.page.?.deinit();
         self.page = null;
 

@@ -30,7 +30,7 @@ const URL = @import("../../url.zig").URL;
 const Mime = @import("../mime.zig").Mime;
 const parser = @import("../netsurf.zig");
 const Page = @import("../page.zig").Page;
-const http = @import("../../http/client.zig");
+const HttpClient = @import("../../http/Client.zig");
 const CookieJar = @import("../storage/storage.zig").CookieJar;
 
 // XHR interfaces
@@ -80,13 +80,13 @@ const XMLHttpRequestBodyInit = union(enum) {
 pub const XMLHttpRequest = struct {
     proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
     arena: Allocator,
-    transfer: ?*http.Transfer = null,
+    transfer: ?*HttpClient.Transfer = null,
     cookie_jar: *CookieJar,
     err: ?anyerror = null,
     last_dispatch: i64 = 0,
     send_flag: bool = false,
 
-    method: http.Method,
+    method: HttpClient.Method,
     state: State,
     url: ?[:0]const u8 = null,
 
@@ -173,12 +173,12 @@ pub const XMLHttpRequest = struct {
         };
     }
 
-    pub fn destructor(self: *XMLHttpRequest) void {
-        if (self.transfer) |transfer| {
-            transfer.abort();
-            self.transfer = null;
-        }
-    }
+    // pub fn destructor(self: *XMLHttpRequest) void {
+    //     if (self.transfer) |transfer| {
+    //         transfer.abort();
+    //         self.transfer = null;
+    //     }
+    // }
 
     pub fn reset(self: *XMLHttpRequest) void {
         self.url = null;
@@ -322,7 +322,7 @@ pub const XMLHttpRequest = struct {
     }
 
     const methods = [_]struct {
-        tag: http.Method,
+        tag: HttpClient.Method,
         name: []const u8,
     }{
         .{ .tag = .DELETE, .name = "DELETE" },
@@ -332,7 +332,7 @@ pub const XMLHttpRequest = struct {
         .{ .tag = .POST, .name = "POST" },
         .{ .tag = .PUT, .name = "PUT" },
     };
-    pub fn validMethod(m: []const u8) DOMError!http.Method {
+    pub fn validMethod(m: []const u8) DOMError!HttpClient.Method {
         for (methods) |method| {
             if (std.ascii.eqlIgnoreCase(method.name, m)) {
                 return method.tag;
@@ -367,13 +367,17 @@ pub const XMLHttpRequest = struct {
 
         self.send_flag = true;
         if (body) |b| {
-            self.request_body = try self.arena.dupe(u8, b);
+            if (self.method != .GET and self.method != .HEAD) {
+                self.request_body = try self.arena.dupe(u8, b);
+            }
         }
 
         try page.http_client.request(.{
             .ctx = self,
             .url = self.url.?,
             .method = self.method,
+            .body = self.request_body,
+            .content_type = "Content-Type: text/plain; charset=UTF-8", // @newhttp TODO
             .start_callback = httpStartCallback,
             .header_callback = httpHeaderCallback,
             .header_done_callback = httpHeaderDoneCallback,
@@ -383,7 +387,7 @@ pub const XMLHttpRequest = struct {
         });
     }
 
-    fn httpStartCallback(transfer: *http.Transfer) !void {
+    fn httpStartCallback(transfer: *HttpClient.Transfer) !void {
         const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
 
         for (self.headers.items) |hdr| {
@@ -403,22 +407,15 @@ pub const XMLHttpRequest = struct {
         // try request.addHeader("Cookie", arr.items, .{});
         // }
         // }
-
-        if (self.request_body) |b| {
-            if (self.method != .GET and self.method != .HEAD) {
-                try transfer.setBody(b);
-                try transfer.addHeader("Content-Type: text/plain; charset=UTF-8");
-            }
-        }
         self.transfer = transfer;
     }
 
-    fn httpHeaderCallback(transfer: *http.Transfer, header: []const u8) !void {
+    fn httpHeaderCallback(transfer: *HttpClient.Transfer, header: []const u8) !void {
         const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
         try self.response_headers.append(self.arena, try self.arena.dupe(u8, header));
     }
 
-    fn httpHeaderDoneCallback(transfer: *http.Transfer) !void {
+    fn httpHeaderDoneCallback(transfer: *HttpClient.Transfer) !void {
         const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
 
         const header = &transfer.response_header.?;
@@ -451,7 +448,7 @@ pub const XMLHttpRequest = struct {
         // try self.cookie_jar.populateFromResponse(self.request.?.request_uri, &header);
     }
 
-    fn httpDataCallback(transfer: *http.Transfer, data: []const u8) !void {
+    fn httpDataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
         const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
         try self.response_bytes.appendSlice(self.arena, data);
 
@@ -469,8 +466,8 @@ pub const XMLHttpRequest = struct {
         self.last_dispatch = now;
     }
 
-    fn httpDoneCallback(transfer: *http.Transfer) !void {
-        const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
+    fn httpDoneCallback(ctx: *anyopaque) !void {
+        const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
 
         log.info(.http, "request complete", .{
             .source = "xhr",
@@ -494,8 +491,8 @@ pub const XMLHttpRequest = struct {
         self.dispatchProgressEvent("loadend", .{ .loaded = loaded, .total = loaded });
     }
 
-    fn httpErrorCallback(transfer: *http.Transfer, err: anyerror) void {
-        const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
+    fn httpErrorCallback(ctx: *anyopaque, err: anyerror) void {
+        const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
         // http client will close it after an error, it isn't safe to keep around
         self.transfer = null;
         self.onErr(err);
@@ -503,7 +500,9 @@ pub const XMLHttpRequest = struct {
 
     pub fn _abort(self: *XMLHttpRequest) void {
         self.onErr(DOMError.Abort);
-        self.destructor();
+        if (self.transfer) |transfer| {
+            transfer.abort();
+        }
     }
 
     fn onErr(self: *XMLHttpRequest, err: anyerror) void {

@@ -104,6 +104,7 @@ pub fn CDPT(comptime TypeProvider: type) type {
         pub fn handleMessage(self: *Self, msg: []const u8) bool {
             // if there's an error, it's already been logged
             self.processMessage(msg) catch return false;
+            self.pageWait();
             return true;
         }
 
@@ -113,6 +114,22 @@ pub fn CDPT(comptime TypeProvider: type) type {
             return self.dispatch(arena.allocator(), self, msg);
         }
 
+        // @newhttp
+        // A bit hacky right now. The main server loop blocks only for CDP
+        // messages. It no longer blocks for page timeouts of page HTTP
+        // transfers. So we need to call this more ourselves.
+        // This is called after every message and [very hackily] from the server
+        // loop.
+        // This is hopefully temporary.
+        pub fn pageWait(self: *Self) void {
+            const session = &(self.browser.session orelse return);
+            var page = session.currentPage() orelse return;
+
+            // exits early if there's nothing to do, so a large value like
+            // 5 seconds should be ok
+            page.wait(5);
+        }
+
         // Called from above, in processMessage which handles client messages
         // but can also be called internally. For example, Target.sendMessageToTarget
         // calls back into dispatch to capture the response.

@@ -155,7 +155,6 @@ fn navigate(cmd: anytype) !void {
         .reason = .address_bar,
         .cdp_id = cmd.input.id,
     });
-    try page.wait(5);
 }
 
 pub fn pageNavigate(arena: Allocator, bc: anytype, event: *const Notification.PageNavigate) !void {

@@ -16,271 +16,307 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-pub const c = @cImport({
-    @cInclude("curl/curl.h");
-});
-
-const ENABLE_DEBUG = false;
-
-const std = @import("std");
-const log = @import("../log.zig");
-const builtin = @import("builtin");
-const errors = @import("errors.zig");
-
-const Allocator = std.mem.Allocator;
-const ArenaAllocator = std.heap.ArenaAllocator;
-
-pub fn init() !void {
-    try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL));
-    if (comptime ENABLE_DEBUG) {
-        std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
-    }
-}
-
-pub fn deinit() void {
-    c.curl_global_cleanup();
-}
-
-pub const Client = struct {
-    active: usize,
-    multi: *c.CURLM,
-    handles: Handles,
-    queue: RequestQueue,
-    allocator: Allocator,
-    transfer_pool: std.heap.MemoryPool(Transfer),
-    queue_node_pool: std.heap.MemoryPool(RequestQueue.Node),
-    //@newhttp
-    http_proxy: ?std.Uri = null,
-
-    const RequestQueue = std.DoublyLinkedList(Request);
-
-    const Opts = struct {
-        timeout_ms: u31 = 0,
-        max_redirects: u8 = 10,
-        connect_timeout_ms: u31 = 5000,
-        max_concurrent_transfers: u8 = 5,
-    };
-
-    pub fn init(allocator: Allocator, opts: Opts) !*Client {
-        var transfer_pool = std.heap.MemoryPool(Transfer).init(allocator);
-        errdefer transfer_pool.deinit();
-
-        var queue_node_pool = std.heap.MemoryPool(RequestQueue.Node).init(allocator);
-        errdefer queue_node_pool.deinit();
-
-        const client = try allocator.create(Client);
-        errdefer allocator.destroy(client);
-
-        var handles = try Handles.init(allocator, client, opts);
-        errdefer handles.deinit(allocator);
-
-        const multi = c.curl_multi_init() orelse return error.FailedToInitializeMulti;
-        errdefer _ = c.curl_multi_cleanup(multi);
-
-        client.* = .{
-            .queue = .{},
-            .active = 0,
-            .multi = multi,
-            .handles = handles,
-            .allocator = allocator,
-            .transfer_pool = transfer_pool,
-            .queue_node_pool = queue_node_pool,
-        };
-        return client;
-    }
-
-    pub fn deinit(self: *Client) void {
-        self.handles.deinit(self.allocator);
-        _ = c.curl_multi_cleanup(self.multi);
-        self.transfer_pool.deinit();
-        self.queue_node_pool.deinit();
-        self.allocator.destroy(self);
-    }
-
-    pub fn tick(self: *Client, timeout_ms: usize) !void {
-        var handles = &self.handles.available;
-        while (true) {
-            if (handles.first == null) {
-                break;
-            }
-            const queue_node = self.queue.popFirst() orelse break;
-
-            defer self.queue_node_pool.destroy(queue_node);
-
-            const handle = handles.popFirst().?.data;
-            try self.makeRequest(handle, queue_node.data);
-        }
-
-        try self.perform(@intCast(timeout_ms));
-    }
-
-    pub fn request(self: *Client, req: Request) !void {
-        if (self.handles.getFreeHandle()) |handle| {
-            return self.makeRequest(handle, req);
-        }
-
-        const node = try self.queue_node_pool.create();
-        node.data = req;
-        self.queue.append(node);
-    }
-
-    fn makeRequest(self: *Client, handle: *Handle, req: Request) !void {
-        const easy = handle.easy;
-
-        const header_list = blk: {
-            errdefer self.handles.release(handle);
-            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_URL, req.url.ptr));
-            switch (req.method) {
-                .GET => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))),
-                .POST => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPPOST, @as(c_long, 1))),
-                .PUT => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "put")),
-                .DELETE => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "delete")),
-                .HEAD => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "head")),
-                .OPTIONS => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "options")),
-            }
-
-            const header_list = c.curl_slist_append(null, "User-Agent: Lightpanda/1.0");
-            errdefer c.curl_slist_free_all(header_list);
-
-            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list));
-
-            break :blk header_list;
-        };
-
-        {
-            errdefer self.handles.release(handle);
-
-            const transfer = try self.transfer_pool.create();
-            transfer.* = .{
-                .id = 0,
-                .req = req,
-                .ctx = req.ctx,
-                .handle = handle,
-                ._request_header_list = header_list,
-            };
-            errdefer self.transfer_pool.destroy(transfer);
-            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PRIVATE, transfer));
-
-            try errorMCheck(c.curl_multi_add_handle(self.multi, easy));
-            if (req.start_callback) |cb| {
-                cb(transfer) catch |err| {
-                    try errorMCheck(c.curl_multi_remove_handle(self.multi, easy));
-                    return err;
-                };
-            }
-        }
-
-        self.active += 1;
-        return self.perform(0);
-    }
-
-    fn perform(self: *Client, timeout_ms: c_int) !void {
-        const multi = self.multi;
-
-        var running: c_int = undefined;
-        try errorMCheck(c.curl_multi_perform(multi, &running));
-
-        if (running > 0 and timeout_ms > 0) {
-            try errorMCheck(c.curl_multi_poll(multi, null, 0, timeout_ms, null));
+const std = @import("std");
+const log = @import("../log.zig");
+const builtin = @import("builtin");
+const Http = @import("Http.zig");
+
+const c = Http.c;
+
+const Allocator = std.mem.Allocator;
+
+const errorCheck = Http.errorCheck;
+const errorMCheck = Http.errorMCheck;
+
+pub const Method = Http.Method;
+
+// This is loosely tied to a browser Page. Loading all the <scripts>, doing
+// XHR requests, and loading imports all happens through here. Sine the app
+// currently supports 1 browser and 1 page at-a-time, we only have 1 Client and
+// re-use it from page to page. This allows us better re-use of the various
+// buffers/caches (including keepalive connections) that libcurl has.
+//
+// The app has other secondary http needs, like telemetry. While we want to
+// share some things (namely the ca blob, and maybe some configuration
+// (TODO: ??? should proxy settings be global ???)), we're able to do call
+// client.abort() to abort the transfers being made by a page, without impacting
+// those other http requests.
+pub const Client = @This();
+
+active: usize,
+multi: *c.CURLM,
+handles: Handles,
+queue: RequestQueue,
+allocator: Allocator,
+transfer_pool: std.heap.MemoryPool(Transfer),
+queue_node_pool: std.heap.MemoryPool(RequestQueue.Node),
+//@newhttp
+http_proxy: ?std.Uri = null,
+
+const RequestQueue = std.DoublyLinkedList(Request);
+
+pub fn init(allocator: Allocator, ca_blob: c.curl_blob, opts: Http.Opts) !*Client {
+    var transfer_pool = std.heap.MemoryPool(Transfer).init(allocator);
+    errdefer transfer_pool.deinit();
+
+    var queue_node_pool = std.heap.MemoryPool(RequestQueue.Node).init(allocator);
+    errdefer queue_node_pool.deinit();
+
+    const client = try allocator.create(Client);
+    errdefer allocator.destroy(client);
+
+    const multi = c.curl_multi_init() orelse return error.FailedToInitializeMulti;
+    errdefer _ = c.curl_multi_cleanup(multi);
+
+    var handles = try Handles.init(allocator, client, ca_blob, opts);
+    errdefer handles.deinit(allocator, multi);
+
+    client.* = .{
+        .queue = .{},
+        .active = 0,
+        .multi = multi,
+        .handles = handles,
+        .allocator = allocator,
+        .transfer_pool = transfer_pool,
+        .queue_node_pool = queue_node_pool,
+    };
+
+    return client;
+}
+
+pub fn deinit(self: *Client) void {
+    self.handles.deinit(self.allocator, self.multi);
+    _ = c.curl_multi_cleanup(self.multi);
+
+    self.transfer_pool.deinit();
+    self.queue_node_pool.deinit();
+    self.allocator.destroy(self);
+}
+
+pub fn abort(self: *Client) void {
+    self.handles.abort(self.multi);
+
+    var n = self.queue.first;
+    while (n) |node| {
+        n = node.next;
+        self.queue_node_pool.destroy(node);
+    }
+    self.queue = .{};
+    self.active = 0;
+
+    // Maybe a bit of overkill
+    // We can remove some (all?) of these once we're confident its right.
+    std.debug.assert(self.handles.in_use.first == null);
+    std.debug.assert(self.handles.available.len == self.handles.handles.len);
+    if (builtin.mode == .Debug) {
+        var running: c_int = undefined;
+        std.debug.assert(c.curl_multi_perform(self.multi, &running) == c.CURLE_OK);
+        std.debug.assert(running == 0);
+    }
+}
+
+pub fn tick(self: *Client, timeout_ms: usize) !void {
+    var handles = &self.handles.available;
+    while (true) {
+        if (handles.first == null) {
+            break;
+        }
+        const queue_node = self.queue.popFirst() orelse break;
+
+        defer self.queue_node_pool.destroy(queue_node);
+
+        const handle = handles.popFirst().?.data;
+        try self.makeRequest(handle, queue_node.data);
+    }
+
+    try self.perform(@intCast(timeout_ms));
+}
+
+pub fn request(self: *Client, req: Request) !void {
+    if (self.handles.getFreeHandle()) |handle| {
+        return self.makeRequest(handle, req);
+    }
+
+    const node = try self.queue_node_pool.create();
+    node.data = req;
+    self.queue.append(node);
+}
+
+fn makeRequest(self: *Client, handle: *Handle, req: Request) !void {
+    const easy = handle.easy;
+
+    const header_list = blk: {
+        errdefer self.handles.release(handle);
+        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_URL, req.url.ptr));
+
+        try Http.setMethod(easy, req.method);
+        if (req.body) |b| {
+            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, b.ptr));
|
||||||
|
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(b.len))));
|
||||||
}
|
}
|
||||||
|
|
||||||
while (true) {
|
var header_list = c.curl_slist_append(null, "User-Agent: Lightpanda/1.0");
|
||||||
var remaining: c_int = undefined;
|
errdefer c.curl_slist_free_all(header_list);
|
||||||
const msg: *c.CURLMsg = c.curl_multi_info_read(multi, &remaining) orelse break;
|
|
||||||
if (msg.msg == c.CURLMSG_DONE) {
|
|
||||||
self.active -= 1;
|
|
||||||
const easy = msg.easy_handle.?;
|
|
||||||
const transfer = try Transfer.fromEasy(easy);
|
|
||||||
defer {
|
|
||||||
self.handles.release(transfer.handle);
|
|
||||||
transfer.deinit();
|
|
||||||
self.transfer_pool.destroy(transfer);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (errorCheck(msg.data.result)) {
|
if (req.content_type) |ct| {
|
||||||
transfer.req.done_callback(transfer) catch |err| transfer.onError(err);
|
header_list = c.curl_slist_append(header_list, ct);
|
||||||
} else |err| {
|
}
|
||||||
transfer.onError(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
try errorMCheck(c.curl_multi_remove_handle(multi, easy));
|
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list));
|
||||||
}
|
|
||||||
|
|
||||||
if (remaining == 0) {
|
break :blk header_list;
|
||||||
break;
|
};
|
||||||
}
|
|
||||||
|
{
|
||||||
|
errdefer self.handles.release(handle);
|
||||||
|
|
||||||
|
const transfer = try self.transfer_pool.create();
|
||||||
|
transfer.* = .{
|
||||||
|
.id = 0,
|
||||||
|
.req = req,
|
||||||
|
.ctx = req.ctx,
|
||||||
|
.handle = handle,
|
||||||
|
._request_header_list = header_list,
|
||||||
|
};
|
||||||
|
errdefer self.transfer_pool.destroy(transfer);
|
||||||
|
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PRIVATE, transfer));
|
||||||
|
|
||||||
|
try errorMCheck(c.curl_multi_add_handle(self.multi, easy));
|
||||||
|
if (req.start_callback) |cb| {
|
||||||
|
cb(transfer) catch |err| {
|
||||||
|
try errorMCheck(c.curl_multi_remove_handle(self.multi, easy));
|
||||||
|
return err;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
|
||||||
|
self.active += 1;
|
||||||
|
return self.perform(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn perform(self: *Client, timeout_ms: c_int) !void {
|
||||||
|
const multi = self.multi;
|
||||||
|
|
||||||
|
var running: c_int = undefined;
|
||||||
|
try errorMCheck(c.curl_multi_perform(multi, &running));
|
||||||
|
|
||||||
|
if (running > 0 and timeout_ms > 0) {
|
||||||
|
try errorMCheck(c.curl_multi_poll(multi, null, 0, timeout_ms, null));
|
||||||
|
}
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
var remaining: c_int = undefined;
|
||||||
|
const msg: *c.CURLMsg = c.curl_multi_info_read(multi, &remaining) orelse break;
|
||||||
|
if (msg.msg == c.CURLMSG_DONE) {
|
||||||
|
const easy = msg.easy_handle.?;
|
||||||
|
|
||||||
|
const transfer = try Transfer.fromEasy(easy);
|
||||||
|
|
||||||
|
const ctx = transfer.ctx;
|
||||||
|
const done_callback = transfer.req.done_callback;
|
||||||
|
const error_callback = transfer.req.error_callback;
|
||||||
|
// release it ASAP so that it's avaiable (since some done_callbacks
|
||||||
|
// will load more resources).
|
||||||
|
self.endTransfer(transfer);
|
||||||
|
|
||||||
|
if (errorCheck(msg.data.result)) {
|
||||||
|
done_callback(ctx) catch |err| error_callback(ctx, err);
|
||||||
|
} else |err| {
|
||||||
|
error_callback(ctx, err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (remaining == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn endTransfer(self: *Client, transfer: *Transfer) void {
|
||||||
|
const handle = transfer.handle;
|
||||||
|
|
||||||
|
transfer.deinit();
|
||||||
|
self.transfer_pool.destroy(transfer);
|
||||||
|
|
||||||
|
errorMCheck(c.curl_multi_remove_handle(self.multi, handle.easy)) catch |err| {
|
||||||
|
log.fatal(.http, "Failed to abort", .{ .err = err });
|
||||||
|
};
|
||||||
|
|
||||||
|
self.handles.release(handle);
|
||||||
|
self.active -= 1;
|
||||||
|
}
|
||||||
|
|
||||||
const Handles = struct {
|
const Handles = struct {
|
||||||
handles: []Handle,
|
handles: []Handle,
|
||||||
available: FreeList,
|
in_use: HandleList,
|
||||||
cert_arena: ArenaAllocator,
|
available: HandleList,
|
||||||
|
|
||||||
const FreeList = std.DoublyLinkedList(*Handle);
|
const HandleList = std.DoublyLinkedList(*Handle);
|
||||||
|
|
||||||
fn init(allocator: Allocator, client: *Client, opts: Client.Opts) !Handles {
|
fn init(allocator: Allocator, client: *Client, ca_blob: c.curl_blob, opts: Http.Opts) !Handles {
|
||||||
const count = opts.max_concurrent_transfers;
|
const count = opts.max_concurrent_transfers;
|
||||||
std.debug.assert(count > 0);
|
std.debug.assert(count > 0);
|
||||||
|
|
||||||
const handles = try allocator.alloc(Handle, count);
|
const handles = try allocator.alloc(Handle, count);
|
||||||
errdefer allocator.free(handles);
|
errdefer allocator.free(handles);
|
||||||
|
|
||||||
var initialized_count: usize = 0;
|
var available: HandleList = .{};
|
||||||
errdefer cleanup(allocator, handles[0..initialized_count]);
|
|
||||||
|
|
||||||
var cert_arena = ArenaAllocator.init(allocator);
|
|
||||||
errdefer cert_arena.deinit();
|
|
||||||
const ca_blob = try @import("ca_certs.zig").load(allocator, cert_arena.allocator());
|
|
||||||
|
|
||||||
var available: FreeList = .{};
|
|
||||||
for (0..count) |i| {
|
for (0..count) |i| {
|
||||||
const node = try allocator.create(FreeList.Node);
|
const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
|
||||||
errdefer allocator.destroy(node);
|
errdefer _ = c.curl_easy_cleanup(easy);
|
||||||
|
|
||||||
handles[i] = .{
|
handles[i] = .{
|
||||||
.node = node,
|
.easy = easy,
|
||||||
.client = client,
|
.client = client,
|
||||||
.easy = undefined,
|
.node = undefined,
|
||||||
};
|
};
|
||||||
try handles[i].init(ca_blob, opts);
|
try handles[i].configure(ca_blob, opts);
|
||||||
initialized_count += 1;
|
|
||||||
|
|
||||||
node.data = &handles[i];
|
handles[i].node.data = &handles[i];
|
||||||
available.append(node);
|
available.append(&handles[i].node);
|
||||||
}
|
}
|
||||||
|
|
||||||
return .{
|
return .{
|
||||||
|
.in_use = .{},
|
||||||
.handles = handles,
|
.handles = handles,
|
||||||
.available = available,
|
.available = available,
|
||||||
.cert_arena = cert_arena,
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn deinit(self: *Handles, allocator: Allocator) void {
|
fn deinit(self: *Handles, allocator: Allocator, multi: *c.CURLM) void {
|
||||||
cleanup(allocator, self.handles);
|
self.abort(multi);
|
||||||
|
for (self.handles) |*h| {
|
||||||
|
_ = c.curl_easy_cleanup(h.easy);
|
||||||
|
}
|
||||||
allocator.free(self.handles);
|
allocator.free(self.handles);
|
||||||
self.cert_arena.deinit();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Done line this so that cleanup can be called from init with a partial state
|
fn abort(self: *Handles, multi: *c.CURLM) void {
|
||||||
fn cleanup(allocator: Allocator, handles: []Handle) void {
|
while (self.in_use.first) |node| {
|
||||||
for (handles) |*h| {
|
const handle = node.data;
|
||||||
_ = c.curl_easy_cleanup(h.easy);
|
errorMCheck(c.curl_multi_remove_handle(multi, handle.easy)) catch |err| {
|
||||||
allocator.destroy(h.node);
|
log.err(.http, "remove handle", .{ .err = err });
|
||||||
|
};
|
||||||
|
self.release(handle);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn getFreeHandle(self: *Handles) ?*Handle {
|
fn getFreeHandle(self: *Handles) ?*Handle {
|
||||||
if (self.available.popFirst()) |handle| {
|
if (self.available.popFirst()) |node| {
|
||||||
return handle.data;
|
node.prev = null;
|
||||||
|
node.next = null;
|
||||||
|
self.in_use.append(node);
|
||||||
|
return node.data;
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn release(self: *Handles, handle: *Handle) void {
|
fn release(self: *Handles, handle: *Handle) void {
|
||||||
self.available.append(handle.node);
|
const node = &handle.node;
|
||||||
|
self.in_use.remove(node);
|
||||||
|
node.prev = null;
|
||||||
|
node.next = null;
|
||||||
|
self.available.append(node);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
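Read together with the new perform/endTransfer above, the intended non-blocking flow is: fill in a Request whose completion callbacks only receive the opaque ctx pointer, hand it to Client.request, and keep pumping Client.tick. The sketch below is only an illustration of that flow: the Fetcher type, its field names and the URL are invented for the example, and any Request fields not set here are assumed to keep their defaults.

const std = @import("std");
const Client = @import("http/Client.zig");

// Hypothetical caller-side state; only the opaque `ctx` travels through the
// done/error callbacks, so the callee casts it back to recover its state.
const Fetcher = struct {
    done: bool = false,

    fn headerDone(transfer: *Client.Transfer) anyerror!void {
        _ = transfer; // status and headers are available on the transfer here
    }

    fn onData(transfer: *Client.Transfer, data: []const u8) anyerror!void {
        _ = transfer;
        std.debug.print("received {d} bytes\n", .{data.len});
    }

    fn onDone(ctx: *anyopaque) anyerror!void {
        const self: *Fetcher = @ptrCast(@alignCast(ctx));
        self.done = true;
    }

    fn onError(ctx: *anyopaque, err: anyerror) void {
        const self: *Fetcher = @ptrCast(@alignCast(ctx));
        self.done = true;
        std.debug.print("request failed: {any}\n", .{err});
    }
};

fn fetchExample(client: *Client, fetcher: *Fetcher) !void {
    try client.request(.{
        .method = .GET,
        .url = "https://example.com/",
        .ctx = fetcher,
        .header_done_callback = Fetcher.headerDone,
        .data_callback = Fetcher.onData,
        .done_callback = Fetcher.onDone,
        .error_callback = Fetcher.onError,
    });

    // Drive the curl multi handle; tick blocks for at most timeout_ms.
    while (!fetcher.done) {
        try client.tick(10);
    }
}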
@@ -289,17 +325,13 @@ const Handles = struct

const Handle = struct {
    easy: *c.CURL,
    client: *Client,
-   node: *Handles.FreeList.Node,
+   node: Handles.HandleList.Node,
    error_buffer: [c.CURL_ERROR_SIZE:0]u8 = undefined,

    // Is called by Handles when already partially initialized. Done like this
    // so that we have a stable pointer to error_buffer.
-   fn init(self: *Handle, ca_blob: c.curl_blob, opts: Client.Opts) !void {
-       const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
-       errdefer _ = c.curl_easy_cleanup(easy);
-
-       self.easy = easy;
-
+   fn configure(self: *Handle, ca_blob: c.curl_blob, opts: Http.Opts) !void {
+       const easy = self.easy;
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_ERRORBUFFER, &self.error_buffer));

        // timeouts

@@ -323,7 +355,7 @@ const Handle = struct {
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));

        // debug
-       if (comptime ENABLE_DEBUG) {
+       if (comptime Http.ENABLE_DEBUG) {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
        }
    }

@@ -332,6 +364,9 @@ const Handle = struct {

pub const Request = struct {
    method: Method,
    url: [:0]const u8,
+   body: ?[]const u8 = null,
+   content_type: ?[:0]const u8 = null,

    // arbitrary data that can be associated with this request
    ctx: *anyopaque = undefined,

@@ -339,8 +374,8 @@ pub const Request = struct {
    header_callback: ?*const fn (req: *Transfer, header: []const u8) anyerror!void = null,
    header_done_callback: *const fn (req: *Transfer) anyerror!void,
    data_callback: *const fn (req: *Transfer, data: []const u8) anyerror!void,
-   done_callback: *const fn (req: *Transfer) anyerror!void,
-   error_callback: *const fn (req: *Transfer, err: anyerror) void,
+   done_callback: *const fn (ctx: *anyopaque) anyerror!void,
+   error_callback: *const fn (ctx: *anyopaque, err: anyerror) void,
};

pub const Transfer = struct {

@@ -368,14 +403,10 @@ pub const Transfer = struct {
        return writer.print("[{d}] {s} {s}", .{ self.id, @tagName(req.method), req.url });
    }

-   fn onError(self: *Transfer, err: anyerror) void {
-       self.req.error_callback(self, err);
-   }
-
    pub fn setBody(self: *Transfer, body: []const u8) !void {
        const easy = self.handle.easy;
-       try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
+       try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
    }

    pub fn addHeader(self: *Transfer, value: [:0]const u8) !void {

@@ -383,12 +414,7 @@ pub const Transfer = struct {
    }

    pub fn abort(self: *Transfer) void {
-       var client = self.handle.client;
-       errorMCheck(c.curl_multi_remove_handle(client.multi, self.handle.easy)) catch |err| {
-           log.err(.http, "Failed to abort", .{ .err = err });
-       };
-       client.active -= 1;
-       self.deinit();
+       self.handle.client.endTransfer(self);
    }

    fn headerCallback(buffer: [*]const u8, header_count: usize, buf_len: usize, data: *anyopaque) callconv(.c) usize {

@@ -410,7 +436,7 @@ pub const Transfer = struct {
        if (transfer._redirecting) {
            return buf_len;
        }
-       transfer.onError(error.InvalidResponseLine);
+       log.debug(.http, "invalid response line", .{ .line = header });
        return 0;
    }
    const version_start: usize = if (header[5] == '2') 7 else 9;

@@ -421,7 +447,7 @@ pub const Transfer = struct {
    std.debug.assert(version_end < 13);

    const status = std.fmt.parseInt(u16, header[version_start..version_end], 10) catch {
-       transfer.onError(error.InvalidResponseStatus);
+       log.debug(.http, "invalid status code", .{ .line = header });
        return 0;
    };

@@ -433,7 +459,7 @@ pub const Transfer = struct {

    var url: [*c]u8 = undefined;
    errorCheck(c.curl_easy_getinfo(handle.easy, c.CURLINFO_EFFECTIVE_URL, &url)) catch |err| {
-       transfer.onError(err);
+       log.err(.http, "failed to get URL", .{ .err = err });
        return 0;
    };

@@ -511,41 +537,3 @@ pub const Header = struct {
        return self._content_type[0..self._content_type_len];
    }
};

-fn errorCheck(code: c.CURLcode) errors.Error!void {
-    if (code == c.CURLE_OK) {
-        return;
-    }
-    return errors.fromCode(code);
-}
-
-fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
-    if (code == c.CURLM_OK) {
-        return;
-    }
-    if (code == c.CURLM_CALL_MULTI_PERFORM) {
-        // should we can client.perform() here?
-        // or just wait until the next time we naturally call it?
-        return;
-    }
-    return errors.fromMCode(code);
-}
-
-pub const Method = enum {
-    GET,
-    PUT,
-    POST,
-    DELETE,
-    HEAD,
-    OPTIONS,
-};
-
-pub const ProxyType = enum {
-    forward,
-    connect,
-};
-
-pub const ProxyAuth = union(enum) {
-    basic: struct { user_pass: []const u8 },
-    bearer: struct { token: []const u8 },
-};
269  src/http/Http.zig  Normal file
@@ -0,0 +1,269 @@

// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

const std = @import("std");

pub const c = @cImport({
    @cInclude("curl/curl.h");
});
const errors = @import("errors.zig");
const Client = @import("Client.zig");

const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;

pub const ENABLE_DEBUG = false;

// Client.zig does the bulk of the work and is loosely tied to a browser Page.
// But we still need something above Client.zig for the "utility" http stuff
// we need to do, like telemetry. The most important thing we want from this
// is to be able to share the ca_blob, which can be quite large - loading it
// once for all http connections is a win.
const Http = @This();

opts: Opts,
client: *Client,
ca_blob: ?c.curl_blob,
cert_arena: ArenaAllocator,

pub fn init(allocator: Allocator, opts: Opts) !Http {
    try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL));
    errdefer c.curl_global_cleanup();

    if (comptime ENABLE_DEBUG) {
        std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
    }

    var cert_arena = ArenaAllocator.init(allocator);
    errdefer cert_arena.deinit();
    const ca_blob = try loadCerts(allocator, cert_arena.allocator());

    var client = try Client.init(allocator, ca_blob, opts);
    errdefer client.deinit();

    return .{
        .opts = opts,
        .client = client,
        .ca_blob = ca_blob,
        .cert_arena = cert_arena,
    };
}

pub fn deinit(self: *Http) void {
    self.client.deinit();
    c.curl_global_cleanup();
    self.cert_arena.deinit();
}

pub fn newConnection(self: *Http) !Connection {
    return Connection.init(self.ca_blob, self.opts);
}

pub const Connection = struct {
    easy: *c.CURL,

    // Is called by Handles when already partially initialized. Done like this
    // so that we have a stable pointer to error_buffer.
    pub fn init(ca_blob_: ?c.curl_blob, opts: Opts) !Connection {
        const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
        errdefer _ = c.curl_easy_cleanup(easy);

        // timeouts
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_TIMEOUT_MS, @as(c_long, @intCast(opts.timeout_ms))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CONNECTTIMEOUT_MS, @as(c_long, @intCast(opts.connect_timeout_ms))));

        // redirect behavior
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_MAXREDIRS, @as(c_long, @intCast(opts.max_redirects))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_FOLLOWLOCATION, @as(c_long, 2)));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_REDIR_PROTOCOLS_STR, "HTTP,HTTPS")); // remove FTP and FTPS from the default

        // tls
        // try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 0)));
        // try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 0)));
        if (ca_blob_) |ca_blob| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));
        }

        // debug
        if (comptime Http.ENABLE_DEBUG) {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
        }

        return .{
            .easy = easy,
        };
    }

    pub fn deinit(self: *const Connection) void {
        c.curl_easy_cleanup(self.easy);
    }

    pub fn setURL(self: *const Connection, url: [:0]const u8) !void {
        try errorCheck(c.curl_easy_setopt(self.easy, c.CURLOPT_URL, url.ptr));
    }

    pub fn setMethod(self: *const Connection, method: Method) !void {
        try Http.setMethod(self.easy, method);
    }

    pub fn setBody(self: *const Connection, body: []const u8) !void {
        const easy = self.easy;
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
    }

    pub fn request(self: *const Connection) !u16 {
        try errorCheck(c.curl_easy_perform(self.easy));
        var http_code: c_long = undefined;
        try errorCheck(c.curl_easy_getinfo(self.easy, c.CURLINFO_RESPONSE_CODE, &http_code));
        if (http_code < 0 or http_code > std.math.maxInt(u16)) {
            return 0;
        }
        return @intCast(http_code);
    }
};

// used by Connection and Handle
pub fn setMethod(easy: *c.CURL, method: Method) !void {
    switch (method) {
        .GET => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))),
        .POST => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPPOST, @as(c_long, 1))),
        .PUT => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "put")),
        .DELETE => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "delete")),
        .HEAD => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "head")),
        .OPTIONS => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "options")),
    }
}

pub fn errorCheck(code: c.CURLcode) errors.Error!void {
    if (code == c.CURLE_OK) {
        return;
    }
    return errors.fromCode(code);
}

pub fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
    if (code == c.CURLM_OK) {
        return;
    }
    if (code == c.CURLM_CALL_MULTI_PERFORM) {
        // should we can client.perform() here?
        // or just wait until the next time we naturally call it?
        return;
    }
    return errors.fromMCode(code);
}

pub const Opts = struct {
    timeout_ms: u31 = 0,
    max_redirects: u8 = 10,
    connect_timeout_ms: u31 = 5000,
    max_concurrent_transfers: u8 = 5,
};

pub const Method = enum {
    GET,
    PUT,
    POST,
    DELETE,
    HEAD,
    OPTIONS,
};

pub const ProxyType = enum {
    forward,
    connect,
};

pub const ProxyAuth = union(enum) {
    basic: struct { user_pass: []const u8 },
    bearer: struct { token: []const u8 },
};

// TODO: on BSD / Linux, we could just read the PEM file directly.
// This whole rescan + decode is really just needed for MacOS. On Linux
// bundle.rescan does find the .pem file(s) which could be in a few different
// places, so it's still useful, just not efficient.
fn loadCerts(allocator: Allocator, arena: Allocator) !c.curl_blob {
    var bundle: std.crypto.Certificate.Bundle = .{};
    try bundle.rescan(allocator);
    defer bundle.deinit(allocator);

    var it = bundle.map.valueIterator();
    const bytes = bundle.bytes.items;

    const encoder = std.base64.standard.Encoder;
    var arr: std.ArrayListUnmanaged(u8) = .empty;

    const encoded_size = encoder.calcSize(bytes.len);
    const buffer_size = encoded_size +
        (bundle.map.count() * 75) + // start / end per certificate + extra, just in case
        (encoded_size / 64) // newline per 64 characters
    ;
    try arr.ensureTotalCapacity(arena, buffer_size);
    var writer = arr.writer(arena);

    while (it.next()) |index| {
        const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);

        try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
        var line_writer = LineWriter{ .inner = writer };
        try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
        try writer.writeAll("\n-----END CERTIFICATE-----\n");
    }

    // Final encoding should not be larger than our initial size estimate
    std.debug.assert(buffer_size > arr.items.len);

    return .{
        .len = arr.items.len,
        .data = arr.items.ptr,
        .flags = 0,
    };
}

// Wraps lines @ 64 columns. A PEM is basically a base64 encoded DER (which is
// what Zig has), with lines wrapped at 64 characters and with a basic header
// and footer
const LineWriter = struct {
    col: usize = 0,
    inner: std.ArrayListUnmanaged(u8).Writer,

    pub fn writeAll(self: *LineWriter, data: []const u8) !void {
        var writer = self.inner;

        var col = self.col;
        const len = 64 - col;

        var remain = data;
        if (remain.len > len) {
            col = 0;
            try writer.writeAll(data[0..len]);
            try writer.writeByte('\n');
            remain = data[len..];
        }

        while (remain.len > 64) {
            try writer.writeAll(remain[0..64]);
            try writer.writeByte('\n');
            remain = data[len..];
        }
        try writer.writeAll(remain);
        self.col = col + remain.len;
    }
};
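For the simple blocking path, driving the new wrapper looks roughly like the sketch below, mirroring how the telemetry provider uses it further down. The option values and the target URL are placeholders for this example, not values taken from this commit.

const std = @import("std");
const Http = @import("http/Http.zig");

fn postExample(allocator: std.mem.Allocator) !void {
    // One Http per process: loads the CA bundle once and owns curl global state.
    var http = try Http.init(allocator, .{ .max_concurrent_transfers = 3 });
    defer http.deinit();

    // A Connection is a single pre-configured easy handle for blocking requests.
    var conn = try http.newConnection();
    defer conn.deinit();

    try conn.setURL("https://example.com/ingest");
    try conn.setMethod(.POST);
    try conn.setBody("{\"hello\":\"world\"}");

    const status = try conn.request();
    if (status != 200) {
        std.debug.print("unexpected status: {d}\n", .{status});
    }
}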
@@ -1,93 +0,0 @@

// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

const std = @import("std");
const c = @import("client.zig").c;

const Allocator = std.mem.Allocator;

// TODO: on BSD / Linux, we could just read the PEM file directly.
// This whole rescan + decode is really just needed for MacOS. On Linux
// bundle.rescan does find the .pem file(s) which could be in a few different
// places, so it's still useful, just not efficient.
pub fn load(allocator: Allocator, arena: Allocator) !c.curl_blob {
    var bundle: std.crypto.Certificate.Bundle = .{};
    try bundle.rescan(allocator);
    defer bundle.deinit(allocator);

    var it = bundle.map.valueIterator();
    const bytes = bundle.bytes.items;

    const encoder = std.base64.standard.Encoder;
    var arr: std.ArrayListUnmanaged(u8) = .empty;

    const encoded_size = encoder.calcSize(bytes.len);
    const buffer_size = encoded_size +
        (bundle.map.count() * 75) + // start / end per certificate + extra, just in case
        (encoded_size / 64) // newline per 64 characters
    ;
    try arr.ensureTotalCapacity(arena, buffer_size);
    var writer = arr.writer(arena);

    while (it.next()) |index| {
        const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);

        try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
        var line_writer = LineWriter{ .inner = writer };
        try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
        try writer.writeAll("\n-----END CERTIFICATE-----\n");
    }

    // Final encoding should not be larger than our initial size estimate
    std.debug.assert(buffer_size > arr.items.len);

    return .{
        .len = arr.items.len,
        .data = arr.items.ptr,
        .flags = 0,
    };
}

// Wraps lines @ 64 columns
const LineWriter = struct {
    col: usize = 0,
    inner: std.ArrayListUnmanaged(u8).Writer,

    pub fn writeAll(self: *LineWriter, data: []const u8) !void {
        var writer = self.inner;

        var col = self.col;
        const len = 64 - col;

        var remain = data;
        if (remain.len > len) {
            col = 0;
            try writer.writeAll(data[0..len]);
            try writer.writeByte('\n');
            remain = data[len..];
        }

        while (remain.len > 64) {
            try writer.writeAll(remain[0..64]);
            try writer.writeByte('\n');
            remain = data[len..];
        }
        try writer.writeAll(remain);
        self.col = col + remain.len;
    }
};
@@ -17,7 +17,7 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.

const std = @import("std");
-const c = @import("client.zig").c;
+const c = @import("Http.zig").c;

pub const Error = error{
    UnsupportedProtocol,

14  src/main.zig
@@ -22,8 +22,8 @@ const Allocator = std.mem.Allocator;

const log = @import("log.zig");
const server = @import("server.zig");
-const http = @import("http/client.zig");
const App = @import("app.zig").App;
+const Http = @import("http/Http.zig");
const Platform = @import("runtime/js.zig").Platform;
const Browser = @import("browser/browser.zig").Browser;

@@ -130,7 +130,7 @@ fn run(alloc: Allocator) !void {
    },
};

-try page.wait(5); // 5 seconds
+page.wait(5); // 5 seconds

// dump
if (opts.dump) {

@@ -163,14 +163,14 @@ const Command = struct {
        };
    }

-   fn proxyType(self: *const Command) ?http.ProxyType {
+   fn proxyType(self: *const Command) ?Http.ProxyType {
        return switch (self.mode) {
            inline .serve, .fetch => |opts| opts.common.proxy_type,
            else => unreachable,
        };
    }

-   fn proxyAuth(self: *const Command) ?http.ProxyAuth {
+   fn proxyAuth(self: *const Command) ?Http.ProxyAuth {
        return switch (self.mode) {
            inline .serve, .fetch => |opts| opts.common.proxy_auth,
            else => unreachable,

@@ -222,8 +222,8 @@ const Command = struct {

    const Common = struct {
        http_proxy: ?std.Uri = null,
-       proxy_type: ?http.ProxyType = null,
-       proxy_auth: ?http.ProxyAuth = null,
+       proxy_type: ?Http.ProxyType = null,
+       proxy_auth: ?Http.ProxyAuth = null,
        tls_verify_host: bool = true,
        log_level: ?log.Level = null,
        log_format: ?log.Format = null,

@@ -534,7 +534,7 @@ fn parseCommonArg(
        log.fatal(.app, "missing argument value", .{ .arg = "--proxy_type" });
        return error.InvalidArgument;
    };
-   common.proxy_type = std.meta.stringToEnum(http.ProxyType, str) orelse {
+   common.proxy_type = std.meta.stringToEnum(Http.ProxyType, str) orelse {
        log.fatal(.app, "invalid option choice", .{ .arg = "--proxy_type", .value = str });
        return error.InvalidArgument;
    };
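Because --proxy_type is parsed with std.meta.stringToEnum, the accepted values are exactly the Http.ProxyType tag names. A small test sketch of that behaviour follows; the import path is an assumption (it presumes the test sits next to main.zig).

const std = @import("std");
const Http = @import("http/Http.zig");

test "--proxy_type accepts the ProxyType tag names" {
    try std.testing.expectEqual(Http.ProxyType.forward, std.meta.stringToEnum(Http.ProxyType, "forward").?);
    try std.testing.expectEqual(Http.ProxyType.connect, std.meta.stringToEnum(Http.ProxyType, "connect").?);
    // anything else is rejected and reported as an invalid option choice
    try std.testing.expectEqual(@as(?Http.ProxyType, null), std.meta.stringToEnum(Http.ProxyType, "socks5"));
}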
@@ -48,8 +48,9 @@ const MAX_MESSAGE_SIZE = 512 * 1024 + 14;

const Server = struct {
    app: *App,
-   allocator: Allocator,
    loop: *Loop,
+   allocator: Allocator,
+   client: ?*Client = null,

    // internal fields
    listener: posix.socket_t,

@@ -96,6 +97,7 @@ const Server = struct {
    const client = try self.allocator.create(Client);
    client.* = Client.init(socket, self);
    client.start();
+   self.client = client;

    if (log.enabled(.app, .info)) {
        var address: std.net.Address = undefined;

@@ -107,6 +109,7 @@ const Server = struct {

    fn releaseClient(self: *Server, client: *Client) void {
        self.allocator.destroy(client);
+       self.client = null;
    }
};

@@ -163,9 +166,7 @@ pub const Client = struct {

    const SendQueue = std.DoublyLinkedList(Outgoing);

-   const Self = @This();
-
-   fn init(socket: posix.socket_t, server: *Server) Self {
+   fn init(socket: posix.socket_t, server: *Server) Client {
        return .{
            .cdp = null,
            .mode = .http,

@@ -185,7 +186,7 @@ pub const Client = struct {
        };
    }

-   fn maybeDeinit(self: *Self) void {
+   fn maybeDeinit(self: *Client) void {
        if (self.read_pending or self.write_pending) {
            // We cannot do anything as long as we still have these pending
            // They should not be pending for long as we're only here after

@@ -222,7 +223,7 @@ pub const Client = struct {
        }
    }

-   fn close(self: *Self) void {
+   fn close(self: *Client) void {
        log.info(.app, "client disconnected", .{});
        self.connected = false;
        // recv only, because we might have pending writes we'd like to get

@@ -231,14 +232,14 @@ pub const Client = struct {
        self.maybeDeinit();
    }

-   fn start(self: *Self) void {
+   fn start(self: *Client) void {
        self.queueRead();
        self.queueTimeout();
    }

-   fn queueRead(self: *Self) void {
+   fn queueRead(self: *Client) void {
        self.server.loop.io.recv(
-           *Self,
+           *Client,
            self,
            callbackRead,
            &self.read_completion,

@@ -248,7 +249,7 @@ pub const Client = struct {
        self.read_pending = true;
    }

-   fn callbackRead(self: *Self, _: *Completion, result: RecvError!usize) void {
+   fn callbackRead(self: *Client, _: *Completion, result: RecvError!usize) void {
        self.read_pending = false;
        if (self.connected == false) {
            self.maybeDeinit();

@@ -277,11 +278,11 @@ pub const Client = struct {
        }
    }

-   fn readBuf(self: *Self) []u8 {
+   fn readBuf(self: *Client) []u8 {
        return self.reader.readBuf();
    }

-   fn processData(self: *Self, len: usize) !bool {
+   fn processData(self: *Client, len: usize) !bool {
        self.last_active = now();
        self.reader.len += len;

@@ -294,7 +295,7 @@ pub const Client = struct {
        }
    }

-   fn processHTTPRequest(self: *Self) !void {
+   fn processHTTPRequest(self: *Client) !void {
        std.debug.assert(self.reader.pos == 0);
        const request = self.reader.buf[0..self.reader.len];

@@ -330,7 +331,7 @@ pub const Client = struct {
        self.reader.len = 0;
    }

-   fn handleHTTPRequest(self: *Self, request: []u8) !void {
+   fn handleHTTPRequest(self: *Client, request: []u8) !void {
        if (request.len < 18) {
            // 18 is [generously] the smallest acceptable HTTP request
            return error.InvalidRequest;

@@ -365,7 +366,7 @@ pub const Client = struct {
        return error.NotFound;
    }

-   fn upgradeConnection(self: *Self, request: []u8) !void {
+   fn upgradeConnection(self: *Client, request: []u8) !void {
        // our caller already confirmed that we have a trailing \r\n\r\n
        const request_line_end = std.mem.indexOfScalar(u8, request, '\r') orelse unreachable;
        const request_line = request[0..request_line_end];

@@ -462,7 +463,7 @@ pub const Client = struct {
        return self.send(arena, response);
    }

-   fn writeHTTPErrorResponse(self: *Self, comptime status: u16, comptime body: []const u8) void {
+   fn writeHTTPErrorResponse(self: *Client, comptime status: u16, comptime body: []const u8) void {
        const response = std.fmt.comptimePrint(
            "HTTP/1.1 {d} \r\nConnection: Close\r\nContent-Length: {d}\r\n\r\n{s}",
            .{ status, body.len, body },

@@ -473,7 +474,7 @@ pub const Client = struct {
        self.send(null, response) catch {};
    }

-   fn processWebsocketMessage(self: *Self) !bool {
+   fn processWebsocketMessage(self: *Client) !bool {
        errdefer self.close();

        var reader = &self.reader;

@@ -517,7 +518,7 @@ pub const Client = struct {
        return true;
    }

-   fn sendPong(self: *Self, data: []const u8) !void {
+   fn sendPong(self: *Client, data: []const u8) !void {
        if (data.len == 0) {
            return self.send(null, &EMPTY_PONG);
        }

@@ -539,7 +540,7 @@ pub const Client = struct {
    // writev, so we need to get creative. We'll JSON serialize to a
    // buffer, where the first 10 bytes are reserved. We can then backfill
    // the header and send the slice.
-   pub fn sendJSON(self: *Self, message: anytype, opts: std.json.StringifyOptions) !void {
+   pub fn sendJSON(self: *Client, message: anytype, opts: std.json.StringifyOptions) !void {
        var arena = ArenaAllocator.init(self.server.allocator);
        errdefer arena.deinit();

@@ -557,7 +558,7 @@ pub const Client = struct {
    }

    pub fn sendJSONRaw(
-       self: *Self,
+       self: *Client,
        arena: ArenaAllocator,
        buf: std.ArrayListUnmanaged(u8),
    ) !void {

@@ -567,9 +568,9 @@ pub const Client = struct {
        return self.send(arena, framed);
    }

-   fn queueTimeout(self: *Self) void {
+   fn queueTimeout(self: *Client) void {
        self.server.loop.io.timeout(
-           *Self,
+           *Client,
            self,
            callbackTimeout,
            &self.timeout_completion,

@@ -578,7 +579,7 @@ pub const Client = struct {
        self.timeout_pending = true;
    }

-   fn callbackTimeout(self: *Self, _: *Completion, result: TimeoutError!void) void {
+   fn callbackTimeout(self: *Client, _: *Completion, result: TimeoutError!void) void {
        self.timeout_pending = false;
        if (self.connected == false) {
            if (self.read_pending == false and self.write_pending == false) {

@@ -614,7 +615,7 @@ pub const Client = struct {
        self.queueTimeout();
    }

-   fn send(self: *Self, arena: ?ArenaAllocator, data: []const u8) !void {
+   fn send(self: *Client, arena: ?ArenaAllocator, data: []const u8) !void {
        const node = try self.send_queue_node_pool.create();
        errdefer self.send_queue_node_pool.destroy(node);

@@ -632,7 +633,7 @@ pub const Client = struct {
        self.queueSend();
    }

-   fn queueSend(self: *Self) void {
+   fn queueSend(self: *Client) void {
        if (self.connected == false) {
            return;
        }

@@ -643,7 +644,7 @@ pub const Client = struct {
    };

    self.server.loop.io.send(
-       *Self,
+       *Client,
        self,
        sendCallback,
        &self.write_completion,

@@ -653,7 +654,7 @@ pub const Client = struct {
        self.write_pending = true;
    }

-   fn sendCallback(self: *Self, _: *Completion, result: SendError!usize) void {
+   fn sendCallback(self: *Client, _: *Completion, result: SendError!usize) void {
        self.write_pending = false;
        if (self.connected == false) {
            self.maybeDeinit();

@@ -1054,12 +1055,20 @@ pub fn run(
    // - JS callbacks events from scripts
    // var http_client = app.http_client;
    while (true) {
-       // // @newhttp
-       // // This is a temporary hack for the newhttp work. The issue is that we
-       // // now have 2 event loops.
-       // if (http_client.active > 0) {
-       //     _ = try http_client.tick(10);
-       // }
+       // @newhttp. This is a hack. We used to just have 1 loop, so we could
+       // sleep it it "forever" and any activity (message to this server,
+       // JS callback, http data) would wake it up.
+       // Now we have 2 loops. If we block on one, the other won't get woken
+       // up. We don't block "forever" but even 10ms adds a bunch of latency
+       // since this is called in a loop.
+       // Hopefully this is temporary and we can remove the io loop and then
+       // only have 1 loop. But, until then, we need to check both loops and
+       // pay some blocking penalty.
+       if (server.client) |client| {
+           if (client.cdp) |*cdp| {
+               cdp.pageWait();
+           }
+       }
+
        try loop.io.run_for_ns(10 * std.time.ns_per_ms);
    }
@@ -7,26 +7,31 @@ const Allocator = std.mem.Allocator;

const log = @import("../log.zig");
const App = @import("../app.zig").App;
-const http = @import("../http/client.zig");
+const Http = @import("../http/Http.zig");
const telemetry = @import("telemetry.zig");

const URL = "https://telemetry.lightpanda.io";
const MAX_BATCH_SIZE = 20;

pub const LightPanda = struct {
-   uri: std.Uri,
    pending: List,
    running: bool,
    thread: ?std.Thread,
    allocator: Allocator,
    mutex: std.Thread.Mutex,
    cond: Thread.Condition,
-   client: *http.Client,
+   connection: Http.Connection,
    node_pool: std.heap.MemoryPool(List.Node),

    const List = std.DoublyLinkedList(LightPandaEvent);

-   pub fn init(app: *App) LightPanda {
+   pub fn init(app: *App) !LightPanda {
+       const connection = try app.http.newConnection();
+       errdefer connection.deinit();
+
+       try connection.setURL(URL);
+       try connection.setMethod(.POST);
+
        const allocator = app.allocator;
        return .{
            .cond = .{},

@@ -35,8 +40,7 @@ pub const LightPanda = struct {
            .thread = null,
            .running = true,
            .allocator = allocator,
-           .client = app.http_client,
-           .uri = std.Uri.parse(URL) catch unreachable,
+           .connection = connection,
            .node_pool = std.heap.MemoryPool(List.Node).init(allocator),
        };
    }

@@ -50,6 +54,7 @@ pub const LightPanda = struct {
            thread.join();
        }
        self.node_pool.deinit();
+       self.connection.deinit();
    }

    pub fn send(self: *LightPanda, iid: ?[]const u8, run_mode: App.RunMode, raw_event: telemetry.Event) !void {

@@ -102,15 +107,11 @@ pub const LightPanda = struct {
            try writer.writeByte('\n');
        }

-       var req = try self.client.request(.POST, &self.uri);
-       defer req.deinit();
-       req.body = arr.items;
-
-       // drain the response
-       var res = try req.sendSync(.{});
-       while (try res.next()) |_| {}
-       if (res.header.status != 200) {
-           log.warn(.telemetry, "server error", .{ .status = res.header.status });
+       try self.connection.setBody(arr.items);
+       const status = try self.connection.request();
+
+       if (status != 200) {
+           log.warn(.telemetry, "server error", .{ .status = status });
        }
    }
@@ -29,16 +29,19 @@ fn TelemetryT(comptime P: type) type {

    const Self = @This();

-   pub fn init(app: *App, run_mode: App.RunMode) Self {
+   pub fn init(app: *App, run_mode: App.RunMode) !Self {
        const disabled = std.process.hasEnvVarConstant("LIGHTPANDA_DISABLE_TELEMETRY");
        if (builtin.mode != .Debug and builtin.is_test == false) {
            log.info(.telemetry, "telemetry status", .{ .disabled = disabled });
        }

+       const provider = try P.init(app);
+       errdefer provider.deinit();
+
        return .{
            .disabled = disabled,
            .run_mode = run_mode,
-           .provider = P.init(app),
+           .provider = provider,
            .iid = if (disabled) null else getOrCreateId(app.app_dir_path),
        };
    }

@@ -134,7 +137,7 @@ pub const Event = union(enum) {
};

const NoopProvider = struct {
-   fn init(_: *App) NoopProvider {
+   fn init(_: *App) !NoopProvider {
        return .{};
    }
    fn deinit(_: NoopProvider) void {}

@@ -150,7 +153,7 @@ test "telemetry: disabled by environment" {
    defer _ = unsetenv(@constCast("LIGHTPANDA_DISABLE_TELEMETRY"));

    const FailingProvider = struct {
-       fn init(_: *App) @This() {
+       fn init(_: *App) !@This() {
            return .{};
        }
        fn deinit(_: @This()) void {}

@@ -159,7 +162,7 @@ test "telemetry: disabled by environment" {
        }
    };

-   var telemetry = TelemetryT(FailingProvider).init(undefined, .serve);
+   var telemetry = try TelemetryT(FailingProvider).init(undefined, .serve);
    defer telemetry.deinit();
    telemetry.record(.{ .run = {} });
}

@@ -186,7 +189,7 @@ test "telemetry: sends event to provider" {
    var app = testing.createApp(.{});
    defer app.deinit();

-   var telemetry = TelemetryT(MockProvider).init(app, .serve);
+   var telemetry = try TelemetryT(MockProvider).init(app, .serve);
    defer telemetry.deinit();
    const mock = &telemetry.provider;

@@ -206,7 +209,7 @@ const MockProvider = struct {
    allocator: Allocator,
    events: std.ArrayListUnmanaged(Event),

-   fn init(app: *App) @This() {
+   fn init(app: *App) !@This() {
        return .{
            .iid = null,
            .run_mode = null,
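The provider contract implied by these hunks is small: init(app) may now fail, deinit releases whatever init acquired, and events are forwarded with the same arguments LightPanda.send takes above. A minimal conforming provider might look like the sketch below; the send method and its signature are an assumption based on the LightPanda provider shown in this diff, and the import paths assume the file sits next to telemetry.zig.

const std = @import("std");
const App = @import("../app.zig").App;
const telemetry = @import("telemetry.zig");

// Assumed provider shape: fallible init, infallible deinit, and a send that
// mirrors LightPanda.send's signature.
const StderrProvider = struct {
    fn init(_: *App) !StderrProvider {
        return .{};
    }

    fn deinit(_: StderrProvider) void {}

    fn send(_: *StderrProvider, iid: ?[]const u8, run_mode: App.RunMode, event: telemetry.Event) !void {
        std.debug.print("telemetry: iid={s} mode={any} event={any}\n", .{ iid orelse "-", run_mode, event });
    }
};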
@@ -409,10 +409,6 @@ pub const JsRunner = struct {
    const html_doc = try parser.documentHTMLParseFromStr(opts.html);
    try page.setDocument(html_doc);

-   // after the page is considered loaded, page.wait can exit early if
-   // there's no IO/timeouts. So setting this speeds up our tests
-   page.loaded = true;
-
    return .{
        .app = app,
        .page = page,

@@ -445,7 +441,7 @@ pub const JsRunner = struct {
    }
    return err;
};
-try self.page.wait(1);
+self.page.wait(1);
@import("root").js_runner_duration += std.time.Instant.since(try std.time.Instant.now(), start);

if (case.@"1") |expected| {