Merge pull request #1639 from lightpanda-io/wp/mrdimidium/cleanup-io-layer
Some checks failed
e2e-test / zig build release (push) Has been cancelled
e2e-test / demo-scripts (push) Has been cancelled
e2e-test / cdp-and-hyperfine-bench (push) Has been cancelled
e2e-test / perf-fmt (push) Has been cancelled
e2e-test / browser fetch (push) Has been cancelled
zig-test / zig test using v8 in debug mode (push) Has been cancelled
zig-test / zig test (push) Has been cancelled
zig-test / perf-fmt (push) Has been cancelled

Separates network and the browser specific t
This commit is contained in:
Nikolay Govorov
2026-02-25 06:12:56 +00:00
committed by GitHub
9 changed files with 1780 additions and 1692 deletions

1604
src/Net.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -30,6 +30,7 @@ const log = @import("log.zig");
const App = @import("App.zig"); const App = @import("App.zig");
const Config = @import("Config.zig"); const Config = @import("Config.zig");
const CDP = @import("cdp/cdp.zig").CDP; const CDP = @import("cdp/cdp.zig").CDP;
const Net = @import("Net.zig");
const Http = @import("http/Http.zig"); const Http = @import("http/Http.zig");
const HttpClient = @import("http/Client.zig"); const HttpClient = @import("http/Client.zig");
@@ -265,21 +266,7 @@ pub const Client = struct {
allocator: Allocator, allocator: Allocator,
app: *App, app: *App,
http: *HttpClient, http: *HttpClient,
json_version_response: []const u8, ws: Net.WsConnection,
reader: Reader(true),
socket: posix.socket_t,
socket_flags: usize,
send_arena: ArenaAllocator,
timeout_ms: u32,
const EMPTY_PONG = [_]u8{ 138, 0 };
// CLOSE, 2 length, code
const CLOSE_NORMAL = [_]u8{ 136, 2, 3, 232 }; // code: 1000
const CLOSE_TOO_BIG = [_]u8{ 136, 2, 3, 241 }; // 1009
const CLOSE_PROTOCOL_ERROR = [_]u8{ 136, 2, 3, 234 }; //code: 1002
// "private-use" close codes must be from 4000-49999
const CLOSE_TIMEOUT = [_]u8{ 136, 2, 15, 160 }; // code: 4000
fn init( fn init(
socket: posix.socket_t, socket: posix.socket_t,
@@ -288,40 +275,28 @@ pub const Client = struct {
json_version_response: []const u8, json_version_response: []const u8,
timeout_ms: u32, timeout_ms: u32,
) !Client { ) !Client {
var ws = try Net.WsConnection.init(socket, allocator, json_version_response, timeout_ms);
errdefer ws.deinit();
if (log.enabled(.app, .info)) { if (log.enabled(.app, .info)) {
var client_address: std.net.Address = undefined; const client_address = ws.getAddress() catch null;
var socklen: posix.socklen_t = @sizeOf(net.Address);
try std.posix.getpeername(socket, &client_address.any, &socklen);
log.info(.app, "client connected", .{ .ip = client_address }); log.info(.app, "client connected", .{ .ip = client_address });
} }
const socket_flags = try posix.fcntl(socket, posix.F.GETFL, 0);
const nonblocking = @as(u32, @bitCast(posix.O{ .NONBLOCK = true }));
// we expect the socket to come to us as nonblocking
lp.assert(socket_flags & nonblocking == nonblocking, "Client.init blocking", .{});
var reader = try Reader(true).init(allocator);
errdefer reader.deinit();
const http = try app.http.createClient(allocator); const http = try app.http.createClient(allocator);
errdefer http.deinit(); errdefer http.deinit();
return .{ return .{
.socket = socket,
.allocator = allocator, .allocator = allocator,
.app = app, .app = app,
.http = http, .http = http,
.json_version_response = json_version_response, .ws = ws,
.reader = reader,
.mode = .{ .http = {} }, .mode = .{ .http = {} },
.socket_flags = socket_flags,
.send_arena = ArenaAllocator.init(allocator),
.timeout_ms = timeout_ms,
}; };
} }
fn stop(self: *Client) void { fn stop(self: *Client) void {
posix.shutdown(self.socket, .recv) catch {}; self.ws.shutdown();
} }
fn deinit(self: *Client) void { fn deinit(self: *Client) void {
@@ -329,15 +304,14 @@ pub const Client = struct {
.cdp => |*cdp| cdp.deinit(), .cdp => |*cdp| cdp.deinit(),
.http => {}, .http => {},
} }
self.reader.deinit(); self.ws.deinit();
self.send_arena.deinit();
self.http.deinit(); self.http.deinit();
} }
fn start(self: *Client) void { fn start(self: *Client) void {
const http = self.http; const http = self.http;
http.cdp_client = .{ http.cdp_client = .{
.socket = self.socket, .socket = self.ws.socket,
.ctx = self, .ctx = self,
.blocking_read_start = Client.blockingReadStart, .blocking_read_start = Client.blockingReadStart,
.blocking_read = Client.blockingRead, .blocking_read = Client.blockingRead,
@@ -352,8 +326,9 @@ pub const Client = struct {
fn httpLoop(self: *Client, http: *HttpClient) !void { fn httpLoop(self: *Client, http: *HttpClient) !void {
lp.assert(self.mode == .http, "Client.httpLoop invalid mode", .{}); lp.assert(self.mode == .http, "Client.httpLoop invalid mode", .{});
while (true) { while (true) {
const status = http.tick(self.timeout_ms) catch |err| { const status = http.tick(self.ws.timeout_ms) catch |err| {
log.err(.app, "http tick", .{ .err = err }); log.err(.app, "http tick", .{ .err = err });
return; return;
}; };
@@ -371,13 +346,9 @@ pub const Client = struct {
} }
} }
return self.cdpLoop(http);
}
fn cdpLoop(self: *Client, http: *HttpClient) !void {
var cdp = &self.mode.cdp; var cdp = &self.mode.cdp;
var last_message = timestamp(.monotonic); var last_message = timestamp(.monotonic);
var ms_remaining = self.timeout_ms; var ms_remaining = self.ws.timeout_ms;
while (true) { while (true) {
switch (cdp.pageWait(ms_remaining)) { switch (cdp.pageWait(ms_remaining)) {
@@ -386,7 +357,7 @@ pub const Client = struct {
return; return;
} }
last_message = timestamp(.monotonic); last_message = timestamp(.monotonic);
ms_remaining = self.timeout_ms; ms_remaining = self.ws.timeout_ms;
}, },
.no_page => { .no_page => {
const status = http.tick(ms_remaining) catch |err| { const status = http.tick(ms_remaining) catch |err| {
@@ -401,7 +372,7 @@ pub const Client = struct {
return; return;
} }
last_message = timestamp(.monotonic); last_message = timestamp(.monotonic);
ms_remaining = self.timeout_ms; ms_remaining = self.ws.timeout_ms;
}, },
.done => { .done => {
const elapsed = timestamp(.monotonic) - last_message; const elapsed = timestamp(.monotonic) - last_message;
@@ -417,7 +388,7 @@ pub const Client = struct {
fn blockingReadStart(ctx: *anyopaque) bool { fn blockingReadStart(ctx: *anyopaque) bool {
const self: *Client = @ptrCast(@alignCast(ctx)); const self: *Client = @ptrCast(@alignCast(ctx));
_ = posix.fcntl(self.socket, posix.F.SETFL, self.socket_flags & ~@as(u32, @bitCast(posix.O{ .NONBLOCK = true }))) catch |err| { self.ws.setBlocking(true) catch |err| {
log.warn(.app, "CDP blockingReadStart", .{ .err = err }); log.warn(.app, "CDP blockingReadStart", .{ .err = err });
return false; return false;
}; };
@@ -431,7 +402,7 @@ pub const Client = struct {
fn blockingReadStop(ctx: *anyopaque) bool { fn blockingReadStop(ctx: *anyopaque) bool {
const self: *Client = @ptrCast(@alignCast(ctx)); const self: *Client = @ptrCast(@alignCast(ctx));
_ = posix.fcntl(self.socket, posix.F.SETFL, self.socket_flags) catch |err| { self.ws.setBlocking(false) catch |err| {
log.warn(.app, "CDP blockingReadStop", .{ .err = err }); log.warn(.app, "CDP blockingReadStop", .{ .err = err });
return false; return false;
}; };
@@ -439,7 +410,7 @@ pub const Client = struct {
} }
fn readSocket(self: *Client) bool { fn readSocket(self: *Client) bool {
const n = posix.read(self.socket, self.readBuf()) catch |err| { const n = self.ws.read() catch |err| {
log.warn(.app, "CDP read", .{ .err = err }); log.warn(.app, "CDP read", .{ .err = err });
return false; return false;
}; };
@@ -449,16 +420,10 @@ pub const Client = struct {
return false; return false;
} }
return self.processData(n) catch false; return self.processData() catch false;
} }
fn readBuf(self: *Client) []u8 { fn processData(self: *Client) !bool {
return self.reader.readBuf();
}
fn processData(self: *Client, len: usize) !bool {
self.reader.len += len;
switch (self.mode) { switch (self.mode) {
.cdp => |*cdp| return self.processWebsocketMessage(cdp), .cdp => |*cdp| return self.processWebsocketMessage(cdp),
.http => return self.processHTTPRequest(), .http => return self.processHTTPRequest(),
@@ -466,8 +431,8 @@ pub const Client = struct {
} }
fn processHTTPRequest(self: *Client) !bool { fn processHTTPRequest(self: *Client) !bool {
lp.assert(self.reader.pos == 0, "Client.HTTP pos", .{ .pos = self.reader.pos }); lp.assert(self.ws.reader.pos == 0, "Client.HTTP pos", .{ .pos = self.ws.reader.pos });
const request = self.reader.buf[0..self.reader.len]; const request = self.ws.reader.buf[0..self.ws.reader.len];
if (request.len > Config.CDP_MAX_HTTP_REQUEST_SIZE) { if (request.len > Config.CDP_MAX_HTTP_REQUEST_SIZE) {
self.writeHTTPErrorResponse(413, "Request too large"); self.writeHTTPErrorResponse(413, "Request too large");
@@ -481,7 +446,7 @@ pub const Client = struct {
} }
// the next incoming data can go to the front of our buffer // the next incoming data can go to the front of our buffer
defer self.reader.len = 0; defer self.ws.reader.len = 0;
return self.handleHTTPRequest(request) catch |err| { return self.handleHTTPRequest(request) catch |err| {
switch (err) { switch (err) {
error.NotFound => self.writeHTTPErrorResponse(404, "Not found"), error.NotFound => self.writeHTTPErrorResponse(404, "Not found"),
@@ -522,14 +487,14 @@ pub const Client = struct {
} }
if (std.mem.eql(u8, url, "/json/version") or std.mem.eql(u8, url, "/json/version/")) { if (std.mem.eql(u8, url, "/json/version") or std.mem.eql(u8, url, "/json/version/")) {
try self.send(self.json_version_response); try self.ws.send(self.ws.json_version_response);
// Chromedp (a Go driver) does an http request to /json/version // Chromedp (a Go driver) does an http request to /json/version
// then to / (websocket upgrade) using a different connection. // then to / (websocket upgrade) using a different connection.
// Since we only allow 1 connection at a time, the 2nd one (the // Since we only allow 1 connection at a time, the 2nd one (the
// websocket upgrade) blocks until the first one times out. // websocket upgrade) blocks until the first one times out.
// We can avoid that by closing the connection. json_version_response // We can avoid that by closing the connection. json_version_response
// has a Connection: Close header too. // has a Connection: Close header too.
try posix.shutdown(self.socket, .recv); self.ws.shutdown();
return false; return false;
} }
@@ -537,581 +502,31 @@ pub const Client = struct {
} }
fn upgradeConnection(self: *Client, request: []u8) !void { fn upgradeConnection(self: *Client, request: []u8) !void {
// our caller already confirmed that we have a trailing \r\n\r\n try self.ws.upgrade(request);
const request_line_end = std.mem.indexOfScalar(u8, request, '\r') orelse unreachable;
const request_line = request[0..request_line_end];
if (!std.ascii.endsWithIgnoreCase(request_line, "http/1.1")) {
return error.InvalidProtocol;
}
// we need to extract the sec-websocket-key value
var key: []const u8 = "";
// we need to make sure that we got all the necessary headers + values
var required_headers: u8 = 0;
// can't std.mem.split because it forces the iterated value to be const
// (we could @constCast...)
var buf = request[request_line_end + 2 ..];
while (buf.len > 4) {
const index = std.mem.indexOfScalar(u8, buf, '\r') orelse unreachable;
const separator = std.mem.indexOfScalar(u8, buf[0..index], ':') orelse return error.InvalidRequest;
const name = std.mem.trim(u8, toLower(buf[0..separator]), &std.ascii.whitespace);
const value = std.mem.trim(u8, buf[(separator + 1)..index], &std.ascii.whitespace);
if (std.mem.eql(u8, name, "upgrade")) {
if (!std.ascii.eqlIgnoreCase("websocket", value)) {
return error.InvalidUpgradeHeader;
}
required_headers |= 1;
} else if (std.mem.eql(u8, name, "sec-websocket-version")) {
if (value.len != 2 or value[0] != '1' or value[1] != '3') {
return error.InvalidVersionHeader;
}
required_headers |= 2;
} else if (std.mem.eql(u8, name, "connection")) {
// find if connection header has upgrade in it, example header:
// Connection: keep-alive, Upgrade
if (std.ascii.indexOfIgnoreCase(value, "upgrade") == null) {
return error.InvalidConnectionHeader;
}
required_headers |= 4;
} else if (std.mem.eql(u8, name, "sec-websocket-key")) {
key = value;
required_headers |= 8;
}
const next = index + 2;
buf = buf[next..];
}
if (required_headers != 15) {
return error.MissingHeaders;
}
// our caller has already made sure this request ended in \r\n\r\n
// so it isn't something we need to check again
const allocator = self.send_arena.allocator();
const response = blk: {
// Response to an ugprade request is always this, with
// the Sec-Websocket-Accept value a spacial sha1 hash of the
// request "sec-websocket-version" and a magic value.
const template =
"HTTP/1.1 101 Switching Protocols\r\n" ++
"Upgrade: websocket\r\n" ++
"Connection: upgrade\r\n" ++
"Sec-Websocket-Accept: 0000000000000000000000000000\r\n\r\n";
// The response will be sent via the IO Loop and thus has to have its
// own lifetime.
const res = try allocator.dupe(u8, template);
// magic response
const key_pos = res.len - 32;
var h: [20]u8 = undefined;
var hasher = std.crypto.hash.Sha1.init(.{});
hasher.update(key);
// websocket spec always used this value
hasher.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
hasher.final(&h);
_ = std.base64.standard.Encoder.encode(res[key_pos .. key_pos + 28], h[0..]);
break :blk res;
};
self.mode = .{ .cdp = try CDP.init(self.app, self.http, self) }; self.mode = .{ .cdp = try CDP.init(self.app, self.http, self) };
return self.send(response);
} }
fn writeHTTPErrorResponse(self: *Client, comptime status: u16, comptime body: []const u8) void { fn writeHTTPErrorResponse(self: *Client, comptime status: u16, comptime body: []const u8) void {
const response = std.fmt.comptimePrint( self.ws.sendHttpError(status, body);
"HTTP/1.1 {d} \r\nConnection: Close\r\nContent-Length: {d}\r\n\r\n{s}",
.{ status, body.len, body },
);
// we're going to close this connection anyways, swallowing any
// error seems safe
self.send(response) catch {};
} }
fn processWebsocketMessage(self: *Client, cdp: *CDP) !bool { fn processWebsocketMessage(self: *Client, cdp: *CDP) !bool {
var reader = &self.reader; return self.ws.processMessages(cdp);
while (true) {
const msg = reader.next() catch |err| {
switch (err) {
error.TooLarge => self.send(&CLOSE_TOO_BIG) catch {},
error.NotMasked => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.ReservedFlags => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.InvalidMessageType => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.ControlTooLarge => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.InvalidContinuation => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.NestedFragementation => self.send(&CLOSE_PROTOCOL_ERROR) catch {},
error.OutOfMemory => {}, // don't borther trying to send an error in this case
}
return err;
} orelse break;
switch (msg.type) {
.pong => {},
.ping => try self.sendPong(msg.data),
.close => {
self.send(&CLOSE_NORMAL) catch {};
return false;
},
.text, .binary => if (cdp.handleMessage(msg.data) == false) {
return false;
},
}
if (msg.cleanup_fragment) {
reader.cleanup();
}
}
// We might have read part of the next message. Our reader potentially
// has to move data around in its buffer to make space.
reader.compact();
return true;
} }
fn sendPong(self: *Client, data: []const u8) !void { pub fn sendAllocator(self: *Client) Allocator {
if (data.len == 0) { return self.ws.send_arena.allocator();
return self.send(&EMPTY_PONG);
}
var header_buf: [10]u8 = undefined;
const header = websocketHeader(&header_buf, .pong, data.len);
const allocator = self.send_arena.allocator();
var framed = try allocator.alloc(u8, header.len + data.len);
@memcpy(framed[0..header.len], header);
@memcpy(framed[header.len..], data);
return self.send(framed);
} }
// called by CDP
// Websocket frames have a variable length header. For server-client,
// it could be anywhere from 2 to 10 bytes. Our IO.Loop doesn't have
// writev, so we need to get creative. We'll JSON serialize to a
// buffer, where the first 10 bytes are reserved. We can then backfill
// the header and send the slice.
pub fn sendJSON(self: *Client, message: anytype, opts: std.json.Stringify.Options) !void { pub fn sendJSON(self: *Client, message: anytype, opts: std.json.Stringify.Options) !void {
const allocator = self.send_arena.allocator(); return self.ws.sendJSON(message, opts);
var aw = try std.Io.Writer.Allocating.initCapacity(allocator, 512);
// reserve space for the maximum possible header
try aw.writer.writeAll(&.{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 });
try std.json.Stringify.value(message, opts, &aw.writer);
const framed = fillWebsocketHeader(aw.toArrayList());
return self.send(framed);
} }
pub fn sendJSONRaw( pub fn sendJSONRaw(self: *Client, buf: std.ArrayList(u8)) !void {
self: *Client, return self.ws.sendJSONRaw(buf);
buf: std.ArrayList(u8),
) !void {
// Dangerous API!. We assume the caller has reserved the first 10
// bytes in `buf`.
const framed = fillWebsocketHeader(buf);
return self.send(framed);
}
fn send(self: *Client, data: []const u8) !void {
var pos: usize = 0;
var changed_to_blocking: bool = false;
defer _ = self.send_arena.reset(.{ .retain_with_limit = 1024 * 32 });
defer if (changed_to_blocking) {
// We had to change our socket to blocking me to get our write out
// We need to change it back to non-blocking.
_ = posix.fcntl(self.socket, posix.F.SETFL, self.socket_flags) catch |err| {
log.err(.app, "CDP restore nonblocking", .{ .err = err });
};
};
LOOP: while (pos < data.len) {
const written = posix.write(self.socket, data[pos..]) catch |err| switch (err) {
error.WouldBlock => {
// self.socket is nonblocking, because we don't want to block
// reads. But our life is a lot easier if we block writes,
// largely, because we don't have to maintain a queue of pending
// writes (which would each need their own allocations). So
// if we get a WouldBlock error, we'll switch the socket to
// blocking and switch it back to non-blocking after the write
// is complete. Doesn't seem particularly efficiently, but
// this should virtually never happen.
lp.assert(changed_to_blocking == false, "Client.double block", .{});
changed_to_blocking = true;
_ = try posix.fcntl(self.socket, posix.F.SETFL, self.socket_flags & ~@as(u32, @bitCast(posix.O{ .NONBLOCK = true })));
continue :LOOP;
},
else => return err,
};
if (written == 0) {
return error.Closed;
}
pos += written;
}
} }
}; };
// WebSocket message reader. Given websocket message, acts as an iterator that
// can return zero or more Messages. When next returns null, any incomplete
// message will remain in reader.data
fn Reader(comptime EXPECT_MASK: bool) type {
return struct {
allocator: Allocator,
// position in buf of the start of the next message
pos: usize = 0,
// position in buf up until where we have valid data
// (any new reads must be placed after this)
len: usize = 0,
// we add 140 to allow 1 control message (ping/pong/close) to be
// fragmented into a normal message.
buf: []u8,
fragments: ?Fragments = null,
const Self = @This();
fn init(allocator: Allocator) !Self {
const buf = try allocator.alloc(u8, 16 * 1024);
return .{
.buf = buf,
.allocator = allocator,
};
}
fn deinit(self: *Self) void {
self.cleanup();
self.allocator.free(self.buf);
}
fn cleanup(self: *Self) void {
if (self.fragments) |*f| {
f.message.deinit(self.allocator);
self.fragments = null;
}
}
fn readBuf(self: *Self) []u8 {
// We might have read a partial http or websocket message.
// Subsequent reads must read from where we left off.
return self.buf[self.len..];
}
fn next(self: *Self) !?Message {
LOOP: while (true) {
var buf = self.buf[self.pos..self.len];
const length_of_len, const message_len = extractLengths(buf) orelse {
// we don't have enough bytes
return null;
};
const byte1 = buf[0];
if (byte1 & 112 != 0) {
return error.ReservedFlags;
}
if (comptime EXPECT_MASK) {
if (buf[1] & 128 != 128) {
// client -> server messages _must_ be masked
return error.NotMasked;
}
} else if (buf[1] & 128 != 0) {
// server -> client are never masked
return error.Masked;
}
var is_control = false;
var is_continuation = false;
var message_type: Message.Type = undefined;
switch (byte1 & 15) {
0 => is_continuation = true,
1 => message_type = .text,
2 => message_type = .binary,
8 => {
is_control = true;
message_type = .close;
},
9 => {
is_control = true;
message_type = .ping;
},
10 => {
is_control = true;
message_type = .pong;
},
else => return error.InvalidMessageType,
}
if (is_control) {
if (message_len > 125) {
return error.ControlTooLarge;
}
} else if (message_len > Config.CDP_MAX_MESSAGE_SIZE) {
return error.TooLarge;
} else if (message_len > self.buf.len) {
const len = self.buf.len;
self.buf = try growBuffer(self.allocator, self.buf, message_len);
buf = self.buf[0..len];
// we need more data
return null;
} else if (buf.len < message_len) {
// we need more data
return null;
}
// prefix + length_of_len + mask
const header_len = 2 + length_of_len + if (comptime EXPECT_MASK) 4 else 0;
const payload = buf[header_len..message_len];
if (comptime EXPECT_MASK) {
mask(buf[header_len - 4 .. header_len], payload);
}
// whatever happens after this, we know where the next message starts
self.pos += message_len;
const fin = byte1 & 128 == 128;
if (is_continuation) {
const fragments = &(self.fragments orelse return error.InvalidContinuation);
if (fragments.message.items.len + message_len > Config.CDP_MAX_MESSAGE_SIZE) {
return error.TooLarge;
}
try fragments.message.appendSlice(self.allocator, payload);
if (fin == false) {
// maybe we have more parts of the message waiting
continue :LOOP;
}
// this continuation is done!
return .{
.type = fragments.type,
.data = fragments.message.items,
.cleanup_fragment = true,
};
}
const can_be_fragmented = message_type == .text or message_type == .binary;
if (self.fragments != null and can_be_fragmented) {
// if this isn't a continuation, then we can't have fragments
return error.NestedFragementation;
}
if (fin == false) {
if (can_be_fragmented == false) {
return error.InvalidContinuation;
}
// not continuation, and not fin. It has to be the first message
// in a fragmented message.
var fragments = Fragments{ .message = .{}, .type = message_type };
try fragments.message.appendSlice(self.allocator, payload);
self.fragments = fragments;
continue :LOOP;
}
return .{
.data = payload,
.type = message_type,
.cleanup_fragment = false,
};
}
}
fn extractLengths(buf: []const u8) ?struct { usize, usize } {
if (buf.len < 2) {
return null;
}
const length_of_len: usize = switch (buf[1] & 127) {
126 => 2,
127 => 8,
else => 0,
};
if (buf.len < length_of_len + 2) {
// we definitely don't have enough buf yet
return null;
}
const message_len = switch (length_of_len) {
2 => @as(u16, @intCast(buf[3])) | @as(u16, @intCast(buf[2])) << 8,
8 => @as(u64, @intCast(buf[9])) | @as(u64, @intCast(buf[8])) << 8 | @as(u64, @intCast(buf[7])) << 16 | @as(u64, @intCast(buf[6])) << 24 | @as(u64, @intCast(buf[5])) << 32 | @as(u64, @intCast(buf[4])) << 40 | @as(u64, @intCast(buf[3])) << 48 | @as(u64, @intCast(buf[2])) << 56,
else => buf[1] & 127,
} + length_of_len + 2 + if (comptime EXPECT_MASK) 4 else 0; // +2 for header prefix, +4 for mask;
return .{ length_of_len, message_len };
}
// This is called after we've processed complete websocket messages (this
// only applies to websocket messages).
// There are three cases:
// 1 - We don't have any incomplete data (for a subsequent message) in buf.
// This is the easier to handle, we can set pos & len to 0.
// 2 - We have part of the next message, but we know it'll fit in the
// remaining buf. We don't need to do anything
// 3 - We have part of the next message, but either it won't fight into the
// remaining buffer, or we don't know (because we don't have enough
// of the header to tell the length). We need to "compact" the buffer
fn compact(self: *Self) void {
const pos = self.pos;
const len = self.len;
lp.assert(pos <= len, "Client.Reader.compact precondition", .{ .pos = pos, .len = len });
// how many (if any) partial bytes do we have
const partial_bytes = len - pos;
if (partial_bytes == 0) {
// We have no partial bytes. Setting these to 0 ensures that we
// get the best utilization of our buffer
self.pos = 0;
self.len = 0;
return;
}
const partial = self.buf[pos..len];
// If we have enough bytes of the next message to tell its length
// we'll be able to figure out whether we need to do anything or not.
if (extractLengths(partial)) |length_meta| {
const next_message_len = length_meta.@"1";
// if this isn't true, then we have a full message and it
// should have been processed.
lp.assert(pos <= len, "Client.Reader.compact postcondition", .{ .next_len = next_message_len, .partial = partial_bytes });
const missing_bytes = next_message_len - partial_bytes;
const free_space = self.buf.len - len;
if (missing_bytes < free_space) {
// we have enough space in our buffer, as is,
return;
}
}
// We're here because we either don't have enough bytes of the next
// message, or we know that it won't fit in our buffer as-is.
std.mem.copyForwards(u8, self.buf, partial);
self.pos = 0;
self.len = partial_bytes;
}
};
}
fn growBuffer(allocator: Allocator, buf: []u8, required_capacity: usize) ![]u8 {
// from std.ArrayList
var new_capacity = buf.len;
while (true) {
new_capacity +|= new_capacity / 2 + 8;
if (new_capacity >= required_capacity) break;
}
log.debug(.app, "CDP buffer growth", .{ .from = buf.len, .to = new_capacity });
if (allocator.resize(buf, new_capacity)) {
return buf.ptr[0..new_capacity];
}
const new_buffer = try allocator.alloc(u8, new_capacity);
@memcpy(new_buffer[0..buf.len], buf);
allocator.free(buf);
return new_buffer;
}
const Fragments = struct {
type: Message.Type,
message: std.ArrayList(u8),
};
const Message = struct {
type: Type,
data: []const u8,
cleanup_fragment: bool,
const Type = enum {
text,
binary,
close,
ping,
pong,
};
};
// These are the only websocket types that we're currently sending
const OpCode = enum(u8) {
text = 128 | 1,
close = 128 | 8,
pong = 128 | 10,
};
fn fillWebsocketHeader(buf: std.ArrayList(u8)) []const u8 {
// can't use buf[0..10] here, because the header length
// is variable. If it's just 2 bytes, for example, we need the
// framed message to be:
// h1, h2, data
// If we use buf[0..10], we'd get:
// h1, h2, 0, 0, 0, 0, 0, 0, 0, 0, data
var header_buf: [10]u8 = undefined;
// -10 because we reserved 10 bytes for the header above
const header = websocketHeader(&header_buf, .text, buf.items.len - 10);
const start = 10 - header.len;
const message = buf.items;
@memcpy(message[start..10], header);
return message[start..];
}
// makes the assumption that our caller reserved the first
// 10 bytes for the header
fn websocketHeader(buf: []u8, op_code: OpCode, payload_len: usize) []const u8 {
lp.assert(buf.len == 10, "Websocket.Header", .{ .len = buf.len });
const len = payload_len;
buf[0] = 128 | @intFromEnum(op_code); // fin | opcode
if (len <= 125) {
buf[1] = @intCast(len);
return buf[0..2];
}
if (len < 65536) {
buf[1] = 126;
buf[2] = @intCast((len >> 8) & 0xFF);
buf[3] = @intCast(len & 0xFF);
return buf[0..4];
}
buf[1] = 127;
buf[2] = 0;
buf[3] = 0;
buf[4] = 0;
buf[5] = 0;
buf[6] = @intCast((len >> 24) & 0xFF);
buf[7] = @intCast((len >> 16) & 0xFF);
buf[8] = @intCast((len >> 8) & 0xFF);
buf[9] = @intCast(len & 0xFF);
return buf[0..10];
}
// Utils // Utils
// -------- // --------
@@ -1139,48 +554,6 @@ fn buildJSONVersionResponse(
pub const timestamp = @import("datetime.zig").timestamp; pub const timestamp = @import("datetime.zig").timestamp;
// In-place string lowercase
fn toLower(str: []u8) []u8 {
for (str, 0..) |c, i| {
str[i] = std.ascii.toLower(c);
}
return str;
}
// Zig is in a weird backend transition right now. Need to determine if
// SIMD is even available.
const backend_supports_vectors = switch (builtin.zig_backend) {
.stage2_llvm, .stage2_c => true,
else => false,
};
// Websocket messages from client->server are masked using a 4 byte XOR mask
fn mask(m: []const u8, payload: []u8) void {
var data = payload;
if (!comptime backend_supports_vectors) return simpleMask(m, data);
const vector_size = std.simd.suggestVectorLength(u8) orelse @sizeOf(usize);
if (data.len >= vector_size) {
const mask_vector = std.simd.repeat(vector_size, @as(@Vector(4, u8), m[0..4].*));
while (data.len >= vector_size) {
const slice = data[0..vector_size];
const masked_data_slice: @Vector(vector_size, u8) = slice.*;
slice.* = masked_data_slice ^ mask_vector;
data = data[vector_size..];
}
}
simpleMask(m, data);
}
// Used when SIMD isn't available, or for any remaining part of the message
// which is too small to effectively use SIMD.
fn simpleMask(m: []const u8, payload: []u8) void {
for (payload, 0..) |b, i| {
payload[i] = b ^ m[i & 3];
}
}
const testing = std.testing; const testing = std.testing;
test "server: buildJSONVersionResponse" { test "server: buildJSONVersionResponse" {
const address = try net.Address.parseIp4("127.0.0.1", 9001); const address = try net.Address.parseIp4("127.0.0.1", 9001);
@@ -1391,22 +764,6 @@ test "Client: close message" {
); );
} }
test "server: mask" {
var buf: [4000]u8 = undefined;
const messages = [_][]const u8{ "1234", "1234" ** 99, "1234" ** 999 };
for (messages) |message| {
// we need the message to be mutable since mask operates in-place
const payload = buf[0..message.len];
@memcpy(payload, message);
mask(&.{ 1, 2, 200, 240 }, payload);
try testing.expectEqual(false, std.mem.eql(u8, payload, message));
mask(&.{ 1, 2, 200, 240 }, payload);
try testing.expectEqual(true, std.mem.eql(u8, payload, message));
}
}
test "server: 404" { test "server: 404" {
var c = try createTestClient(); var c = try createTestClient();
defer c.deinit(); defer c.deinit();
@@ -1542,7 +899,7 @@ fn createTestClient() !TestClient {
const TestClient = struct { const TestClient = struct {
stream: std.net.Stream, stream: std.net.Stream,
buf: [1024]u8 = undefined, buf: [1024]u8 = undefined,
reader: Reader(false), reader: Net.Reader(false),
fn deinit(self: *TestClient) void { fn deinit(self: *TestClient) void {
self.stream.close(); self.stream.close();
@@ -1609,7 +966,7 @@ const TestClient = struct {
"Sec-Websocket-Accept: flzHu2DevQ2dSCSVqKSii5e9C2o=\r\n\r\n", res); "Sec-Websocket-Accept: flzHu2DevQ2dSCSVqKSii5e9C2o=\r\n\r\n", res);
} }
fn readWebsocketMessage(self: *TestClient) !?Message { fn readWebsocketMessage(self: *TestClient) !?Net.Message {
while (true) { while (true) {
const n = try self.stream.read(self.reader.readBuf()); const n = try self.stream.read(self.reader.readBuf());
if (n == 0) { if (n == 0) {

View File

@@ -63,6 +63,7 @@ const NavigationKind = @import("webapi/navigation/root.zig").NavigationKind;
const KeyboardEvent = @import("webapi/event/KeyboardEvent.zig"); const KeyboardEvent = @import("webapi/event/KeyboardEvent.zig");
const Http = App.Http; const Http = App.Http;
const Net = @import("../Net.zig");
const ArenaPool = App.ArenaPool; const ArenaPool = App.ArenaPool;
const timestamp = @import("../datetime.zig").timestamp; const timestamp = @import("../datetime.zig").timestamp;
@@ -1112,8 +1113,8 @@ fn printWaitAnalysis(self: *Page) void {
std.debug.print("\nactive requests: {d}\n", .{self._session.browser.http_client.active}); std.debug.print("\nactive requests: {d}\n", .{self._session.browser.http_client.active});
var n_ = self._session.browser.http_client.handles.in_use.first; var n_ = self._session.browser.http_client.handles.in_use.first;
while (n_) |n| { while (n_) |n| {
const handle: *Http.Client.Handle = @fieldParentPtr("node", n); const conn: *Net.Connection = @fieldParentPtr("node", n);
const transfer = Http.Transfer.fromEasy(handle.conn.easy) catch |err| { const transfer = Http.Transfer.fromConnection(conn) catch |err| {
std.debug.print(" - failed to load transfer: {any}\n", .{err}); std.debug.print(" - failed to load transfer: {any}\n", .{err});
break; break;
}; };

View File

@@ -699,7 +699,7 @@ pub fn BrowserContext(comptime CDP_T: type) type {
}; };
const cdp = self.cdp; const cdp = self.cdp;
const allocator = cdp.client.send_arena.allocator(); const allocator = cdp.client.sendAllocator();
const field = ",\"sessionId\":\""; const field = ",\"sessionId\":\"";

View File

@@ -48,6 +48,10 @@ const Client = struct {
}; };
} }
pub fn sendAllocator(self: *Client) Allocator {
return self.send_arena.allocator();
}
pub fn sendJSON(self: *Client, message: anytype, opts: json.Stringify.Options) !void { pub fn sendJSON(self: *Client, message: anytype, opts: json.Stringify.Options) !void {
var opts_copy = opts; var opts_copy = opts;
opts_copy.whitespace = .indent_2; opts_copy.whitespace = .indent_2;

View File

@@ -22,7 +22,7 @@ const lp = @import("lightpanda");
const log = @import("../log.zig"); const log = @import("../log.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const Http = @import("Http.zig"); const Net = @import("../Net.zig");
const Config = @import("../Config.zig"); const Config = @import("../Config.zig");
const URL = @import("../browser/URL.zig"); const URL = @import("../browser/URL.zig");
const Notification = @import("../Notification.zig"); const Notification = @import("../Notification.zig");
@@ -30,18 +30,17 @@ const CookieJar = @import("../browser/webapi/storage/Cookie.zig").Jar;
const Robots = @import("../browser/Robots.zig"); const Robots = @import("../browser/Robots.zig");
const RobotStore = Robots.RobotStore; const RobotStore = Robots.RobotStore;
const c = Http.c; const c = Net.c;
const posix = std.posix; const posix = std.posix;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator; const ArenaAllocator = std.heap.ArenaAllocator;
const errorCheck = Http.errorCheck;
const errorMCheck = Http.errorMCheck;
const IS_DEBUG = builtin.mode == .Debug; const IS_DEBUG = builtin.mode == .Debug;
const Method = Http.Method; const Method = Net.Method;
const ResponseHead = Net.ResponseHead;
const HeaderIterator = Net.HeaderIterator;
// This is loosely tied to a browser Page. Loading all the <scripts>, doing // This is loosely tied to a browser Page. Loading all the <scripts>, doing
// XHR requests, and loading imports all happens through here. Sine the app // XHR requests, and loading imports all happens through here. Sine the app
@@ -68,14 +67,7 @@ active: usize,
// 'networkAlmostIdle' Page.lifecycleEvent in CDP). // 'networkAlmostIdle' Page.lifecycleEvent in CDP).
intercepted: usize, intercepted: usize,
// curl has 2 APIs: easy and multi. Multi is like a combination of some I/O block // Our easy handles, managed by a curl multi.
// (e.g. epoll) and a bunch of pools. You add/remove easys to the multiple and
// then poll the multi.
multi: *c.CURLM,
// Our easy handles. Although the multi contains buffer pools and connections
// pools, re-using the easys is still recommended. This acts as our own pool
// of easys.
handles: Handles, handles: Handles,
// Use to generate the next request ID // Use to generate the next request ID
@@ -111,10 +103,6 @@ config: *const Config,
cdp_client: ?CDPClient = null, cdp_client: ?CDPClient = null,
// keep track of when curl_multi_perform is happening so that we can avoid
// recursive calls into curl (which it will fail)
performing: bool = false,
// libcurl can monitor arbitrary sockets, this lets us use libcurl to poll // libcurl can monitor arbitrary sockets, this lets us use libcurl to poll
// both HTTP data as well as messages from an CDP connection. // both HTTP data as well as messages from an CDP connection.
// Furthermore, we have some tension between blocking scripts and request // Furthermore, we have some tension between blocking scripts and request
@@ -142,21 +130,20 @@ pub fn init(allocator: Allocator, ca_blob: ?c.curl_blob, robot_store: *RobotStor
const client = try allocator.create(Client); const client = try allocator.create(Client);
errdefer allocator.destroy(client); errdefer allocator.destroy(client);
const multi = c.curl_multi_init() orelse return error.FailedToInitializeMulti; var handles = try Handles.init(allocator, ca_blob, config);
errdefer _ = c.curl_multi_cleanup(multi);
try errorMCheck(c.curl_multi_setopt(multi, c.CURLMOPT_MAX_HOST_CONNECTIONS, @as(c_long, config.httpMaxHostOpen())));
var handles = try Handles.init(allocator, client, ca_blob, config);
errdefer handles.deinit(allocator); errdefer handles.deinit(allocator);
// Set transfer callbacks on each connection.
for (handles.connections) |*conn| {
try conn.setCallbacks(Transfer.headerCallback, Transfer.dataCallback);
}
const http_proxy = config.httpProxy(); const http_proxy = config.httpProxy();
client.* = .{ client.* = .{
.queue = .{}, .queue = .{},
.active = 0, .active = 0,
.intercepted = 0, .intercepted = 0,
.multi = multi,
.handles = handles, .handles = handles,
.allocator = allocator, .allocator = allocator,
.robot_store = robot_store, .robot_store = robot_store,
@@ -173,8 +160,6 @@ pub fn deinit(self: *Client) void {
self.abort(); self.abort();
self.handles.deinit(self.allocator); self.handles.deinit(self.allocator);
_ = c.curl_multi_cleanup(self.multi);
self.transfer_pool.deinit(); self.transfer_pool.deinit();
var robots_iter = self.pending_robots_queue.iterator(); var robots_iter = self.pending_robots_queue.iterator();
@@ -186,14 +171,14 @@ pub fn deinit(self: *Client) void {
self.allocator.destroy(self); self.allocator.destroy(self);
} }
pub fn newHeaders(self: *const Client) !Http.Headers { pub fn newHeaders(self: *const Client) !Net.Headers {
return Http.Headers.init(self.config.http_headers.user_agent_header); return Net.Headers.init(self.config.http_headers.user_agent_header);
} }
pub fn abort(self: *Client) void { pub fn abort(self: *Client) void {
while (self.handles.in_use.first) |node| { while (self.handles.in_use.first) |node| {
const handle: *Handle = @fieldParentPtr("node", node); const conn: *Net.Connection = @fieldParentPtr("node", node);
var transfer = Transfer.fromEasy(handle.conn.easy) catch |err| { var transfer = Transfer.fromConnection(conn) catch |err| {
log.err(.http, "get private info", .{ .err = err, .source = "abort" }); log.err(.http, "get private info", .{ .err = err, .source = "abort" });
continue; continue;
}; };
@@ -213,10 +198,11 @@ pub fn abort(self: *Client) void {
if (comptime IS_DEBUG) { if (comptime IS_DEBUG) {
std.debug.assert(self.handles.in_use.first == null); std.debug.assert(self.handles.in_use.first == null);
std.debug.assert(self.handles.available.len() == self.handles.handles.len); std.debug.assert(self.handles.available.len() == self.handles.connections.len);
var running: c_int = undefined; const running = self.handles.perform() catch |err| {
std.debug.assert(c.curl_multi_perform(self.multi, &running) == c.CURLE_OK); lp.assert(false, "multi perform in abort", .{ .err = err });
};
std.debug.assert(running == 0); std.debug.assert(running == 0);
} }
} }
@@ -229,9 +215,9 @@ pub fn tick(self: *Client, timeout_ms: u32) !PerformStatus {
const queue_node = self.queue.popFirst() orelse break; const queue_node = self.queue.popFirst() orelse break;
const transfer: *Transfer = @fieldParentPtr("_node", queue_node); const transfer: *Transfer = @fieldParentPtr("_node", queue_node);
// we know this exists, because we checked isEmpty() above // we know this exists, because we checked hasAvailable() above
const handle = self.handles.getFreeHandle().?; const conn = self.handles.get().?;
try self.makeRequest(handle, transfer); try self.makeRequest(conn, transfer);
} }
return self.perform(@intCast(timeout_ms)); return self.perform(@intCast(timeout_ms));
} }
@@ -354,7 +340,7 @@ fn fetchRobotsThenProcessRequest(self: *Client, robots_url: [:0]const u8, req: R
try entry.value_ptr.append(self.allocator, req); try entry.value_ptr.append(self.allocator, req);
} }
fn robotsHeaderCallback(transfer: *Http.Transfer) !bool { fn robotsHeaderCallback(transfer: *Transfer) !bool {
const ctx: *RobotsRequestContext = @ptrCast(@alignCast(transfer.ctx)); const ctx: *RobotsRequestContext = @ptrCast(@alignCast(transfer.ctx));
if (transfer.response_header) |hdr| { if (transfer.response_header) |hdr| {
@@ -369,7 +355,7 @@ fn robotsHeaderCallback(transfer: *Http.Transfer) !bool {
return true; return true;
} }
fn robotsDataCallback(transfer: *Http.Transfer, data: []const u8) !void { fn robotsDataCallback(transfer: *Transfer, data: []const u8) !void {
const ctx: *RobotsRequestContext = @ptrCast(@alignCast(transfer.ctx)); const ctx: *RobotsRequestContext = @ptrCast(@alignCast(transfer.ctx));
try ctx.buffer.appendSlice(ctx.client.allocator, data); try ctx.buffer.appendSlice(ctx.client.allocator, data);
} }
@@ -512,8 +498,8 @@ fn waitForInterceptedResponse(self: *Client, transfer: *Transfer) !bool {
// cases, the interecptor is expected to call resume to continue the transfer // cases, the interecptor is expected to call resume to continue the transfer
// or transfer.abort() to abort it. // or transfer.abort() to abort it.
fn process(self: *Client, transfer: *Transfer) !void { fn process(self: *Client, transfer: *Transfer) !void {
if (self.handles.getFreeHandle()) |handle| { if (self.handles.get()) |conn| {
return self.makeRequest(handle, transfer); return self.makeRequest(conn, transfer);
} }
self.queue.append(&transfer._node); self.queue.append(&transfer._node);
@@ -548,7 +534,7 @@ pub fn abortTransfer(self: *Client, transfer: *Transfer) void {
} }
// For an intercepted request // For an intercepted request
pub fn fulfillTransfer(self: *Client, transfer: *Transfer, status: u16, headers: []const Http.Header, body: ?[]const u8) !void { pub fn fulfillTransfer(self: *Client, transfer: *Transfer, status: u16, headers: []const Net.Header, body: ?[]const u8) !void {
if (comptime IS_DEBUG) { if (comptime IS_DEBUG) {
std.debug.assert(transfer._intercept_state != .not_intercepted); std.debug.assert(transfer._intercept_state != .not_intercepted);
log.debug(.http, "filfull transfer", .{ .intercepted = self.intercepted }); log.debug(.http, "filfull transfer", .{ .intercepted = self.intercepted });
@@ -626,8 +612,8 @@ fn requestFailed(transfer: *Transfer, err: anyerror, comptime execute_callback:
pub fn changeProxy(self: *Client, proxy: [:0]const u8) !void { pub fn changeProxy(self: *Client, proxy: [:0]const u8) !void {
try self.ensureNoActiveConnection(); try self.ensureNoActiveConnection();
for (self.handles.handles) |*h| { for (self.handles.connections) |*conn| {
try errorCheck(c.curl_easy_setopt(h.conn.easy, c.CURLOPT_PROXY, proxy.ptr)); try conn.setProxy(proxy.ptr);
} }
self.use_proxy = true; self.use_proxy = true;
} }
@@ -638,8 +624,8 @@ pub fn restoreOriginalProxy(self: *Client) !void {
try self.ensureNoActiveConnection(); try self.ensureNoActiveConnection();
const proxy = if (self.http_proxy) |p| p.ptr else null; const proxy = if (self.http_proxy) |p| p.ptr else null;
for (self.handles.handles) |*h| { for (self.handles.connections) |*conn| {
try errorCheck(c.curl_easy_setopt(h.conn.easy, c.CURLOPT_PROXY, proxy)); try conn.setProxy(proxy);
} }
self.use_proxy = proxy != null; self.use_proxy = proxy != null;
} }
@@ -649,16 +635,8 @@ pub fn enableTlsVerify(self: *const Client) !void {
// Remove inflight connections check on enable TLS b/c chromiumoxide calls // Remove inflight connections check on enable TLS b/c chromiumoxide calls
// the command during navigate and Curl seems to accept it... // the command during navigate and Curl seems to accept it...
for (self.handles.handles) |*h| { for (self.handles.connections) |*conn| {
const easy = h.conn.easy; try conn.setTlsVerify(true, self.use_proxy);
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 2)));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 1)));
if (self.use_proxy) {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYHOST, @as(c_long, 2)));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYPEER, @as(c_long, 1)));
}
} }
} }
@@ -667,26 +645,16 @@ pub fn disableTlsVerify(self: *const Client) !void {
// Remove inflight connections check on disable TLS b/c chromiumoxide calls // Remove inflight connections check on disable TLS b/c chromiumoxide calls
// the command during navigate and Curl seems to accept it... // the command during navigate and Curl seems to accept it...
for (self.handles.handles) |*h| { for (self.handles.connections) |*conn| {
const easy = h.conn.easy; try conn.setTlsVerify(false, self.use_proxy);
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 0)));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 0)));
if (self.use_proxy) {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYHOST, @as(c_long, 0)));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYPEER, @as(c_long, 0)));
}
} }
} }
fn makeRequest(self: *Client, handle: *Handle, transfer: *Transfer) anyerror!void { fn makeRequest(self: *Client, conn: *Net.Connection, transfer: *Transfer) anyerror!void {
const conn = handle.conn;
const easy = conn.easy;
const req = &transfer.req; const req = &transfer.req;
{ {
transfer._handle = handle; transfer._conn = conn;
errdefer transfer.deinit(); errdefer transfer.deinit();
try conn.setURL(req.url); try conn.setURL(req.url);
@@ -694,23 +662,23 @@ fn makeRequest(self: *Client, handle: *Handle, transfer: *Transfer) anyerror!voi
if (req.body) |b| { if (req.body) |b| {
try conn.setBody(b); try conn.setBody(b);
} else { } else {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))); try conn.setGetMode();
} }
var header_list = req.headers; var header_list = req.headers;
try conn.secretHeaders(&header_list); // Add headers that must be hidden from intercepts try conn.secretHeaders(&header_list, &self.config.http_headers); // Add headers that must be hidden from intercepts
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list.headers)); try conn.setHeaders(&header_list);
// Add cookies. // Add cookies.
if (header_list.cookies) |cookies| { if (header_list.cookies) |cookies| {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_COOKIE, cookies)); try conn.setCookies(cookies);
} }
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PRIVATE, transfer)); try conn.setPrivate(transfer);
// add credentials // add credentials
if (req.credentials) |creds| { if (req.credentials) |creds| {
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXYUSERPWD, creds.ptr)); try conn.setProxyCredentials(creds);
} }
} }
@@ -719,11 +687,12 @@ fn makeRequest(self: *Client, handle: *Handle, transfer: *Transfer) anyerror!voi
// fails BEFORE `curl_multi_add_handle` suceeds, the we still need to do // fails BEFORE `curl_multi_add_handle` suceeds, the we still need to do
// cleanup. But if things fail after `curl_multi_add_handle`, we expect // cleanup. But if things fail after `curl_multi_add_handle`, we expect
// perfom to pickup the failure and cleanup. // perfom to pickup the failure and cleanup.
try errorMCheck(c.curl_multi_add_handle(self.multi, easy)); try self.handles.add(conn);
if (req.start_callback) |cb| { if (req.start_callback) |cb| {
cb(transfer) catch |err| { cb(transfer) catch |err| {
try errorMCheck(c.curl_multi_remove_handle(self.multi, easy)); self.handles.remove(conn);
transfer._conn = null;
transfer.deinit(); transfer.deinit();
return err; return err;
}; };
@@ -739,14 +708,7 @@ pub const PerformStatus = enum {
}; };
fn perform(self: *Client, timeout_ms: c_int) !PerformStatus { fn perform(self: *Client, timeout_ms: c_int) !PerformStatus {
const multi = self.multi; const running = try self.handles.perform();
var running: c_int = undefined;
{
self.performing = true;
defer self.performing = false;
try errorMCheck(c.curl_multi_perform(multi, &running));
}
// We're potentially going to block for a while until we get data. Process // We're potentially going to block for a while until we get data. Process
// whatever messages we have waiting ahead of time. // whatever messages we have waiting ahead of time.
@@ -756,18 +718,17 @@ fn perform(self: *Client, timeout_ms: c_int) !PerformStatus {
var status = PerformStatus.normal; var status = PerformStatus.normal;
if (self.cdp_client) |cdp_client| { if (self.cdp_client) |cdp_client| {
var wait_fd = c.curl_waitfd{ var wait_fds = [_]c.curl_waitfd{.{
.fd = cdp_client.socket, .fd = cdp_client.socket,
.events = c.CURL_WAIT_POLLIN, .events = c.CURL_WAIT_POLLIN,
.revents = 0, .revents = 0,
}; }};
try errorMCheck(c.curl_multi_poll(multi, &wait_fd, 1, timeout_ms, null)); try self.handles.poll(&wait_fds, timeout_ms);
if (wait_fd.revents != 0) { if (wait_fds[0].revents != 0) {
// the extra socket we passed in is ready, let's signal our caller
status = .cdp_socket; status = .cdp_socket;
} }
} else if (running > 0) { } else if (running > 0) {
try errorMCheck(c.curl_multi_poll(multi, null, 0, timeout_ms, null)); try self.handles.poll(&.{}, timeout_ms);
} }
_ = try self.processMessages(); _ = try self.processMessages();
@@ -775,18 +736,9 @@ fn perform(self: *Client, timeout_ms: c_int) !PerformStatus {
} }
fn processMessages(self: *Client) !bool { fn processMessages(self: *Client) !bool {
const multi = self.multi;
var processed = false; var processed = false;
var messages_count: c_int = 0; while (self.handles.readMessage()) |msg| {
while (c.curl_multi_info_read(multi, &messages_count)) |msg_| { const transfer = try Transfer.fromConnection(&msg.conn);
const msg: *c.CURLMsg = @ptrCast(msg_);
// This is the only possible message type from CURL for now.
if (comptime IS_DEBUG) {
std.debug.assert(msg.msg == c.CURLMSG_DONE);
}
const easy = msg.easy_handle.?;
const transfer = try Transfer.fromEasy(easy);
// In case of auth challenge // In case of auth challenge
// TODO give a way to configure the number of auth retries. // TODO give a way to configure the number of auth retries.
@@ -836,11 +788,13 @@ fn processMessages(self: *Client) !bool {
defer transfer.deinit(); defer transfer.deinit();
if (errorCheck(msg.data.result)) blk: { if (msg.err) |err| {
requestFailed(transfer, err, true);
} else blk: {
// In case of request w/o data, we need to call the header done // In case of request w/o data, we need to call the header done
// callback now. // callback now.
if (!transfer._header_done_called) { if (!transfer._header_done_called) {
const proceed = transfer.headerDoneCallback(easy) catch |err| { const proceed = transfer.headerDoneCallback(&msg.conn) catch |err| {
log.err(.http, "header_done_callback", .{ .err = err }); log.err(.http, "header_done_callback", .{ .err = err });
requestFailed(transfer, err, true); requestFailed(transfer, err, true);
continue; continue;
@@ -861,22 +815,15 @@ fn processMessages(self: *Client) !bool {
.transfer = transfer, .transfer = transfer,
}); });
processed = true; processed = true;
} else |err| {
requestFailed(transfer, err, true);
} }
} }
return processed; return processed;
} }
fn endTransfer(self: *Client, transfer: *Transfer) void { fn endTransfer(self: *Client, transfer: *Transfer) void {
const handle = transfer._handle.?; const conn = transfer._conn.?;
self.handles.remove(conn);
errorMCheck(c.curl_multi_remove_handle(self.multi, handle.conn.easy)) catch |err| { transfer._conn = null;
log.fatal(.http, "Failed to remove handle", .{ .err = err });
};
self.handles.release(handle);
transfer._handle = null;
self.active -= 1; self.active -= 1;
} }
@@ -886,101 +833,7 @@ fn ensureNoActiveConnection(self: *const Client) !void {
} }
} }
const Handles = struct { const Handles = Net.Handles;
handles: []Handle,
in_use: HandleList,
available: HandleList,
const HandleList = std.DoublyLinkedList;
fn init(
allocator: Allocator,
client: *Client,
ca_blob: ?c.curl_blob,
config: *const Config,
) !Handles {
const count: usize = config.httpMaxConcurrent();
if (count == 0) return error.InvalidMaxConcurrent;
const handles = try allocator.alloc(Handle, count);
errdefer allocator.free(handles);
var available: HandleList = .{};
for (0..count) |i| {
handles[i] = try Handle.init(client, ca_blob, config);
available.append(&handles[i].node);
}
return .{
.in_use = .{},
.handles = handles,
.available = available,
};
}
fn deinit(self: *Handles, allocator: Allocator) void {
for (self.handles) |*h| {
h.deinit();
}
allocator.free(self.handles);
}
fn hasAvailable(self: *const Handles) bool {
return self.available.first != null;
}
fn getFreeHandle(self: *Handles) ?*Handle {
if (self.available.popFirst()) |node| {
node.prev = null;
node.next = null;
self.in_use.append(node);
return @as(*Handle, @fieldParentPtr("node", node));
}
return null;
}
fn release(self: *Handles, handle: *Handle) void {
var node = &handle.node;
self.in_use.remove(node);
node.prev = null;
node.next = null;
self.available.append(node);
}
};
// wraps a c.CURL (an easy handle)
pub const Handle = struct {
client: *Client,
conn: Http.Connection,
node: Handles.HandleList.Node,
fn init(
client: *Client,
ca_blob: ?c.curl_blob,
config: *const Config,
) !Handle {
const conn = try Http.Connection.init(ca_blob, config);
errdefer conn.deinit();
const easy = conn.easy;
// callbacks
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HEADERDATA, easy));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HEADERFUNCTION, Transfer.headerCallback));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_WRITEDATA, easy));
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_WRITEFUNCTION, Transfer.dataCallback));
return .{
.node = .{},
.conn = conn,
.client = client,
};
}
fn deinit(self: *const Handle) void {
self.conn.deinit();
}
};
pub const RequestCookie = struct { pub const RequestCookie = struct {
is_http: bool, is_http: bool,
@@ -988,7 +841,7 @@ pub const RequestCookie = struct {
is_navigation: bool, is_navigation: bool,
origin: [:0]const u8, origin: [:0]const u8,
pub fn headersForRequest(self: *const RequestCookie, temp: Allocator, url: [:0]const u8, headers: *Http.Headers) !void { pub fn headersForRequest(self: *const RequestCookie, temp: Allocator, url: [:0]const u8, headers: *Net.Headers) !void {
var arr: std.ArrayList(u8) = .{}; var arr: std.ArrayList(u8) = .{};
try self.jar.forRequest(url, arr.writer(temp), .{ try self.jar.forRequest(url, arr.writer(temp), .{
.is_http = self.is_http, .is_http = self.is_http,
@@ -1007,7 +860,7 @@ pub const Request = struct {
page_id: u32, page_id: u32,
method: Method, method: Method,
url: [:0]const u8, url: [:0]const u8,
headers: Http.Headers, headers: Net.Headers,
body: ?[]const u8 = null, body: ?[]const u8 = null,
cookie_jar: ?*CookieJar, cookie_jar: ?*CookieJar,
resource_type: ResourceType, resource_type: ResourceType,
@@ -1053,45 +906,7 @@ pub const Request = struct {
}; };
}; };
pub const AuthChallenge = struct { const AuthChallenge = Net.AuthChallenge;
status: u16,
source: enum { server, proxy },
scheme: enum { basic, digest },
realm: []const u8,
pub fn parse(status: u16, header: []const u8) !AuthChallenge {
var ac: AuthChallenge = .{
.status = status,
.source = undefined,
.realm = "TODO", // TODO parser and set realm
.scheme = undefined,
};
const sep = std.mem.indexOfPos(u8, header, 0, ": ") orelse return error.InvalidHeader;
const hname = header[0..sep];
const hvalue = header[sep + 2 ..];
if (std.ascii.eqlIgnoreCase("WWW-Authenticate", hname)) {
ac.source = .server;
} else if (std.ascii.eqlIgnoreCase("Proxy-Authenticate", hname)) {
ac.source = .proxy;
} else {
return error.InvalidAuthChallenge;
}
const pos = std.mem.indexOfPos(u8, std.mem.trim(u8, hvalue, std.ascii.whitespace[0..]), 0, " ") orelse hvalue.len;
const _scheme = hvalue[0..pos];
if (std.ascii.eqlIgnoreCase(_scheme, "basic")) {
ac.scheme = .basic;
} else if (std.ascii.eqlIgnoreCase(_scheme, "digest")) {
ac.scheme = .digest;
} else {
return error.UnknownAuthChallengeScheme;
}
return ac;
}
};
pub const Transfer = struct { pub const Transfer = struct {
arena: ArenaAllocator, arena: ArenaAllocator,
@@ -1109,14 +924,14 @@ pub const Transfer = struct {
max_response_size: ?usize = null, max_response_size: ?usize = null,
// We'll store the response header here // We'll store the response header here
response_header: ?ResponseHeader = null, response_header: ?ResponseHead = null,
// track if the header callbacks done have been called. // track if the header callbacks done have been called.
_header_done_called: bool = false, _header_done_called: bool = false,
_notified_fail: bool = false, _notified_fail: bool = false,
_handle: ?*Handle = null, _conn: ?*Net.Connection = null,
_redirecting: bool = false, _redirecting: bool = false,
_auth_challenge: ?AuthChallenge = null, _auth_challenge: ?AuthChallenge = null,
@@ -1159,41 +974,35 @@ pub const Transfer = struct {
fn deinit(self: *Transfer) void { fn deinit(self: *Transfer) void {
self.req.headers.deinit(); self.req.headers.deinit();
if (self._handle) |handle| { if (self._conn) |conn| {
self.client.handles.release(handle); self.client.handles.remove(conn);
} }
self.arena.deinit(); self.arena.deinit();
self.client.transfer_pool.destroy(self); self.client.transfer_pool.destroy(self);
} }
fn buildResponseHeader(self: *Transfer, easy: *c.CURL) !void { fn buildResponseHeader(self: *Transfer, conn: *const Net.Connection) !void {
if (comptime IS_DEBUG) { if (comptime IS_DEBUG) {
std.debug.assert(self.response_header == null); std.debug.assert(self.response_header == null);
} }
var url: [*c]u8 = undefined; const url = try conn.getEffectiveUrl();
try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_EFFECTIVE_URL, &url));
var status: c_long = undefined; const status: u16 = if (self._auth_challenge != null)
if (self._auth_challenge) |_| { 407
status = 407; else
} else { try conn.getResponseCode();
try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_RESPONSE_CODE, &status));
}
var redirect_count: c_long = undefined;
try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_REDIRECT_COUNT, &redirect_count));
self.response_header = .{ self.response_header = .{
.url = url, .url = url,
.status = @intCast(status), .status = status,
.redirect_count = @intCast(redirect_count), .redirect_count = try conn.getRedirectCount(),
}; };
if (getResponseHeader(easy, "content-type", 0)) |ct| { if (conn.getResponseHeader("content-type", 0)) |ct| {
var hdr = &self.response_header.?; var hdr = &self.response_header.?;
const value = ct.value; const value = ct.value;
const len = @min(value.len, ResponseHeader.MAX_CONTENT_TYPE_LEN); const len = @min(value.len, ResponseHead.MAX_CONTENT_TYPE_LEN);
hdr._content_type_len = len; hdr._content_type_len = len;
@memcpy(hdr._content_type[0..len], value[0..len]); @memcpy(hdr._content_type[0..len], value[0..len]);
} }
@@ -1204,10 +1013,6 @@ pub const Transfer = struct {
return writer.print("{s} {s}", .{ @tagName(req.method), req.url }); return writer.print("{s} {s}", .{ @tagName(req.method), req.url });
} }
pub fn addHeader(self: *Transfer, value: [:0]const u8) !void {
self._request_header_list = c.curl_slist_append(self._request_header_list, value);
}
pub fn updateURL(self: *Transfer, url: [:0]const u8) !void { pub fn updateURL(self: *Transfer, url: [:0]const u8) !void {
// for cookies // for cookies
self.url = url; self.url = url;
@@ -1220,7 +1025,7 @@ pub const Transfer = struct {
self.req.credentials = userpwd; self.req.credentials = userpwd;
} }
pub fn replaceRequestHeaders(self: *Transfer, allocator: Allocator, headers: []const Http.Header) !void { pub fn replaceRequestHeaders(self: *Transfer, allocator: Allocator, headers: []const Net.Header) !void {
self.req.headers.deinit(); self.req.headers.deinit();
var buf: std.ArrayList(u8) = .empty; var buf: std.ArrayList(u8) = .empty;
@@ -1238,13 +1043,13 @@ pub const Transfer = struct {
pub fn abort(self: *Transfer, err: anyerror) void { pub fn abort(self: *Transfer, err: anyerror) void {
requestFailed(self, err, true); requestFailed(self, err, true);
if (self._handle == null) { if (self._conn == null) {
self.deinit(); self.deinit();
return; return;
} }
const client = self.client; const client = self.client;
if (client.performing) { if (client.handles.performing) {
// We're currently in a curl_multi_perform. We cannot call endTransfer // We're currently in a curl_multi_perform. We cannot call endTransfer
// as that calls curl_multi_remove_handle, and you can't do that // as that calls curl_multi_remove_handle, and you can't do that
// from a curl callback. Instead, we flag this transfer and all of // from a curl callback. Instead, we flag this transfer and all of
@@ -1254,7 +1059,7 @@ pub const Transfer = struct {
return; return;
} }
if (self._handle != null) { if (self._conn != null) {
client.endTransfer(self); client.endTransfer(self);
} }
self.deinit(); self.deinit();
@@ -1262,7 +1067,7 @@ pub const Transfer = struct {
pub fn terminate(self: *Transfer) void { pub fn terminate(self: *Transfer) void {
requestFailed(self, error.Shutdown, false); requestFailed(self, error.Shutdown, false);
if (self._handle != null) { if (self._conn != null) {
self.client.endTransfer(self); self.client.endTransfer(self);
} }
self.deinit(); self.deinit();
@@ -1271,7 +1076,7 @@ pub const Transfer = struct {
// internal, when the page is shutting down. Doesn't have the same ceremony // internal, when the page is shutting down. Doesn't have the same ceremony
// as abort (doesn't send a notification, doesn't invoke an error callback) // as abort (doesn't send a notification, doesn't invoke an error callback)
fn kill(self: *Transfer) void { fn kill(self: *Transfer) void {
if (self._handle != null) { if (self._conn != null) {
self.client.endTransfer(self); self.client.endTransfer(self);
} }
if (self.req.shutdown_callback) |cb| { if (self.req.shutdown_callback) |cb| {
@@ -1299,7 +1104,7 @@ pub const Transfer = struct {
// redirectionCookies manages cookies during redirections handled by Curl. // redirectionCookies manages cookies during redirections handled by Curl.
// It sets the cookies from the current response to the cookie jar. // It sets the cookies from the current response to the cookie jar.
// It also immediately sets cookies for the following request. // It also immediately sets cookies for the following request.
fn redirectionCookies(transfer: *Transfer, easy: *c.CURL) !void { fn redirectionCookies(transfer: *Transfer, conn: *const Net.Connection) !void {
const req = &transfer.req; const req = &transfer.req;
const arena = transfer.arena.allocator(); const arena = transfer.arena.allocator();
@@ -1307,7 +1112,7 @@ pub const Transfer = struct {
if (req.cookie_jar) |jar| { if (req.cookie_jar) |jar| {
var i: usize = 0; var i: usize = 0;
while (true) { while (true) {
const ct = getResponseHeader(easy, "set-cookie", i); const ct = conn.getResponseHeader("set-cookie", i);
if (ct == null) break; if (ct == null) break;
try jar.populateFromResponse(transfer.url, ct.?.value); try jar.populateFromResponse(transfer.url, ct.?.value);
i += 1; i += 1;
@@ -1316,12 +1121,11 @@ pub const Transfer = struct {
} }
// set cookies for the following redirection's request. // set cookies for the following redirection's request.
const location = getResponseHeader(easy, "location", 0) orelse { const location = conn.getResponseHeader("location", 0) orelse {
return error.LocationNotFound; return error.LocationNotFound;
}; };
var base_url: [*c]u8 = undefined; const base_url = try conn.getEffectiveUrl();
try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_EFFECTIVE_URL, &base_url));
const url = try URL.resolve(arena, std.mem.span(base_url), location.value, .{}); const url = try URL.resolve(arena, std.mem.span(base_url), location.value, .{});
transfer.url = url; transfer.url = url;
@@ -1335,23 +1139,23 @@ pub const Transfer = struct {
.is_navigation = req.resource_type == .document, .is_navigation = req.resource_type == .document,
}); });
try cookies.append(arena, 0); //null terminate try cookies.append(arena, 0); //null terminate
try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_COOKIE, @as([*c]const u8, @ptrCast(cookies.items.ptr)))); try conn.setCookies(@ptrCast(cookies.items.ptr));
} }
} }
// headerDoneCallback is called once the headers have been read. // headerDoneCallback is called once the headers have been read.
// It can be called either on dataCallback or once the request for those // It can be called either on dataCallback or once the request for those
// w/o body. // w/o body.
fn headerDoneCallback(transfer: *Transfer, easy: *c.CURL) !bool { fn headerDoneCallback(transfer: *Transfer, conn: *const Net.Connection) !bool {
lp.assert(transfer._header_done_called == false, "Transfer.headerDoneCallback", .{}); lp.assert(transfer._header_done_called == false, "Transfer.headerDoneCallback", .{});
defer transfer._header_done_called = true; defer transfer._header_done_called = true;
try transfer.buildResponseHeader(easy); try transfer.buildResponseHeader(conn);
if (getResponseHeader(easy, "content-type", 0)) |ct| { if (conn.getResponseHeader("content-type", 0)) |ct| {
var hdr = &transfer.response_header.?; var hdr = &transfer.response_header.?;
const value = ct.value; const value = ct.value;
const len = @min(value.len, ResponseHeader.MAX_CONTENT_TYPE_LEN); const len = @min(value.len, ResponseHead.MAX_CONTENT_TYPE_LEN);
hdr._content_type_len = len; hdr._content_type_len = len;
@memcpy(hdr._content_type[0..len], value[0..len]); @memcpy(hdr._content_type[0..len], value[0..len]);
} }
@@ -1359,7 +1163,7 @@ pub const Transfer = struct {
if (transfer.req.cookie_jar) |jar| { if (transfer.req.cookie_jar) |jar| {
var i: usize = 0; var i: usize = 0;
while (true) { while (true) {
const ct = getResponseHeader(easy, "set-cookie", i); const ct = conn.getResponseHeader("set-cookie", i);
if (ct == null) break; if (ct == null) break;
jar.populateFromResponse(transfer.url, ct.?.value) catch |err| { jar.populateFromResponse(transfer.url, ct.?.value) catch |err| {
log.err(.http, "set cookie", .{ .err = err, .req = transfer }); log.err(.http, "set cookie", .{ .err = err, .req = transfer });
@@ -1397,8 +1201,8 @@ pub const Transfer = struct {
std.debug.assert(header_count == 1); std.debug.assert(header_count == 1);
} }
const easy: *c.CURL = @ptrCast(@alignCast(data)); const conn: Net.Connection = .{ .easy = @ptrCast(@alignCast(data)) };
var transfer = fromEasy(easy) catch |err| { var transfer = fromConnection(&conn) catch |err| {
log.err(.http, "get private info", .{ .err = err, .source = "header callback" }); log.err(.http, "get private info", .{ .err = err, .source = "header callback" });
return 0; return 0;
}; };
@@ -1501,7 +1305,7 @@ pub const Transfer = struct {
if (transfer._redirecting) { if (transfer._redirecting) {
// parse and set cookies for the redirection. // parse and set cookies for the redirection.
redirectionCookies(transfer, easy) catch |err| { redirectionCookies(transfer, &conn) catch |err| {
if (comptime IS_DEBUG) { if (comptime IS_DEBUG) {
log.debug(.http, "redirection cookies", .{ .err = err }); log.debug(.http, "redirection cookies", .{ .err = err });
} }
@@ -1519,8 +1323,8 @@ pub const Transfer = struct {
std.debug.assert(chunk_count == 1); std.debug.assert(chunk_count == 1);
} }
const easy: *c.CURL = @ptrCast(@alignCast(data)); const conn: Net.Connection = .{ .easy = @ptrCast(@alignCast(data)) };
var transfer = fromEasy(easy) catch |err| { var transfer = fromConnection(&conn) catch |err| {
log.err(.http, "get private info", .{ .err = err, .source = "body callback" }); log.err(.http, "get private info", .{ .err = err, .source = "body callback" });
return c.CURL_WRITEFUNC_ERROR; return c.CURL_WRITEFUNC_ERROR;
}; };
@@ -1530,7 +1334,7 @@ pub const Transfer = struct {
} }
if (!transfer._header_done_called) { if (!transfer._header_done_called) {
const proceed = transfer.headerDoneCallback(easy) catch |err| { const proceed = transfer.headerDoneCallback(&conn) catch |err| {
log.err(.http, "header_done_callback", .{ .err = err, .req = transfer }); log.err(.http, "header_done_callback", .{ .err = err, .req = transfer });
return c.CURL_WRITEFUNC_ERROR; return c.CURL_WRITEFUNC_ERROR;
}; };
@@ -1567,10 +1371,10 @@ pub const Transfer = struct {
} }
pub fn responseHeaderIterator(self: *Transfer) HeaderIterator { pub fn responseHeaderIterator(self: *Transfer) HeaderIterator {
if (self._handle) |handle| { if (self._conn) |conn| {
// If we have a handle, than this is a real curl request and we // If we have a connection, than this is a real curl request and we
// iterate through the header that curl maintains. // iterate through the header that curl maintains.
return .{ .curl = .{ .easy = handle.conn.easy } }; return .{ .curl = .{ .conn = conn } };
} }
// If there's no handle, it either means this is being called before // If there's no handle, it either means this is being called before
// the request is even being made (which would be a bug in the code) // the request is even being made (which would be a bug in the code)
@@ -1579,15 +1383,13 @@ pub const Transfer = struct {
return .{ .list = .{ .list = self.response_header.?._injected_headers } }; return .{ .list = .{ .list = self.response_header.?._injected_headers } };
} }
// pub because Page.printWaitAnalysis uses it pub fn fromConnection(conn: *const Net.Connection) !*Transfer {
pub fn fromEasy(easy: *c.CURL) !*Transfer { const private = try conn.getPrivate();
var private: *anyopaque = undefined;
try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_PRIVATE, &private));
return @ptrCast(@alignCast(private)); return @ptrCast(@alignCast(private));
} }
pub fn fulfill(transfer: *Transfer, status: u16, headers: []const Http.Header, body: ?[]const u8) !void { pub fn fulfill(transfer: *Transfer, status: u16, headers: []const Net.Header, body: ?[]const u8) !void {
if (transfer._handle != null) { if (transfer._conn != null) {
// should never happen, should have been intercepted/paused, and then // should never happen, should have been intercepted/paused, and then
// either continued, aborted or fulfilled once. // either continued, aborted or fulfilled once.
@branchHint(.unlikely); @branchHint(.unlikely);
@@ -1600,7 +1402,7 @@ pub const Transfer = struct {
}; };
} }
fn _fulfill(transfer: *Transfer, status: u16, headers: []const Http.Header, body: ?[]const u8) !void { fn _fulfill(transfer: *Transfer, status: u16, headers: []const Net.Header, body: ?[]const u8) !void {
const req = &transfer.req; const req = &transfer.req;
if (req.start_callback) |cb| { if (req.start_callback) |cb| {
try cb(transfer); try cb(transfer);
@@ -1614,7 +1416,7 @@ pub const Transfer = struct {
}; };
for (headers) |hdr| { for (headers) |hdr| {
if (std.ascii.eqlIgnoreCase(hdr.name, "content-type")) { if (std.ascii.eqlIgnoreCase(hdr.name, "content-type")) {
const len = @min(hdr.value.len, ResponseHeader.MAX_CONTENT_TYPE_LEN); const len = @min(hdr.value.len, ResponseHead.MAX_CONTENT_TYPE_LEN);
@memcpy(transfer.response_header.?._content_type[0..len], hdr.value[0..len]); @memcpy(transfer.response_header.?._content_type[0..len], hdr.value[0..len]);
transfer.response_header.?._content_type_len = len; transfer.response_header.?._content_type_len = len;
break; break;
@@ -1642,10 +1444,10 @@ pub const Transfer = struct {
} }
fn getContentLengthRawValue(self: *const Transfer) ?[]const u8 { fn getContentLengthRawValue(self: *const Transfer) ?[]const u8 {
if (self._handle) |handle| { if (self._conn) |conn| {
// If we have a handle, than this is a normal request. We can get the // If we have a connection, than this is a normal request. We can get the
// header value from the easy handle. // header value from the connection.
const cl = getResponseHeader(handle.conn.easy, "content-length", 0) orelse return null; const cl = conn.getResponseHeader("content-length", 0) orelse return null;
return cl.value; return cl.value;
} }
@@ -1663,96 +1465,3 @@ pub const Transfer = struct {
return null; return null;
} }
}; };
// Snapshot of response metadata for a transfer. Populated either from a live
// curl easy handle (see Transfer.buildResponseHeader) or from an injected
// response (Transfer.fulfill).
pub const ResponseHeader = struct {
    const MAX_CONTENT_TYPE_LEN = 64;
    // HTTP status code of the response
    status: u16,
    // null-terminated URL string; presumably curl's effective URL after
    // redirects — confirm at the site that fills this in
    url: [*c]const u8,
    redirect_count: u32,
    // number of valid bytes in _content_type; 0 means no content-type captured
    _content_type_len: usize = 0,
    // inline storage for the (possibly truncated) content-type header value
    _content_type: [MAX_CONTENT_TYPE_LEN]u8 = undefined,
    // this is normally an empty list, but if the response is being injected
    // than it'll be populated. It isn't meant to be used directly, but should
    // be used through the transfer.responseHeaderIterator() which abstracts
    // whether the headers are from a live curl easy handle, or injected.
    _injected_headers: []const Http.Header = &.{},
    // Returns the captured content-type value, or null if none was recorded.
    // The slice aliases this struct's inline buffer.
    pub fn contentType(self: *ResponseHeader) ?[]u8 {
        if (self._content_type_len == 0) {
            return null;
        }
        return self._content_type[0..self._content_type_len];
    }
};
// A response-header iterator that hides where the headers live. Normally they
// come from the linked list curl maintains for a live request; but a response
// can also be injected via `transfer.fulfill`, in which case they are a plain
// []const Http.Header slice. Both sources expose the same next() API here.
const HeaderIterator = union(enum) {
    curl: CurlHeaderIterator,
    list: ListHeaderIterator,

    // Returns the next header, or null when the underlying source is done.
    pub fn next(self: *HeaderIterator) ?Http.Header {
        return switch (self.*) {
            inline else => |*it| it.next(),
        };
    }

    const CurlHeaderIterator = struct {
        easy: *c.CURL,
        // last node handed back by curl; curl uses it as the resume cursor
        prev: ?*c.curl_header = null,

        pub fn next(self: *CurlHeaderIterator) ?Http.Header {
            const node = c.curl_easy_nextheader(self.easy, c.CURLH_HEADER, -1, self.prev) orelse return null;
            self.prev = node;
            const hdr = node.*;
            return .{
                .name = std.mem.span(hdr.name),
                .value = std.mem.span(hdr.value),
            };
        }
    };

    const ListHeaderIterator = struct {
        index: usize = 0,
        list: []const Http.Header,

        pub fn next(self: *ListHeaderIterator) ?Http.Header {
            if (self.index >= self.list.len) {
                return null;
            }
            defer self.index += 1;
            return self.list[self.index];
        }
    };
};
// Result of a single response-header lookup. `value` aliases curl-owned
// memory; `amount` is the total number of headers with the queried name, as
// reported by curl_easy_header (see getResponseHeader below).
const CurlHeaderValue = struct {
    value: []const u8,
    amount: usize,
};
// Looks up the index'th response header named `name` on the easy handle.
// Returns null when the header doesn't exist; logs and returns null on any
// other curl failure.
fn getResponseHeader(easy: *c.CURL, name: [:0]const u8, index: usize) ?CurlHeaderValue {
    var hdr: [*c]c.curl_header = null;
    const rc = c.curl_easy_header(easy, name, index, c.CURLH_HEADER, -1, &hdr);
    if (rc == c.CURLE_FAILED_INIT) {
        // seems to be what curl returns when the header isn't found
        return null;
    }
    if (rc != c.CURLE_OK) {
        log.err(.http, "get response header", .{
            .name = name,
            .err = @import("errors.zig").fromCode(rc),
        });
        return null;
    }
    return .{
        .amount = hdr.*.amount,
        .value = std.mem.span(hdr.*.value),
    };
}

View File

@@ -17,19 +17,19 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>. // along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std"); const std = @import("std");
const Net = @import("../Net.zig");
pub const c = @cImport({ const c = Net.c;
@cInclude("curl/curl.h");
});
pub const ENABLE_DEBUG = false; const ENABLE_DEBUG = Net.ENABLE_DEBUG;
pub const Client = @import("Client.zig"); pub const Client = @import("Client.zig");
pub const Transfer = Client.Transfer; pub const Transfer = Client.Transfer;
const lp = @import("lightpanda"); pub const Method = Net.Method;
pub const Header = Net.Header;
pub const Headers = Net.Headers;
const Config = @import("../Config.zig"); const Config = @import("../Config.zig");
const log = @import("../log.zig");
const errors = @import("errors.zig");
const RobotStore = @import("../browser/Robots.zig").RobotStore; const RobotStore = @import("../browser/Robots.zig").RobotStore;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
@@ -49,8 +49,8 @@ ca_blob: ?c.curl_blob,
robot_store: *RobotStore, robot_store: *RobotStore,
pub fn init(allocator: Allocator, robot_store: *RobotStore, config: *const Config) !Http { pub fn init(allocator: Allocator, robot_store: *RobotStore, config: *const Config) !Http {
try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL)); try Net.globalInit();
errdefer c.curl_global_cleanup(); errdefer Net.globalDeinit();
if (comptime ENABLE_DEBUG) { if (comptime ENABLE_DEBUG) {
std.debug.print("curl version: {s}\n\n", .{c.curl_version()}); std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
@@ -61,7 +61,7 @@ pub fn init(allocator: Allocator, robot_store: *RobotStore, config: *const Confi
var ca_blob: ?c.curl_blob = null; var ca_blob: ?c.curl_blob = null;
if (config.tlsVerifyHost()) { if (config.tlsVerifyHost()) {
ca_blob = try loadCerts(allocator); ca_blob = try Net.loadCerts(allocator);
} }
return .{ return .{
@@ -78,7 +78,7 @@ pub fn deinit(self: *Http) void {
const data: [*]u8 = @ptrCast(ca_blob.data); const data: [*]u8 = @ptrCast(ca_blob.data);
self.allocator.free(data[0..ca_blob.len]); self.allocator.free(data[0..ca_blob.len]);
} }
c.curl_global_cleanup(); Net.globalDeinit();
self.arena.deinit(); self.arena.deinit();
} }
@@ -86,351 +86,6 @@ pub fn createClient(self: *Http, allocator: Allocator) !*Client {
return Client.init(allocator, self.ca_blob, self.robot_store, self.config); return Client.init(allocator, self.ca_blob, self.robot_store, self.config);
} }
pub fn newConnection(self: *Http) !Connection { pub fn newConnection(self: *Http) !Net.Connection {
return Connection.init(self.ca_blob, self.config); return Net.Connection.init(self.ca_blob, self.config);
}
// A wrapper around a single curl easy handle, pre-configured from Config
// (timeouts, redirects, proxy, TLS, compression). One Connection performs
// one blocking request at a time via request().
pub const Connection = struct {
    easy: *c.CURL,
    http_headers: *const Config.HttpHeaders,

    // Creates and configures the easy handle. `ca_blob_` carries the CA
    // bundle; when it is null, TLS verification is disabled (only legal when
    // config.tlsVerifyHost() is false — asserted below).
    pub fn init(
        ca_blob_: ?c.curl_blob,
        config: *const Config,
    ) !Connection {
        const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
        errdefer _ = c.curl_easy_cleanup(easy);

        // timeouts
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_TIMEOUT_MS, @as(c_long, @intCast(config.httpTimeout()))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CONNECTTIMEOUT_MS, @as(c_long, @intCast(config.httpConnectTimeout()))));

        // redirect behavior
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_MAXREDIRS, @as(c_long, @intCast(config.httpMaxRedirects()))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_FOLLOWLOCATION, @as(c_long, 2)));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_REDIR_PROTOCOLS_STR, "HTTP,HTTPS")); // remove FTP and FTPS from the default

        // proxy
        const http_proxy = config.httpProxy();
        if (http_proxy) |proxy| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY, proxy.ptr));
        }

        // tls
        if (ca_blob_) |ca_blob| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));
            if (http_proxy != null) {
                // Note, this can be different for the proxy and for the main
                // request. Might be something worth exposing as command
                // line arguments at some point.
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_CAINFO_BLOB, ca_blob));
            }
        } else {
            lp.assert(config.tlsVerifyHost() == false, "Http.init tls_verify_host", .{});
            // Verify peer checks that the cert is signed by a CA, verify host makes sure the
            // cert contains the server name.
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 0)));
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 0)));
            if (http_proxy != null) {
                // Note, this can be different for the proxy and for the main
                // request. Might be something worth exposing as command
                // line arguments at some point.
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYHOST, @as(c_long, 0)));
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYPEER, @as(c_long, 0)));
            }
        }

        // compression, don't remove this. CloudFront will send gzip content
        // even if we don't support it, and then it won't be decompressed.
        // empty string means: use whatever's available
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_ACCEPT_ENCODING, ""));

        // debug
        if (comptime Http.ENABLE_DEBUG) {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
            // Sometimes the default debug output hides some useful data. You can
            // uncomment the following line (BUT KEEP THE LINE ABOVE AS-IS), to
            // get more control over the data (specifically, the `CURLINFO_TEXT`
            // can include useful data).
            // try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_DEBUGFUNCTION, debugCallback));
        }

        return .{
            .easy = easy,
            .http_headers = &config.http_headers,
        };
    }

    pub fn deinit(self: *const Connection) void {
        c.curl_easy_cleanup(self.easy);
    }

    pub fn setURL(self: *const Connection, url: [:0]const u8) !void {
        try errorCheck(c.curl_easy_setopt(self.easy, c.CURLOPT_URL, url.ptr));
    }

    // a libcurl request has 2 methods. The first is the method that
    // controls how libcurl behaves. This specifically influences how redirects
    // are handled. For example, if you do a POST and get a 301, libcurl will
    // change that to a GET. But if you do a POST and get a 308, libcurl will
    // keep the POST (and re-send the body).
    // The second method is the actual string that's included in the request
    // headers.
    // These two methods can be different - you can tell curl to behave as though
    // you made a GET, but include "POST" in the request header.
    //
    // Here, we're only concerned about the 2nd method. If we want, we'll set
    // the first one based on whether or not we have a body.
    //
    // It's important that, for each use of this connection, we set the 2nd
    // method. Else, if we make a HEAD request and re-use the connection, but
    // DON'T reset this, it'll keep making HEAD requests.
    // (I don't know if it's as important to reset the 1st method, or if libcurl
    // can infer that based on the presence of the body, but we also reset it
    // to be safe);
    pub fn setMethod(self: *const Connection, method: Method) !void {
        const easy = self.easy;
        const m: [:0]const u8 = switch (method) {
            .GET => "GET",
            .POST => "POST",
            .PUT => "PUT",
            .DELETE => "DELETE",
            .HEAD => "HEAD",
            .OPTIONS => "OPTIONS",
            .PATCH => "PATCH",
            .PROPFIND => "PROPFIND",
        };
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, m.ptr));
    }

    // Attaches a request body (curl copies it via COPYPOSTFIELDS) and puts
    // the handle into POST mode.
    pub fn setBody(self: *const Connection, body: []const u8) !void {
        const easy = self.easy;
        // FIX: the long 0/1 flag belongs to CURLOPT_POST. The previous code
        // passed it to CURLOPT_HTTPPOST, which expects a curl_httppost*
        // (the deprecated multipart-form API), not a long.
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POST, @as(c_long, 1)));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_COPYPOSTFIELDS, body.ptr));
    }

    // These are headers that may not be sent to the users for interception.
    pub fn secretHeaders(self: *const Connection, headers: *Headers) !void {
        if (self.http_headers.proxy_bearer_header) |hdr| {
            try headers.add(hdr);
        }
    }

    // Performs a blocking request on this handle. Returns the HTTP status
    // code, or 0 if curl reports a code outside the u16 range.
    pub fn request(self: *const Connection) !u16 {
        const easy = self.easy;
        var header_list = try Headers.init(self.http_headers.user_agent_header);
        defer header_list.deinit();
        try self.secretHeaders(&header_list);
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list.headers));
        // Add cookies.
        if (header_list.cookies) |cookies| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_COOKIE, cookies));
        }
        try errorCheck(c.curl_easy_perform(easy));
        var http_code: c_long = undefined;
        try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_RESPONSE_CODE, &http_code));
        if (http_code < 0 or http_code > std.math.maxInt(u16)) {
            return 0;
        }
        return @intCast(http_code);
    }
};
// A single HTTP header as a name/value pair. The slices are views into
// whatever storage produced them (curl memory or a source buffer) — they
// are not owned by this struct.
pub const Header = struct {
    name: []const u8,
    value: []const u8,
};
// A curl_slist-backed request-header list, seeded with the User-Agent header,
// plus an optional raw Cookie value that is sent via CURLOPT_COOKIE rather
// than the list itself. The iterator exposes both as Header pairs.
pub const Headers = struct {
    headers: ?*c.curl_slist,
    cookies: ?[*c]const u8,

    // Creates the list with the user-agent header as its first entry.
    pub fn init(user_agent: [:0]const u8) !Headers {
        const list = c.curl_slist_append(null, user_agent);
        if (list == null) {
            return error.OutOfMemory;
        }
        return .{ .headers = list, .cookies = null };
    }

    pub fn deinit(self: *const Headers) void {
        const list = self.headers orelse return;
        c.curl_slist_free_all(list);
    }

    // Appends a "Name: value" line; curl copies the string.
    pub fn add(self: *Headers, header: [*c]const u8) !void {
        const updated = c.curl_slist_append(self.headers, header);
        if (updated == null) {
            return error.OutOfMemory;
        }
        self.headers = updated;
    }

    // Splits a "Name: value" line at the first colon, trimming spaces/tabs
    // around both parts. Returns null when there is no colon.
    pub fn parseHeader(header_str: []const u8) ?Header {
        const sep = std.mem.indexOfScalar(u8, header_str, ':') orelse return null;
        return .{
            .name = std.mem.trim(u8, header_str[0..sep], " \t"),
            .value = std.mem.trim(u8, header_str[sep + 1 ..], " \t"),
        };
    }

    pub fn iterator(self: *Headers) Iterator {
        return .{
            .header = self.headers,
            .cookies = self.cookies,
        };
    }

    const Iterator = struct {
        header: [*c]c.curl_slist,
        cookies: ?[*c]const u8,

        // Walks the slist first; once exhausted, yields the cookie value
        // (if any) exactly once as a "Cookie" header.
        pub fn next(self: *Iterator) ?Header {
            const node = self.header orelse {
                const cookies = self.cookies orelse return null;
                self.cookies = null;
                return .{ .name = "Cookie", .value = std.mem.span(@as([*:0]const u8, cookies)) };
            };
            self.header = node.*.next;
            return parseHeader(std.mem.span(@as([*:0]const u8, @ptrCast(node.*.data))));
        }
    };
};
// Turns a libcurl easy-interface return code into a Zig error.
// CURLE_OK is success; anything else maps through errors.fromCode.
pub fn errorCheck(code: c.CURLcode) errors.Error!void {
    if (code != c.CURLE_OK) {
        return errors.fromCode(code);
    }
}
// Turns a libcurl multi-interface return code into a Zig error.
// CURLM_CALL_MULTI_PERFORM is treated as success: it only means curl wants
// perform() called again, which happens naturally on the next loop iteration.
pub fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
    if (code == c.CURLM_OK or code == c.CURLM_CALL_MULTI_PERFORM) {
        return;
    }
    return errors.fromMCode(code);
}
// HTTP request methods. Backed by u8 with explicit tag values so the numeric
// representation is stable; presumably relied on outside this file — confirm
// before renumbering.
pub const Method = enum(u8) {
    GET = 0,
    PUT = 1,
    POST = 2,
    DELETE = 3,
    HEAD = 4,
    OPTIONS = 5,
    PATCH = 6,
    PROPFIND = 7,
};
// TODO: on BSD / Linux, we could just read the PEM file directly.
// This whole rescan + decode is really just needed for MacOS. On Linux
// bundle.rescan does find the .pem file(s) which could be in a few different
// places, so it's still useful, just not efficient.
//
// Loads the system certificate bundle, re-encodes every certificate as PEM
// (base64 DER wrapped at 64 columns with BEGIN/END markers), and returns it
// as a curl_blob. The blob's data is owned by `allocator`; with zero
// certificates, a zero-length blob is returned.
fn loadCerts(allocator: Allocator) !c.curl_blob {
    var bundle: std.crypto.Certificate.Bundle = .{};
    try bundle.rescan(allocator);
    defer bundle.deinit(allocator);
    const bytes = bundle.bytes.items;
    if (bytes.len == 0) {
        log.warn(.app, "No system certificates", .{});
        // empty blob: len 0, so the dangling-ish bytes.ptr is never read
        return .{
            .len = 0,
            .flags = 0,
            .data = bytes.ptr,
        };
    }
    const encoder = std.base64.standard.Encoder;
    var arr: std.ArrayList(u8) = .empty;
    const encoded_size = encoder.calcSize(bytes.len);
    // upper-bound estimate for the PEM output, used to pre-size the buffer
    const buffer_size = encoded_size +
        (bundle.map.count() * 75) + // start / end per certificate + extra, just in case
        (encoded_size / 64) // newline per 64 characters
    ;
    try arr.ensureTotalCapacity(allocator, buffer_size);
    errdefer arr.deinit(allocator);
    var writer = arr.writer(allocator);
    var it = bundle.map.valueIterator();
    while (it.next()) |index| {
        // each map value is the offset of a certificate within `bytes`
        const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);
        try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
        var line_writer = LineWriter{ .inner = writer };
        try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
        try writer.writeAll("\n-----END CERTIFICATE-----\n");
    }
    // Final encoding should not be larger than our initial size estimate
    // NOTE(review): strict `>` — an exactly-equal final size would trip this
    // assert; confirm `>=` wasn't intended.
    lp.assert(buffer_size > arr.items.len, "Http loadCerts", .{ .estimate = buffer_size, .len = arr.items.len });
    // Allocate exactly the size needed and copy the data
    const result = try allocator.dupe(u8, arr.items);
    // Free the original oversized allocation
    arr.deinit(allocator);
    return .{
        .len = result.len,
        .data = result.ptr,
        .flags = 0,
    };
}
// Wraps lines @ 64 columns. A PEM is basically a base64 encoded DER (which is
// what Zig has), with lines wrapped at 64 characters and with a basic header
// and footer
const LineWriter = struct {
    // current column (0..64) on the line being written; persists across calls
    col: usize = 0,
    inner: std.ArrayList(u8).Writer,

    // Writes `data`, inserting '\n' whenever the current line reaches 64
    // columns. Column state carries over, so split writes still wrap right.
    pub fn writeAll(self: *LineWriter, data: []const u8) !void {
        var writer = self.inner;
        var col = self.col;
        // room left on the current (possibly partially-filled) line
        const len = 64 - col;
        var remain = data;
        if (remain.len > len) {
            // finish off the current line first
            col = 0;
            try writer.writeAll(data[0..len]);
            try writer.writeByte('\n');
            remain = data[len..];
        }
        while (remain.len > 64) {
            try writer.writeAll(remain[0..64]);
            try writer.writeByte('\n');
            // FIX: advance past the chunk just written. The previous code did
            // `remain = data[len..]`, which never shrank `remain` and looped
            // forever (growing the buffer) once more than one full line was
            // left after the first wrap.
            remain = remain[64..];
        }
        try writer.writeAll(remain);
        self.col = col + remain.len;
    }
};
// Verbose-output callback for CURLOPT_DEBUGFUNCTION (registration is the
// commented-out setopt in Connection.init). Prints curl's debug stream to
// stderr, tagged by message type.
// NOTE(review): curl documents the debug callback as returning int and taking
// a void* clientp that may be null; this returns void and takes *anyopaque —
// confirm the cast at the registration site before enabling it.
pub fn debugCallback(_: *c.CURL, msg_type: c.curl_infotype, raw: [*c]u8, len: usize, _: *anyopaque) callconv(.c) void {
    const payload = raw[0..len];
    switch (msg_type) {
        c.CURLINFO_TEXT => std.debug.print("libcurl [text]: {s}\n", .{payload}),
        c.CURLINFO_HEADER_OUT => std.debug.print("libcurl [req-h]: {s}\n", .{payload}),
        c.CURLINFO_HEADER_IN => std.debug.print("libcurl [res-h]: {s}\n", .{payload}),
        // c.CURLINFO_DATA_IN => std.debug.print("libcurl [res-b]: {s}\n", .{payload}),
        else => std.debug.print("libcurl ?? {d}\n", .{msg_type}),
    }
}

View File

@@ -1,244 +0,0 @@
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
const std = @import("std");
const c = @import("Http.zig").c;
const IS_DEBUG = @import("builtin").mode == .Debug;
/// Zig error set mirroring libcurl's CURLcode failure values (one error per
/// CURLE_* constant handled by fromCode, plus Unknown for anything unmapped).
pub const Error = error{
    UnsupportedProtocol,
    FailedInit,
    UrlMalformat,
    NotBuiltIn,
    CouldntResolveProxy,
    CouldntResolveHost,
    CouldntConnect,
    WeirdServerReply,
    RemoteAccessDenied,
    FtpAcceptFailed,
    FtpWeirdPassReply,
    FtpAcceptTimeout,
    FtpWeirdPasvReply,
    FtpWeird227Format,
    FtpCantGetHost,
    Http2,
    FtpCouldntSetType,
    PartialFile,
    FtpCouldntRetrFile,
    QuoteError,
    HttpReturnedError,
    WriteError,
    UploadFailed,
    ReadError,
    OutOfMemory,
    OperationTimedout,
    FtpPortFailed,
    FtpCouldntUseRest,
    RangeError,
    SslConnectError,
    BadDownloadResume,
    FileCouldntReadFile,
    LdapCannotBind,
    LdapSearchFailed,
    AbortedByCallback,
    BadFunctionArgument,
    InterfaceFailed,
    TooManyRedirects,
    UnknownOption,
    SetoptOptionSyntax,
    GotNothing,
    SslEngineNotfound,
    SslEngineSetfailed,
    SendError,
    RecvError,
    SslCertproblem,
    SslCipher,
    PeerFailedVerification,
    BadContentEncoding,
    FilesizeExceeded,
    UseSslFailed,
    SendFailRewind,
    SslEngineInitfailed,
    LoginDenied,
    TftpNotfound,
    TftpPerm,
    RemoteDiskFull,
    TftpIllegal,
    TftpUnknownid,
    RemoteFileExists,
    TftpNosuchuser,
    SslCacertBadfile,
    RemoteFileNotFound,
    Ssh,
    SslShutdownFailed,
    Again,
    SslCrlBadfile,
    SslIssuerError,
    FtpPretFailed,
    RtspCseqError,
    RtspSessionError,
    FtpBadFileList,
    ChunkFailed,
    NoConnectionAvailable,
    SslPinnedpubkeynotmatch,
    SslInvalidcertstatus,
    Http2Stream,
    RecursiveApiCall,
    AuthError,
    Http3,
    QuicConnectError,
    Proxy,
    SslClientcert,
    UnrecoverablePoll,
    TooLarge,
    Unknown,
};
/// Maps a libcurl easy-interface CURLcode to the Error set.
/// Must not be called with CURLE_OK (asserted in debug builds); codes not
/// listed below map to Error.Unknown.
pub fn fromCode(code: c.CURLcode) Error {
    if (comptime IS_DEBUG) {
        std.debug.assert(code != c.CURLE_OK);
    }
    return switch (code) {
        c.CURLE_UNSUPPORTED_PROTOCOL => Error.UnsupportedProtocol,
        c.CURLE_FAILED_INIT => Error.FailedInit,
        c.CURLE_URL_MALFORMAT => Error.UrlMalformat,
        c.CURLE_NOT_BUILT_IN => Error.NotBuiltIn,
        c.CURLE_COULDNT_RESOLVE_PROXY => Error.CouldntResolveProxy,
        c.CURLE_COULDNT_RESOLVE_HOST => Error.CouldntResolveHost,
        c.CURLE_COULDNT_CONNECT => Error.CouldntConnect,
        c.CURLE_WEIRD_SERVER_REPLY => Error.WeirdServerReply,
        c.CURLE_REMOTE_ACCESS_DENIED => Error.RemoteAccessDenied,
        c.CURLE_FTP_ACCEPT_FAILED => Error.FtpAcceptFailed,
        c.CURLE_FTP_WEIRD_PASS_REPLY => Error.FtpWeirdPassReply,
        c.CURLE_FTP_ACCEPT_TIMEOUT => Error.FtpAcceptTimeout,
        c.CURLE_FTP_WEIRD_PASV_REPLY => Error.FtpWeirdPasvReply,
        c.CURLE_FTP_WEIRD_227_FORMAT => Error.FtpWeird227Format,
        c.CURLE_FTP_CANT_GET_HOST => Error.FtpCantGetHost,
        c.CURLE_HTTP2 => Error.Http2,
        c.CURLE_FTP_COULDNT_SET_TYPE => Error.FtpCouldntSetType,
        c.CURLE_PARTIAL_FILE => Error.PartialFile,
        c.CURLE_FTP_COULDNT_RETR_FILE => Error.FtpCouldntRetrFile,
        c.CURLE_QUOTE_ERROR => Error.QuoteError,
        c.CURLE_HTTP_RETURNED_ERROR => Error.HttpReturnedError,
        c.CURLE_WRITE_ERROR => Error.WriteError,
        c.CURLE_UPLOAD_FAILED => Error.UploadFailed,
        c.CURLE_READ_ERROR => Error.ReadError,
        c.CURLE_OUT_OF_MEMORY => Error.OutOfMemory,
        c.CURLE_OPERATION_TIMEDOUT => Error.OperationTimedout,
        c.CURLE_FTP_PORT_FAILED => Error.FtpPortFailed,
        c.CURLE_FTP_COULDNT_USE_REST => Error.FtpCouldntUseRest,
        c.CURLE_RANGE_ERROR => Error.RangeError,
        c.CURLE_SSL_CONNECT_ERROR => Error.SslConnectError,
        c.CURLE_BAD_DOWNLOAD_RESUME => Error.BadDownloadResume,
        c.CURLE_FILE_COULDNT_READ_FILE => Error.FileCouldntReadFile,
        c.CURLE_LDAP_CANNOT_BIND => Error.LdapCannotBind,
        c.CURLE_LDAP_SEARCH_FAILED => Error.LdapSearchFailed,
        c.CURLE_ABORTED_BY_CALLBACK => Error.AbortedByCallback,
        c.CURLE_BAD_FUNCTION_ARGUMENT => Error.BadFunctionArgument,
        c.CURLE_INTERFACE_FAILED => Error.InterfaceFailed,
        c.CURLE_TOO_MANY_REDIRECTS => Error.TooManyRedirects,
        c.CURLE_UNKNOWN_OPTION => Error.UnknownOption,
        c.CURLE_SETOPT_OPTION_SYNTAX => Error.SetoptOptionSyntax,
        c.CURLE_GOT_NOTHING => Error.GotNothing,
        c.CURLE_SSL_ENGINE_NOTFOUND => Error.SslEngineNotfound,
        c.CURLE_SSL_ENGINE_SETFAILED => Error.SslEngineSetfailed,
        c.CURLE_SEND_ERROR => Error.SendError,
        c.CURLE_RECV_ERROR => Error.RecvError,
        c.CURLE_SSL_CERTPROBLEM => Error.SslCertproblem,
        c.CURLE_SSL_CIPHER => Error.SslCipher,
        c.CURLE_PEER_FAILED_VERIFICATION => Error.PeerFailedVerification,
        c.CURLE_BAD_CONTENT_ENCODING => Error.BadContentEncoding,
        c.CURLE_FILESIZE_EXCEEDED => Error.FilesizeExceeded,
        c.CURLE_USE_SSL_FAILED => Error.UseSslFailed,
        c.CURLE_SEND_FAIL_REWIND => Error.SendFailRewind,
        c.CURLE_SSL_ENGINE_INITFAILED => Error.SslEngineInitfailed,
        c.CURLE_LOGIN_DENIED => Error.LoginDenied,
        c.CURLE_TFTP_NOTFOUND => Error.TftpNotfound,
        c.CURLE_TFTP_PERM => Error.TftpPerm,
        c.CURLE_REMOTE_DISK_FULL => Error.RemoteDiskFull,
        c.CURLE_TFTP_ILLEGAL => Error.TftpIllegal,
        c.CURLE_TFTP_UNKNOWNID => Error.TftpUnknownid,
        c.CURLE_REMOTE_FILE_EXISTS => Error.RemoteFileExists,
        c.CURLE_TFTP_NOSUCHUSER => Error.TftpNosuchuser,
        c.CURLE_SSL_CACERT_BADFILE => Error.SslCacertBadfile,
        c.CURLE_REMOTE_FILE_NOT_FOUND => Error.RemoteFileNotFound,
        c.CURLE_SSH => Error.Ssh,
        c.CURLE_SSL_SHUTDOWN_FAILED => Error.SslShutdownFailed,
        c.CURLE_AGAIN => Error.Again,
        c.CURLE_SSL_CRL_BADFILE => Error.SslCrlBadfile,
        c.CURLE_SSL_ISSUER_ERROR => Error.SslIssuerError,
        c.CURLE_FTP_PRET_FAILED => Error.FtpPretFailed,
        c.CURLE_RTSP_CSEQ_ERROR => Error.RtspCseqError,
        c.CURLE_RTSP_SESSION_ERROR => Error.RtspSessionError,
        c.CURLE_FTP_BAD_FILE_LIST => Error.FtpBadFileList,
        c.CURLE_CHUNK_FAILED => Error.ChunkFailed,
        c.CURLE_NO_CONNECTION_AVAILABLE => Error.NoConnectionAvailable,
        c.CURLE_SSL_PINNEDPUBKEYNOTMATCH => Error.SslPinnedpubkeynotmatch,
        c.CURLE_SSL_INVALIDCERTSTATUS => Error.SslInvalidcertstatus,
        c.CURLE_HTTP2_STREAM => Error.Http2Stream,
        c.CURLE_RECURSIVE_API_CALL => Error.RecursiveApiCall,
        c.CURLE_AUTH_ERROR => Error.AuthError,
        c.CURLE_HTTP3 => Error.Http3,
        c.CURLE_QUIC_CONNECT_ERROR => Error.QuicConnectError,
        c.CURLE_PROXY => Error.Proxy,
        c.CURLE_SSL_CLIENTCERT => Error.SslClientcert,
        c.CURLE_UNRECOVERABLE_POLL => Error.UnrecoverablePoll,
        c.CURLE_TOO_LARGE => Error.TooLarge,
        else => Error.Unknown,
    };
}
/// Zig error set mirroring libcurl's multi-interface CURLMcode failure
/// values (see fromMCode); Unknown covers any unmapped code.
pub const Multi = error{
    BadHandle,
    BadEasyHandle,
    OutOfMemory,
    InternalError,
    BadSocket,
    UnknownOption,
    AddedAlready,
    RecursiveApiCall,
    WakeupFailure,
    BadFunctionArgument,
    AbortedByCallback,
    UnrecoverablePoll,
    Unknown,
};
/// Maps a libcurl multi-interface CURLMcode to the Multi error set.
/// Must not be called with CURLM_OK (asserted in debug builds); unmapped
/// codes become Multi.Unknown.
pub fn fromMCode(code: c.CURLMcode) Multi {
    if (comptime IS_DEBUG) {
        std.debug.assert(code != c.CURLM_OK);
    }
    return switch (code) {
        c.CURLM_BAD_HANDLE => Multi.BadHandle,
        c.CURLM_BAD_EASY_HANDLE => Multi.BadEasyHandle,
        c.CURLM_OUT_OF_MEMORY => Multi.OutOfMemory,
        c.CURLM_INTERNAL_ERROR => Multi.InternalError,
        c.CURLM_BAD_SOCKET => Multi.BadSocket,
        c.CURLM_UNKNOWN_OPTION => Multi.UnknownOption,
        c.CURLM_ADDED_ALREADY => Multi.AddedAlready,
        c.CURLM_RECURSIVE_API_CALL => Multi.RecursiveApiCall,
        c.CURLM_WAKEUP_FAILURE => Multi.WakeupFailure,
        c.CURLM_BAD_FUNCTION_ARGUMENT => Multi.BadFunctionArgument,
        c.CURLM_ABORTED_BY_CALLBACK => Multi.AbortedByCallback,
        c.CURLM_UNRECOVERABLE_POLL => Multi.UnrecoverablePoll,
        else => Multi.Unknown,
    };
}

View File

@@ -7,7 +7,7 @@ const Allocator = std.mem.Allocator;
const log = @import("../log.zig"); const log = @import("../log.zig");
const App = @import("../App.zig"); const App = @import("../App.zig");
const Http = @import("../http/Http.zig"); const Net = @import("../Net.zig");
const Config = @import("../Config.zig"); const Config = @import("../Config.zig");
const telemetry = @import("telemetry.zig"); const telemetry = @import("telemetry.zig");
@@ -20,7 +20,8 @@ pub const LightPanda = struct {
allocator: Allocator, allocator: Allocator,
mutex: std.Thread.Mutex, mutex: std.Thread.Mutex,
cond: Thread.Condition, cond: Thread.Condition,
connection: Http.Connection, connection: Net.Connection,
config: *const Config,
pending: std.DoublyLinkedList, pending: std.DoublyLinkedList,
mem_pool: std.heap.MemoryPool(LightPandaEvent), mem_pool: std.heap.MemoryPool(LightPandaEvent),
@@ -40,6 +41,7 @@ pub const LightPanda = struct {
.running = true, .running = true,
.allocator = allocator, .allocator = allocator,
.connection = connection, .connection = connection,
.config = app.config,
.mem_pool = std.heap.MemoryPool(LightPandaEvent).init(allocator), .mem_pool = std.heap.MemoryPool(LightPandaEvent).init(allocator),
}; };
} }
@@ -109,7 +111,7 @@ pub const LightPanda = struct {
} }
try self.connection.setBody(aw.written()); try self.connection.setBody(aw.written());
const status = try self.connection.request(); const status = try self.connection.request(&self.config.http_headers);
if (status != 200) { if (status != 200) {
log.warn(.telemetry, "server error", .{ .status = status }); log.warn(.telemetry, "server error", .{ .status = status });