mirror of
https://github.com/lightpanda-io/browser.git
synced 2026-03-23 05:04:42 +00:00
Merge pull request #922 from lightpanda-io/nonblocking_libcurl
Some checks failed
e2e-test / zig build release (push) Has been cancelled
e2e-test / demo-scripts (push) Has been cancelled
e2e-test / cdp-and-hyperfine-bench (push) Has been cancelled
e2e-test / perf-fmt (push) Has been cancelled
zig-test / zig build dev (push) Has been cancelled
zig-test / browser fetch (push) Has been cancelled
zig-test / zig test (push) Has been cancelled
zig-test / perf-fmt (push) Has been cancelled
Some checks failed
e2e-test / zig build release (push) Has been cancelled
e2e-test / demo-scripts (push) Has been cancelled
e2e-test / cdp-and-hyperfine-bench (push) Has been cancelled
e2e-test / perf-fmt (push) Has been cancelled
zig-test / zig build dev (push) Has been cancelled
zig-test / browser fetch (push) Has been cancelled
zig-test / zig test (push) Has been cancelled
zig-test / perf-fmt (push) Has been cancelled
Nonblocking libcurl
This commit is contained in:
12
.gitmodules
vendored
12
.gitmodules
vendored
@@ -19,3 +19,15 @@
|
|||||||
[submodule "vendor/mimalloc"]
|
[submodule "vendor/mimalloc"]
|
||||||
path = vendor/mimalloc
|
path = vendor/mimalloc
|
||||||
url = https://github.com/microsoft/mimalloc.git/
|
url = https://github.com/microsoft/mimalloc.git/
|
||||||
|
[submodule "vendor/nghttp2"]
|
||||||
|
path = vendor/nghttp2
|
||||||
|
url = https://github.com/nghttp2/nghttp2.git
|
||||||
|
[submodule "vendor/mbedtls"]
|
||||||
|
path = vendor/mbedtls
|
||||||
|
url = https://github.com/Mbed-TLS/mbedtls.git
|
||||||
|
[submodule "vendor/zlib"]
|
||||||
|
path = vendor/zlib
|
||||||
|
url = https://github.com/madler/zlib.git
|
||||||
|
[submodule "vendor/curl"]
|
||||||
|
path = vendor/curl
|
||||||
|
url = https://github.com/curl/curl.git
|
||||||
|
|||||||
597
build.zig
597
build.zig
@@ -19,11 +19,13 @@
|
|||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
const builtin = @import("builtin");
|
const builtin = @import("builtin");
|
||||||
|
|
||||||
|
const Build = std.Build;
|
||||||
|
|
||||||
/// Do not rename this constant. It is scanned by some scripts to determine
|
/// Do not rename this constant. It is scanned by some scripts to determine
|
||||||
/// which zig version to install.
|
/// which zig version to install.
|
||||||
const recommended_zig_version = "0.14.1";
|
const recommended_zig_version = "0.14.1";
|
||||||
|
|
||||||
pub fn build(b: *std.Build) !void {
|
pub fn build(b: *Build) !void {
|
||||||
switch (comptime builtin.zig_version.order(std.SemanticVersion.parse(recommended_zig_version) catch unreachable)) {
|
switch (comptime builtin.zig_version.order(std.SemanticVersion.parse(recommended_zig_version) catch unreachable)) {
|
||||||
.eq => {},
|
.eq => {},
|
||||||
.lt => {
|
.lt => {
|
||||||
@@ -138,29 +140,29 @@ pub fn build(b: *std.Build) !void {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn common(b: *std.Build, opts: *std.Build.Step.Options, step: *std.Build.Step.Compile) !void {
|
fn common(b: *Build, opts: *Build.Step.Options, step: *Build.Step.Compile) !void {
|
||||||
const mod = step.root_module;
|
const mod = step.root_module;
|
||||||
const target = mod.resolved_target.?;
|
const target = mod.resolved_target.?;
|
||||||
const optimize = mod.optimize.?;
|
const optimize = mod.optimize.?;
|
||||||
const dep_opts = .{ .target = target, .optimize = optimize };
|
const dep_opts = .{ .target = target, .optimize = optimize };
|
||||||
|
|
||||||
try moduleNetSurf(b, step, target);
|
try moduleNetSurf(b, step, target);
|
||||||
mod.addImport("tls", b.dependency("tls", dep_opts).module("tls"));
|
mod.addImport("build_config", opts.createModule());
|
||||||
mod.addImport("tigerbeetle-io", b.dependency("tigerbeetle_io", .{}).module("tigerbeetle_io"));
|
mod.addImport("tigerbeetle-io", b.dependency("tigerbeetle_io", .{}).module("tigerbeetle_io"));
|
||||||
|
|
||||||
|
mod.addIncludePath(b.path("vendor/lightpanda"));
|
||||||
|
|
||||||
{
|
{
|
||||||
// v8
|
// v8
|
||||||
|
mod.link_libcpp = true;
|
||||||
|
|
||||||
const v8_opts = b.addOptions();
|
const v8_opts = b.addOptions();
|
||||||
v8_opts.addOption(bool, "inspector_subtype", false);
|
v8_opts.addOption(bool, "inspector_subtype", false);
|
||||||
|
|
||||||
const v8_mod = b.dependency("v8", dep_opts).module("v8");
|
const v8_mod = b.dependency("v8", dep_opts).module("v8");
|
||||||
v8_mod.addOptions("default_exports", v8_opts);
|
v8_mod.addOptions("default_exports", v8_opts);
|
||||||
mod.addImport("v8", v8_mod);
|
mod.addImport("v8", v8_mod);
|
||||||
}
|
|
||||||
|
|
||||||
mod.link_libcpp = true;
|
|
||||||
|
|
||||||
{
|
|
||||||
const release_dir = if (mod.optimize.? == .Debug) "debug" else "release";
|
const release_dir = if (mod.optimize.? == .Debug) "debug" else "release";
|
||||||
const os = switch (target.result.os.tag) {
|
const os = switch (target.result.os.tag) {
|
||||||
.linux => "linux",
|
.linux => "linux",
|
||||||
@@ -181,21 +183,210 @@ fn common(b: *std.Build, opts: *std.Build.Step.Options, step: *std.Build.Step.Co
|
|||||||
);
|
);
|
||||||
};
|
};
|
||||||
mod.addObjectFile(mod.owner.path(lib_path));
|
mod.addObjectFile(mod.owner.path(lib_path));
|
||||||
|
|
||||||
|
switch (target.result.os.tag) {
|
||||||
|
.macos => {
|
||||||
|
// v8 has a dependency, abseil-cpp, which, on Mac, uses CoreFoundation
|
||||||
|
mod.addSystemFrameworkPath(.{ .cwd_relative = "/System/Library/Frameworks" });
|
||||||
|
mod.linkFramework("CoreFoundation", .{});
|
||||||
|
},
|
||||||
|
else => {},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (target.result.os.tag) {
|
{
|
||||||
.macos => {
|
//curl
|
||||||
// v8 has a dependency, abseil-cpp, which, on Mac, uses CoreFoundation
|
{
|
||||||
mod.addSystemFrameworkPath(.{ .cwd_relative = "/System/Library/Frameworks" });
|
const is_linux = target.result.os.tag == .linux;
|
||||||
mod.linkFramework("CoreFoundation", .{});
|
if (is_linux) {
|
||||||
},
|
mod.addCMacro("HAVE_LINUX_TCP_H", "1");
|
||||||
else => {},
|
mod.addCMacro("HAVE_MSG_NOSIGNAL", "1");
|
||||||
}
|
mod.addCMacro("HAVE_GETHOSTBYNAME_R", "1");
|
||||||
|
}
|
||||||
|
mod.addCMacro("_FILE_OFFSET_BITS", "64");
|
||||||
|
mod.addCMacro("BUILDING_LIBCURL", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_AWS", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_DICT", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_DOH", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_FILE", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_FTP", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_GOPHER", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_KERBEROS", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_IMAP", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_IPFS", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_LDAP", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_LDAPS", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_MQTT", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_NTLM", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_PROGRESS_METER", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_POP3", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_RTSP", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_SMB", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_SMTP", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_TELNET", "1");
|
||||||
|
mod.addCMacro("CURL_DISABLE_TFTP", "1");
|
||||||
|
mod.addCMacro("CURL_EXTERN_SYMBOL", "__attribute__ ((__visibility__ (\"default\"))");
|
||||||
|
mod.addCMacro("CURL_OS", if (is_linux) "\"Linux\"" else "\"mac\"");
|
||||||
|
mod.addCMacro("CURL_STATICLIB", "1");
|
||||||
|
mod.addCMacro("ENABLE_IPV6", "1");
|
||||||
|
mod.addCMacro("HAVE_ALARM", "1");
|
||||||
|
mod.addCMacro("HAVE_ALLOCA_H", "1");
|
||||||
|
mod.addCMacro("HAVE_ARPA_INET_H", "1");
|
||||||
|
mod.addCMacro("HAVE_ARPA_TFTP_H", "1");
|
||||||
|
mod.addCMacro("HAVE_ASSERT_H", "1");
|
||||||
|
mod.addCMacro("HAVE_BASENAME", "1");
|
||||||
|
mod.addCMacro("HAVE_BOOL_T", "1");
|
||||||
|
mod.addCMacro("HAVE_BUILTIN_AVAILABLE", "1");
|
||||||
|
mod.addCMacro("HAVE_CLOCK_GETTIME_MONOTONIC", "1");
|
||||||
|
mod.addCMacro("HAVE_DLFCN_H", "1");
|
||||||
|
mod.addCMacro("HAVE_ERRNO_H", "1");
|
||||||
|
mod.addCMacro("HAVE_FCNTL", "1");
|
||||||
|
mod.addCMacro("HAVE_FCNTL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_FCNTL_O_NONBLOCK", "1");
|
||||||
|
mod.addCMacro("HAVE_FREEADDRINFO", "1");
|
||||||
|
mod.addCMacro("HAVE_FSETXATTR", "1");
|
||||||
|
mod.addCMacro("HAVE_FSETXATTR_5", "1");
|
||||||
|
mod.addCMacro("HAVE_FTRUNCATE", "1");
|
||||||
|
mod.addCMacro("HAVE_GETADDRINFO", "1");
|
||||||
|
mod.addCMacro("HAVE_GETEUID", "1");
|
||||||
|
mod.addCMacro("HAVE_GETHOSTBYNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_GETHOSTBYNAME_R_6", "1");
|
||||||
|
mod.addCMacro("HAVE_GETHOSTNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPEERNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPPID", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPPID", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPROTOBYNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPWUID", "1");
|
||||||
|
mod.addCMacro("HAVE_GETPWUID_R", "1");
|
||||||
|
mod.addCMacro("HAVE_GETRLIMIT", "1");
|
||||||
|
mod.addCMacro("HAVE_GETSOCKNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_GETTIMEOFDAY", "1");
|
||||||
|
mod.addCMacro("HAVE_GMTIME_R", "1");
|
||||||
|
mod.addCMacro("HAVE_IDN2_H", "1");
|
||||||
|
mod.addCMacro("HAVE_IF_NAMETOINDEX", "1");
|
||||||
|
mod.addCMacro("HAVE_IFADDRS_H", "1");
|
||||||
|
mod.addCMacro("HAVE_INET_ADDR", "1");
|
||||||
|
mod.addCMacro("HAVE_INET_PTON", "1");
|
||||||
|
mod.addCMacro("HAVE_INTTYPES_H", "1");
|
||||||
|
mod.addCMacro("HAVE_IOCTL", "1");
|
||||||
|
mod.addCMacro("HAVE_IOCTL_FIONBIO", "1");
|
||||||
|
mod.addCMacro("HAVE_IOCTL_SIOCGIFADDR", "1");
|
||||||
|
mod.addCMacro("HAVE_LDAP_URL_PARSE", "1");
|
||||||
|
mod.addCMacro("HAVE_LIBGEN_H", "1");
|
||||||
|
mod.addCMacro("HAVE_LIBZ", "1");
|
||||||
|
mod.addCMacro("HAVE_LL", "1");
|
||||||
|
mod.addCMacro("HAVE_LOCALE_H", "1");
|
||||||
|
mod.addCMacro("HAVE_LOCALTIME_R", "1");
|
||||||
|
mod.addCMacro("HAVE_LONGLONG", "1");
|
||||||
|
mod.addCMacro("HAVE_MALLOC_H", "1");
|
||||||
|
mod.addCMacro("HAVE_MEMORY_H", "1");
|
||||||
|
mod.addCMacro("HAVE_NET_IF_H", "1");
|
||||||
|
mod.addCMacro("HAVE_NETDB_H", "1");
|
||||||
|
mod.addCMacro("HAVE_NETINET_IN_H", "1");
|
||||||
|
mod.addCMacro("HAVE_NETINET_TCP_H", "1");
|
||||||
|
mod.addCMacro("HAVE_PIPE", "1");
|
||||||
|
mod.addCMacro("HAVE_POLL", "1");
|
||||||
|
mod.addCMacro("HAVE_POLL_FINE", "1");
|
||||||
|
mod.addCMacro("HAVE_POLL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_POSIX_STRERROR_R", "1");
|
||||||
|
mod.addCMacro("HAVE_PTHREAD_H", "1");
|
||||||
|
mod.addCMacro("HAVE_PWD_H", "1");
|
||||||
|
mod.addCMacro("HAVE_RECV", "1");
|
||||||
|
mod.addCMacro("HAVE_SA_FAMILY_T", "1");
|
||||||
|
mod.addCMacro("HAVE_SELECT", "1");
|
||||||
|
mod.addCMacro("HAVE_SEND", "1");
|
||||||
|
mod.addCMacro("HAVE_SETJMP_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SETLOCALE", "1");
|
||||||
|
mod.addCMacro("HAVE_SETRLIMIT", "1");
|
||||||
|
mod.addCMacro("HAVE_SETSOCKOPT", "1");
|
||||||
|
mod.addCMacro("HAVE_SIGACTION", "1");
|
||||||
|
mod.addCMacro("HAVE_SIGINTERRUPT", "1");
|
||||||
|
mod.addCMacro("HAVE_SIGNAL", "1");
|
||||||
|
mod.addCMacro("HAVE_SIGNAL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SIGSETJMP", "1");
|
||||||
|
mod.addCMacro("HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID", "1");
|
||||||
|
mod.addCMacro("HAVE_SOCKET", "1");
|
||||||
|
mod.addCMacro("HAVE_STDBOOL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STDINT_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STDIO_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STDLIB_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STRCASECMP", "1");
|
||||||
|
mod.addCMacro("HAVE_STRDUP", "1");
|
||||||
|
mod.addCMacro("HAVE_STRERROR_R", "1");
|
||||||
|
mod.addCMacro("HAVE_STRING_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STRINGS_H", "1");
|
||||||
|
mod.addCMacro("HAVE_STRSTR", "1");
|
||||||
|
mod.addCMacro("HAVE_STRTOK_R", "1");
|
||||||
|
mod.addCMacro("HAVE_STRTOLL", "1");
|
||||||
|
mod.addCMacro("HAVE_STRUCT_SOCKADDR_STORAGE", "1");
|
||||||
|
mod.addCMacro("HAVE_STRUCT_TIMEVAL", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_IOCTL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_PARAM_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_POLL_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_RESOURCE_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_SELECT_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_SOCKET_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_STAT_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_TIME_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_TYPES_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_UIO_H", "1");
|
||||||
|
mod.addCMacro("HAVE_SYS_UN_H", "1");
|
||||||
|
mod.addCMacro("HAVE_TERMIO_H", "1");
|
||||||
|
mod.addCMacro("HAVE_TERMIOS_H", "1");
|
||||||
|
mod.addCMacro("HAVE_TIME_H", "1");
|
||||||
|
mod.addCMacro("HAVE_UNAME", "1");
|
||||||
|
mod.addCMacro("HAVE_UNISTD_H", "1");
|
||||||
|
mod.addCMacro("HAVE_UTIME", "1");
|
||||||
|
mod.addCMacro("HAVE_UTIME_H", "1");
|
||||||
|
mod.addCMacro("HAVE_UTIMES", "1");
|
||||||
|
mod.addCMacro("HAVE_VARIADIC_MACROS_C99", "1");
|
||||||
|
mod.addCMacro("HAVE_VARIADIC_MACROS_GCC", "1");
|
||||||
|
mod.addCMacro("HAVE_ZLIB_H", "1");
|
||||||
|
mod.addCMacro("RANDOM_FILE", "\"/dev/urandom\"");
|
||||||
|
mod.addCMacro("RECV_TYPE_ARG1", "int");
|
||||||
|
mod.addCMacro("RECV_TYPE_ARG2", "void *");
|
||||||
|
mod.addCMacro("RECV_TYPE_ARG3", "size_t");
|
||||||
|
mod.addCMacro("RECV_TYPE_ARG4", "int");
|
||||||
|
mod.addCMacro("RECV_TYPE_RETV", "ssize_t");
|
||||||
|
mod.addCMacro("SEND_QUAL_ARG2", "const");
|
||||||
|
mod.addCMacro("SEND_TYPE_ARG1", "int");
|
||||||
|
mod.addCMacro("SEND_TYPE_ARG2", "void *");
|
||||||
|
mod.addCMacro("SEND_TYPE_ARG3", "size_t");
|
||||||
|
mod.addCMacro("SEND_TYPE_ARG4", "int");
|
||||||
|
mod.addCMacro("SEND_TYPE_RETV", "ssize_t");
|
||||||
|
mod.addCMacro("SIZEOF_CURL_OFF_T", "8");
|
||||||
|
mod.addCMacro("SIZEOF_INT", "4");
|
||||||
|
mod.addCMacro("SIZEOF_LONG", "8");
|
||||||
|
mod.addCMacro("SIZEOF_OFF_T", "8");
|
||||||
|
mod.addCMacro("SIZEOF_SHORT", "2");
|
||||||
|
mod.addCMacro("SIZEOF_SIZE_T", "8");
|
||||||
|
mod.addCMacro("SIZEOF_TIME_T", "8");
|
||||||
|
mod.addCMacro("STDC_HEADERS", "1");
|
||||||
|
mod.addCMacro("TIME_WITH_SYS_TIME", "1");
|
||||||
|
mod.addCMacro("USE_NGHTTP2", "1");
|
||||||
|
mod.addCMacro("USE_MBEDTLS", "1");
|
||||||
|
mod.addCMacro("USE_THREADS_POSIX", "1");
|
||||||
|
mod.addCMacro("USE_UNIX_SOCKETS", "1");
|
||||||
|
}
|
||||||
|
|
||||||
mod.addImport("build_config", opts.createModule());
|
try buildZlib(b, mod);
|
||||||
|
try buildMbedtls(b, mod);
|
||||||
|
try buildNghttp2(b, mod);
|
||||||
|
try buildCurl(b, mod);
|
||||||
|
|
||||||
|
switch (target.result.os.tag) {
|
||||||
|
.macos => {
|
||||||
|
// needed for proxying on mac
|
||||||
|
mod.addSystemFrameworkPath(.{ .cwd_relative = "/System/Library/Frameworks" });
|
||||||
|
mod.linkFramework("CoreFoundation", .{});
|
||||||
|
mod.linkFramework("SystemConfiguration", .{});
|
||||||
|
},
|
||||||
|
else => {},
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn moduleNetSurf(b: *std.Build, step: *std.Build.Step.Compile, target: std.Build.ResolvedTarget) !void {
|
fn moduleNetSurf(b: *Build, step: *Build.Step.Compile, target: std.Build.ResolvedTarget) !void {
|
||||||
const os = target.result.os.tag;
|
const os = target.result.os.tag;
|
||||||
const arch = target.result.cpu.arch;
|
const arch = target.result.cpu.arch;
|
||||||
|
|
||||||
@@ -250,3 +441,375 @@ fn moduleNetSurf(b: *std.Build, step: *std.Build.Step.Compile, target: std.Build
|
|||||||
step.addIncludePath(b.path(ns ++ "/" ++ lib ++ "/src"));
|
step.addIncludePath(b.path(ns ++ "/" ++ lib ++ "/src"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn buildZlib(b: *Build, m: *Build.Module) !void {
|
||||||
|
const zlib = b.addLibrary(.{
|
||||||
|
.name = "zlib",
|
||||||
|
.root_module = m,
|
||||||
|
});
|
||||||
|
|
||||||
|
const root = "vendor/zlib/";
|
||||||
|
zlib.installHeader(b.path(root ++ "zlib.h"), "zlib.h");
|
||||||
|
zlib.installHeader(b.path(root ++ "zconf.h"), "zconf.h");
|
||||||
|
zlib.addCSourceFiles(.{ .flags = &.{
|
||||||
|
"-DHAVE_SYS_TYPES_H",
|
||||||
|
"-DHAVE_STDINT_H",
|
||||||
|
"-DHAVE_STDDEF_H",
|
||||||
|
}, .files = &.{
|
||||||
|
root ++ "adler32.c",
|
||||||
|
root ++ "compress.c",
|
||||||
|
root ++ "crc32.c",
|
||||||
|
root ++ "deflate.c",
|
||||||
|
root ++ "gzclose.c",
|
||||||
|
root ++ "gzlib.c",
|
||||||
|
root ++ "gzread.c",
|
||||||
|
root ++ "gzwrite.c",
|
||||||
|
root ++ "inflate.c",
|
||||||
|
root ++ "infback.c",
|
||||||
|
root ++ "inftrees.c",
|
||||||
|
root ++ "inffast.c",
|
||||||
|
root ++ "trees.c",
|
||||||
|
root ++ "uncompr.c",
|
||||||
|
root ++ "zutil.c",
|
||||||
|
} });
|
||||||
|
}
|
||||||
|
|
||||||
|
fn buildMbedtls(b: *Build, m: *Build.Module) !void {
|
||||||
|
const mbedtls = b.addLibrary(.{
|
||||||
|
.name = "mbedtls",
|
||||||
|
.root_module = m,
|
||||||
|
});
|
||||||
|
|
||||||
|
const root = "vendor/mbedtls/";
|
||||||
|
mbedtls.addIncludePath(b.path(root ++ "include"));
|
||||||
|
mbedtls.addIncludePath(b.path(root ++ "library"));
|
||||||
|
|
||||||
|
mbedtls.addCSourceFiles(.{ .flags = &.{}, .files = &.{
|
||||||
|
root ++ "library/aes.c",
|
||||||
|
root ++ "library/aesni.c",
|
||||||
|
root ++ "library/aesce.c",
|
||||||
|
root ++ "library/aria.c",
|
||||||
|
root ++ "library/asn1parse.c",
|
||||||
|
root ++ "library/asn1write.c",
|
||||||
|
root ++ "library/base64.c",
|
||||||
|
root ++ "library/bignum.c",
|
||||||
|
root ++ "library/bignum_core.c",
|
||||||
|
root ++ "library/bignum_mod.c",
|
||||||
|
root ++ "library/bignum_mod_raw.c",
|
||||||
|
root ++ "library/camellia.c",
|
||||||
|
root ++ "library/ccm.c",
|
||||||
|
root ++ "library/chacha20.c",
|
||||||
|
root ++ "library/chachapoly.c",
|
||||||
|
root ++ "library/cipher.c",
|
||||||
|
root ++ "library/cipher_wrap.c",
|
||||||
|
root ++ "library/constant_time.c",
|
||||||
|
root ++ "library/cmac.c",
|
||||||
|
root ++ "library/ctr_drbg.c",
|
||||||
|
root ++ "library/des.c",
|
||||||
|
root ++ "library/dhm.c",
|
||||||
|
root ++ "library/ecdh.c",
|
||||||
|
root ++ "library/ecdsa.c",
|
||||||
|
root ++ "library/ecjpake.c",
|
||||||
|
root ++ "library/ecp.c",
|
||||||
|
root ++ "library/ecp_curves.c",
|
||||||
|
root ++ "library/entropy.c",
|
||||||
|
root ++ "library/entropy_poll.c",
|
||||||
|
root ++ "library/error.c",
|
||||||
|
root ++ "library/gcm.c",
|
||||||
|
root ++ "library/hkdf.c",
|
||||||
|
root ++ "library/hmac_drbg.c",
|
||||||
|
root ++ "library/lmots.c",
|
||||||
|
root ++ "library/lms.c",
|
||||||
|
root ++ "library/md.c",
|
||||||
|
root ++ "library/md5.c",
|
||||||
|
root ++ "library/memory_buffer_alloc.c",
|
||||||
|
root ++ "library/nist_kw.c",
|
||||||
|
root ++ "library/oid.c",
|
||||||
|
root ++ "library/padlock.c",
|
||||||
|
root ++ "library/pem.c",
|
||||||
|
root ++ "library/pk.c",
|
||||||
|
root ++ "library/pk_ecc.c",
|
||||||
|
root ++ "library/pk_wrap.c",
|
||||||
|
root ++ "library/pkcs12.c",
|
||||||
|
root ++ "library/pkcs5.c",
|
||||||
|
root ++ "library/pkparse.c",
|
||||||
|
root ++ "library/pkwrite.c",
|
||||||
|
root ++ "library/platform.c",
|
||||||
|
root ++ "library/platform_util.c",
|
||||||
|
root ++ "library/poly1305.c",
|
||||||
|
root ++ "library/psa_crypto.c",
|
||||||
|
root ++ "library/psa_crypto_aead.c",
|
||||||
|
root ++ "library/psa_crypto_cipher.c",
|
||||||
|
root ++ "library/psa_crypto_client.c",
|
||||||
|
root ++ "library/psa_crypto_ffdh.c",
|
||||||
|
root ++ "library/psa_crypto_driver_wrappers_no_static.c",
|
||||||
|
root ++ "library/psa_crypto_ecp.c",
|
||||||
|
root ++ "library/psa_crypto_hash.c",
|
||||||
|
root ++ "library/psa_crypto_mac.c",
|
||||||
|
root ++ "library/psa_crypto_pake.c",
|
||||||
|
root ++ "library/psa_crypto_rsa.c",
|
||||||
|
root ++ "library/psa_crypto_se.c",
|
||||||
|
root ++ "library/psa_crypto_slot_management.c",
|
||||||
|
root ++ "library/psa_crypto_storage.c",
|
||||||
|
root ++ "library/psa_its_file.c",
|
||||||
|
root ++ "library/psa_util.c",
|
||||||
|
root ++ "library/ripemd160.c",
|
||||||
|
root ++ "library/rsa.c",
|
||||||
|
root ++ "library/rsa_alt_helpers.c",
|
||||||
|
root ++ "library/sha1.c",
|
||||||
|
root ++ "library/sha3.c",
|
||||||
|
root ++ "library/sha256.c",
|
||||||
|
root ++ "library/sha512.c",
|
||||||
|
root ++ "library/threading.c",
|
||||||
|
root ++ "library/timing.c",
|
||||||
|
root ++ "library/version.c",
|
||||||
|
root ++ "library/version_features.c",
|
||||||
|
root ++ "library/pkcs7.c",
|
||||||
|
root ++ "library/x509.c",
|
||||||
|
root ++ "library/x509_create.c",
|
||||||
|
root ++ "library/x509_crl.c",
|
||||||
|
root ++ "library/x509_crt.c",
|
||||||
|
root ++ "library/x509_csr.c",
|
||||||
|
root ++ "library/x509write.c",
|
||||||
|
root ++ "library/x509write_crt.c",
|
||||||
|
root ++ "library/x509write_csr.c",
|
||||||
|
root ++ "library/debug.c",
|
||||||
|
root ++ "library/mps_reader.c",
|
||||||
|
root ++ "library/mps_trace.c",
|
||||||
|
root ++ "library/net_sockets.c",
|
||||||
|
root ++ "library/ssl_cache.c",
|
||||||
|
root ++ "library/ssl_ciphersuites.c",
|
||||||
|
root ++ "library/ssl_client.c",
|
||||||
|
root ++ "library/ssl_cookie.c",
|
||||||
|
root ++ "library/ssl_debug_helpers_generated.c",
|
||||||
|
root ++ "library/ssl_msg.c",
|
||||||
|
root ++ "library/ssl_ticket.c",
|
||||||
|
root ++ "library/ssl_tls.c",
|
||||||
|
root ++ "library/ssl_tls12_client.c",
|
||||||
|
root ++ "library/ssl_tls12_server.c",
|
||||||
|
root ++ "library/ssl_tls13_keys.c",
|
||||||
|
root ++ "library/ssl_tls13_server.c",
|
||||||
|
root ++ "library/ssl_tls13_client.c",
|
||||||
|
root ++ "library/ssl_tls13_generic.c",
|
||||||
|
} });
|
||||||
|
}
|
||||||
|
|
||||||
|
fn buildNghttp2(b: *Build, m: *Build.Module) !void {
|
||||||
|
const nghttp2 = b.addLibrary(.{
|
||||||
|
.name = "nghttp2",
|
||||||
|
.root_module = m,
|
||||||
|
});
|
||||||
|
|
||||||
|
const root = "vendor/nghttp2/";
|
||||||
|
nghttp2.addIncludePath(b.path(root ++ "lib"));
|
||||||
|
nghttp2.addIncludePath(b.path(root ++ "lib/includes"));
|
||||||
|
nghttp2.addCSourceFiles(.{ .flags = &.{
|
||||||
|
"-DNGHTTP2_STATICLIB",
|
||||||
|
"-DHAVE_NETINET_IN",
|
||||||
|
"-DHAVE_TIME_H",
|
||||||
|
}, .files = &.{
|
||||||
|
root ++ "lib/sfparse.c",
|
||||||
|
root ++ "lib/nghttp2_alpn.c",
|
||||||
|
root ++ "lib/nghttp2_buf.c",
|
||||||
|
root ++ "lib/nghttp2_callbacks.c",
|
||||||
|
root ++ "lib/nghttp2_debug.c",
|
||||||
|
root ++ "lib/nghttp2_extpri.c",
|
||||||
|
root ++ "lib/nghttp2_frame.c",
|
||||||
|
root ++ "lib/nghttp2_hd.c",
|
||||||
|
root ++ "lib/nghttp2_hd_huffman.c",
|
||||||
|
root ++ "lib/nghttp2_hd_huffman_data.c",
|
||||||
|
root ++ "lib/nghttp2_helper.c",
|
||||||
|
root ++ "lib/nghttp2_http.c",
|
||||||
|
root ++ "lib/nghttp2_map.c",
|
||||||
|
root ++ "lib/nghttp2_mem.c",
|
||||||
|
root ++ "lib/nghttp2_option.c",
|
||||||
|
root ++ "lib/nghttp2_outbound_item.c",
|
||||||
|
root ++ "lib/nghttp2_pq.c",
|
||||||
|
root ++ "lib/nghttp2_priority_spec.c",
|
||||||
|
root ++ "lib/nghttp2_queue.c",
|
||||||
|
root ++ "lib/nghttp2_rcbuf.c",
|
||||||
|
root ++ "lib/nghttp2_session.c",
|
||||||
|
root ++ "lib/nghttp2_stream.c",
|
||||||
|
root ++ "lib/nghttp2_submit.c",
|
||||||
|
root ++ "lib/nghttp2_version.c",
|
||||||
|
root ++ "lib/nghttp2_ratelim.c",
|
||||||
|
root ++ "lib/nghttp2_time.c",
|
||||||
|
} });
|
||||||
|
}
|
||||||
|
|
||||||
|
fn buildCurl(b: *Build, m: *Build.Module) !void {
|
||||||
|
const curl = b.addLibrary(.{
|
||||||
|
.name = "curl",
|
||||||
|
.root_module = m,
|
||||||
|
});
|
||||||
|
|
||||||
|
const root = "vendor/curl/";
|
||||||
|
|
||||||
|
curl.addIncludePath(b.path(root ++ "lib"));
|
||||||
|
curl.addIncludePath(b.path(root ++ "include"));
|
||||||
|
curl.addCSourceFiles(.{
|
||||||
|
.flags = &.{},
|
||||||
|
.files = &.{
|
||||||
|
root ++ "lib/altsvc.c",
|
||||||
|
root ++ "lib/amigaos.c",
|
||||||
|
root ++ "lib/asyn-ares.c",
|
||||||
|
root ++ "lib/asyn-base.c",
|
||||||
|
root ++ "lib/asyn-thrdd.c",
|
||||||
|
root ++ "lib/bufq.c",
|
||||||
|
root ++ "lib/bufref.c",
|
||||||
|
root ++ "lib/cf-h1-proxy.c",
|
||||||
|
root ++ "lib/cf-h2-proxy.c",
|
||||||
|
root ++ "lib/cf-haproxy.c",
|
||||||
|
root ++ "lib/cf-https-connect.c",
|
||||||
|
root ++ "lib/cf-socket.c",
|
||||||
|
root ++ "lib/cfilters.c",
|
||||||
|
root ++ "lib/conncache.c",
|
||||||
|
root ++ "lib/connect.c",
|
||||||
|
root ++ "lib/content_encoding.c",
|
||||||
|
root ++ "lib/cookie.c",
|
||||||
|
root ++ "lib/cshutdn.c",
|
||||||
|
root ++ "lib/curl_addrinfo.c",
|
||||||
|
root ++ "lib/curl_des.c",
|
||||||
|
root ++ "lib/curl_endian.c",
|
||||||
|
root ++ "lib/curl_fnmatch.c",
|
||||||
|
root ++ "lib/curl_get_line.c",
|
||||||
|
root ++ "lib/curl_gethostname.c",
|
||||||
|
root ++ "lib/curl_gssapi.c",
|
||||||
|
root ++ "lib/curl_memrchr.c",
|
||||||
|
root ++ "lib/curl_ntlm_core.c",
|
||||||
|
root ++ "lib/curl_range.c",
|
||||||
|
root ++ "lib/curl_rtmp.c",
|
||||||
|
root ++ "lib/curl_sasl.c",
|
||||||
|
root ++ "lib/curl_sha512_256.c",
|
||||||
|
root ++ "lib/curl_sspi.c",
|
||||||
|
root ++ "lib/curl_threads.c",
|
||||||
|
root ++ "lib/curl_trc.c",
|
||||||
|
root ++ "lib/cw-out.c",
|
||||||
|
root ++ "lib/cw-pause.c",
|
||||||
|
root ++ "lib/dict.c",
|
||||||
|
root ++ "lib/doh.c",
|
||||||
|
root ++ "lib/dynhds.c",
|
||||||
|
root ++ "lib/easy.c",
|
||||||
|
root ++ "lib/easygetopt.c",
|
||||||
|
root ++ "lib/easyoptions.c",
|
||||||
|
root ++ "lib/escape.c",
|
||||||
|
root ++ "lib/fake_addrinfo.c",
|
||||||
|
root ++ "lib/file.c",
|
||||||
|
root ++ "lib/fileinfo.c",
|
||||||
|
root ++ "lib/fopen.c",
|
||||||
|
root ++ "lib/formdata.c",
|
||||||
|
root ++ "lib/ftp.c",
|
||||||
|
root ++ "lib/ftplistparser.c",
|
||||||
|
root ++ "lib/getenv.c",
|
||||||
|
root ++ "lib/getinfo.c",
|
||||||
|
root ++ "lib/gopher.c",
|
||||||
|
root ++ "lib/hash.c",
|
||||||
|
root ++ "lib/headers.c",
|
||||||
|
root ++ "lib/hmac.c",
|
||||||
|
root ++ "lib/hostip.c",
|
||||||
|
root ++ "lib/hostip4.c",
|
||||||
|
root ++ "lib/hostip6.c",
|
||||||
|
root ++ "lib/hsts.c",
|
||||||
|
root ++ "lib/http.c",
|
||||||
|
root ++ "lib/http1.c",
|
||||||
|
root ++ "lib/http2.c",
|
||||||
|
root ++ "lib/http_aws_sigv4.c",
|
||||||
|
root ++ "lib/http_chunks.c",
|
||||||
|
root ++ "lib/http_digest.c",
|
||||||
|
root ++ "lib/http_negotiate.c",
|
||||||
|
root ++ "lib/http_ntlm.c",
|
||||||
|
root ++ "lib/http_proxy.c",
|
||||||
|
root ++ "lib/httpsrr.c",
|
||||||
|
root ++ "lib/idn.c",
|
||||||
|
root ++ "lib/if2ip.c",
|
||||||
|
root ++ "lib/imap.c",
|
||||||
|
root ++ "lib/krb5.c",
|
||||||
|
root ++ "lib/ldap.c",
|
||||||
|
root ++ "lib/llist.c",
|
||||||
|
root ++ "lib/macos.c",
|
||||||
|
root ++ "lib/md4.c",
|
||||||
|
root ++ "lib/md5.c",
|
||||||
|
root ++ "lib/memdebug.c",
|
||||||
|
root ++ "lib/mime.c",
|
||||||
|
root ++ "lib/mprintf.c",
|
||||||
|
root ++ "lib/mqtt.c",
|
||||||
|
root ++ "lib/multi.c",
|
||||||
|
root ++ "lib/multi_ev.c",
|
||||||
|
root ++ "lib/netrc.c",
|
||||||
|
root ++ "lib/noproxy.c",
|
||||||
|
root ++ "lib/openldap.c",
|
||||||
|
root ++ "lib/parsedate.c",
|
||||||
|
root ++ "lib/pingpong.c",
|
||||||
|
root ++ "lib/pop3.c",
|
||||||
|
root ++ "lib/progress.c",
|
||||||
|
root ++ "lib/psl.c",
|
||||||
|
root ++ "lib/rand.c",
|
||||||
|
root ++ "lib/rename.c",
|
||||||
|
root ++ "lib/request.c",
|
||||||
|
root ++ "lib/rtsp.c",
|
||||||
|
root ++ "lib/select.c",
|
||||||
|
root ++ "lib/sendf.c",
|
||||||
|
root ++ "lib/setopt.c",
|
||||||
|
root ++ "lib/sha256.c",
|
||||||
|
root ++ "lib/share.c",
|
||||||
|
root ++ "lib/slist.c",
|
||||||
|
root ++ "lib/smb.c",
|
||||||
|
root ++ "lib/smtp.c",
|
||||||
|
root ++ "lib/socketpair.c",
|
||||||
|
root ++ "lib/socks.c",
|
||||||
|
root ++ "lib/socks_gssapi.c",
|
||||||
|
root ++ "lib/socks_sspi.c",
|
||||||
|
root ++ "lib/speedcheck.c",
|
||||||
|
root ++ "lib/splay.c",
|
||||||
|
root ++ "lib/strcase.c",
|
||||||
|
root ++ "lib/strdup.c",
|
||||||
|
root ++ "lib/strequal.c",
|
||||||
|
root ++ "lib/strerror.c",
|
||||||
|
root ++ "lib/system_win32.c",
|
||||||
|
root ++ "lib/telnet.c",
|
||||||
|
root ++ "lib/tftp.c",
|
||||||
|
root ++ "lib/transfer.c",
|
||||||
|
root ++ "lib/uint-bset.c",
|
||||||
|
root ++ "lib/uint-hash.c",
|
||||||
|
root ++ "lib/uint-spbset.c",
|
||||||
|
root ++ "lib/uint-table.c",
|
||||||
|
root ++ "lib/url.c",
|
||||||
|
root ++ "lib/urlapi.c",
|
||||||
|
root ++ "lib/version.c",
|
||||||
|
root ++ "lib/ws.c",
|
||||||
|
root ++ "lib/curlx/base64.c",
|
||||||
|
root ++ "lib/curlx/dynbuf.c",
|
||||||
|
root ++ "lib/curlx/inet_ntop.c",
|
||||||
|
root ++ "lib/curlx/nonblock.c",
|
||||||
|
root ++ "lib/curlx/strparse.c",
|
||||||
|
root ++ "lib/curlx/timediff.c",
|
||||||
|
root ++ "lib/curlx/timeval.c",
|
||||||
|
root ++ "lib/curlx/wait.c",
|
||||||
|
root ++ "lib/curlx/warnless.c",
|
||||||
|
root ++ "lib/vquic/curl_ngtcp2.c",
|
||||||
|
root ++ "lib/vquic/curl_osslq.c",
|
||||||
|
root ++ "lib/vquic/curl_quiche.c",
|
||||||
|
root ++ "lib/vquic/vquic.c",
|
||||||
|
root ++ "lib/vquic/vquic-tls.c",
|
||||||
|
root ++ "lib/vauth/cleartext.c",
|
||||||
|
root ++ "lib/vauth/cram.c",
|
||||||
|
root ++ "lib/vauth/digest.c",
|
||||||
|
root ++ "lib/vauth/digest_sspi.c",
|
||||||
|
root ++ "lib/vauth/gsasl.c",
|
||||||
|
root ++ "lib/vauth/krb5_gssapi.c",
|
||||||
|
root ++ "lib/vauth/krb5_sspi.c",
|
||||||
|
root ++ "lib/vauth/ntlm.c",
|
||||||
|
root ++ "lib/vauth/ntlm_sspi.c",
|
||||||
|
root ++ "lib/vauth/oauth2.c",
|
||||||
|
root ++ "lib/vauth/spnego_gssapi.c",
|
||||||
|
root ++ "lib/vauth/spnego_sspi.c",
|
||||||
|
root ++ "lib/vauth/vauth.c",
|
||||||
|
root ++ "lib/vtls/cipher_suite.c",
|
||||||
|
root ++ "lib/vtls/mbedtls.c",
|
||||||
|
root ++ "lib/vtls/mbedtls_threadlock.c",
|
||||||
|
root ++ "lib/vtls/vtls.c",
|
||||||
|
root ++ "lib/vtls/vtls_scache.c",
|
||||||
|
root ++ "lib/vtls/x509asn1.c",
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,10 +4,6 @@
|
|||||||
.version = "0.0.0",
|
.version = "0.0.0",
|
||||||
.fingerprint = 0xda130f3af836cea0,
|
.fingerprint = 0xda130f3af836cea0,
|
||||||
.dependencies = .{
|
.dependencies = .{
|
||||||
.tls = .{
|
|
||||||
.url = "https://github.com/ianic/tls.zig/archive/55845f755d9e2e821458ea55693f85c737cd0c7a.tar.gz",
|
|
||||||
.hash = "tls-0.1.0-ER2e0m43BQAshi8ixj1qf3w2u2lqKtXtkrxUJ4AGZDcl",
|
|
||||||
},
|
|
||||||
.tigerbeetle_io = .{
|
.tigerbeetle_io = .{
|
||||||
.url = "https://github.com/lightpanda-io/tigerbeetle-io/archive/61d9652f1a957b7f4db723ea6aa0ce9635e840ce.tar.gz",
|
.url = "https://github.com/lightpanda-io/tigerbeetle-io/archive/61d9652f1a957b7f4db723ea6aa0ce9635e840ce.tar.gz",
|
||||||
.hash = "tigerbeetle_io-0.0.0-ViLgxpyRBAB5BMfIcj3KMXfbJzwARs9uSl8aRy2OXULd",
|
.hash = "tigerbeetle_io-0.0.0-ViLgxpyRBAB5BMfIcj3KMXfbJzwARs9uSl8aRy2OXULd",
|
||||||
|
|||||||
6
flake.lock
generated
6
flake.lock
generated
@@ -20,11 +20,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1748964450,
|
"lastModified": 1754919767,
|
||||||
"narHash": "sha256-ZouDiXkUk8mkMnah10QcoQ9Nu6UW6AFAHLScS3En6aI=",
|
"narHash": "sha256-bc9tjR2ymbmbtYlnOcksjI7tQtDDEEJFGm41t0msXsg=",
|
||||||
"owner": "nixos",
|
"owner": "nixos",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "9ff500cd9e123f46c55855eca64beccead29b152",
|
"rev": "8c0c41355297485b39d6f6a6d722c8cdfe0257df",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
|||||||
@@ -49,6 +49,7 @@
|
|||||||
glib.dev
|
glib.dev
|
||||||
glibc.dev
|
glibc.dev
|
||||||
zlib
|
zlib
|
||||||
|
zlib.dev
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
in
|
in
|
||||||
|
|||||||
40
src/app.zig
40
src/app.zig
@@ -1,9 +1,10 @@
|
|||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
|
|
||||||
const Allocator = std.mem.Allocator;
|
const Allocator = std.mem.Allocator;
|
||||||
|
|
||||||
const log = @import("log.zig");
|
const log = @import("log.zig");
|
||||||
|
const Http = @import("http/Http.zig");
|
||||||
const Loop = @import("runtime/loop.zig").Loop;
|
const Loop = @import("runtime/loop.zig").Loop;
|
||||||
const http = @import("http/client.zig");
|
|
||||||
const Platform = @import("runtime/js.zig").Platform;
|
const Platform = @import("runtime/js.zig").Platform;
|
||||||
|
|
||||||
const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
|
const Telemetry = @import("telemetry/telemetry.zig").Telemetry;
|
||||||
@@ -12,12 +13,12 @@ const Notification = @import("notification.zig").Notification;
|
|||||||
// Container for global state / objects that various parts of the system
|
// Container for global state / objects that various parts of the system
|
||||||
// might need.
|
// might need.
|
||||||
pub const App = struct {
|
pub const App = struct {
|
||||||
|
http: Http,
|
||||||
loop: *Loop,
|
loop: *Loop,
|
||||||
config: Config,
|
config: Config,
|
||||||
platform: ?*const Platform,
|
platform: ?*const Platform,
|
||||||
allocator: Allocator,
|
allocator: Allocator,
|
||||||
telemetry: Telemetry,
|
telemetry: Telemetry,
|
||||||
http_client: http.Client,
|
|
||||||
app_dir_path: ?[]const u8,
|
app_dir_path: ?[]const u8,
|
||||||
notification: *Notification,
|
notification: *Notification,
|
||||||
|
|
||||||
@@ -32,9 +33,12 @@ pub const App = struct {
|
|||||||
run_mode: RunMode,
|
run_mode: RunMode,
|
||||||
platform: ?*const Platform = null,
|
platform: ?*const Platform = null,
|
||||||
tls_verify_host: bool = true,
|
tls_verify_host: bool = true,
|
||||||
http_proxy: ?std.Uri = null,
|
http_proxy: ?[:0]const u8 = null,
|
||||||
proxy_type: ?http.ProxyType = null,
|
proxy_bearer_token: ?[:0]const u8 = null,
|
||||||
proxy_auth: ?http.ProxyAuth = null,
|
http_timeout_ms: ?u31 = null,
|
||||||
|
http_connect_timeout_ms: ?u31 = null,
|
||||||
|
http_max_host_open: ?u8 = null,
|
||||||
|
http_max_concurrent: ?u8 = null,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn init(allocator: Allocator, config: Config) !*App {
|
pub fn init(allocator: Allocator, config: Config) !*App {
|
||||||
@@ -50,25 +54,33 @@ pub const App = struct {
|
|||||||
const notification = try Notification.init(allocator, null);
|
const notification = try Notification.init(allocator, null);
|
||||||
errdefer notification.deinit();
|
errdefer notification.deinit();
|
||||||
|
|
||||||
|
var http = try Http.init(allocator, .{
|
||||||
|
.max_host_open = config.http_max_host_open orelse 4,
|
||||||
|
.max_concurrent = config.http_max_concurrent orelse 10,
|
||||||
|
.timeout_ms = config.http_timeout_ms orelse 5000,
|
||||||
|
.connect_timeout_ms = config.http_connect_timeout_ms orelse 0,
|
||||||
|
.http_proxy = config.http_proxy,
|
||||||
|
.tls_verify_host = config.tls_verify_host,
|
||||||
|
.proxy_bearer_token = config.proxy_bearer_token,
|
||||||
|
});
|
||||||
|
errdefer http.deinit();
|
||||||
|
|
||||||
const app_dir_path = getAndMakeAppDir(allocator);
|
const app_dir_path = getAndMakeAppDir(allocator);
|
||||||
|
|
||||||
app.* = .{
|
app.* = .{
|
||||||
.loop = loop,
|
.loop = loop,
|
||||||
|
.http = http,
|
||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
.telemetry = undefined,
|
.telemetry = undefined,
|
||||||
.platform = config.platform,
|
.platform = config.platform,
|
||||||
.app_dir_path = app_dir_path,
|
.app_dir_path = app_dir_path,
|
||||||
.notification = notification,
|
.notification = notification,
|
||||||
.http_client = try http.Client.init(allocator, loop, .{
|
|
||||||
.max_concurrent = 3,
|
|
||||||
.http_proxy = config.http_proxy,
|
|
||||||
.proxy_type = config.proxy_type,
|
|
||||||
.proxy_auth = config.proxy_auth,
|
|
||||||
.tls_verify_host = config.tls_verify_host,
|
|
||||||
}),
|
|
||||||
.config = config,
|
.config = config,
|
||||||
};
|
};
|
||||||
app.telemetry = Telemetry.init(app, config.run_mode);
|
|
||||||
|
app.telemetry = try Telemetry.init(app, config.run_mode);
|
||||||
|
errdefer app.telemetry.deinit();
|
||||||
|
|
||||||
try app.telemetry.register(app.notification);
|
try app.telemetry.register(app.notification);
|
||||||
|
|
||||||
return app;
|
return app;
|
||||||
@@ -82,8 +94,8 @@ pub const App = struct {
|
|||||||
self.telemetry.deinit();
|
self.telemetry.deinit();
|
||||||
self.loop.deinit();
|
self.loop.deinit();
|
||||||
allocator.destroy(self.loop);
|
allocator.destroy(self.loop);
|
||||||
self.http_client.deinit();
|
|
||||||
self.notification.deinit();
|
self.notification.deinit();
|
||||||
|
self.http.deinit();
|
||||||
allocator.destroy(self);
|
allocator.destroy(self);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
52
src/browser/DataURI.zig
Normal file
52
src/browser/DataURI.zig
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
const std = @import("std");
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
|
||||||
|
// Parses data:[<media-type>][;base64],<data>
|
||||||
|
pub fn parse(allocator: Allocator, src: []const u8) !?[]const u8 {
|
||||||
|
if (!std.mem.startsWith(u8, src, "data:")) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const uri = src[5..];
|
||||||
|
const data_starts = std.mem.indexOfScalar(u8, uri, ',') orelse return null;
|
||||||
|
|
||||||
|
var data = uri[data_starts + 1 ..];
|
||||||
|
|
||||||
|
// Extract the encoding.
|
||||||
|
const metadata = uri[0..data_starts];
|
||||||
|
if (std.mem.endsWith(u8, metadata, ";base64")) {
|
||||||
|
const decoder = std.base64.standard.Decoder;
|
||||||
|
const decoded_size = try decoder.calcSizeForSlice(data);
|
||||||
|
|
||||||
|
const buffer = try allocator.alloc(u8, decoded_size);
|
||||||
|
errdefer allocator.free(buffer);
|
||||||
|
|
||||||
|
try decoder.decode(buffer, data);
|
||||||
|
data = buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
const testing = @import("../testing.zig");
|
||||||
|
test "DataURI: parse valid" {
|
||||||
|
try test_valid("data:text/javascript; charset=utf-8;base64,Zm9v", "foo");
|
||||||
|
try test_valid("data:text/javascript; charset=utf-8;,foo", "foo");
|
||||||
|
try test_valid("data:,foo", "foo");
|
||||||
|
}
|
||||||
|
|
||||||
|
test "DataURI: parse invalid" {
|
||||||
|
try test_cannot_parse("atad:,foo");
|
||||||
|
try test_cannot_parse("data:foo");
|
||||||
|
try test_cannot_parse("data:");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn test_valid(uri: []const u8, expected: []const u8) !void {
|
||||||
|
defer testing.reset();
|
||||||
|
const data_uri = try parse(testing.arena_allocator, uri) orelse return error.TestFailed;
|
||||||
|
try testing.expectEqual(expected, data_uri);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn test_cannot_parse(uri: []const u8) !void {
|
||||||
|
try testing.expectEqual(null, parse(undefined, uri));
|
||||||
|
}
|
||||||
168
src/browser/Scheduler.zig
Normal file
168
src/browser/Scheduler.zig
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
|
//
|
||||||
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as
|
||||||
|
// published by the Free Software Foundation, either version 3 of the
|
||||||
|
// License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const log = @import("../log.zig");
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
|
||||||
|
const Scheduler = @This();
|
||||||
|
|
||||||
|
primary: Queue,
|
||||||
|
|
||||||
|
// For repeating tasks. We only want to run these if there are other things to
|
||||||
|
// do. We don't, for example, want a window.setInterval or the page.runMicrotasks
|
||||||
|
// to block the page.wait.
|
||||||
|
secondary: Queue,
|
||||||
|
|
||||||
|
// we expect allocator to be the page arena, hence we never call primary.deinit
|
||||||
|
pub fn init(allocator: Allocator) Scheduler {
|
||||||
|
return .{
|
||||||
|
.primary = Queue.init(allocator, {}),
|
||||||
|
.secondary = Queue.init(allocator, {}),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset(self: *Scheduler) void {
|
||||||
|
self.primary.clearRetainingCapacity();
|
||||||
|
self.secondary.clearRetainingCapacity();
|
||||||
|
}
|
||||||
|
|
||||||
|
const AddOpts = struct {
|
||||||
|
name: []const u8 = "",
|
||||||
|
};
|
||||||
|
pub fn add(self: *Scheduler, ctx: *anyopaque, func: Task.Func, ms: u32, opts: AddOpts) !void {
|
||||||
|
if (ms > 5_000) {
|
||||||
|
log.warn(.user_script, "long timeout ignored", .{ .delay = ms });
|
||||||
|
// ignore any task that we're almost certainly never going to run
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
return self.primary.add(.{
|
||||||
|
.ms = std.time.milliTimestamp() + ms,
|
||||||
|
.ctx = ctx,
|
||||||
|
.func = func,
|
||||||
|
.name = opts.name,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn runHighPriority(self: *Scheduler) !?u32 {
|
||||||
|
return self.runQueue(&self.primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn runLowPriority(self: *Scheduler) !?u32 {
|
||||||
|
return self.runQueue(&self.secondary);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn runQueue(self: *Scheduler, queue: *Queue) !?u32 {
|
||||||
|
// this is O(1)
|
||||||
|
if (queue.count() == 0) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const now = std.time.milliTimestamp();
|
||||||
|
|
||||||
|
var next = queue.peek();
|
||||||
|
while (next) |task| {
|
||||||
|
const time_to_next = task.ms - now;
|
||||||
|
if (time_to_next > 0) {
|
||||||
|
// @intCast is petty safe since we limit tasks to just 5 seconds
|
||||||
|
// in the future
|
||||||
|
return @intCast(time_to_next);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (task.func(task.ctx)) |repeat_delay| {
|
||||||
|
// if we do (now + 0) then our WHILE loop will run endlessly.
|
||||||
|
// no task should ever return 0
|
||||||
|
std.debug.assert(repeat_delay != 0);
|
||||||
|
|
||||||
|
var copy = task;
|
||||||
|
copy.ms = now + repeat_delay;
|
||||||
|
try self.secondary.add(copy);
|
||||||
|
}
|
||||||
|
_ = queue.remove();
|
||||||
|
next = queue.peek();
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const Task = struct {
|
||||||
|
ms: i64,
|
||||||
|
func: Func,
|
||||||
|
ctx: *anyopaque,
|
||||||
|
name: []const u8,
|
||||||
|
|
||||||
|
const Func = *const fn (ctx: *anyopaque) ?u32;
|
||||||
|
};
|
||||||
|
|
||||||
|
const Queue = std.PriorityQueue(Task, void, struct {
|
||||||
|
fn compare(_: void, a: Task, b: Task) std.math.Order {
|
||||||
|
return std.math.order(a.ms, b.ms);
|
||||||
|
}
|
||||||
|
}.compare);
|
||||||
|
|
||||||
|
const testing = @import("../testing.zig");
|
||||||
|
test "Scheduler" {
|
||||||
|
defer testing.reset();
|
||||||
|
|
||||||
|
var task = TestTask{ .allocator = testing.arena_allocator };
|
||||||
|
|
||||||
|
var s = Scheduler.init(testing.arena_allocator);
|
||||||
|
try testing.expectEqual(null, s.runHighPriority());
|
||||||
|
try testing.expectEqual(0, task.calls.items.len);
|
||||||
|
|
||||||
|
try s.add(&task, TestTask.run1, 3, .{});
|
||||||
|
|
||||||
|
try testing.expectDelta(3, try s.runHighPriority(), 1);
|
||||||
|
try testing.expectEqual(0, task.calls.items.len);
|
||||||
|
|
||||||
|
std.time.sleep(std.time.ns_per_ms * 5);
|
||||||
|
try testing.expectEqual(null, s.runHighPriority());
|
||||||
|
try testing.expectEqualSlices(u32, &.{1}, task.calls.items);
|
||||||
|
|
||||||
|
try s.add(&task, TestTask.run2, 3, .{});
|
||||||
|
try s.add(&task, TestTask.run1, 2, .{});
|
||||||
|
|
||||||
|
std.time.sleep(std.time.ns_per_ms * 5);
|
||||||
|
try testing.expectDelta(null, try s.runHighPriority(), 1);
|
||||||
|
try testing.expectEqualSlices(u32, &.{ 1, 1, 2 }, task.calls.items);
|
||||||
|
|
||||||
|
std.time.sleep(std.time.ns_per_ms * 5);
|
||||||
|
// wont' run secondary
|
||||||
|
try testing.expectEqual(null, try s.runHighPriority());
|
||||||
|
try testing.expectEqualSlices(u32, &.{ 1, 1, 2 }, task.calls.items);
|
||||||
|
|
||||||
|
//runs secondary
|
||||||
|
try testing.expectDelta(2, try s.runLowPriority(), 1);
|
||||||
|
try testing.expectEqualSlices(u32, &.{ 1, 1, 2, 2 }, task.calls.items);
|
||||||
|
}
|
||||||
|
|
||||||
|
const TestTask = struct {
|
||||||
|
allocator: Allocator,
|
||||||
|
calls: std.ArrayListUnmanaged(u32) = .{},
|
||||||
|
|
||||||
|
fn run1(ctx: *anyopaque) ?u32 {
|
||||||
|
var self: *TestTask = @alignCast(@ptrCast(ctx));
|
||||||
|
self.calls.append(self.allocator, 1) catch unreachable;
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
fn run2(ctx: *anyopaque) ?u32 {
|
||||||
|
var self: *TestTask = @alignCast(@ptrCast(ctx));
|
||||||
|
self.calls.append(self.allocator, 2) catch unreachable;
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
};
|
||||||
835
src/browser/ScriptManager.zig
Normal file
835
src/browser/ScriptManager.zig
Normal file
@@ -0,0 +1,835 @@
|
|||||||
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
|
//
|
||||||
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as
|
||||||
|
// published by the Free Software Foundation, either version 3 of the
|
||||||
|
// License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
|
||||||
|
const log = @import("../log.zig");
|
||||||
|
const parser = @import("netsurf.zig");
|
||||||
|
|
||||||
|
const Env = @import("env.zig").Env;
|
||||||
|
const Page = @import("page.zig").Page;
|
||||||
|
const DataURI = @import("DataURI.zig");
|
||||||
|
const Browser = @import("browser.zig").Browser;
|
||||||
|
const HttpClient = @import("../http/Client.zig");
|
||||||
|
const URL = @import("../url.zig").URL;
|
||||||
|
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
const ArrayListUnmanaged = std.ArrayListUnmanaged;
|
||||||
|
|
||||||
|
const ScriptManager = @This();
|
||||||
|
|
||||||
|
page: *Page,
|
||||||
|
|
||||||
|
// used to prevent recursive evalution
|
||||||
|
is_evaluating: bool,
|
||||||
|
|
||||||
|
// used to prevent executing scripts while we're doing a blocking load
|
||||||
|
is_blocking: bool = false,
|
||||||
|
|
||||||
|
// Only once this is true can deferred scripts be run
|
||||||
|
static_scripts_done: bool,
|
||||||
|
|
||||||
|
// List of async scripts. We don't care about the execution order of these, but
|
||||||
|
// on shutdown/abort, we need to co cleanup any pending ones.
|
||||||
|
asyncs: OrderList,
|
||||||
|
|
||||||
|
// Normal scripts (non-deffered & non-async). These must be executed ni order
|
||||||
|
scripts: OrderList,
|
||||||
|
|
||||||
|
// List of deferred scripts. These must be executed in order, but only once
|
||||||
|
// dom_loaded == true,
|
||||||
|
deferreds: OrderList,
|
||||||
|
|
||||||
|
shutdown: bool = false,
|
||||||
|
|
||||||
|
client: *HttpClient,
|
||||||
|
allocator: Allocator,
|
||||||
|
buffer_pool: BufferPool,
|
||||||
|
script_pool: std.heap.MemoryPool(PendingScript),
|
||||||
|
|
||||||
|
const OrderList = std.DoublyLinkedList(*PendingScript);
|
||||||
|
|
||||||
|
pub fn init(browser: *Browser, page: *Page) ScriptManager {
|
||||||
|
// page isn't fully initialized, we can setup our reference, but that's it.
|
||||||
|
const allocator = browser.allocator;
|
||||||
|
return .{
|
||||||
|
.page = page,
|
||||||
|
.asyncs = .{},
|
||||||
|
.scripts = .{},
|
||||||
|
.deferreds = .{},
|
||||||
|
.is_evaluating = false,
|
||||||
|
.allocator = allocator,
|
||||||
|
.client = browser.http_client,
|
||||||
|
.static_scripts_done = false,
|
||||||
|
.buffer_pool = BufferPool.init(allocator, 5),
|
||||||
|
.script_pool = std.heap.MemoryPool(PendingScript).init(allocator),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deinit(self: *ScriptManager) void {
|
||||||
|
self.reset();
|
||||||
|
self.buffer_pool.deinit();
|
||||||
|
self.script_pool.deinit();
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset(self: *ScriptManager) void {
|
||||||
|
self.clearList(&self.asyncs);
|
||||||
|
self.clearList(&self.scripts);
|
||||||
|
self.clearList(&self.deferreds);
|
||||||
|
self.static_scripts_done = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clearList(_: *const ScriptManager, list: *OrderList) void {
|
||||||
|
while (list.first) |node| {
|
||||||
|
const pending_script = node.data;
|
||||||
|
// this removes it from the list
|
||||||
|
pending_script.deinit();
|
||||||
|
}
|
||||||
|
std.debug.assert(list.first == null);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn addFromElement(self: *ScriptManager, element: *parser.Element) !void {
|
||||||
|
if (try parser.elementGetAttribute(element, "nomodule") != null) {
|
||||||
|
// these scripts should only be loaded if we don't support modules
|
||||||
|
// but since we do support modules, we can just skip them.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a script tag gets dynamically created and added to the dom:
|
||||||
|
// document.getElementsByTagName('head')[0].appendChild(script)
|
||||||
|
// that script tag will immediately get executed by our scriptAddedCallback.
|
||||||
|
// However, if the location where the script tag is inserted happens to be
|
||||||
|
// below where processHTMLDoc curently is, then we'll re-run that same script
|
||||||
|
// again in processHTMLDoc. This flag is used to let us know if a specific
|
||||||
|
// <script> has already been processed.
|
||||||
|
if (try parser.scriptGetProcessed(@ptrCast(element))) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
try parser.scriptSetProcessed(@ptrCast(element), true);
|
||||||
|
|
||||||
|
const kind: Script.Kind = blk: {
|
||||||
|
const script_type = try parser.elementGetAttribute(element, "type") orelse break :blk .javascript;
|
||||||
|
if (script_type.len == 0) {
|
||||||
|
break :blk .javascript;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(script_type, "application/javascript")) {
|
||||||
|
break :blk .javascript;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(script_type, "text/javascript")) {
|
||||||
|
break :blk .javascript;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(script_type, "module")) {
|
||||||
|
break :blk .module;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(script_type, "application/json")) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(script_type, "application/ld+json")) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.warn(.user_script, "unknown script type", .{ .type = script_type });
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
var onload: ?Script.Callback = null;
|
||||||
|
var onerror: ?Script.Callback = null;
|
||||||
|
|
||||||
|
const page = self.page;
|
||||||
|
if (page.getNodeState(@ptrCast(element))) |se| {
|
||||||
|
// if the script has a node state, then it was dynamically added and thus
|
||||||
|
// the onload/onerror were saved in the state (if there are any)
|
||||||
|
if (se.onload) |function| {
|
||||||
|
onload = .{ .function = function };
|
||||||
|
}
|
||||||
|
if (se.onerror) |function| {
|
||||||
|
onerror = .{ .function = function };
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// if the script has no node state, then it could still be dynamically
|
||||||
|
// added (could have been dynamically added, but no attributes were set
|
||||||
|
// which required a node state to be created) or it could be a inline
|
||||||
|
// <script>.
|
||||||
|
if (try parser.elementGetAttribute(element, "onload")) |string| {
|
||||||
|
onload = .{ .string = string };
|
||||||
|
}
|
||||||
|
if (try parser.elementGetAttribute(element, "onerror")) |string| {
|
||||||
|
onerror = .{ .string = string };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var source: Script.Source = undefined;
|
||||||
|
var remote_url: ?[:0]const u8 = null;
|
||||||
|
if (try parser.elementGetAttribute(element, "src")) |src| {
|
||||||
|
if (try DataURI.parse(page.arena, src)) |data_uri| {
|
||||||
|
source = .{ .@"inline" = data_uri };
|
||||||
|
}
|
||||||
|
remote_url = try URL.stitch(page.arena, src, page.url.raw, .{ .null_terminated = true });
|
||||||
|
source = .{ .remote = .{} };
|
||||||
|
} else {
|
||||||
|
const inline_source = try parser.nodeTextContent(@ptrCast(element)) orelse return;
|
||||||
|
source = .{ .@"inline" = inline_source };
|
||||||
|
}
|
||||||
|
|
||||||
|
var script = Script{
|
||||||
|
.kind = kind,
|
||||||
|
.onload = onload,
|
||||||
|
.onerror = onerror,
|
||||||
|
.element = element,
|
||||||
|
.source = source,
|
||||||
|
.url = remote_url orelse page.url.raw,
|
||||||
|
.is_defer = try parser.elementGetAttribute(element, "defer") != null,
|
||||||
|
.is_async = try parser.elementGetAttribute(element, "async") != null,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (source == .@"inline" and self.scripts.first == null) {
|
||||||
|
// inline script with no pending scripts, execute it immediately.
|
||||||
|
// (if there is a pending script, then we cannot execute this immediately
|
||||||
|
// as it needs to be executed in order)
|
||||||
|
return script.eval(page);
|
||||||
|
}
|
||||||
|
|
||||||
|
const pending_script = try self.script_pool.create();
|
||||||
|
errdefer self.script_pool.destroy(pending_script);
|
||||||
|
pending_script.* = .{
|
||||||
|
.script = script,
|
||||||
|
.complete = false,
|
||||||
|
.manager = self,
|
||||||
|
.node = .{ .data = pending_script },
|
||||||
|
};
|
||||||
|
|
||||||
|
if (source == .@"inline") {
|
||||||
|
// if we're here, it means that we have pending scripts (i.e. self.scripts
|
||||||
|
// is not empty). Because the script is inline, it's complete/ready, but
|
||||||
|
// we need to process them in order
|
||||||
|
pending_script.complete = true;
|
||||||
|
self.scripts.append(&pending_script.node);
|
||||||
|
return;
|
||||||
|
} else {
|
||||||
|
log.debug(.http, "script queue", .{ .url = remote_url.? });
|
||||||
|
}
|
||||||
|
|
||||||
|
pending_script.node = .{ .data = pending_script };
|
||||||
|
self.getList(&pending_script.script).append(&pending_script.node);
|
||||||
|
|
||||||
|
errdefer pending_script.deinit();
|
||||||
|
|
||||||
|
var headers = try HttpClient.Headers.init();
|
||||||
|
try page.requestCookie(.{}).headersForRequest(self.allocator, remote_url.?, &headers);
|
||||||
|
|
||||||
|
try self.client.request(.{
|
||||||
|
.url = remote_url.?,
|
||||||
|
.ctx = pending_script,
|
||||||
|
.method = .GET,
|
||||||
|
.headers = headers,
|
||||||
|
.cookie_jar = page.cookie_jar,
|
||||||
|
.start_callback = if (log.enabled(.http, .debug)) startCallback else null,
|
||||||
|
.header_done_callback = headerCallback,
|
||||||
|
.data_callback = dataCallback,
|
||||||
|
.done_callback = doneCallback,
|
||||||
|
.error_callback = errorCallback,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// @TODO: Improving this would have the simplest biggest performance improvement
|
||||||
|
// for most sites.
|
||||||
|
//
|
||||||
|
// For JS imports (both static and dynamic), we currently block to get the
|
||||||
|
// result (the content of the file).
|
||||||
|
//
|
||||||
|
// For static imports, this is necessary, since v8 is expecting the compiled module
|
||||||
|
// as part of the function return. (we should try to pre-load the JavaScript
|
||||||
|
// source via module.GetModuleRequests(), but that's for a later time).
|
||||||
|
//
|
||||||
|
// For dynamic dynamic imports, this is not strictly necessary since the v8
|
||||||
|
// call returns a Promise; we could make this a normal get call, associated with
|
||||||
|
// the promise, and when done, resolve the promise.
|
||||||
|
//
|
||||||
|
// In both cases, for now at least, we just issue a "blocking" request. We block
|
||||||
|
// by ticking the http client until the script is complete.
|
||||||
|
//
|
||||||
|
// This uses the client.blockingRequest call which has a dedicated handle for
|
||||||
|
// these blocking requests. Because they are blocking, we're guaranteed to have
|
||||||
|
// only 1 at a time, thus the 1 reserved handle.
|
||||||
|
//
|
||||||
|
// You almost don't need the http client's blocking handle. In most cases, you
|
||||||
|
// should always have 1 free handle whenever you get here, because we always
|
||||||
|
// release the handle before executing the doneCallback. So, if a module does:
|
||||||
|
// import * as x from 'blah'
|
||||||
|
// And we need to load 'blah', there should always be 1 free handle - the handle
|
||||||
|
// of the http GET we just completed before executing the module.
|
||||||
|
// The exception to this, and the reason we need a special blocking handle, is
|
||||||
|
// for inline modules within the HTML page itself:
|
||||||
|
// <script type=module>import ....</script>
|
||||||
|
// Unlike external modules which can only ever be executed after releasing an
|
||||||
|
// http handle, these are executed without there necessarily being a free handle.
|
||||||
|
// Thus, Http/Client.zig maintains a dedicated handle for these calls.
|
||||||
|
pub fn blockingGet(self: *ScriptManager, url: [:0]const u8) !BlockingResult {
|
||||||
|
std.debug.assert(self.is_blocking == false);
|
||||||
|
|
||||||
|
self.is_blocking = true;
|
||||||
|
defer {
|
||||||
|
self.is_blocking = false;
|
||||||
|
|
||||||
|
// we blocked evaluation while loading this script, there could be
|
||||||
|
// scripts ready to process.
|
||||||
|
self.evaluate();
|
||||||
|
}
|
||||||
|
|
||||||
|
var blocking = Blocking{
|
||||||
|
.allocator = self.allocator,
|
||||||
|
.buffer_pool = &self.buffer_pool,
|
||||||
|
};
|
||||||
|
|
||||||
|
var headers = try HttpClient.Headers.init();
|
||||||
|
try self.page.requestCookie(.{}).headersForRequest(self.allocator, url, &headers);
|
||||||
|
|
||||||
|
var client = self.client;
|
||||||
|
try client.blockingRequest(.{
|
||||||
|
.url = url,
|
||||||
|
.method = .GET,
|
||||||
|
.headers = headers,
|
||||||
|
.cookie_jar = self.page.cookie_jar,
|
||||||
|
.ctx = &blocking,
|
||||||
|
.start_callback = if (log.enabled(.http, .debug)) Blocking.startCallback else null,
|
||||||
|
.header_done_callback = Blocking.headerCallback,
|
||||||
|
.data_callback = Blocking.dataCallback,
|
||||||
|
.done_callback = Blocking.doneCallback,
|
||||||
|
.error_callback = Blocking.errorCallback,
|
||||||
|
});
|
||||||
|
|
||||||
|
// rely on http's timeout settings to avoid an endless/long loop.
|
||||||
|
while (true) {
|
||||||
|
try client.tick(200);
|
||||||
|
switch (blocking.state) {
|
||||||
|
.running => {},
|
||||||
|
.done => |result| return result,
|
||||||
|
.err => |err| return err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn staticScriptsDone(self: *ScriptManager) void {
|
||||||
|
std.debug.assert(self.static_scripts_done == false);
|
||||||
|
self.static_scripts_done = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// try to evaluate completed scripts (in order). This is called whenever a script
|
||||||
|
// is completed.
|
||||||
|
fn evaluate(self: *ScriptManager) void {
|
||||||
|
if (self.is_evaluating) {
|
||||||
|
// It's possible for a script.eval to cause evaluate to be called again.
|
||||||
|
// This is particularly true with blockingGet, but even without this,
|
||||||
|
// it's theoretically possible (but unlikely). We could make this work
|
||||||
|
// but there's little reason to support the complexity.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (self.is_blocking) {
|
||||||
|
// Cannot evaluate scripts while a blocking-load is in progress. Not
|
||||||
|
// only could that result in incorrect evaluation order, it could
|
||||||
|
// triger another blocking request, while we're doing a blocking request.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const page = self.page;
|
||||||
|
self.is_evaluating = true;
|
||||||
|
defer self.is_evaluating = false;
|
||||||
|
|
||||||
|
while (self.scripts.first) |n| {
|
||||||
|
var pending_script = n.data;
|
||||||
|
if (pending_script.complete == false) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
defer pending_script.deinit();
|
||||||
|
pending_script.script.eval(page);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (self.static_scripts_done == false) {
|
||||||
|
// We can only execute deferred scripts if
|
||||||
|
// 1 - all the normal scripts are done
|
||||||
|
// 2 - we've finished parsing the HTML and at least queued all the scripts
|
||||||
|
// The last one isn't obvious, but it's possible for self.scripts to
|
||||||
|
// be empty not because we're done executing all the normal scripts
|
||||||
|
// but because we're done executing some (or maybe none), but we're still
|
||||||
|
// parsing the HTML.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
while (self.deferreds.first) |n| {
|
||||||
|
var pending_script = n.data;
|
||||||
|
if (pending_script.complete == false) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
defer pending_script.deinit();
|
||||||
|
pending_script.script.eval(page);
|
||||||
|
}
|
||||||
|
|
||||||
|
// When all scripts (normal and deferred) are done loading, the document
|
||||||
|
// state changes (this ultimately triggers the DOMContentLoaded event)
|
||||||
|
page.documentIsLoaded();
|
||||||
|
|
||||||
|
if (self.asyncs.first == null) {
|
||||||
|
// if we're here, then its like `asyncDone`
|
||||||
|
// 1 - there are no async scripts pending
|
||||||
|
// 2 - we checkecked static_scripts_done == true above
|
||||||
|
// 3 - we drained self.scripts above
|
||||||
|
// 4 - we drained self.deferred above
|
||||||
|
page.documentIsComplete();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn isDone(self: *const ScriptManager) bool {
|
||||||
|
return self.asyncs.first == null and // there are no more async scripts
|
||||||
|
self.static_scripts_done and // and we've finished parsing the HTML to queue all <scripts>
|
||||||
|
self.scripts.first == null and // and there are no more <script src=> to wait for
|
||||||
|
self.deferreds.first == null; // and there are no more <script defer src=> to wait for
|
||||||
|
}
|
||||||
|
|
||||||
|
fn asyncDone(self: *ScriptManager) void {
|
||||||
|
if (self.isDone()) {
|
||||||
|
// then the document is considered complete
|
||||||
|
self.page.documentIsComplete();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn getList(self: *ScriptManager, script: *const Script) *OrderList {
|
||||||
|
// When a script has both the async and defer flag set, it should be
|
||||||
|
// treated as async. Async is newer, so some websites use both so that
|
||||||
|
// if async isn't known, it'll fallback to defer.
|
||||||
|
if (script.is_async) {
|
||||||
|
return &self.asyncs;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (script.is_defer) {
|
||||||
|
return &self.deferreds;
|
||||||
|
}
|
||||||
|
|
||||||
|
return &self.scripts;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// HttpClient trampoline: recovers the PendingScript from the transfer's
/// opaque ctx and forwards the start event, logging any failure.
fn startCallback(transfer: *HttpClient.Transfer) !void {
    const pending: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
    return pending.startCallback(transfer) catch |err| {
        log.err(.http, "SM.startCallback", .{ .err = err, .transfer = transfer });
        return err;
    };
}
|
||||||
|
|
||||||
|
/// HttpClient trampoline: recovers the PendingScript from the transfer's
/// opaque ctx and forwards the response-header event, logging any failure
/// (including non-200 statuses rejected downstream).
fn headerCallback(transfer: *HttpClient.Transfer) !void {
    const pending: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
    return pending.headerCallback(transfer) catch |err| {
        log.err(.http, "SM.headerCallback", .{
            .err = err,
            .transfer = transfer,
            .status = transfer.response_header.?.status,
        });
        return err;
    };
}
|
||||||
|
|
||||||
|
/// HttpClient trampoline: recovers the PendingScript from the transfer's
/// opaque ctx and forwards a chunk of response body, logging any failure.
fn dataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
    const pending: *PendingScript = @alignCast(@ptrCast(transfer.ctx));
    return pending.dataCallback(transfer, data) catch |err| {
        log.err(.http, "SM.dataCallback", .{ .err = err, .transfer = transfer, .len = data.len });
        return err;
    };
}
|
||||||
|
|
||||||
|
/// HttpClient trampoline: the transfer finished successfully; hand off to
/// the PendingScript for evaluation/queueing.
fn doneCallback(ctx: *anyopaque) !void {
    const pending: *PendingScript = @alignCast(@ptrCast(ctx));
    pending.doneCallback();
}
|
||||||
|
|
||||||
|
/// HttpClient trampoline: the transfer failed; hand off to the
/// PendingScript for cleanup.
fn errorCallback(ctx: *anyopaque, err: anyerror) void {
    const pending: *PendingScript = @alignCast(@ptrCast(ctx));
    pending.errorCallback(err);
}
|
||||||
|
|
||||||
|
// A script which is pending execution.
// It could be pending because:
// (a) we're still downloading its content or
// (b) this is a non-async script that has to be executed in order
const PendingScript = struct {
    // The script itself: source, flags and load/error callbacks.
    script: Script,
    // True once the remote content has fully arrived. Ordered (non-async)
    // scripts wait on this flag before being evaluated.
    complete: bool,
    // Intrusive node linking this entry into one of the manager's ordering
    // lists (asyncs / deferreds / scripts), see ScriptManager.getList.
    // NOTE(review): `remove()` below treats this field as an optional
    // (`if (self.node) |*node|` / `self.node = null`) while it is declared
    // non-optional here — confirm whether the declaration should be
    // `?OrderList.Node`.
    node: OrderList.Node,
    // Owning manager; provides the buffer pool, allocator and ordering lists.
    manager: *ScriptManager,

    // Releases the pooled download buffer (for remote scripts) and unlinks
    // this entry from its ordering list.
    fn deinit(self: *PendingScript) void {
        const script = &self.script;
        const manager = self.manager;

        if (script.source == .remote) {
            manager.buffer_pool.release(script.source.remote);
        }
        manager.getList(script).remove(&self.node);
    }

    // Unlinks this entry from its ordering list without releasing buffers.
    fn remove(self: *PendingScript) void {
        if (self.node) |*node| {
            self.manager.getList(&self.script).remove(node);
            self.node = null;
        }
    }

    // Transfer started; currently log-only.
    fn startCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
        _ = self;
        log.debug(.http, "script fetch start", .{ .req = transfer });
    }

    // Response headers arrived. Rejects non-200 responses and grabs a pooled
    // buffer to accumulate the body into.
    fn headerCallback(self: *PendingScript, transfer: *HttpClient.Transfer) !void {
        const header = &transfer.response_header.?;
        log.debug(.http, "script header", .{
            .req = transfer,
            .status = header.status,
            .content_type = header.contentType(),
        });

        if (header.status != 200) {
            return error.InvalidStatusCode;
        }

        // If this isn't true, then we'll likely leak memory. If you don't
        // set `CURLOPT_SUPPRESS_CONNECT_HEADERS` and CONNECT to a proxy, this
        // will fail. This assertion exists to catch incorrect assumptions about
        // how libcurl works, or about how we've configured it.
        std.debug.assert(self.script.source.remote.capacity == 0);
        self.script.source = .{ .remote = self.manager.buffer_pool.get() };
    }

    // A chunk of the response body arrived; append it to the pooled buffer.
    fn dataCallback(self: *PendingScript, transfer: *HttpClient.Transfer, data: []const u8) !void {
        _ = transfer;
        // too verbose
        // log.debug(.http, "script data chunk", .{
        // .req = transfer,
        // .len = data.len,
        // });

        try self.script.source.remote.appendSlice(self.manager.allocator, data);
    }

    // Download finished. Async scripts are evaluated immediately; ordered
    // scripts are marked complete and picked up by the manager's in-order
    // evaluation pass.
    fn doneCallback(self: *PendingScript) void {
        log.debug(.http, "script fetch complete", .{ .req = self.script.url });

        const manager = self.manager;
        if (self.script.is_async) {
            // async script can be evaluated immediately
            self.script.eval(self.manager.page);
            self.deinit();
            manager.asyncDone();
        } else {
            self.complete = true;
            manager.evaluate();
        }
    }

    // Download failed. Clean up and, unless we're shutting down, let the
    // manager re-check whether other scripts can now be evaluated.
    fn errorCallback(self: *PendingScript, err: anyerror) void {
        log.warn(.http, "script fetch error", .{ .req = self.script.url, .err = err });

        const manager = self.manager;

        self.deinit();

        if (manager.shutdown) {
            return;
        }

        manager.evaluate();
    }
};
|
||||||
|
|
||||||
|
// A single <script> element: where its source comes from, how it should be
// evaluated (classic script vs module), and the onload/onerror callbacks to
// fire afterwards.
const Script = struct {
    // Classic javascript or ES module evaluation.
    kind: Kind,
    // Source URL of the script (used for logging and as the module name).
    url: []const u8,
    // HTML `async` attribute.
    is_async: bool,
    // HTML `defer` attribute.
    is_defer: bool,
    // Inline text or a downloaded (pooled) buffer.
    source: Source,
    // Optional onload handler, fired after successful evaluation.
    onload: ?Callback,
    // Optional onerror handler, fired after a failed evaluation.
    onerror: ?Callback,
    // The DOM element this script belongs to; set as document.currentScript
    // during evaluation.
    element: *parser.Element,

    const Kind = enum {
        module,
        javascript,
    };

    // A callback can be a string of code to exec, or a JS function to call.
    const Callback = union(enum) {
        string: []const u8,
        function: Env.Function,
    };

    const Source = union(enum) {
        @"inline": []const u8,
        remote: std.ArrayListUnmanaged(u8),

        // The script text, regardless of where it came from.
        fn content(self: Source) []const u8 {
            return switch (self) {
                .remote => |buf| buf.items,
                .@"inline" => |c| c,
            };
        }
    };

    // Evaluates the script in the page's main JS context, setting
    // document.currentScript for the duration, then fires onload (success)
    // or onerror (failure, unless a delayed navigation is in progress).
    fn eval(self: *Script, page: *Page) void {
        page.setCurrentScript(@ptrCast(self.element)) catch |err| {
            log.err(.browser, "set document script", .{ .err = err });
            return;
        };

        defer page.setCurrentScript(null) catch |err| {
            log.err(.browser, "clear document script", .{ .err = err });
        };

        // inline scripts aren't cached. remote ones are.
        const cacheable = self.source == .remote;

        const url = self.url;

        log.info(.browser, "executing script", .{
            .src = url,
            .kind = self.kind,
            .cacheable = cacheable,
        });

        const js_context = page.main_context;
        var try_catch: Env.TryCatch = undefined;
        try_catch.init(js_context);
        defer try_catch.deinit();

        // success == the evaluation did not throw/error.
        const success = blk: {
            const content = self.source.content();
            switch (self.kind) {
                .javascript => _ = js_context.eval(content, url) catch break :blk false,
                .module => {
                    // We don't care about waiting for the evaluation here.
                    _ = js_context.module(content, url, cacheable) catch break :blk false;
                },
            }
            break :blk true;
        };

        if (success) {
            self.executeCallback("onload", page);
            return;
        }

        if (page.delayed_navigation) {
            // If we're navigating to another page, an error is expected
            // since we probably terminated the script forcefully.
            return;
        }

        const msg = try_catch.err(page.arena) catch |err| @errorName(err) orelse "unknown";
        log.warn(.user_script, "eval script", .{
            .url = url,
            .err = msg,
            .cacheable = cacheable,
        });

        self.executeCallback("onerror", page);
    }

    // Fires the onload or onerror callback (`typ` selects the field at
    // comptime). String callbacks are exec'd as code; function callbacks are
    // invoked with a freshly-created event. Errors are logged, not propagated.
    fn executeCallback(self: *const Script, comptime typ: []const u8, page: *Page) void {
        const callback = @field(self, typ) orelse return;

        switch (callback) {
            .string => |str| {
                var try_catch: Env.TryCatch = undefined;
                try_catch.init(page.main_context);
                defer try_catch.deinit();

                _ = page.main_context.exec(str, typ) catch |err| {
                    const msg = try_catch.err(page.arena) catch @errorName(err) orelse "unknown";
                    log.warn(.user_script, "script callback", .{
                        .url = self.url,
                        .err = msg,
                        .type = typ,
                        .@"inline" = true,
                    });
                };
            },
            .function => |f| {
                const Event = @import("events/event.zig").Event;
                const loadevt = parser.eventCreate() catch |err| {
                    log.err(.browser, "SM event creation", .{ .err = err });
                    return;
                };
                defer parser.eventDestroy(loadevt);

                var result: Env.Function.Result = undefined;
                const iface = Event.toInterface(loadevt) catch |err| {
                    log.err(.browser, "SM event interface", .{ .err = err });
                    return;
                };
                f.tryCall(void, .{iface}, &result) catch {
                    log.warn(.user_script, "script callback", .{
                        .url = self.url,
                        .type = typ,
                        .err = result.exception,
                        .stack = result.stack,
                        .@"inline" = false,
                    });
                };
            },
        }
    }
};
|
||||||
|
|
||||||
|
// Recycles script-download buffers so repeated fetches don't keep growing
// fresh ArrayLists. At most `max_concurrent_transfers` buffers are parked;
// releases beyond that cap free the buffer instead.
const BufferPool = struct {
    // Number of buffers currently parked in `available`.
    count: usize,
    available: List = .{},
    allocator: Allocator,
    max_concurrent_transfers: u8,
    // Backs the linked-list nodes themselves.
    node_pool: std.heap.MemoryPool(List.Node),

    const List = std.DoublyLinkedList(std.ArrayListUnmanaged(u8));

    fn init(allocator: Allocator, max_concurrent_transfers: u8) BufferPool {
        return .{
            .count = 0,
            .available = .{},
            .allocator = allocator,
            .max_concurrent_transfers = max_concurrent_transfers,
            .node_pool = std.heap.MemoryPool(List.Node).init(allocator),
        };
    }

    // Frees every parked buffer, then the node pool. Buffers currently
    // checked out via `get` are not tracked here.
    fn deinit(self: *BufferPool) void {
        const allocator = self.allocator;
        var it = self.available.first;
        while (it) |n| : (it = n.next) {
            n.data.deinit(allocator);
        }
        self.node_pool.deinit();
    }

    // Hands out a parked buffer, or a brand-new empty one when the pool is
    // empty.
    fn get(self: *BufferPool) ArrayListUnmanaged(u8) {
        // return a new buffer when nothing is parked
        const node = self.available.popFirst() orelse return .{};
        defer self.node_pool.destroy(node);
        self.count -= 1;
        return node.data;
    }

    // Parks `buffer` for reuse (cleared, capacity retained), or frees it
    // when the pool is already at capacity or a node can't be allocated.
    fn release(self: *BufferPool, buffer: ArrayListUnmanaged(u8)) void {
        // create mutable copy
        var b = buffer;

        if (self.count == self.max_concurrent_transfers) {
            b.deinit(self.allocator);
            return;
        }

        const node = self.node_pool.create() catch |err| {
            b.deinit(self.allocator);
            log.err(.http, "SM BufferPool release", .{ .err = err });
            return;
        };

        b.clearRetainingCapacity();
        node.* = .{ .data = b };
        self.count += 1;
        self.available.append(node);
    }
};
|
||||||
|
|
||||||
|
// Context for a blocking (synchronous) script fetch: accumulates the body
// into a pooled buffer and records the outcome in `state` for the caller
// that is driving the transfer to completion.
const Blocking = struct {
    allocator: Allocator,
    buffer_pool: *BufferPool,
    // Outcome of the transfer; starts as .running.
    state: State = .{ .running = {} },
    // Accumulated response body (taken from buffer_pool in headerCallback).
    buffer: std.ArrayListUnmanaged(u8) = .{},

    const State = union(enum) {
        running: void,
        err: anyerror,
        done: BlockingResult,
    };

    // Transfer started; log-only.
    fn startCallback(transfer: *HttpClient.Transfer) !void {
        log.debug(.http, "script fetch start", .{ .req = transfer, .blocking = true });
    }

    // Headers arrived. Rejects non-200 responses, then grabs a pooled
    // buffer for the body. `transfer.ctx` is the *Blocking instance.
    fn headerCallback(transfer: *HttpClient.Transfer) !void {
        const header = &transfer.response_header.?;
        log.debug(.http, "script header", .{
            .req = transfer,
            .blocking = true,
            .status = header.status,
            .content_type = header.contentType(),
        });

        if (header.status != 200) {
            return error.InvalidStatusCode;
        }

        var self: *Blocking = @alignCast(@ptrCast(transfer.ctx));
        self.buffer = self.buffer_pool.get();
    }

    // Body chunk arrived; append it, logging (and propagating) any
    // allocation failure.
    fn dataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
        // too verbose
        // log.debug(.http, "script data chunk", .{
        // .req = transfer,
        // .blocking = true,
        // });

        var self: *Blocking = @alignCast(@ptrCast(transfer.ctx));
        self.buffer.appendSlice(self.allocator, data) catch |err| {
            log.err(.http, "SM.dataCallback", .{
                .err = err,
                .len = data.len,
                .blocking = true,
                .transfer = transfer,
            });
            return err;
        };
    }

    // Transfer finished: hand buffer ownership to the BlockingResult stored
    // in `state`; the caller is responsible for deinit-ing the result.
    fn doneCallback(ctx: *anyopaque) !void {
        var self: *Blocking = @alignCast(@ptrCast(ctx));
        self.state = .{ .done = .{
            .buffer = self.buffer,
            .buffer_pool = self.buffer_pool,
        } };
    }

    // Transfer failed: record the error and return the buffer to the pool.
    fn errorCallback(ctx: *anyopaque, err: anyerror) void {
        var self: *Blocking = @alignCast(@ptrCast(ctx));
        self.state = .{ .err = err };
        self.buffer_pool.release(self.buffer);
    }
};
|
||||||
|
|
||||||
|
/// Result of a successful blocking script fetch. Owns a pooled buffer that
/// the caller must give back via `deinit`.
pub const BlockingResult = struct {
    buffer: std.ArrayListUnmanaged(u8),
    buffer_pool: *BufferPool,

    /// Returns the response buffer to the pool; `src()` is invalid after this.
    pub fn deinit(self: *BlockingResult) void {
        self.buffer_pool.release(self.buffer);
    }

    /// The downloaded script source bytes (valid until `deinit`).
    pub fn src(self: *const BlockingResult) []const u8 {
        return self.buffer.items;
    }
};
|
||||||
@@ -28,8 +28,7 @@ const Session = @import("session.zig").Session;
|
|||||||
const Notification = @import("../notification.zig").Notification;
|
const Notification = @import("../notification.zig").Notification;
|
||||||
|
|
||||||
const log = @import("../log.zig");
|
const log = @import("../log.zig");
|
||||||
|
const HttpClient = @import("../http/Client.zig");
|
||||||
const http = @import("../http/client.zig");
|
|
||||||
|
|
||||||
// Browser is an instance of the browser.
|
// Browser is an instance of the browser.
|
||||||
// You can create multiple browser instances.
|
// You can create multiple browser instances.
|
||||||
@@ -39,7 +38,7 @@ pub const Browser = struct {
|
|||||||
app: *App,
|
app: *App,
|
||||||
session: ?Session,
|
session: ?Session,
|
||||||
allocator: Allocator,
|
allocator: Allocator,
|
||||||
http_client: *http.Client,
|
http_client: *HttpClient,
|
||||||
page_arena: ArenaAllocator,
|
page_arena: ArenaAllocator,
|
||||||
session_arena: ArenaAllocator,
|
session_arena: ArenaAllocator,
|
||||||
transfer_arena: ArenaAllocator,
|
transfer_arena: ArenaAllocator,
|
||||||
@@ -53,6 +52,8 @@ pub const Browser = struct {
|
|||||||
errdefer env.deinit();
|
errdefer env.deinit();
|
||||||
|
|
||||||
const notification = try Notification.init(allocator, app.notification);
|
const notification = try Notification.init(allocator, app.notification);
|
||||||
|
app.http.client.notification = notification;
|
||||||
|
app.http.client.next_request_id = 0; // Should we track ids in CDP only?
|
||||||
errdefer notification.deinit();
|
errdefer notification.deinit();
|
||||||
|
|
||||||
return .{
|
return .{
|
||||||
@@ -61,7 +62,7 @@ pub const Browser = struct {
|
|||||||
.session = null,
|
.session = null,
|
||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
.notification = notification,
|
.notification = notification,
|
||||||
.http_client = &app.http_client,
|
.http_client = app.http.client,
|
||||||
.page_arena = ArenaAllocator.init(allocator),
|
.page_arena = ArenaAllocator.init(allocator),
|
||||||
.session_arena = ArenaAllocator.init(allocator),
|
.session_arena = ArenaAllocator.init(allocator),
|
||||||
.transfer_arena = ArenaAllocator.init(allocator),
|
.transfer_arena = ArenaAllocator.init(allocator),
|
||||||
@@ -75,6 +76,7 @@ pub const Browser = struct {
|
|||||||
self.page_arena.deinit();
|
self.page_arena.deinit();
|
||||||
self.session_arena.deinit();
|
self.session_arena.deinit();
|
||||||
self.transfer_arena.deinit();
|
self.transfer_arena.deinit();
|
||||||
|
self.http_client.notification = null;
|
||||||
self.notification.deinit();
|
self.notification.deinit();
|
||||||
self.state_pool.deinit();
|
self.state_pool.deinit();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,79 +0,0 @@
|
|||||||
const std = @import("std");
const Allocator = std.mem.Allocator;

// Represents https://developer.mozilla.org/en-US/docs/Web/URI/Reference/Schemes/data
pub const DataURI = struct {
    // True when the payload was base64-decoded into an owned allocation
    // (and thus must be freed in deinit).
    was_base64_encoded: bool,
    // The contents in the uri. It will be base64 decoded but not prepared in
    // any way for mime.charset.
    data: []const u8,

    // Parses data:[<media-type>][;base64],<data>
    // Returns null when `src` is not a data: URI at all. The returned data
    // borrows `src` for plain payloads and owns a fresh allocation for
    // base64 payloads; call deinit with the same allocator.
    pub fn parse(allocator: Allocator, src: []const u8) !?DataURI {
        const scheme = "data:";
        if (!std.mem.startsWith(u8, src, scheme)) {
            return null;
        }

        const rest = src[scheme.len..];
        // A comma is mandatory; without one this is not a valid data URI.
        const comma = std.mem.indexOfScalar(u8, rest, ',') orelse return null;

        // Everything before the comma is metadata (media type + flags).
        var meta = rest[0..comma];
        const is_base64 = std.mem.endsWith(u8, meta, ";base64");
        if (is_base64) {
            meta = meta[0 .. meta.len - ";base64".len];
        }

        // TODO: Extract mime type. This not trivial because Mime.parse requires
        // a []u8 and might mutate the src. And, the DataURI.parse references atm
        // do not have deinit calls.

        // Everything after the comma is the payload.
        var payload = rest[comma + 1 ..];
        if (is_base64) {
            const decoder = std.base64.standard.Decoder;
            const decoded = try allocator.alloc(u8, try decoder.calcSizeForSlice(payload));
            errdefer allocator.free(decoded);
            try decoder.decode(decoded, payload);
            payload = decoded;
        }

        return .{
            .was_base64_encoded = is_base64,
            .data = payload,
        };
    }

    // Frees the decoded payload; a no-op for non-base64 URIs, whose data
    // still points into the caller's original string.
    pub fn deinit(self: *const DataURI, allocator: Allocator) void {
        if (self.was_base64_encoded) {
            allocator.free(self.data);
        }
    }
};

const testing = std.testing;
test "DataURI: parse valid" {
    try test_valid("data:text/javascript; charset=utf-8;base64,Zm9v", "foo");
    try test_valid("data:text/javascript; charset=utf-8;,foo", "foo");
    try test_valid("data:,foo", "foo");
}

test "DataURI: parse invalid" {
    try test_cannot_parse("atad:,foo");
    try test_cannot_parse("data:foo");
    try test_cannot_parse("data:");
}

fn test_valid(uri: []const u8, expected: []const u8) !void {
    const data_uri = try DataURI.parse(testing.allocator, uri) orelse return error.TestFailed;
    defer data_uri.deinit(testing.allocator);
    try testing.expectEqualStrings(expected, data_uri.data);
}

fn test_cannot_parse(uri: []const u8) !void {
    try testing.expectEqual(null, DataURI.parse(testing.allocator, uri));
}
|
|
||||||
@@ -22,7 +22,6 @@ const Allocator = std.mem.Allocator;
|
|||||||
const log = @import("../../log.zig");
|
const log = @import("../../log.zig");
|
||||||
const parser = @import("../netsurf.zig");
|
const parser = @import("../netsurf.zig");
|
||||||
const Page = @import("../page.zig").Page;
|
const Page = @import("../page.zig").Page;
|
||||||
const Loop = @import("../../runtime/loop.zig").Loop;
|
|
||||||
|
|
||||||
const Env = @import("../env.zig").Env;
|
const Env = @import("../env.zig").Env;
|
||||||
const NodeList = @import("nodelist.zig").NodeList;
|
const NodeList = @import("nodelist.zig").NodeList;
|
||||||
@@ -36,12 +35,10 @@ const Walker = @import("../dom/walker.zig").WalkerChildren;
|
|||||||
|
|
||||||
// WEB IDL https://dom.spec.whatwg.org/#interface-mutationobserver
|
// WEB IDL https://dom.spec.whatwg.org/#interface-mutationobserver
|
||||||
pub const MutationObserver = struct {
|
pub const MutationObserver = struct {
|
||||||
loop: *Loop,
|
page: *Page,
|
||||||
cbk: Env.Function,
|
cbk: Env.Function,
|
||||||
arena: Allocator,
|
|
||||||
connected: bool,
|
connected: bool,
|
||||||
scheduled: bool,
|
scheduled: bool,
|
||||||
loop_node: Loop.CallbackNode,
|
|
||||||
|
|
||||||
// List of records which were observed. When the call scope ends, we need to
|
// List of records which were observed. When the call scope ends, we need to
|
||||||
// execute our callback with it.
|
// execute our callback with it.
|
||||||
@@ -50,17 +47,15 @@ pub const MutationObserver = struct {
|
|||||||
pub fn constructor(cbk: Env.Function, page: *Page) !MutationObserver {
|
pub fn constructor(cbk: Env.Function, page: *Page) !MutationObserver {
|
||||||
return .{
|
return .{
|
||||||
.cbk = cbk,
|
.cbk = cbk,
|
||||||
.loop = page.loop,
|
.page = page,
|
||||||
.observed = .{},
|
.observed = .{},
|
||||||
.connected = true,
|
.connected = true,
|
||||||
.scheduled = false,
|
.scheduled = false,
|
||||||
.arena = page.arena,
|
|
||||||
.loop_node = .{ .func = callback },
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _observe(self: *MutationObserver, node: *parser.Node, options_: ?Options) !void {
|
pub fn _observe(self: *MutationObserver, node: *parser.Node, options_: ?Options) !void {
|
||||||
const arena = self.arena;
|
const arena = self.page.arena;
|
||||||
var options = options_ orelse Options{};
|
var options = options_ orelse Options{};
|
||||||
if (options.attributeFilter.len > 0) {
|
if (options.attributeFilter.len > 0) {
|
||||||
options.attributeFilter = try arena.dupe([]const u8, options.attributeFilter);
|
options.attributeFilter = try arena.dupe([]const u8, options.attributeFilter);
|
||||||
@@ -115,17 +110,17 @@ pub const MutationObserver = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn callback(node: *Loop.CallbackNode, _: *?u63) void {
|
fn callback(ctx: *anyopaque) ?u32 {
|
||||||
const self: *MutationObserver = @fieldParentPtr("loop_node", node);
|
const self: *MutationObserver = @alignCast(@ptrCast(ctx));
|
||||||
if (self.connected == false) {
|
if (self.connected == false) {
|
||||||
self.scheduled = true;
|
self.scheduled = true;
|
||||||
return;
|
return null;
|
||||||
}
|
}
|
||||||
self.scheduled = false;
|
self.scheduled = false;
|
||||||
|
|
||||||
const records = self.observed.items;
|
const records = self.observed.items;
|
||||||
if (records.len == 0) {
|
if (records.len == 0) {
|
||||||
return;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
defer self.observed.clearRetainingCapacity();
|
defer self.observed.clearRetainingCapacity();
|
||||||
@@ -138,6 +133,7 @@ pub const MutationObserver = struct {
|
|||||||
.source = "mutation observer",
|
.source = "mutation observer",
|
||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO
|
// TODO
|
||||||
@@ -301,7 +297,7 @@ const Observer = struct {
|
|||||||
.type = event_type.recordType(),
|
.type = event_type.recordType(),
|
||||||
};
|
};
|
||||||
|
|
||||||
const arena = mutation_observer.arena;
|
const arena = mutation_observer.page.arena;
|
||||||
switch (event_type) {
|
switch (event_type) {
|
||||||
.DOMAttrModified => {
|
.DOMAttrModified => {
|
||||||
record.attribute_name = parser.mutationEventAttributeName(mutation_event) catch null;
|
record.attribute_name = parser.mutationEventAttributeName(mutation_event) catch null;
|
||||||
@@ -330,7 +326,12 @@ const Observer = struct {
|
|||||||
|
|
||||||
if (mutation_observer.scheduled == false) {
|
if (mutation_observer.scheduled == false) {
|
||||||
mutation_observer.scheduled = true;
|
mutation_observer.scheduled = true;
|
||||||
_ = try mutation_observer.loop.timeout(0, &mutation_observer.loop_node);
|
try mutation_observer.page.scheduler.add(
|
||||||
|
mutation_observer,
|
||||||
|
MutationObserver.callback,
|
||||||
|
0,
|
||||||
|
.{ .name = "mutation_observer" },
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -21,7 +21,6 @@ const log = @import("../../log.zig");
|
|||||||
const parser = @import("../netsurf.zig");
|
const parser = @import("../netsurf.zig");
|
||||||
const Env = @import("../env.zig").Env;
|
const Env = @import("../env.zig").Env;
|
||||||
const Page = @import("../page.zig").Page;
|
const Page = @import("../page.zig").Page;
|
||||||
const Loop = @import("../../runtime/loop.zig").Loop;
|
|
||||||
const EventTarget = @import("../dom/event_target.zig").EventTarget;
|
const EventTarget = @import("../dom/event_target.zig").EventTarget;
|
||||||
|
|
||||||
pub const Interfaces = .{
|
pub const Interfaces = .{
|
||||||
@@ -77,11 +76,9 @@ pub const AbortSignal = struct {
|
|||||||
const callback = try page.arena.create(TimeoutCallback);
|
const callback = try page.arena.create(TimeoutCallback);
|
||||||
callback.* = .{
|
callback.* = .{
|
||||||
.signal = .init,
|
.signal = .init,
|
||||||
.node = .{ .func = TimeoutCallback.run },
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const delay_ms: u63 = @as(u63, delay) * std.time.ns_per_ms;
|
try page.scheduler.add(callback, TimeoutCallback.run, delay, .{ .name = "abort_signal" });
|
||||||
_ = try page.loop.timeout(delay_ms, &callback.node);
|
|
||||||
return &callback.signal;
|
return &callback.signal;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,15 +128,12 @@ pub const AbortSignal = struct {
|
|||||||
const TimeoutCallback = struct {
|
const TimeoutCallback = struct {
|
||||||
signal: AbortSignal,
|
signal: AbortSignal,
|
||||||
|
|
||||||
// This is the internal data that the event loop tracks. We'll get this
|
fn run(ctx: *anyopaque) ?u32 {
|
||||||
// back in run and, from it, can get our TimeoutCallback instance
|
const self: *TimeoutCallback = @alignCast(@ptrCast(ctx));
|
||||||
node: Loop.CallbackNode = undefined,
|
|
||||||
|
|
||||||
fn run(node: *Loop.CallbackNode, _: *?u63) void {
|
|
||||||
const self: *TimeoutCallback = @fieldParentPtr("node", node);
|
|
||||||
self.signal.abort("TimeoutError") catch |err| {
|
self.signal.abort("TimeoutError") catch |err| {
|
||||||
log.warn(.app, "abort signal timeout", .{ .err = err });
|
log.warn(.app, "abort signal timeout", .{ .err = err });
|
||||||
};
|
};
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -85,7 +85,10 @@ pub const HTMLDocument = struct {
|
|||||||
|
|
||||||
pub fn get_cookie(_: *parser.DocumentHTML, page: *Page) ![]const u8 {
|
pub fn get_cookie(_: *parser.DocumentHTML, page: *Page) ![]const u8 {
|
||||||
var buf: std.ArrayListUnmanaged(u8) = .{};
|
var buf: std.ArrayListUnmanaged(u8) = .{};
|
||||||
try page.cookie_jar.forRequest(&page.url.uri, buf.writer(page.arena), .{ .navigation = true, .is_http = false });
|
try page.cookie_jar.forRequest(&page.url.uri, buf.writer(page.arena), .{
|
||||||
|
.is_http = false,
|
||||||
|
.is_navigation = true,
|
||||||
|
});
|
||||||
return buf.items;
|
return buf.items;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -22,7 +22,6 @@ const log = @import("../../log.zig");
|
|||||||
const parser = @import("../netsurf.zig");
|
const parser = @import("../netsurf.zig");
|
||||||
const Env = @import("../env.zig").Env;
|
const Env = @import("../env.zig").Env;
|
||||||
const Page = @import("../page.zig").Page;
|
const Page = @import("../page.zig").Page;
|
||||||
const Loop = @import("../../runtime/loop.zig").Loop;
|
|
||||||
|
|
||||||
const Navigator = @import("navigator.zig").Navigator;
|
const Navigator = @import("navigator.zig").Navigator;
|
||||||
const History = @import("history.zig").History;
|
const History = @import("history.zig").History;
|
||||||
@@ -57,7 +56,7 @@ pub const Window = struct {
|
|||||||
|
|
||||||
// counter for having unique timer ids
|
// counter for having unique timer ids
|
||||||
timer_id: u30 = 0,
|
timer_id: u30 = 0,
|
||||||
timers: std.AutoHashMapUnmanaged(u32, *TimerCallback) = .{},
|
timers: std.AutoHashMapUnmanaged(u32, void) = .{},
|
||||||
|
|
||||||
crypto: Crypto = .{},
|
crypto: Crypto = .{},
|
||||||
console: Console = .{},
|
console: Console = .{},
|
||||||
@@ -179,34 +178,31 @@ pub const Window = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn _requestAnimationFrame(self: *Window, cbk: Function, page: *Page) !u32 {
|
pub fn _requestAnimationFrame(self: *Window, cbk: Function, page: *Page) !u32 {
|
||||||
return self.createTimeout(cbk, 5, page, .{ .animation_frame = true });
|
return self.createTimeout(cbk, 5, page, .{ .animation_frame = true, .name = "animationFrame" });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _cancelAnimationFrame(self: *Window, id: u32, page: *Page) !void {
|
pub fn _cancelAnimationFrame(self: *Window, id: u32) !void {
|
||||||
const kv = self.timers.fetchRemove(id) orelse return;
|
_ = self.timers.remove(id);
|
||||||
return page.loop.cancel(kv.value.loop_id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _setTimeout(self: *Window, cbk: Function, delay: ?u32, params: []Env.JsObject, page: *Page) !u32 {
|
pub fn _setTimeout(self: *Window, cbk: Function, delay: ?u32, params: []Env.JsObject, page: *Page) !u32 {
|
||||||
return self.createTimeout(cbk, delay, page, .{ .args = params });
|
return self.createTimeout(cbk, delay, page, .{ .args = params, .name = "setTimeout" });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _setInterval(self: *Window, cbk: Function, delay: ?u32, params: []Env.JsObject, page: *Page) !u32 {
|
pub fn _setInterval(self: *Window, cbk: Function, delay: ?u32, params: []Env.JsObject, page: *Page) !u32 {
|
||||||
return self.createTimeout(cbk, delay, page, .{ .repeat = true, .args = params });
|
return self.createTimeout(cbk, delay, page, .{ .repeat = true, .args = params, .name = "setInterval" });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _clearTimeout(self: *Window, id: u32, page: *Page) !void {
|
pub fn _clearTimeout(self: *Window, id: u32) !void {
|
||||||
const kv = self.timers.fetchRemove(id) orelse return;
|
_ = self.timers.remove(id);
|
||||||
return page.loop.cancel(kv.value.loop_id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _clearInterval(self: *Window, id: u32, page: *Page) !void {
|
pub fn _clearInterval(self: *Window, id: u32) !void {
|
||||||
const kv = self.timers.fetchRemove(id) orelse return;
|
_ = self.timers.remove(id);
|
||||||
return page.loop.cancel(kv.value.loop_id);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _queueMicrotask(self: *Window, cbk: Function, page: *Page) !u32 {
|
pub fn _queueMicrotask(self: *Window, cbk: Function, page: *Page) !u32 {
|
||||||
return self.createTimeout(cbk, 0, page, .{});
|
return self.createTimeout(cbk, 0, page, .{ .name = "queueMicrotask" });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _matchMedia(_: *const Window, media: []const u8, page: *Page) !MediaQueryList {
|
pub fn _matchMedia(_: *const Window, media: []const u8, page: *Page) !MediaQueryList {
|
||||||
@@ -232,6 +228,7 @@ pub const Window = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const CreateTimeoutOpts = struct {
|
const CreateTimeoutOpts = struct {
|
||||||
|
name: []const u8,
|
||||||
args: []Env.JsObject = &.{},
|
args: []Env.JsObject = &.{},
|
||||||
repeat: bool = false,
|
repeat: bool = false,
|
||||||
animation_frame: bool = false,
|
animation_frame: bool = false,
|
||||||
@@ -258,6 +255,8 @@ pub const Window = struct {
|
|||||||
if (gop.found_existing) {
|
if (gop.found_existing) {
|
||||||
// this can only happen if we've created 2^31 timeouts.
|
// this can only happen if we've created 2^31 timeouts.
|
||||||
return error.TooManyTimeout;
|
return error.TooManyTimeout;
|
||||||
|
} else {
|
||||||
|
gop.value_ptr.* = {};
|
||||||
}
|
}
|
||||||
errdefer _ = self.timers.remove(timer_id);
|
errdefer _ = self.timers.remove(timer_id);
|
||||||
|
|
||||||
@@ -270,22 +269,19 @@ pub const Window = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const delay_ms: u63 = @as(u63, delay) * std.time.ns_per_ms;
|
|
||||||
const callback = try arena.create(TimerCallback);
|
const callback = try arena.create(TimerCallback);
|
||||||
|
|
||||||
callback.* = .{
|
callback.* = .{
|
||||||
.cbk = cbk,
|
.cbk = cbk,
|
||||||
.loop_id = 0, // we're going to set this to a real value shortly
|
|
||||||
.window = self,
|
.window = self,
|
||||||
.timer_id = timer_id,
|
.timer_id = timer_id,
|
||||||
.args = persisted_args,
|
.args = persisted_args,
|
||||||
.node = .{ .func = TimerCallback.run },
|
|
||||||
.repeat = if (opts.repeat) delay_ms else null,
|
|
||||||
.animation_frame = opts.animation_frame,
|
.animation_frame = opts.animation_frame,
|
||||||
|
// setting a repeat time of 0 is illegal, doing + 1 is a simple way to avoid that
|
||||||
|
.repeat = if (opts.repeat) delay + 1 else null,
|
||||||
};
|
};
|
||||||
callback.loop_id = try page.loop.timeout(delay_ms, &callback.node);
|
|
||||||
|
|
||||||
gop.value_ptr.* = callback;
|
try page.scheduler.add(callback, TimerCallback.run, delay, .{ .name = opts.name });
|
||||||
|
|
||||||
return timer_id;
|
return timer_id;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -354,30 +350,32 @@ pub const Window = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const TimerCallback = struct {
|
const TimerCallback = struct {
|
||||||
// the internal loop id, need it when cancelling
|
|
||||||
loop_id: usize,
|
|
||||||
|
|
||||||
// the id of our timer (windows.timers key)
|
// the id of our timer (windows.timers key)
|
||||||
timer_id: u31,
|
timer_id: u31,
|
||||||
|
|
||||||
|
// if false, we'll remove the timer_id from the window.timers lookup on run
|
||||||
|
repeat: ?u32,
|
||||||
|
|
||||||
// The JavaScript callback to execute
|
// The JavaScript callback to execute
|
||||||
cbk: Function,
|
cbk: Function,
|
||||||
|
|
||||||
// This is the internal data that the event loop tracks. We'll get this
|
|
||||||
// back in run and, from it, can get our TimerCallback instance
|
|
||||||
node: Loop.CallbackNode = undefined,
|
|
||||||
|
|
||||||
// if the event should be repeated
|
|
||||||
repeat: ?u63 = null,
|
|
||||||
|
|
||||||
animation_frame: bool = false,
|
animation_frame: bool = false,
|
||||||
|
|
||||||
window: *Window,
|
window: *Window,
|
||||||
|
|
||||||
args: []Env.JsObject = &.{},
|
args: []Env.JsObject = &.{},
|
||||||
|
|
||||||
fn run(node: *Loop.CallbackNode, repeat_delay: *?u63) void {
|
fn run(ctx: *anyopaque) ?u32 {
|
||||||
const self: *TimerCallback = @fieldParentPtr("node", node);
|
const self: *TimerCallback = @alignCast(@ptrCast(ctx));
|
||||||
|
if (self.repeat != null) {
|
||||||
|
if (self.window.timers.contains(self.timer_id) == false) {
|
||||||
|
// it was called
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
} else if (self.window.timers.remove(self.timer_id) == false) {
|
||||||
|
// it was cancelled
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
var result: Function.Result = undefined;
|
var result: Function.Result = undefined;
|
||||||
|
|
||||||
@@ -396,14 +394,7 @@ const TimerCallback = struct {
|
|||||||
});
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
if (self.repeat) |r| {
|
return self.repeat;
|
||||||
// setInterval
|
|
||||||
repeat_delay.* = r;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// setTimeout
|
|
||||||
_ = self.window.timers.remove(self.timer_id);
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -412,13 +403,11 @@ test "Browser.HTML.Window" {
|
|||||||
var runner = try testing.jsRunner(testing.tracking_allocator, .{});
|
var runner = try testing.jsRunner(testing.tracking_allocator, .{});
|
||||||
defer runner.deinit();
|
defer runner.deinit();
|
||||||
|
|
||||||
try runner.testCases(&.{
|
// try runner.testCases(&.{
|
||||||
.{ "window.parent === window", "true" },
|
// .{ "window.parent === window", "true" },
|
||||||
.{ "window.top === window", "true" },
|
// .{ "window.top === window", "true" },
|
||||||
}, .{});
|
// }, .{});
|
||||||
|
|
||||||
// requestAnimationFrame should be able to wait by recursively calling itself
|
|
||||||
// Note however that we in this test do not wait as the request is just send to the browser
|
|
||||||
try runner.testCases(&.{
|
try runner.testCases(&.{
|
||||||
.{
|
.{
|
||||||
\\ let start = 0;
|
\\ let start = 0;
|
||||||
|
|||||||
@@ -22,11 +22,11 @@ const Allocator = std.mem.Allocator;
|
|||||||
pub const Mime = struct {
|
pub const Mime = struct {
|
||||||
content_type: ContentType,
|
content_type: ContentType,
|
||||||
params: []const u8 = "",
|
params: []const u8 = "",
|
||||||
charset: ?[]const u8 = null,
|
charset: ?[:0]const u8 = null,
|
||||||
|
|
||||||
pub const unknown = Mime{
|
pub const unknown = Mime{
|
||||||
.params = "",
|
.params = "",
|
||||||
.charset = "",
|
.charset = null,
|
||||||
.content_type = .{ .unknown = {} },
|
.content_type = .{ .unknown = {} },
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ pub const Mime = struct {
|
|||||||
other: struct { type: []const u8, sub_type: []const u8 },
|
other: struct { type: []const u8, sub_type: []const u8 },
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn parse(arena: Allocator, input: []u8) !Mime {
|
pub fn parse(input: []u8) !Mime {
|
||||||
if (input.len > 255) {
|
if (input.len > 255) {
|
||||||
return error.TooBig;
|
return error.TooBig;
|
||||||
}
|
}
|
||||||
@@ -69,7 +69,7 @@ pub const Mime = struct {
|
|||||||
|
|
||||||
const params = trimLeft(normalized[type_len..]);
|
const params = trimLeft(normalized[type_len..]);
|
||||||
|
|
||||||
var charset: ?[]const u8 = null;
|
var charset: ?[:0]const u8 = null;
|
||||||
|
|
||||||
var it = std.mem.splitScalar(u8, params, ';');
|
var it = std.mem.splitScalar(u8, params, ';');
|
||||||
while (it.next()) |attr| {
|
while (it.next()) |attr| {
|
||||||
@@ -86,7 +86,37 @@ pub const Mime = struct {
|
|||||||
}, name) orelse continue;
|
}, name) orelse continue;
|
||||||
|
|
||||||
switch (attribute_name) {
|
switch (attribute_name) {
|
||||||
.charset => charset = try parseAttributeValue(arena, value),
|
.charset => {
|
||||||
|
// We used to have a proper value parser, but we currently
|
||||||
|
// only care about the charset attribute, plus only about
|
||||||
|
// the UTF-8 value. It's a lot easier to do it this way,
|
||||||
|
// and it doesn't require an allocation to (a) unescape the
|
||||||
|
// value or (b) ensure the correct lifetime.
|
||||||
|
if (value.len == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
var attribute_value = value;
|
||||||
|
if (value[0] == '"') {
|
||||||
|
if (value.len < 3 or value[value.len - 1] != '"') {
|
||||||
|
return error.Invalid;
|
||||||
|
}
|
||||||
|
attribute_value = value[1 .. value.len - 1];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (std.ascii.eqlIgnoreCase(attribute_value, "utf-8")) {
|
||||||
|
charset = "UTF-8";
|
||||||
|
} else if (std.ascii.eqlIgnoreCase(attribute_value, "iso-8859-1")) {
|
||||||
|
charset = "ISO-8859-1";
|
||||||
|
} else {
|
||||||
|
// we only care about null (which we default to UTF-8)
|
||||||
|
// or UTF-8. If this is actually set (i.e. not null)
|
||||||
|
// and isn't UTF-8, we'll just put a dummy value. If
|
||||||
|
// we want to capture the actual value, we'll need to
|
||||||
|
// dupe/allocate it. Since, for now, we don't need that
|
||||||
|
// we can avoid the allocation.
|
||||||
|
charset = "lightpanda:UNSUPPORTED";
|
||||||
|
}
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,58 +254,6 @@ pub const Mime = struct {
|
|||||||
break :blk v;
|
break :blk v;
|
||||||
};
|
};
|
||||||
|
|
||||||
fn parseAttributeValue(arena: Allocator, value: []const u8) ![]const u8 {
|
|
||||||
if (value[0] != '"') {
|
|
||||||
// almost certainly referenced from an http.Request which has its
|
|
||||||
// own lifetime.
|
|
||||||
return arena.dupe(u8, value);
|
|
||||||
}
|
|
||||||
|
|
||||||
// 1 to skip the opening quote
|
|
||||||
var value_pos: usize = 1;
|
|
||||||
var unescaped_len: usize = 0;
|
|
||||||
const last = value.len - 1;
|
|
||||||
|
|
||||||
while (value_pos < value.len) {
|
|
||||||
switch (value[value_pos]) {
|
|
||||||
'"' => break,
|
|
||||||
'\\' => {
|
|
||||||
if (value_pos == last) {
|
|
||||||
return error.Invalid;
|
|
||||||
}
|
|
||||||
const next = value[value_pos + 1];
|
|
||||||
if (T_SPECIAL[next] == false) {
|
|
||||||
return error.Invalid;
|
|
||||||
}
|
|
||||||
value_pos += 2;
|
|
||||||
},
|
|
||||||
else => value_pos += 1,
|
|
||||||
}
|
|
||||||
unescaped_len += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (unescaped_len == 0) {
|
|
||||||
return error.Invalid;
|
|
||||||
}
|
|
||||||
|
|
||||||
value_pos = 1;
|
|
||||||
const owned = try arena.alloc(u8, unescaped_len);
|
|
||||||
for (0..unescaped_len) |i| {
|
|
||||||
switch (value[value_pos]) {
|
|
||||||
'"' => break,
|
|
||||||
'\\' => {
|
|
||||||
owned[i] = value[value_pos + 1];
|
|
||||||
value_pos += 2;
|
|
||||||
},
|
|
||||||
else => |c| {
|
|
||||||
owned[i] = c;
|
|
||||||
value_pos += 1;
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return owned;
|
|
||||||
}
|
|
||||||
|
|
||||||
const VALID_CODEPOINTS = blk: {
|
const VALID_CODEPOINTS = blk: {
|
||||||
var v: [256]bool = undefined;
|
var v: [256]bool = undefined;
|
||||||
for (0..256) |i| {
|
for (0..256) |i| {
|
||||||
@@ -306,7 +284,7 @@ pub const Mime = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const testing = @import("../testing.zig");
|
const testing = @import("../testing.zig");
|
||||||
test "Mime: invalid " {
|
test "Mime: invalid" {
|
||||||
defer testing.reset();
|
defer testing.reset();
|
||||||
|
|
||||||
const invalids = [_][]const u8{
|
const invalids = [_][]const u8{
|
||||||
@@ -324,12 +302,11 @@ test "Mime: invalid " {
|
|||||||
"text/html; charset=\"\"",
|
"text/html; charset=\"\"",
|
||||||
"text/html; charset=\"",
|
"text/html; charset=\"",
|
||||||
"text/html; charset=\"\\",
|
"text/html; charset=\"\\",
|
||||||
"text/html; charset=\"\\a\"", // invalid to escape non special characters
|
|
||||||
};
|
};
|
||||||
|
|
||||||
for (invalids) |invalid| {
|
for (invalids) |invalid| {
|
||||||
const mutable_input = try testing.arena_allocator.dupe(u8, invalid);
|
const mutable_input = try testing.arena_allocator.dupe(u8, invalid);
|
||||||
try testing.expectError(error.Invalid, Mime.parse(undefined, mutable_input));
|
try testing.expectError(error.Invalid, Mime.parse(mutable_input));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -386,19 +363,19 @@ test "Mime: parse charset" {
|
|||||||
|
|
||||||
try expect(.{
|
try expect(.{
|
||||||
.content_type = .{ .text_xml = {} },
|
.content_type = .{ .text_xml = {} },
|
||||||
.charset = "utf-8",
|
.charset = "UTF-8",
|
||||||
.params = "charset=utf-8",
|
.params = "charset=utf-8",
|
||||||
}, "text/xml; charset=utf-8");
|
}, "text/xml; charset=utf-8");
|
||||||
|
|
||||||
try expect(.{
|
try expect(.{
|
||||||
.content_type = .{ .text_xml = {} },
|
.content_type = .{ .text_xml = {} },
|
||||||
.charset = "utf-8",
|
.charset = "UTF-8",
|
||||||
.params = "charset=\"utf-8\"",
|
.params = "charset=\"utf-8\"",
|
||||||
}, "text/xml;charset=\"utf-8\"");
|
}, "text/xml;charset=\"utf-8\"");
|
||||||
|
|
||||||
try expect(.{
|
try expect(.{
|
||||||
.content_type = .{ .text_xml = {} },
|
.content_type = .{ .text_xml = {} },
|
||||||
.charset = "\\ \" ",
|
.charset = "lightpanda:UNSUPPORTED",
|
||||||
.params = "charset=\"\\\\ \\\" \"",
|
.params = "charset=\"\\\\ \\\" \"",
|
||||||
}, "text/xml;charset=\"\\\\ \\\" \" ");
|
}, "text/xml;charset=\"\\\\ \\\" \" ");
|
||||||
}
|
}
|
||||||
@@ -409,7 +386,7 @@ test "Mime: isHTML" {
|
|||||||
const isHTML = struct {
|
const isHTML = struct {
|
||||||
fn isHTML(expected: bool, input: []const u8) !void {
|
fn isHTML(expected: bool, input: []const u8) !void {
|
||||||
const mutable_input = try testing.arena_allocator.dupe(u8, input);
|
const mutable_input = try testing.arena_allocator.dupe(u8, input);
|
||||||
var mime = try Mime.parse(testing.arena_allocator, mutable_input);
|
var mime = try Mime.parse(mutable_input);
|
||||||
try testing.expectEqual(expected, mime.isHTML());
|
try testing.expectEqual(expected, mime.isHTML());
|
||||||
}
|
}
|
||||||
}.isHTML;
|
}.isHTML;
|
||||||
@@ -495,7 +472,7 @@ const Expectation = struct {
|
|||||||
fn expect(expected: Expectation, input: []const u8) !void {
|
fn expect(expected: Expectation, input: []const u8) !void {
|
||||||
const mutable_input = try testing.arena_allocator.dupe(u8, input);
|
const mutable_input = try testing.arena_allocator.dupe(u8, input);
|
||||||
|
|
||||||
const actual = try Mime.parse(testing.arena_allocator, mutable_input);
|
const actual = try Mime.parse(mutable_input);
|
||||||
try testing.expectEqual(
|
try testing.expectEqual(
|
||||||
std.meta.activeTag(expected.content_type),
|
std.meta.activeTag(expected.content_type),
|
||||||
std.meta.activeTag(actual.content_type),
|
std.meta.activeTag(actual.content_type),
|
||||||
|
|||||||
@@ -2371,6 +2371,31 @@ fn parserErr(err: HubbubErr) ParserError!void {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub const Parser = struct {
|
||||||
|
html_doc: *DocumentHTML,
|
||||||
|
parser: *c.dom_hubbub_parser,
|
||||||
|
|
||||||
|
pub fn init(encoding: ?[:0]const u8) !Parser {
|
||||||
|
var params = parseParams(encoding);
|
||||||
|
var doc: ?*c.dom_document = undefined;
|
||||||
|
var parser: ?*c.dom_hubbub_parser = undefined;
|
||||||
|
|
||||||
|
try parserErr(c.dom_hubbub_parser_create(¶ms, &parser, &doc));
|
||||||
|
return .{
|
||||||
|
.parser = parser.?,
|
||||||
|
.html_doc = @ptrCast(doc.?),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deinit(self: *Parser) void {
|
||||||
|
c.dom_hubbub_parser_destroy(self.parser);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn process(self: *Parser, data: []const u8) !void {
|
||||||
|
try parserErr(c.dom_hubbub_parser_parse_chunk(self.parser, data.ptr, data.len));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// documentHTMLParseFromStr parses the given HTML string.
|
// documentHTMLParseFromStr parses the given HTML string.
|
||||||
// The caller is responsible for closing the document.
|
// The caller is responsible for closing the document.
|
||||||
pub fn documentHTMLParseFromStr(str: []const u8) !*DocumentHTML {
|
pub fn documentHTMLParseFromStr(str: []const u8) !*DocumentHTML {
|
||||||
@@ -2379,18 +2404,10 @@ pub fn documentHTMLParseFromStr(str: []const u8) !*DocumentHTML {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn documentHTMLParse(reader: anytype, enc: ?[:0]const u8) !*DocumentHTML {
|
pub fn documentHTMLParse(reader: anytype, enc: ?[:0]const u8) !*DocumentHTML {
|
||||||
var parser: ?*c.dom_hubbub_parser = undefined;
|
var parser = try Parser.init(enc);
|
||||||
var doc: ?*c.dom_document = undefined;
|
defer parser.deinit();
|
||||||
var err: c.hubbub_error = undefined;
|
try parseData(parser.parser, reader);
|
||||||
var params = parseParams(enc);
|
return parser.html_doc;
|
||||||
|
|
||||||
err = c.dom_hubbub_parser_create(¶ms, &parser, &doc);
|
|
||||||
try parserErr(err);
|
|
||||||
defer c.dom_hubbub_parser_destroy(parser);
|
|
||||||
|
|
||||||
try parseData(parser.?, reader);
|
|
||||||
|
|
||||||
return @as(*DocumentHTML, @ptrCast(doc.?));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn documentParseFragmentFromStr(self: *Document, str: []const u8) !*DocumentFragment {
|
pub fn documentParseFragmentFromStr(self: *Document, str: []const u8) !*DocumentFragment {
|
||||||
|
|||||||
1206
src/browser/page.zig
1206
src/browser/page.zig
File diff suppressed because it is too large
Load Diff
@@ -56,6 +56,12 @@ pub const Session = struct {
|
|||||||
|
|
||||||
page: ?Page = null,
|
page: ?Page = null,
|
||||||
|
|
||||||
|
// If the current page want to navigate to a new page
|
||||||
|
// (form submit, link click, top.location = xxx)
|
||||||
|
// the details are stored here so that, on the next call to session.wait
|
||||||
|
// we can destroy the current page and start a new one.
|
||||||
|
queued_navigation: ?QueuedNavigation,
|
||||||
|
|
||||||
pub fn init(self: *Session, browser: *Browser) !void {
|
pub fn init(self: *Session, browser: *Browser) !void {
|
||||||
var executor = try browser.env.newExecutionWorld();
|
var executor = try browser.env.newExecutionWorld();
|
||||||
errdefer executor.deinit();
|
errdefer executor.deinit();
|
||||||
@@ -64,6 +70,7 @@ pub const Session = struct {
|
|||||||
self.* = .{
|
self.* = .{
|
||||||
.browser = browser,
|
.browser = browser,
|
||||||
.executor = executor,
|
.executor = executor,
|
||||||
|
.queued_navigation = null,
|
||||||
.arena = browser.session_arena.allocator(),
|
.arena = browser.session_arena.allocator(),
|
||||||
.storage_shed = storage.Shed.init(allocator),
|
.storage_shed = storage.Shed.init(allocator),
|
||||||
.cookie_jar = storage.CookieJar.init(allocator),
|
.cookie_jar = storage.CookieJar.init(allocator),
|
||||||
@@ -111,23 +118,13 @@ pub const Session = struct {
|
|||||||
|
|
||||||
std.debug.assert(self.page != null);
|
std.debug.assert(self.page != null);
|
||||||
|
|
||||||
// Cleanup is a bit sensitive. We could still have inflight I/O. For
|
// RemoveJsContext() will execute the destructor of any type that
|
||||||
// example, we could have an XHR request which is still in the connect
|
// registered a destructor (e.g. XMLHttpRequest).
|
||||||
// phase. It's important that we clean these up, as they're holding onto
|
// Should be called before we deinit the page, because these objects
|
||||||
// limited resources (like our fixed-sized http state pool).
|
// could be referencing it.
|
||||||
//
|
|
||||||
// First thing we do, is removeJsContext() which will execute the destructor
|
|
||||||
// of any type that registered a destructor (e.g. XMLHttpRequest).
|
|
||||||
// This will shutdown any pending sockets, which begins our cleaning
|
|
||||||
// processed
|
|
||||||
self.executor.removeJsContext();
|
self.executor.removeJsContext();
|
||||||
|
|
||||||
// Second thing we do is reset the loop. This increments the loop ctx_id
|
self.page.?.deinit();
|
||||||
// so that any "stale" timeouts we process will get ignored. We need to
|
|
||||||
// do this BEFORE running the loop because, at this point, things like
|
|
||||||
// window.setTimeout and running microtasks should be ignored
|
|
||||||
self.browser.app.loop.reset();
|
|
||||||
|
|
||||||
self.page = null;
|
self.page = null;
|
||||||
|
|
||||||
// clear netsurf memory arena.
|
// clear netsurf memory arena.
|
||||||
@@ -139,4 +136,40 @@ pub const Session = struct {
|
|||||||
pub fn currentPage(self: *Session) ?*Page {
|
pub fn currentPage(self: *Session) ?*Page {
|
||||||
return &(self.page orelse return null);
|
return &(self.page orelse return null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn wait(self: *Session, wait_sec: usize) void {
|
||||||
|
if (self.queued_navigation) |qn| {
|
||||||
|
// This was already aborted on the page, but it would be pretty
|
||||||
|
// bad if old requests went to the new page, so let's make double sure
|
||||||
|
self.browser.http_client.abort();
|
||||||
|
|
||||||
|
// Page.navigateFromWebAPI terminatedExecution. If we don't resume
|
||||||
|
// it before doing a shutdown we'll get an error.
|
||||||
|
self.executor.resumeExecution();
|
||||||
|
self.removePage();
|
||||||
|
self.queued_navigation = null;
|
||||||
|
|
||||||
|
const page = self.createPage() catch |err| {
|
||||||
|
log.err(.browser, "queued navigation page error", .{
|
||||||
|
.err = err,
|
||||||
|
.url = qn.url,
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
page.navigate(qn.url, qn.opts) catch |err| {
|
||||||
|
log.err(.browser, "queued navigation error", .{ .err = err, .url = qn.url });
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (self.page) |*page| {
|
||||||
|
page.wait(wait_sec);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const QueuedNavigation = struct {
|
||||||
|
url: []const u8,
|
||||||
|
opts: NavigateOpts,
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -4,15 +4,15 @@ const Allocator = std.mem.Allocator;
|
|||||||
const ArenaAllocator = std.heap.ArenaAllocator;
|
const ArenaAllocator = std.heap.ArenaAllocator;
|
||||||
|
|
||||||
const log = @import("../../log.zig");
|
const log = @import("../../log.zig");
|
||||||
const http = @import("../../http/client.zig");
|
|
||||||
const DateTime = @import("../../datetime.zig").DateTime;
|
const DateTime = @import("../../datetime.zig").DateTime;
|
||||||
const public_suffix_list = @import("../../data/public_suffix_list.zig").lookup;
|
const public_suffix_list = @import("../../data/public_suffix_list.zig").lookup;
|
||||||
|
|
||||||
pub const LookupOpts = struct {
|
pub const LookupOpts = struct {
|
||||||
request_time: ?i64 = null,
|
request_time: ?i64 = null,
|
||||||
origin_uri: ?*const Uri = null,
|
origin_uri: ?*const Uri = null,
|
||||||
navigation: bool = true,
|
|
||||||
is_http: bool,
|
is_http: bool,
|
||||||
|
is_navigation: bool = true,
|
||||||
|
prefix: ?[]const u8 = null,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const Jar = struct {
|
pub const Jar = struct {
|
||||||
@@ -92,10 +92,15 @@ pub const Jar = struct {
|
|||||||
|
|
||||||
var first = true;
|
var first = true;
|
||||||
for (self.cookies.items) |*cookie| {
|
for (self.cookies.items) |*cookie| {
|
||||||
if (!cookie.appliesTo(&target, same_site, opts.navigation, opts.is_http)) continue;
|
if (!cookie.appliesTo(&target, same_site, opts.is_navigation, opts.is_http)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
// we have a match!
|
// we have a match!
|
||||||
if (first) {
|
if (first) {
|
||||||
|
if (opts.prefix) |prefix| {
|
||||||
|
try writer.writeAll(prefix);
|
||||||
|
}
|
||||||
first = false;
|
first = false;
|
||||||
} else {
|
} else {
|
||||||
try writer.writeAll("; ");
|
try writer.writeAll("; ");
|
||||||
@@ -104,16 +109,14 @@ pub const Jar = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn populateFromResponse(self: *Jar, uri: *const Uri, header: *const http.ResponseHeader) !void {
|
pub fn populateFromResponse(self: *Jar, uri: *const Uri, set_cookie: []const u8) !void {
|
||||||
|
const c = Cookie.parse(self.allocator, uri, set_cookie) catch |err| {
|
||||||
|
log.warn(.web_api, "cookie parse failed", .{ .raw = set_cookie, .err = err });
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
const now = std.time.timestamp();
|
const now = std.time.timestamp();
|
||||||
var it = header.iterate("set-cookie");
|
try self.add(c, now);
|
||||||
while (it.next()) |set_cookie| {
|
|
||||||
const c = Cookie.parse(self.allocator, uri, set_cookie) catch |err| {
|
|
||||||
log.warn(.web_api, "cookie parse failed", .{ .raw = set_cookie, .err = err });
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
try self.add(c, now);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn writeCookie(cookie: *const Cookie, writer: anytype) !void {
|
fn writeCookie(cookie: *const Cookie, writer: anytype) !void {
|
||||||
@@ -429,7 +432,7 @@ pub const Cookie = struct {
|
|||||||
return .{ name, value, rest };
|
return .{ name, value, rest };
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn appliesTo(self: *const Cookie, url: *const PreparedUri, same_site: bool, navigation: bool, is_http: bool) bool {
|
pub fn appliesTo(self: *const Cookie, url: *const PreparedUri, same_site: bool, is_navigation: bool, is_http: bool) bool {
|
||||||
if (self.http_only and is_http == false) {
|
if (self.http_only and is_http == false) {
|
||||||
// http only cookies can be accessed from Javascript
|
// http only cookies can be accessed from Javascript
|
||||||
return false;
|
return false;
|
||||||
@@ -448,7 +451,7 @@ pub const Cookie = struct {
|
|||||||
// and cookie.same_site == .lax
|
// and cookie.same_site == .lax
|
||||||
switch (self.same_site) {
|
switch (self.same_site) {
|
||||||
.strict => return false,
|
.strict => return false,
|
||||||
.lax => if (navigation == false) return false,
|
.lax => if (is_navigation == false) return false,
|
||||||
.none => {},
|
.none => {},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -619,7 +622,7 @@ test "Jar: forRequest" {
|
|||||||
|
|
||||||
// nothing fancy here
|
// nothing fancy here
|
||||||
try expectCookies("global1=1; global2=2", &jar, test_uri, .{ .is_http = true });
|
try expectCookies("global1=1; global2=2", &jar, test_uri, .{ .is_http = true });
|
||||||
try expectCookies("global1=1; global2=2", &jar, test_uri, .{ .origin_uri = &test_uri, .navigation = false, .is_http = true });
|
try expectCookies("global1=1; global2=2", &jar, test_uri, .{ .origin_uri = &test_uri, .is_navigation = false, .is_http = true });
|
||||||
|
|
||||||
// We have a cookie where Domain=lightpanda.io
|
// We have a cookie where Domain=lightpanda.io
|
||||||
// This should _not_ match xyxlightpanda.io
|
// This should _not_ match xyxlightpanda.io
|
||||||
@@ -685,22 +688,22 @@ test "Jar: forRequest" {
|
|||||||
// non-navigational cross domain, insecure
|
// non-navigational cross domain, insecure
|
||||||
try expectCookies("", &jar, try std.Uri.parse("http://lightpanda.io/x/"), .{
|
try expectCookies("", &jar, try std.Uri.parse("http://lightpanda.io/x/"), .{
|
||||||
.origin_uri = &(try std.Uri.parse("https://example.com/")),
|
.origin_uri = &(try std.Uri.parse("https://example.com/")),
|
||||||
.navigation = false,
|
|
||||||
.is_http = true,
|
.is_http = true,
|
||||||
|
.is_navigation = false,
|
||||||
});
|
});
|
||||||
|
|
||||||
// non-navigational cross domain, secure
|
// non-navigational cross domain, secure
|
||||||
try expectCookies("sitenone=6", &jar, try std.Uri.parse("https://lightpanda.io/x/"), .{
|
try expectCookies("sitenone=6", &jar, try std.Uri.parse("https://lightpanda.io/x/"), .{
|
||||||
.origin_uri = &(try std.Uri.parse("https://example.com/")),
|
.origin_uri = &(try std.Uri.parse("https://example.com/")),
|
||||||
.navigation = false,
|
|
||||||
.is_http = true,
|
.is_http = true,
|
||||||
|
.is_navigation = false,
|
||||||
});
|
});
|
||||||
|
|
||||||
// non-navigational same origin
|
// non-navigational same origin
|
||||||
try expectCookies("global1=1; global2=2; sitelax=7; sitestrict=8", &jar, try std.Uri.parse("http://lightpanda.io/x/"), .{
|
try expectCookies("global1=1; global2=2; sitelax=7; sitestrict=8", &jar, try std.Uri.parse("http://lightpanda.io/x/"), .{
|
||||||
.origin_uri = &(try std.Uri.parse("https://lightpanda.io/")),
|
.origin_uri = &(try std.Uri.parse("https://lightpanda.io/")),
|
||||||
.navigation = false,
|
|
||||||
.is_http = true,
|
.is_http = true,
|
||||||
|
.is_navigation = false,
|
||||||
});
|
});
|
||||||
|
|
||||||
// exact domain match + suffix
|
// exact domain match + suffix
|
||||||
|
|||||||
@@ -17,6 +17,7 @@
|
|||||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
|
|
||||||
const Allocator = std.mem.Allocator;
|
const Allocator = std.mem.Allocator;
|
||||||
|
|
||||||
const DOMError = @import("../netsurf.zig").DOMError;
|
const DOMError = @import("../netsurf.zig").DOMError;
|
||||||
@@ -28,9 +29,8 @@ const log = @import("../../log.zig");
|
|||||||
const URL = @import("../../url.zig").URL;
|
const URL = @import("../../url.zig").URL;
|
||||||
const Mime = @import("../mime.zig").Mime;
|
const Mime = @import("../mime.zig").Mime;
|
||||||
const parser = @import("../netsurf.zig");
|
const parser = @import("../netsurf.zig");
|
||||||
const http = @import("../../http/client.zig");
|
|
||||||
const Page = @import("../page.zig").Page;
|
const Page = @import("../page.zig").Page;
|
||||||
const Loop = @import("../../runtime/loop.zig").Loop;
|
const HttpClient = @import("../../http/Client.zig");
|
||||||
const CookieJar = @import("../storage/storage.zig").CookieJar;
|
const CookieJar = @import("../storage/storage.zig").CookieJar;
|
||||||
|
|
||||||
// XHR interfaces
|
// XHR interfaces
|
||||||
@@ -79,54 +79,28 @@ const XMLHttpRequestBodyInit = union(enum) {
|
|||||||
|
|
||||||
pub const XMLHttpRequest = struct {
|
pub const XMLHttpRequest = struct {
|
||||||
proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
|
proto: XMLHttpRequestEventTarget = XMLHttpRequestEventTarget{},
|
||||||
loop: *Loop,
|
|
||||||
arena: Allocator,
|
arena: Allocator,
|
||||||
request: ?*http.Request = null,
|
transfer: ?*HttpClient.Transfer = null,
|
||||||
|
|
||||||
method: http.Request.Method,
|
|
||||||
state: State,
|
|
||||||
url: ?URL = null,
|
|
||||||
origin_url: *const URL,
|
|
||||||
|
|
||||||
// request headers
|
|
||||||
headers: Headers,
|
|
||||||
sync: bool = true,
|
|
||||||
err: ?anyerror = null,
|
err: ?anyerror = null,
|
||||||
last_dispatch: i64 = 0,
|
last_dispatch: i64 = 0,
|
||||||
|
send_flag: bool = false,
|
||||||
|
|
||||||
|
method: HttpClient.Method,
|
||||||
|
state: State,
|
||||||
|
url: ?[:0]const u8 = null,
|
||||||
|
|
||||||
|
sync: bool = true,
|
||||||
|
withCredentials: bool = false,
|
||||||
|
headers: std.ArrayListUnmanaged([:0]const u8),
|
||||||
request_body: ?[]const u8 = null,
|
request_body: ?[]const u8 = null,
|
||||||
|
|
||||||
cookie_jar: *CookieJar,
|
response_status: u16 = 0,
|
||||||
// the URI of the page where this request is originating from
|
|
||||||
|
|
||||||
// TODO uncomment this field causes casting issue with
|
|
||||||
// XMLHttpRequestEventTarget. I think it's dueto an alignement issue, but
|
|
||||||
// not sure. see
|
|
||||||
// https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
|
|
||||||
// upload: ?XMLHttpRequestUpload = null,
|
|
||||||
|
|
||||||
// TODO uncomment this field causes casting issue with
|
|
||||||
// XMLHttpRequestEventTarget. I think it's dueto an alignement issue, but
|
|
||||||
// not sure. see
|
|
||||||
// https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
|
|
||||||
// timeout: u32 = 0,
|
|
||||||
|
|
||||||
withCredentials: bool = false,
|
|
||||||
// TODO: response readonly attribute any response;
|
|
||||||
response_bytes: std.ArrayListUnmanaged(u8) = .{},
|
response_bytes: std.ArrayListUnmanaged(u8) = .{},
|
||||||
response_type: ResponseType = .Empty,
|
response_type: ResponseType = .Empty,
|
||||||
response_headers: Headers,
|
response_headers: std.ArrayListUnmanaged([]const u8) = .{},
|
||||||
|
|
||||||
response_status: u16 = 0,
|
|
||||||
|
|
||||||
// TODO uncomment this field causes casting issue with
|
|
||||||
// XMLHttpRequestEventTarget. I think it's dueto an alignement issue, but
|
|
||||||
// not sure. see
|
|
||||||
// https://lightpanda.slack.com/archives/C05TRU6RBM1/p1707819010681019
|
|
||||||
// response_override_mime_type: ?[]const u8 = null,
|
|
||||||
|
|
||||||
response_mime: ?Mime = null,
|
response_mime: ?Mime = null,
|
||||||
response_obj: ?ResponseObj = null,
|
response_obj: ?ResponseObj = null,
|
||||||
send_flag: bool = false,
|
|
||||||
|
|
||||||
pub const prototype = *XMLHttpRequestEventTarget;
|
pub const prototype = *XMLHttpRequestEventTarget;
|
||||||
|
|
||||||
@@ -157,68 +131,6 @@ pub const XMLHttpRequest = struct {
|
|||||||
|
|
||||||
const JSONValue = std.json.Value;
|
const JSONValue = std.json.Value;
|
||||||
|
|
||||||
const Headers = struct {
|
|
||||||
list: List,
|
|
||||||
arena: Allocator,
|
|
||||||
|
|
||||||
const List = std.ArrayListUnmanaged(std.http.Header);
|
|
||||||
|
|
||||||
fn init(arena: Allocator) Headers {
|
|
||||||
return .{
|
|
||||||
.arena = arena,
|
|
||||||
.list = .{},
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
fn append(self: *Headers, k: []const u8, v: []const u8) !void {
|
|
||||||
// duplicate strings
|
|
||||||
const kk = try self.arena.dupe(u8, k);
|
|
||||||
const vv = try self.arena.dupe(u8, v);
|
|
||||||
try self.list.append(self.arena, .{ .name = kk, .value = vv });
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reset(self: *Headers) void {
|
|
||||||
self.list.clearRetainingCapacity();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn has(self: Headers, k: []const u8) bool {
|
|
||||||
for (self.list.items) |h| {
|
|
||||||
if (std.ascii.eqlIgnoreCase(k, h.name)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
fn getFirstValue(self: Headers, k: []const u8) ?[]const u8 {
|
|
||||||
for (self.list.items) |h| {
|
|
||||||
if (std.ascii.eqlIgnoreCase(k, h.name)) {
|
|
||||||
return h.value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// replace any existing header with the same key
|
|
||||||
fn set(self: *Headers, k: []const u8, v: []const u8) !void {
|
|
||||||
for (self.list.items, 0..) |h, i| {
|
|
||||||
if (std.ascii.eqlIgnoreCase(k, h.name)) {
|
|
||||||
_ = self.list.swapRemove(i);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
self.append(k, v);
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO
|
|
||||||
fn sort(_: *Headers) void {}
|
|
||||||
|
|
||||||
fn all(self: Headers) []std.http.Header {
|
|
||||||
return self.list.items;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const Response = union(ResponseType) {
|
const Response = union(ResponseType) {
|
||||||
Empty: void,
|
Empty: void,
|
||||||
Text: []const u8,
|
Text: []const u8,
|
||||||
@@ -253,20 +165,16 @@ pub const XMLHttpRequest = struct {
|
|||||||
return .{
|
return .{
|
||||||
.url = null,
|
.url = null,
|
||||||
.arena = arena,
|
.arena = arena,
|
||||||
.loop = page.loop,
|
.headers = .{},
|
||||||
.headers = Headers.init(arena),
|
|
||||||
.response_headers = Headers.init(arena),
|
|
||||||
.method = undefined,
|
.method = undefined,
|
||||||
.state = .unsent,
|
.state = .unsent,
|
||||||
.origin_url = &page.url,
|
|
||||||
.cookie_jar = page.cookie_jar,
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn destructor(self: *XMLHttpRequest) void {
|
pub fn destructor(self: *XMLHttpRequest) void {
|
||||||
if (self.request) |req| {
|
if (self.transfer) |transfer| {
|
||||||
req.abort();
|
transfer.abort();
|
||||||
self.request = null;
|
self.transfer = null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -281,9 +189,8 @@ pub const XMLHttpRequest = struct {
|
|||||||
self.response_type = .Empty;
|
self.response_type = .Empty;
|
||||||
self.response_mime = null;
|
self.response_mime = null;
|
||||||
|
|
||||||
// TODO should we clearRetainingCapacity instead?
|
self.headers.clearRetainingCapacity();
|
||||||
self.headers.reset();
|
self.response_headers.clearRetainingCapacity();
|
||||||
self.response_headers.reset();
|
|
||||||
self.response_status = 0;
|
self.response_status = 0;
|
||||||
|
|
||||||
self.send_flag = false;
|
self.send_flag = false;
|
||||||
@@ -323,6 +230,7 @@ pub const XMLHttpRequest = struct {
|
|||||||
asyn: ?bool,
|
asyn: ?bool,
|
||||||
username: ?[]const u8,
|
username: ?[]const u8,
|
||||||
password: ?[]const u8,
|
password: ?[]const u8,
|
||||||
|
page: *Page,
|
||||||
) !void {
|
) !void {
|
||||||
_ = username;
|
_ = username;
|
||||||
_ = password;
|
_ = password;
|
||||||
@@ -333,9 +241,7 @@ pub const XMLHttpRequest = struct {
|
|||||||
self.reset();
|
self.reset();
|
||||||
|
|
||||||
self.method = try validMethod(method);
|
self.method = try validMethod(method);
|
||||||
const arena = self.arena;
|
self.url = try URL.stitch(page.arena, url, page.url.raw, .{ .null_terminated = true });
|
||||||
|
|
||||||
self.url = try self.origin_url.resolve(arena, url);
|
|
||||||
self.sync = if (asyn) |b| !b else false;
|
self.sync = if (asyn) |b| !b else false;
|
||||||
|
|
||||||
self.state = .opened;
|
self.state = .opened;
|
||||||
@@ -414,7 +320,7 @@ pub const XMLHttpRequest = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const methods = [_]struct {
|
const methods = [_]struct {
|
||||||
tag: http.Request.Method,
|
tag: HttpClient.Method,
|
||||||
name: []const u8,
|
name: []const u8,
|
||||||
}{
|
}{
|
||||||
.{ .tag = .DELETE, .name = "DELETE" },
|
.{ .tag = .DELETE, .name = "DELETE" },
|
||||||
@@ -424,29 +330,30 @@ pub const XMLHttpRequest = struct {
|
|||||||
.{ .tag = .POST, .name = "POST" },
|
.{ .tag = .POST, .name = "POST" },
|
||||||
.{ .tag = .PUT, .name = "PUT" },
|
.{ .tag = .PUT, .name = "PUT" },
|
||||||
};
|
};
|
||||||
const methods_forbidden = [_][]const u8{ "CONNECT", "TRACE", "TRACK" };
|
pub fn validMethod(m: []const u8) DOMError!HttpClient.Method {
|
||||||
|
|
||||||
pub fn validMethod(m: []const u8) DOMError!http.Request.Method {
|
|
||||||
for (methods) |method| {
|
for (methods) |method| {
|
||||||
if (std.ascii.eqlIgnoreCase(method.name, m)) {
|
if (std.ascii.eqlIgnoreCase(method.name, m)) {
|
||||||
return method.tag;
|
return method.tag;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// If method is a forbidden method, then throw a "SecurityError" DOMException.
|
|
||||||
for (methods_forbidden) |method| {
|
|
||||||
if (std.ascii.eqlIgnoreCase(method, m)) {
|
|
||||||
return DOMError.Security;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If method is not a method, then throw a "SyntaxError" DOMException.
|
// If method is not a method, then throw a "SyntaxError" DOMException.
|
||||||
return DOMError.Syntax;
|
return DOMError.Syntax;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _setRequestHeader(self: *XMLHttpRequest, name: []const u8, value: []const u8) !void {
|
pub fn _setRequestHeader(self: *XMLHttpRequest, name: []const u8, value: []const u8) !void {
|
||||||
if (self.state != .opened) return DOMError.InvalidState;
|
if (self.state != .opened) {
|
||||||
if (self.send_flag) return DOMError.InvalidState;
|
return DOMError.InvalidState;
|
||||||
return try self.headers.append(name, value);
|
}
|
||||||
|
|
||||||
|
if (self.send_flag) {
|
||||||
|
return DOMError.InvalidState;
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.headers.append(
|
||||||
|
self.arena,
|
||||||
|
try std.fmt.allocPrintZ(self.arena, "{s}: {s}", .{ name, value }),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO body can be either a XMLHttpRequestBodyInit or a document
|
// TODO body can be either a XMLHttpRequestBodyInit or a document
|
||||||
@@ -454,118 +361,98 @@ pub const XMLHttpRequest = struct {
|
|||||||
if (self.state != .opened) return DOMError.InvalidState;
|
if (self.state != .opened) return DOMError.InvalidState;
|
||||||
if (self.send_flag) return DOMError.InvalidState;
|
if (self.send_flag) return DOMError.InvalidState;
|
||||||
|
|
||||||
log.debug(.http, "request", .{ .method = self.method, .url = self.url, .source = "xhr" });
|
log.debug(.http, "request queued", .{ .method = self.method, .url = self.url, .source = "xhr" });
|
||||||
|
|
||||||
self.send_flag = true;
|
self.send_flag = true;
|
||||||
if (body) |b| {
|
if (body) |b| {
|
||||||
self.request_body = try self.arena.dupe(u8, b);
|
|
||||||
}
|
|
||||||
|
|
||||||
try page.request_factory.initAsync(
|
|
||||||
page.arena,
|
|
||||||
self.method,
|
|
||||||
&self.url.?.uri,
|
|
||||||
self,
|
|
||||||
onHttpRequestReady,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn onHttpRequestReady(ctx: *anyopaque, request: *http.Request) !void {
|
|
||||||
// on error, our caller will cleanup request
|
|
||||||
const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
|
|
||||||
|
|
||||||
for (self.headers.list.items) |hdr| {
|
|
||||||
try request.addHeader(hdr.name, hdr.value, .{});
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
var arr: std.ArrayListUnmanaged(u8) = .{};
|
|
||||||
try self.cookie_jar.forRequest(&self.url.?.uri, arr.writer(self.arena), .{
|
|
||||||
.navigation = false,
|
|
||||||
.origin_uri = &self.origin_url.uri,
|
|
||||||
.is_http = true,
|
|
||||||
});
|
|
||||||
|
|
||||||
if (arr.items.len > 0) {
|
|
||||||
try request.addHeader("Cookie", arr.items, .{});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The body argument provides the request body, if any, and is ignored
|
|
||||||
// if the request method is GET or HEAD.
|
|
||||||
// https://xhr.spec.whatwg.org/#the-send()-method
|
|
||||||
// var used_body: ?XMLHttpRequestBodyInit = null;
|
|
||||||
if (self.request_body) |b| {
|
|
||||||
if (self.method != .GET and self.method != .HEAD) {
|
if (self.method != .GET and self.method != .HEAD) {
|
||||||
request.body = b;
|
self.request_body = try self.arena.dupe(u8, b);
|
||||||
try request.addHeader("Content-Type", "text/plain; charset=UTF-8", .{});
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
try request.sendAsync(self, .{});
|
var headers = try HttpClient.Headers.init();
|
||||||
self.request = request;
|
for (self.headers.items) |hdr| {
|
||||||
|
try headers.add(hdr);
|
||||||
|
}
|
||||||
|
try page.requestCookie(.{}).headersForRequest(self.arena, self.url.?, &headers);
|
||||||
|
|
||||||
|
try page.http_client.request(.{
|
||||||
|
.ctx = self,
|
||||||
|
.url = self.url.?,
|
||||||
|
.method = self.method,
|
||||||
|
.headers = headers,
|
||||||
|
.body = self.request_body,
|
||||||
|
.cookie_jar = page.cookie_jar,
|
||||||
|
.start_callback = httpStartCallback,
|
||||||
|
.header_callback = httpHeaderCallback,
|
||||||
|
.header_done_callback = httpHeaderDoneCallback,
|
||||||
|
.data_callback = httpDataCallback,
|
||||||
|
.done_callback = httpDoneCallback,
|
||||||
|
.error_callback = httpErrorCallback,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn onHttpResponse(self: *XMLHttpRequest, progress_: anyerror!http.Progress) !void {
|
fn httpStartCallback(transfer: *HttpClient.Transfer) !void {
|
||||||
const progress = progress_ catch |err| {
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
|
||||||
// The request has been closed internally by the client, it isn't safe
|
log.debug(.http, "request start", .{ .method = self.method, .url = self.url, .source = "xhr" });
|
||||||
// for us to keep it around.
|
self.transfer = transfer;
|
||||||
self.request = null;
|
}
|
||||||
self.onErr(err);
|
|
||||||
return err;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (progress.first) {
|
fn httpHeaderCallback(transfer: *HttpClient.Transfer, header: []const u8) !void {
|
||||||
const header = progress.header;
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
|
||||||
log.debug(.http, "request header", .{
|
try self.response_headers.append(self.arena, try self.arena.dupe(u8, header));
|
||||||
.source = "xhr",
|
}
|
||||||
.url = self.url,
|
|
||||||
.status = header.status,
|
|
||||||
});
|
|
||||||
for (header.headers.items) |hdr| {
|
|
||||||
try self.response_headers.append(hdr.name, hdr.value);
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract a mime type from headers.
|
fn httpHeaderDoneCallback(transfer: *HttpClient.Transfer) !void {
|
||||||
if (header.get("content-type")) |ct| {
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
|
||||||
self.response_mime = Mime.parse(self.arena, ct) catch |e| {
|
|
||||||
return self.onErr(e);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO handle override mime type
|
const header = &transfer.response_header.?;
|
||||||
self.state = .headers_received;
|
|
||||||
self.dispatchEvt("readystatechange");
|
|
||||||
|
|
||||||
self.response_status = header.status;
|
log.debug(.http, "request header", .{
|
||||||
|
.source = "xhr",
|
||||||
|
.url = self.url,
|
||||||
|
.status = header.status,
|
||||||
|
});
|
||||||
|
|
||||||
// TODO correct total
|
if (header.contentType()) |ct| {
|
||||||
self.dispatchProgressEvent("loadstart", .{ .loaded = 0, .total = 0 });
|
self.response_mime = Mime.parse(ct) catch |e| {
|
||||||
|
return self.onErr(e);
|
||||||
self.state = .loading;
|
};
|
||||||
self.dispatchEvt("readystatechange");
|
|
||||||
|
|
||||||
try self.cookie_jar.populateFromResponse(self.request.?.request_uri, &header);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (progress.data) |data| {
|
// TODO handle override mime type
|
||||||
try self.response_bytes.appendSlice(self.arena, data);
|
self.state = .headers_received;
|
||||||
|
self.dispatchEvt("readystatechange");
|
||||||
|
|
||||||
|
self.response_status = header.status;
|
||||||
|
|
||||||
|
// TODO correct total
|
||||||
|
self.dispatchProgressEvent("loadstart", .{ .loaded = 0, .total = 0 });
|
||||||
|
|
||||||
|
self.state = .loading;
|
||||||
|
self.dispatchEvt("readystatechange");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn httpDataCallback(transfer: *HttpClient.Transfer, data: []const u8) !void {
|
||||||
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(transfer.ctx));
|
||||||
|
try self.response_bytes.appendSlice(self.arena, data);
|
||||||
|
|
||||||
|
const now = std.time.milliTimestamp();
|
||||||
|
if (now - self.last_dispatch < 50) {
|
||||||
|
// don't send this more than once every 50ms
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const loaded = self.response_bytes.items.len;
|
const loaded = self.response_bytes.items.len;
|
||||||
const now = std.time.milliTimestamp();
|
self.dispatchProgressEvent("progress", .{
|
||||||
if (now - self.last_dispatch > 50) {
|
.total = loaded, // TODO, this is wrong? Need the content-type
|
||||||
// don't send this more than once every 50ms
|
.loaded = loaded,
|
||||||
self.dispatchProgressEvent("progress", .{
|
});
|
||||||
.total = loaded,
|
self.last_dispatch = now;
|
||||||
.loaded = loaded,
|
}
|
||||||
});
|
|
||||||
self.last_dispatch = now;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (progress.done == false) {
|
fn httpDoneCallback(ctx: *anyopaque) !void {
|
||||||
return;
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
|
||||||
}
|
|
||||||
|
|
||||||
log.info(.http, "request complete", .{
|
log.info(.http, "request complete", .{
|
||||||
.source = "xhr",
|
.source = "xhr",
|
||||||
@@ -573,20 +460,36 @@ pub const XMLHttpRequest = struct {
|
|||||||
.status = self.response_status,
|
.status = self.response_status,
|
||||||
});
|
});
|
||||||
|
|
||||||
// Not that the request is done, the http/client will free the request
|
// Not that the request is done, the http/client will free the transfer
|
||||||
// object. It isn't safe to keep it around.
|
// object. It isn't safe to keep it around.
|
||||||
self.request = null;
|
self.transfer = null;
|
||||||
|
|
||||||
self.state = .done;
|
self.state = .done;
|
||||||
self.send_flag = false;
|
self.send_flag = false;
|
||||||
self.dispatchEvt("readystatechange");
|
self.dispatchEvt("readystatechange");
|
||||||
|
|
||||||
|
const loaded = self.response_bytes.items.len;
|
||||||
|
|
||||||
// dispatch a progress event load.
|
// dispatch a progress event load.
|
||||||
self.dispatchProgressEvent("load", .{ .loaded = loaded, .total = loaded });
|
self.dispatchProgressEvent("load", .{ .loaded = loaded, .total = loaded });
|
||||||
// dispatch a progress event loadend.
|
// dispatch a progress event loadend.
|
||||||
self.dispatchProgressEvent("loadend", .{ .loaded = loaded, .total = loaded });
|
self.dispatchProgressEvent("loadend", .{ .loaded = loaded, .total = loaded });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn httpErrorCallback(ctx: *anyopaque, err: anyerror) void {
|
||||||
|
const self: *XMLHttpRequest = @alignCast(@ptrCast(ctx));
|
||||||
|
// http client will close it after an error, it isn't safe to keep around
|
||||||
|
self.transfer = null;
|
||||||
|
self.onErr(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn _abort(self: *XMLHttpRequest) void {
|
||||||
|
self.onErr(DOMError.Abort);
|
||||||
|
if (self.transfer) |transfer| {
|
||||||
|
transfer.abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn onErr(self: *XMLHttpRequest, err: anyerror) void {
|
fn onErr(self: *XMLHttpRequest, err: anyerror) void {
|
||||||
self.send_flag = false;
|
self.send_flag = false;
|
||||||
|
|
||||||
@@ -614,15 +517,10 @@ pub const XMLHttpRequest = struct {
|
|||||||
log.log(.http, level, "error", .{
|
log.log(.http, level, "error", .{
|
||||||
.url = self.url,
|
.url = self.url,
|
||||||
.err = err,
|
.err = err,
|
||||||
.source = "xhr",
|
.source = "xhr.OnErr",
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn _abort(self: *XMLHttpRequest) void {
|
|
||||||
self.onErr(DOMError.Abort);
|
|
||||||
self.destructor();
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_responseType(self: *XMLHttpRequest) []const u8 {
|
pub fn get_responseType(self: *XMLHttpRequest) []const u8 {
|
||||||
return switch (self.response_type) {
|
return switch (self.response_type) {
|
||||||
.Empty => "",
|
.Empty => "",
|
||||||
@@ -664,9 +562,8 @@ pub const XMLHttpRequest = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO retrieve the redirected url
|
// TODO retrieve the redirected url
|
||||||
pub fn get_responseURL(self: *XMLHttpRequest) ?[]const u8 {
|
pub fn get_responseURL(self: *XMLHttpRequest) ?[:0]const u8 {
|
||||||
const url = &(self.url orelse return null);
|
return self.url;
|
||||||
return url.raw;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_responseXML(self: *XMLHttpRequest) !?Response {
|
pub fn get_responseXML(self: *XMLHttpRequest) !?Response {
|
||||||
@@ -770,18 +667,8 @@ pub const XMLHttpRequest = struct {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
var ccharset: [:0]const u8 = "utf-8";
|
|
||||||
if (mime.charset) |rc| {
|
|
||||||
if (std.mem.eql(u8, rc, "utf-8") == false) {
|
|
||||||
ccharset = self.arena.dupeZ(u8, rc) catch {
|
|
||||||
self.response_obj = .{ .Failure = {} };
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var fbs = std.io.fixedBufferStream(self.response_bytes.items);
|
var fbs = std.io.fixedBufferStream(self.response_bytes.items);
|
||||||
const doc = parser.documentHTMLParse(fbs.reader(), ccharset) catch {
|
const doc = parser.documentHTMLParse(fbs.reader(), mime.charset orelse "UTF-8") catch {
|
||||||
self.response_obj = .{ .Failure = {} };
|
self.response_obj = .{ .Failure = {} };
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
@@ -818,26 +705,27 @@ pub const XMLHttpRequest = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn _getResponseHeader(self: *XMLHttpRequest, name: []const u8) ?[]const u8 {
|
pub fn _getResponseHeader(self: *XMLHttpRequest, name: []const u8) ?[]const u8 {
|
||||||
return self.response_headers.getFirstValue(name);
|
for (self.response_headers.items) |entry| {
|
||||||
|
if (entry.len <= name.len) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (std.ascii.eqlIgnoreCase(name, entry[0..name.len]) == false) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (entry[name.len] != ':') {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
return std.mem.trimLeft(u8, entry[name.len + 1 ..], " ");
|
||||||
|
}
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
// The caller owns the string returned.
|
|
||||||
// TODO change the return type to express the string ownership and let
|
|
||||||
// jsruntime free the string once copied to v8.
|
|
||||||
// see https://github.com/lightpanda-io/jsruntime-lib/issues/195
|
|
||||||
pub fn _getAllResponseHeaders(self: *XMLHttpRequest) ![]const u8 {
|
pub fn _getAllResponseHeaders(self: *XMLHttpRequest) ![]const u8 {
|
||||||
if (self.response_headers.list.items.len == 0) return "";
|
|
||||||
self.response_headers.sort();
|
|
||||||
|
|
||||||
var buf: std.ArrayListUnmanaged(u8) = .{};
|
var buf: std.ArrayListUnmanaged(u8) = .{};
|
||||||
const w = buf.writer(self.arena);
|
const w = buf.writer(self.arena);
|
||||||
|
|
||||||
for (self.response_headers.list.items) |entry| {
|
for (self.response_headers.items) |entry| {
|
||||||
if (entry.value.len == 0) continue;
|
try w.writeAll(entry);
|
||||||
|
|
||||||
try w.writeAll(entry.name);
|
|
||||||
try w.writeAll(": ");
|
|
||||||
try w.writeAll(entry.value);
|
|
||||||
try w.writeAll("\r\n");
|
try w.writeAll("\r\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -869,8 +757,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
.{ "req.onload", "function cbk(event) { nb ++; evt = event; }" },
|
.{ "req.onload", "function cbk(event) { nb ++; evt = event; }" },
|
||||||
.{ "req.onload = cbk", "function cbk(event) { nb ++; evt = event; }" },
|
.{ "req.onload = cbk", "function cbk(event) { nb ++; evt = event; }" },
|
||||||
|
|
||||||
.{ "req.open('GET', 'https://127.0.0.1:9581/xhr')", "undefined" },
|
.{ "req.open('GET', 'http://127.0.0.1:9582/xhr')", null },
|
||||||
.{ "req.setRequestHeader('User-Agent', 'lightpanda/1.0')", "undefined" },
|
|
||||||
|
|
||||||
// ensure open resets values
|
// ensure open resets values
|
||||||
.{ "req.status ", "0" },
|
.{ "req.status ", "0" },
|
||||||
@@ -890,7 +777,9 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
.{ "req.status", "200" },
|
.{ "req.status", "200" },
|
||||||
.{ "req.statusText", "OK" },
|
.{ "req.statusText", "OK" },
|
||||||
.{ "req.getResponseHeader('Content-Type')", "text/html; charset=utf-8" },
|
.{ "req.getResponseHeader('Content-Type')", "text/html; charset=utf-8" },
|
||||||
.{ "req.getAllResponseHeaders().length", "80" },
|
.{ "req.getAllResponseHeaders()", "content-length: 100\r\n" ++
|
||||||
|
"Content-Type: text/html; charset=utf-8\r\n" ++
|
||||||
|
"Connection: Close\r\n" },
|
||||||
.{ "req.responseText.length", "100" },
|
.{ "req.responseText.length", "100" },
|
||||||
.{ "req.response.length == req.responseText.length", "true" },
|
.{ "req.response.length == req.responseText.length", "true" },
|
||||||
.{ "req.responseXML instanceof Document", "true" },
|
.{ "req.responseXML instanceof Document", "true" },
|
||||||
@@ -898,7 +787,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
|
|
||||||
try runner.testCases(&.{
|
try runner.testCases(&.{
|
||||||
.{ "const req2 = new XMLHttpRequest()", "undefined" },
|
.{ "const req2 = new XMLHttpRequest()", "undefined" },
|
||||||
.{ "req2.open('GET', 'https://127.0.0.1:9581/xhr')", "undefined" },
|
.{ "req2.open('GET', 'http://127.0.0.1:9582/xhr')", "undefined" },
|
||||||
.{ "req2.responseType = 'document'", "document" },
|
.{ "req2.responseType = 'document'", "document" },
|
||||||
|
|
||||||
.{ "req2.send()", "undefined" },
|
.{ "req2.send()", "undefined" },
|
||||||
@@ -913,7 +802,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
|
|
||||||
try runner.testCases(&.{
|
try runner.testCases(&.{
|
||||||
.{ "const req3 = new XMLHttpRequest()", "undefined" },
|
.{ "const req3 = new XMLHttpRequest()", "undefined" },
|
||||||
.{ "req3.open('GET', 'https://127.0.0.1:9581/xhr/json')", "undefined" },
|
.{ "req3.open('GET', 'http://127.0.0.1:9582/xhr/json')", "undefined" },
|
||||||
.{ "req3.responseType = 'json'", "json" },
|
.{ "req3.responseType = 'json'", "json" },
|
||||||
|
|
||||||
.{ "req3.send()", "undefined" },
|
.{ "req3.send()", "undefined" },
|
||||||
@@ -927,7 +816,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
|
|
||||||
try runner.testCases(&.{
|
try runner.testCases(&.{
|
||||||
.{ "const req4 = new XMLHttpRequest()", "undefined" },
|
.{ "const req4 = new XMLHttpRequest()", "undefined" },
|
||||||
.{ "req4.open('POST', 'https://127.0.0.1:9581/xhr')", "undefined" },
|
.{ "req4.open('POST', 'http://127.0.0.1:9582/xhr')", "undefined" },
|
||||||
.{ "req4.send('foo')", "undefined" },
|
.{ "req4.send('foo')", "undefined" },
|
||||||
|
|
||||||
// Each case executed waits for all loop callaback calls.
|
// Each case executed waits for all loop callaback calls.
|
||||||
@@ -939,7 +828,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
|
|
||||||
try runner.testCases(&.{
|
try runner.testCases(&.{
|
||||||
.{ "const req5 = new XMLHttpRequest()", "undefined" },
|
.{ "const req5 = new XMLHttpRequest()", "undefined" },
|
||||||
.{ "req5.open('GET', 'https://127.0.0.1:9581/xhr')", "undefined" },
|
.{ "req5.open('GET', 'http://127.0.0.1:9582/xhr')", "undefined" },
|
||||||
.{ "var status = 0; req5.onload = function () { status = this.status };", "function () { status = this.status }" },
|
.{ "var status = 0; req5.onload = function () { status = this.status };", "function () { status = this.status }" },
|
||||||
.{ "req5.send()", "undefined" },
|
.{ "req5.send()", "undefined" },
|
||||||
|
|
||||||
@@ -960,7 +849,7 @@ test "Browser.XHR.XMLHttpRequest" {
|
|||||||
,
|
,
|
||||||
null,
|
null,
|
||||||
},
|
},
|
||||||
.{ "req6.open('GET', 'https://127.0.0.1:9581/xhr')", null },
|
.{ "req6.open('GET', 'http://127.0.0.1:9582/xhr')", null },
|
||||||
.{ "req6.send()", null },
|
.{ "req6.send()", null },
|
||||||
.{ "readyStates.length", "4" },
|
.{ "readyStates.length", "4" },
|
||||||
.{ "readyStates[0] === XMLHttpRequest.OPENED", "true" },
|
.{ "readyStates[0] === XMLHttpRequest.OPENED", "true" },
|
||||||
|
|||||||
@@ -29,6 +29,8 @@ const Page = @import("../browser/page.zig").Page;
|
|||||||
const Inspector = @import("../browser/env.zig").Env.Inspector;
|
const Inspector = @import("../browser/env.zig").Env.Inspector;
|
||||||
const Incrementing = @import("../id.zig").Incrementing;
|
const Incrementing = @import("../id.zig").Incrementing;
|
||||||
const Notification = @import("../notification.zig").Notification;
|
const Notification = @import("../notification.zig").Notification;
|
||||||
|
const NetworkState = @import("domains/network.zig").NetworkState;
|
||||||
|
const InterceptState = @import("domains/fetch.zig").InterceptState;
|
||||||
|
|
||||||
const polyfill = @import("../browser/polyfill/polyfill.zig");
|
const polyfill = @import("../browser/polyfill/polyfill.zig");
|
||||||
|
|
||||||
@@ -73,7 +75,9 @@ pub fn CDPT(comptime TypeProvider: type) type {
|
|||||||
notification_arena: std.heap.ArenaAllocator,
|
notification_arena: std.heap.ArenaAllocator,
|
||||||
|
|
||||||
// Extra headers to add to all requests. TBD under which conditions this should be reset.
|
// Extra headers to add to all requests. TBD under which conditions this should be reset.
|
||||||
extra_headers: std.ArrayListUnmanaged(std.http.Header) = .empty,
|
extra_headers: std.ArrayListUnmanaged([*c]const u8) = .empty,
|
||||||
|
|
||||||
|
intercept_state: InterceptState,
|
||||||
|
|
||||||
const Self = @This();
|
const Self = @This();
|
||||||
|
|
||||||
@@ -89,6 +93,7 @@ pub fn CDPT(comptime TypeProvider: type) type {
|
|||||||
.browser_context = null,
|
.browser_context = null,
|
||||||
.message_arena = std.heap.ArenaAllocator.init(allocator),
|
.message_arena = std.heap.ArenaAllocator.init(allocator),
|
||||||
.notification_arena = std.heap.ArenaAllocator.init(allocator),
|
.notification_arena = std.heap.ArenaAllocator.init(allocator),
|
||||||
|
.intercept_state = try InterceptState.init(allocator), // TBD or browser session arena?
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,6 +101,7 @@ pub fn CDPT(comptime TypeProvider: type) type {
|
|||||||
if (self.browser_context) |*bc| {
|
if (self.browser_context) |*bc| {
|
||||||
bc.deinit();
|
bc.deinit();
|
||||||
}
|
}
|
||||||
|
self.intercept_state.deinit(); // TBD Should this live in BC?
|
||||||
self.browser.deinit();
|
self.browser.deinit();
|
||||||
self.message_arena.deinit();
|
self.message_arena.deinit();
|
||||||
self.notification_arena.deinit();
|
self.notification_arena.deinit();
|
||||||
@@ -104,6 +110,7 @@ pub fn CDPT(comptime TypeProvider: type) type {
|
|||||||
pub fn handleMessage(self: *Self, msg: []const u8) bool {
|
pub fn handleMessage(self: *Self, msg: []const u8) bool {
|
||||||
// if there's an error, it's already been logged
|
// if there's an error, it's already been logged
|
||||||
self.processMessage(msg) catch return false;
|
self.processMessage(msg) catch return false;
|
||||||
|
self.pageWait();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,6 +120,20 @@ pub fn CDPT(comptime TypeProvider: type) type {
|
|||||||
return self.dispatch(arena.allocator(), self, msg);
|
return self.dispatch(arena.allocator(), self, msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// @newhttp
|
||||||
|
// A bit hacky right now. The main server loop blocks only for CDP
|
||||||
|
// messages. It no longer blocks for page timeouts of page HTTP
|
||||||
|
// transfers. So we need to call this more ourselves.
|
||||||
|
// This is called after every message and [very hackily] from the server
|
||||||
|
// loop.
|
||||||
|
// This is hopefully temporary.
|
||||||
|
pub fn pageWait(self: *Self) void {
|
||||||
|
const session = &(self.browser.session orelse return);
|
||||||
|
// exits early if there's nothing to do, so a large value like
|
||||||
|
// 5 seconds should be ok
|
||||||
|
session.wait(5);
|
||||||
|
}
|
||||||
|
|
||||||
// Called from above, in processMessage which handles client messages
|
// Called from above, in processMessage which handles client messages
|
||||||
// but can also be called internally. For example, Target.sendMessageToTarget
|
// but can also be called internally. For example, Target.sendMessageToTarget
|
||||||
// calls back into dispatch to capture the response.
|
// calls back into dispatch to capture the response.
|
||||||
@@ -323,10 +344,7 @@ pub fn BrowserContext(comptime CDP_T: type) type {
|
|||||||
inspector: Inspector,
|
inspector: Inspector,
|
||||||
isolated_world: ?IsolatedWorld,
|
isolated_world: ?IsolatedWorld,
|
||||||
|
|
||||||
// Used to restore the proxy after the CDP session ends. If CDP never over-wrote it, it won't restore it (the first null).
|
http_proxy_changed: bool = false,
|
||||||
// If the CDP is restoring it, but the original value was null, that's the 2nd null.
|
|
||||||
// If you only have 1 null it would be ambiguous, does null mean it shouldn't be restored, or should it be restored to null?
|
|
||||||
http_proxy_before: ??std.Uri = null,
|
|
||||||
|
|
||||||
const Self = @This();
|
const Self = @This();
|
||||||
|
|
||||||
@@ -382,7 +400,13 @@ pub fn BrowserContext(comptime CDP_T: type) type {
|
|||||||
self.node_search_list.deinit();
|
self.node_search_list.deinit();
|
||||||
self.cdp.browser.notification.unregisterAll(self);
|
self.cdp.browser.notification.unregisterAll(self);
|
||||||
|
|
||||||
if (self.http_proxy_before) |prev_proxy| self.cdp.browser.http_client.http_proxy = prev_proxy;
|
if (self.http_proxy_changed) {
|
||||||
|
// has to be called after browser.closeSession, since it won't
|
||||||
|
// work if there are active connections.
|
||||||
|
self.cdp.browser.http_client.restoreOriginalProxy() catch |err| {
|
||||||
|
log.warn(.http, "restoreOriginalProxy", .{ .err = err });
|
||||||
|
};
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn reset(self: *Self) void {
|
pub fn reset(self: *Self) void {
|
||||||
@@ -424,18 +448,26 @@ pub fn BrowserContext(comptime CDP_T: type) type {
|
|||||||
pub fn networkEnable(self: *Self) !void {
|
pub fn networkEnable(self: *Self) !void {
|
||||||
try self.cdp.browser.notification.register(.http_request_fail, self, onHttpRequestFail);
|
try self.cdp.browser.notification.register(.http_request_fail, self, onHttpRequestFail);
|
||||||
try self.cdp.browser.notification.register(.http_request_start, self, onHttpRequestStart);
|
try self.cdp.browser.notification.register(.http_request_start, self, onHttpRequestStart);
|
||||||
try self.cdp.browser.notification.register(.http_request_complete, self, onHttpRequestComplete);
|
try self.cdp.browser.notification.register(.http_headers_done, self, onHttpHeadersDone);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn networkDisable(self: *Self) void {
|
pub fn networkDisable(self: *Self) void {
|
||||||
self.cdp.browser.notification.unregister(.http_request_fail, self);
|
self.cdp.browser.notification.unregister(.http_request_fail, self);
|
||||||
self.cdp.browser.notification.unregister(.http_request_start, self);
|
self.cdp.browser.notification.unregister(.http_request_start, self);
|
||||||
self.cdp.browser.notification.unregister(.http_request_complete, self);
|
self.cdp.browser.notification.unregister(.http_headers_done, self);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fetchEnable(self: *Self) !void {
|
||||||
|
try self.cdp.browser.notification.register(.http_request_intercept, self, onHttpRequestIntercept);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn fetchDisable(self: *Self) void {
|
||||||
|
self.cdp.browser.notification.unregister(.http_request_intercept, self);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn onPageRemove(ctx: *anyopaque, _: Notification.PageRemove) !void {
|
pub fn onPageRemove(ctx: *anyopaque, _: Notification.PageRemove) !void {
|
||||||
const self: *Self = @alignCast(@ptrCast(ctx));
|
const self: *Self = @alignCast(@ptrCast(ctx));
|
||||||
return @import("domains/page.zig").pageRemove(self);
|
try @import("domains/page.zig").pageRemove(self);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn onPageCreated(ctx: *anyopaque, page: *Page) !void {
|
pub fn onPageCreated(ctx: *anyopaque, page: *Page) !void {
|
||||||
@@ -457,7 +489,13 @@ pub fn BrowserContext(comptime CDP_T: type) type {
|
|||||||
pub fn onHttpRequestStart(ctx: *anyopaque, data: *const Notification.RequestStart) !void {
|
pub fn onHttpRequestStart(ctx: *anyopaque, data: *const Notification.RequestStart) !void {
|
||||||
const self: *Self = @alignCast(@ptrCast(ctx));
|
const self: *Self = @alignCast(@ptrCast(ctx));
|
||||||
defer self.resetNotificationArena();
|
defer self.resetNotificationArena();
|
||||||
return @import("domains/network.zig").httpRequestStart(self.notification_arena, self, data);
|
try @import("domains/network.zig").httpRequestStart(self.notification_arena, self, data);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn onHttpRequestIntercept(ctx: *anyopaque, data: *const Notification.RequestIntercept) !void {
|
||||||
|
const self: *Self = @alignCast(@ptrCast(ctx));
|
||||||
|
defer self.resetNotificationArena();
|
||||||
|
try @import("domains/fetch.zig").requestPaused(self.notification_arena, self, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn onHttpRequestFail(ctx: *anyopaque, data: *const Notification.RequestFail) !void {
|
pub fn onHttpRequestFail(ctx: *anyopaque, data: *const Notification.RequestFail) !void {
|
||||||
@@ -466,10 +504,10 @@ pub fn BrowserContext(comptime CDP_T: type) type {
|
|||||||
return @import("domains/network.zig").httpRequestFail(self.notification_arena, self, data);
|
return @import("domains/network.zig").httpRequestFail(self.notification_arena, self, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn onHttpRequestComplete(ctx: *anyopaque, data: *const Notification.RequestComplete) !void {
|
pub fn onHttpHeadersDone(ctx: *anyopaque, data: *const Notification.ResponseHeadersDone) !void {
|
||||||
const self: *Self = @alignCast(@ptrCast(ctx));
|
const self: *Self = @alignCast(@ptrCast(ctx));
|
||||||
defer self.resetNotificationArena();
|
defer self.resetNotificationArena();
|
||||||
return @import("domains/network.zig").httpRequestComplete(self.notification_arena, self, data);
|
return @import("domains/network.zig").httpHeadersDone(self.notification_arena, self, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn resetNotificationArena(self: *Self) void {
|
fn resetNotificationArena(self: *Self) void {
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
// Copyright (C) 2023-2024 Lightpanda (Selecy SAS)
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
//
|
//
|
||||||
// Francis Bouvier <francis@lightpanda.io>
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
// Pierre Tachoire <pierre@lightpanda.io>
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
@@ -17,13 +17,215 @@
|
|||||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
const Notification = @import("../../notification.zig").Notification;
|
||||||
|
const log = @import("../../log.zig");
|
||||||
|
const Method = @import("../../http/Client.zig").Method;
|
||||||
|
const Transfer = @import("../../http/Client.zig").Transfer;
|
||||||
|
|
||||||
pub fn processMessage(cmd: anytype) !void {
|
pub fn processMessage(cmd: anytype) !void {
|
||||||
const action = std.meta.stringToEnum(enum {
|
const action = std.meta.stringToEnum(enum {
|
||||||
disable,
|
disable,
|
||||||
|
enable,
|
||||||
|
continueRequest,
|
||||||
|
failRequest,
|
||||||
}, cmd.input.action) orelse return error.UnknownMethod;
|
}, cmd.input.action) orelse return error.UnknownMethod;
|
||||||
|
|
||||||
switch (action) {
|
switch (action) {
|
||||||
.disable => return cmd.sendResult(null, .{}),
|
.disable => return disable(cmd),
|
||||||
|
.enable => return enable(cmd),
|
||||||
|
.continueRequest => return continueRequest(cmd),
|
||||||
|
.failRequest => return failRequest(cmd),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Stored in CDP
|
||||||
|
pub const InterceptState = struct {
|
||||||
|
const Self = @This();
|
||||||
|
waiting: std.AutoArrayHashMap(u64, *Transfer),
|
||||||
|
|
||||||
|
pub fn init(allocator: Allocator) !InterceptState {
|
||||||
|
return .{
|
||||||
|
.waiting = std.AutoArrayHashMap(u64, *Transfer).init(allocator),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deinit(self: *Self) void {
|
||||||
|
self.waiting.deinit();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const RequestPattern = struct {
|
||||||
|
urlPattern: []const u8 = "*", // Wildcards ('*' -> zero or more, '?' -> exactly one) are allowed. Escape character is backslash. Omitting is equivalent to "*".
|
||||||
|
resourceType: ?ResourceType = null,
|
||||||
|
requestStage: RequestStage = .Request,
|
||||||
|
};
|
||||||
|
const ResourceType = enum {
|
||||||
|
Document,
|
||||||
|
Stylesheet,
|
||||||
|
Image,
|
||||||
|
Media,
|
||||||
|
Font,
|
||||||
|
Script,
|
||||||
|
TextTrack,
|
||||||
|
XHR,
|
||||||
|
Fetch,
|
||||||
|
Prefetch,
|
||||||
|
EventSource,
|
||||||
|
WebSocket,
|
||||||
|
Manifest,
|
||||||
|
SignedExchange,
|
||||||
|
Ping,
|
||||||
|
CSPViolationReport,
|
||||||
|
Preflight,
|
||||||
|
FedCM,
|
||||||
|
Other,
|
||||||
|
};
|
||||||
|
const RequestStage = enum {
|
||||||
|
Request,
|
||||||
|
Response,
|
||||||
|
};
|
||||||
|
|
||||||
|
const EnableParam = struct {
|
||||||
|
patterns: []RequestPattern = &.{},
|
||||||
|
handleAuthRequests: bool = false,
|
||||||
|
};
|
||||||
|
const ErrorReason = enum {
|
||||||
|
Failed,
|
||||||
|
Aborted,
|
||||||
|
TimedOut,
|
||||||
|
AccessDenied,
|
||||||
|
ConnectionClosed,
|
||||||
|
ConnectionReset,
|
||||||
|
ConnectionRefused,
|
||||||
|
ConnectionAborted,
|
||||||
|
ConnectionFailed,
|
||||||
|
NameNotResolved,
|
||||||
|
InternetDisconnected,
|
||||||
|
AddressUnreachable,
|
||||||
|
BlockedByClient,
|
||||||
|
BlockedByResponse,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn disable(cmd: anytype) !void {
|
||||||
|
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
||||||
|
bc.fetchDisable();
|
||||||
|
return cmd.sendResult(null, .{});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enable(cmd: anytype) !void {
|
||||||
|
const params = (try cmd.params(EnableParam)) orelse EnableParam{};
|
||||||
|
if (params.patterns.len != 0) log.warn(.cdp, "Fetch.enable No patterns yet", .{});
|
||||||
|
if (params.handleAuthRequests) log.warn(.cdp, "Fetch.enable No auth yet", .{});
|
||||||
|
|
||||||
|
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
||||||
|
try bc.fetchEnable();
|
||||||
|
|
||||||
|
return cmd.sendResult(null, .{});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn requestPaused(arena: Allocator, bc: anytype, intercept: *const Notification.RequestIntercept) !void {
|
||||||
|
var cdp = bc.cdp;
|
||||||
|
|
||||||
|
// unreachable because we _have_ to have a page.
|
||||||
|
const session_id = bc.session_id orelse unreachable;
|
||||||
|
const target_id = bc.target_id orelse unreachable;
|
||||||
|
|
||||||
|
// We keep it around to wait for modifications to the request.
|
||||||
|
// NOTE: we assume whomever created the request created it with a lifetime of the Page.
|
||||||
|
// TODO: What to do when receiving replies for a previous page's requests?
|
||||||
|
|
||||||
|
const transfer = intercept.transfer;
|
||||||
|
try cdp.intercept_state.waiting.put(transfer.id, transfer);
|
||||||
|
|
||||||
|
// NOTE: .request data preparation is duped from network.zig
|
||||||
|
const full_request_url = transfer.uri;
|
||||||
|
const request_url = try @import("network.zig").urlToString(arena, &full_request_url, .{
|
||||||
|
.scheme = true,
|
||||||
|
.authentication = true,
|
||||||
|
.authority = true,
|
||||||
|
.path = true,
|
||||||
|
.query = true,
|
||||||
|
});
|
||||||
|
const request_fragment = try @import("network.zig").urlToString(arena, &full_request_url, .{
|
||||||
|
.fragment = true,
|
||||||
|
});
|
||||||
|
const headers = try transfer.req.headers.asHashMap(arena);
|
||||||
|
// End of duped code
|
||||||
|
|
||||||
|
try cdp.sendEvent("Fetch.requestPaused", .{
|
||||||
|
.requestId = try std.fmt.allocPrint(arena, "INTERCEPT-{d}", .{transfer.id}),
|
||||||
|
.request = .{
|
||||||
|
.url = request_url,
|
||||||
|
.urlFragment = request_fragment,
|
||||||
|
.method = @tagName(transfer.req.method),
|
||||||
|
.hasPostData = transfer.req.body != null,
|
||||||
|
.headers = std.json.ArrayHashMap([]const u8){ .map = headers },
|
||||||
|
},
|
||||||
|
.frameId = target_id,
|
||||||
|
.resourceType = ResourceType.Document, // TODO!
|
||||||
|
.networkId = try std.fmt.allocPrint(arena, "REQ-{d}", .{transfer.id}),
|
||||||
|
}, .{ .session_id = session_id });
|
||||||
|
|
||||||
|
// Await either continueRequest, failRequest or fulfillRequest
|
||||||
|
intercept.wait_for_interception.* = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
const HeaderEntry = struct {
|
||||||
|
name: []const u8,
|
||||||
|
value: []const u8,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn continueRequest(cmd: anytype) !void {
|
||||||
|
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
||||||
|
const params = (try cmd.params(struct {
|
||||||
|
requestId: []const u8, // "INTERCEPT-{d}"
|
||||||
|
url: ?[]const u8 = null,
|
||||||
|
method: ?[]const u8 = null,
|
||||||
|
postData: ?[]const u8 = null,
|
||||||
|
headers: ?[]const HeaderEntry = null,
|
||||||
|
interceptResponse: bool = false,
|
||||||
|
})) orelse return error.InvalidParams;
|
||||||
|
if (params.postData != null or params.headers != null or params.interceptResponse) return error.NotYetImplementedParams;
|
||||||
|
|
||||||
|
const request_id = try idFromRequestId(params.requestId);
|
||||||
|
const entry = bc.cdp.intercept_state.waiting.fetchSwapRemove(request_id) orelse return error.RequestNotFound;
|
||||||
|
const transfer = entry.value;
|
||||||
|
|
||||||
|
// Update the request with the new parameters
|
||||||
|
if (params.url) |url| {
|
||||||
|
// The request url must be modified in a way that's not observable by page. So page.url is not updated.
|
||||||
|
try transfer.updateURL(try bc.cdp.browser.page_arena.allocator().dupeZ(u8, url));
|
||||||
|
}
|
||||||
|
if (params.method) |method| {
|
||||||
|
transfer.req.method = std.meta.stringToEnum(Method, method) orelse return error.InvalidParams;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.info(.cdp, "Request continued by intercept", .{ .id = params.requestId });
|
||||||
|
try bc.cdp.browser.http_client.process(transfer);
|
||||||
|
|
||||||
|
return cmd.sendResult(null, .{});
|
||||||
|
}
|
||||||
|
|
||||||
|
fn failRequest(cmd: anytype) !void {
|
||||||
|
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
||||||
|
var state = &bc.cdp.intercept_state;
|
||||||
|
const params = (try cmd.params(struct {
|
||||||
|
requestId: []const u8, // "INTERCEPT-{d}"
|
||||||
|
errorReason: ErrorReason,
|
||||||
|
})) orelse return error.InvalidParams;
|
||||||
|
|
||||||
|
const request_id = try idFromRequestId(params.requestId);
|
||||||
|
const entry = state.waiting.fetchSwapRemove(request_id) orelse return error.RequestNotFound;
|
||||||
|
// entry.value is the transfer
|
||||||
|
entry.value.abort();
|
||||||
|
|
||||||
|
log.info(.cdp, "Request aborted by intercept", .{ .reason = params.errorReason });
|
||||||
|
return cmd.sendResult(null, .{});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get u64 from requestId which is formatted as: "INTERCEPT-{d}"
|
||||||
|
fn idFromRequestId(request_id: []const u8) !u64 {
|
||||||
|
if (!std.mem.startsWith(u8, request_id, "INTERCEPT-")) return error.InvalidParams;
|
||||||
|
return std.fmt.parseInt(u64, request_id[10..], 10) catch return error.InvalidParams;
|
||||||
|
}
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ const Allocator = std.mem.Allocator;
|
|||||||
const Notification = @import("../../notification.zig").Notification;
|
const Notification = @import("../../notification.zig").Notification;
|
||||||
const log = @import("../../log.zig");
|
const log = @import("../../log.zig");
|
||||||
const CdpStorage = @import("storage.zig");
|
const CdpStorage = @import("storage.zig");
|
||||||
|
const Transfer = @import("../../http/Client.zig").Transfer;
|
||||||
|
|
||||||
pub fn processMessage(cmd: anytype) !void {
|
pub fn processMessage(cmd: anytype) !void {
|
||||||
const action = std.meta.stringToEnum(enum {
|
const action = std.meta.stringToEnum(enum {
|
||||||
@@ -51,6 +52,16 @@ pub fn processMessage(cmd: anytype) !void {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const Response = struct {
|
||||||
|
status: u16,
|
||||||
|
headers: std.StringArrayHashMapUnmanaged([]const u8) = .empty,
|
||||||
|
// These may not be complete yet, but we only tell the client
|
||||||
|
// Network.responseReceived when all the headers are in.
|
||||||
|
// Later should store body as well to support getResponseBody which should
|
||||||
|
// only work once Network.loadingFinished is sent but the body itself would
|
||||||
|
// be loaded with each chunks as Network.dataReceiveds are coming in.
|
||||||
|
};
|
||||||
|
|
||||||
fn enable(cmd: anytype) !void {
|
fn enable(cmd: anytype) !void {
|
||||||
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
const bc = cmd.browser_context orelse return error.BrowserContextNotLoaded;
|
||||||
try bc.networkEnable();
|
try bc.networkEnable();
|
||||||
@@ -78,7 +89,8 @@ fn setExtraHTTPHeaders(cmd: anytype) !void {
|
|||||||
try extra_headers.ensureTotalCapacity(arena, params.headers.map.count());
|
try extra_headers.ensureTotalCapacity(arena, params.headers.map.count());
|
||||||
var it = params.headers.map.iterator();
|
var it = params.headers.map.iterator();
|
||||||
while (it.next()) |header| {
|
while (it.next()) |header| {
|
||||||
extra_headers.appendAssumeCapacity(.{ .name = try arena.dupe(u8, header.key_ptr.*), .value = try arena.dupe(u8, header.value_ptr.*) });
|
const header_string = try std.fmt.allocPrintZ(arena, "{s}: {s}", .{ header.key_ptr.*, header.value_ptr.* });
|
||||||
|
extra_headers.appendAssumeCapacity(header_string);
|
||||||
}
|
}
|
||||||
|
|
||||||
return cmd.sendResult(null, .{});
|
return cmd.sendResult(null, .{});
|
||||||
@@ -190,20 +202,7 @@ fn getCookies(cmd: anytype) !void {
|
|||||||
try cmd.sendResult(.{ .cookies = writer }, .{});
|
try cmd.sendResult(.{ .cookies = writer }, .{});
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upsert a header into the headers array.
|
pub fn httpRequestFail(arena: Allocator, bc: anytype, data: *const Notification.RequestFail) !void {
|
||||||
// returns true if the header was added, false if it was updated
|
|
||||||
fn putAssumeCapacity(headers: *std.ArrayListUnmanaged(std.http.Header), extra: std.http.Header) bool {
|
|
||||||
for (headers.items) |*header| {
|
|
||||||
if (std.mem.eql(u8, header.name, extra.name)) {
|
|
||||||
header.value = extra.value;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
headers.appendAssumeCapacity(extra);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn httpRequestFail(arena: Allocator, bc: anytype, request: *const Notification.RequestFail) !void {
|
|
||||||
// It's possible that the request failed because we aborted when the client
|
// It's possible that the request failed because we aborted when the client
|
||||||
// sent Target.closeTarget. In that case, bc.session_id will be cleared
|
// sent Target.closeTarget. In that case, bc.session_id will be cleared
|
||||||
// already, and we can skip sending these messages to the client.
|
// already, and we can skip sending these messages to the client.
|
||||||
@@ -215,15 +214,15 @@ pub fn httpRequestFail(arena: Allocator, bc: anytype, request: *const Notificati
|
|||||||
|
|
||||||
// We're missing a bunch of fields, but, for now, this seems like enough
|
// We're missing a bunch of fields, but, for now, this seems like enough
|
||||||
try bc.cdp.sendEvent("Network.loadingFailed", .{
|
try bc.cdp.sendEvent("Network.loadingFailed", .{
|
||||||
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{request.id}),
|
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{data.transfer.id}),
|
||||||
// Seems to be what chrome answers with. I assume it depends on the type of error?
|
// Seems to be what chrome answers with. I assume it depends on the type of error?
|
||||||
.type = "Ping",
|
.type = "Ping",
|
||||||
.errorText = request.err,
|
.errorText = data.err,
|
||||||
.canceled = false,
|
.canceled = false,
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn httpRequestStart(arena: Allocator, bc: anytype, request: *const Notification.RequestStart) !void {
|
pub fn httpRequestStart(arena: Allocator, bc: anytype, data: *const Notification.RequestStart) !void {
|
||||||
// Isn't possible to do a network request within a Browser (which our
|
// Isn't possible to do a network request within a Browser (which our
|
||||||
// notification is tied to), without a page.
|
// notification is tied to), without a page.
|
||||||
std.debug.assert(bc.session.page != null);
|
std.debug.assert(bc.session.page != null);
|
||||||
@@ -236,10 +235,8 @@ pub fn httpRequestStart(arena: Allocator, bc: anytype, request: *const Notificat
|
|||||||
const page = bc.session.currentPage() orelse unreachable;
|
const page = bc.session.currentPage() orelse unreachable;
|
||||||
|
|
||||||
// Modify request with extra CDP headers
|
// Modify request with extra CDP headers
|
||||||
try request.headers.ensureTotalCapacity(request.arena, request.headers.items.len + cdp.extra_headers.items.len);
|
|
||||||
for (cdp.extra_headers.items) |extra| {
|
for (cdp.extra_headers.items) |extra| {
|
||||||
const new = putAssumeCapacity(request.headers, extra);
|
try data.transfer.req.headers.add(extra);
|
||||||
if (!new) log.debug(.cdp, "request header overwritten", .{ .name = extra.name });
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const document_url = try urlToString(arena, &page.url.uri, .{
|
const document_url = try urlToString(arena, &page.url.uri, .{
|
||||||
@@ -250,41 +247,38 @@ pub fn httpRequestStart(arena: Allocator, bc: anytype, request: *const Notificat
|
|||||||
.query = true,
|
.query = true,
|
||||||
});
|
});
|
||||||
|
|
||||||
const request_url = try urlToString(arena, request.url, .{
|
const transfer = data.transfer;
|
||||||
|
const full_request_url = transfer.uri;
|
||||||
|
const request_url = try urlToString(arena, &full_request_url, .{
|
||||||
.scheme = true,
|
.scheme = true,
|
||||||
.authentication = true,
|
.authentication = true,
|
||||||
.authority = true,
|
.authority = true,
|
||||||
.path = true,
|
.path = true,
|
||||||
.query = true,
|
.query = true,
|
||||||
});
|
});
|
||||||
|
const request_fragment = try urlToString(arena, &full_request_url, .{
|
||||||
const request_fragment = try urlToString(arena, request.url, .{
|
.fragment = true, // TODO since path is false, this likely does not work as intended
|
||||||
.fragment = true,
|
|
||||||
});
|
});
|
||||||
|
|
||||||
var headers: std.StringArrayHashMapUnmanaged([]const u8) = .empty;
|
const headers = try transfer.req.headers.asHashMap(arena);
|
||||||
try headers.ensureTotalCapacity(arena, request.headers.items.len);
|
|
||||||
for (request.headers.items) |header| {
|
|
||||||
headers.putAssumeCapacity(header.name, header.value);
|
|
||||||
}
|
|
||||||
|
|
||||||
// We're missing a bunch of fields, but, for now, this seems like enough
|
// We're missing a bunch of fields, but, for now, this seems like enough
|
||||||
try cdp.sendEvent("Network.requestWillBeSent", .{
|
try cdp.sendEvent("Network.requestWillBeSent", .{
|
||||||
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{request.id}),
|
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{transfer.id}),
|
||||||
.frameId = target_id,
|
.frameId = target_id,
|
||||||
.loaderId = bc.loader_id,
|
.loaderId = bc.loader_id,
|
||||||
.documentUrl = document_url,
|
.documentUrl = document_url,
|
||||||
.request = .{
|
.request = .{
|
||||||
.url = request_url,
|
.url = request_url,
|
||||||
.urlFragment = request_fragment,
|
.urlFragment = request_fragment,
|
||||||
.method = @tagName(request.method),
|
.method = @tagName(transfer.req.method),
|
||||||
.hasPostData = request.has_body,
|
.hasPostData = transfer.req.body != null,
|
||||||
.headers = std.json.ArrayHashMap([]const u8){ .map = headers },
|
.headers = std.json.ArrayHashMap([]const u8){ .map = headers },
|
||||||
},
|
},
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn httpRequestComplete(arena: Allocator, bc: anytype, request: *const Notification.RequestComplete) !void {
|
pub fn httpHeadersDone(arena: Allocator, bc: anytype, request: *const Notification.ResponseHeadersDone) !void {
|
||||||
// Isn't possible to do a network request within a Browser (which our
|
// Isn't possible to do a network request within a Browser (which our
|
||||||
// notification is tied to), without a page.
|
// notification is tied to), without a page.
|
||||||
std.debug.assert(bc.session.page != null);
|
std.debug.assert(bc.session.page != null);
|
||||||
@@ -295,7 +289,7 @@ pub fn httpRequestComplete(arena: Allocator, bc: anytype, request: *const Notifi
|
|||||||
const session_id = bc.session_id orelse unreachable;
|
const session_id = bc.session_id orelse unreachable;
|
||||||
const target_id = bc.target_id orelse unreachable;
|
const target_id = bc.target_id orelse unreachable;
|
||||||
|
|
||||||
const url = try urlToString(arena, request.url, .{
|
const url = try urlToString(arena, &request.transfer.uri, .{
|
||||||
.scheme = true,
|
.scheme = true,
|
||||||
.authentication = true,
|
.authentication = true,
|
||||||
.authority = true,
|
.authority = true,
|
||||||
@@ -303,32 +297,48 @@ pub fn httpRequestComplete(arena: Allocator, bc: anytype, request: *const Notifi
|
|||||||
.query = true,
|
.query = true,
|
||||||
});
|
});
|
||||||
|
|
||||||
var headers: std.StringArrayHashMapUnmanaged([]const u8) = .empty;
|
const status = request.transfer.response_header.?.status;
|
||||||
try headers.ensureTotalCapacity(arena, request.headers.len);
|
|
||||||
for (request.headers) |header| {
|
|
||||||
headers.putAssumeCapacity(header.name, header.value);
|
|
||||||
}
|
|
||||||
|
|
||||||
// We're missing a bunch of fields, but, for now, this seems like enough
|
// We're missing a bunch of fields, but, for now, this seems like enough
|
||||||
try cdp.sendEvent("Network.responseReceived", .{
|
try cdp.sendEvent("Network.responseReceived", .{
|
||||||
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{request.id}),
|
.requestId = try std.fmt.allocPrint(arena, "REQ-{d}", .{request.transfer.id}),
|
||||||
.loaderId = bc.loader_id,
|
.loaderId = bc.loader_id,
|
||||||
.response = .{
|
.response = .{
|
||||||
.url = url,
|
.url = url,
|
||||||
.status = request.status,
|
.status = status,
|
||||||
.statusText = @as(std.http.Status, @enumFromInt(request.status)).phrase() orelse "Unknown",
|
.statusText = @as(std.http.Status, @enumFromInt(status)).phrase() orelse "Unknown",
|
||||||
.headers = std.json.ArrayHashMap([]const u8){ .map = headers },
|
.headers = ResponseHeaderWriter.init(request.transfer),
|
||||||
},
|
},
|
||||||
.frameId = target_id,
|
.frameId = target_id,
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
}
|
}
|
||||||
|
|
||||||
fn urlToString(arena: Allocator, url: *const std.Uri, opts: std.Uri.WriteToStreamOptions) ![]const u8 {
|
pub fn urlToString(arena: Allocator, url: *const std.Uri, opts: std.Uri.WriteToStreamOptions) ![]const u8 {
|
||||||
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||||
try url.writeToStream(opts, buf.writer(arena));
|
try url.writeToStream(opts, buf.writer(arena));
|
||||||
return buf.items;
|
return buf.items;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const ResponseHeaderWriter = struct {
|
||||||
|
transfer: *Transfer,
|
||||||
|
|
||||||
|
fn init(transfer: *Transfer) ResponseHeaderWriter {
|
||||||
|
return .{
|
||||||
|
.transfer = transfer,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn jsonStringify(self: *const ResponseHeaderWriter, writer: anytype) !void {
|
||||||
|
try writer.beginObject();
|
||||||
|
var it = self.transfer.responseHeaderIterator();
|
||||||
|
while (it.next()) |hdr| {
|
||||||
|
try writer.objectField(hdr.name);
|
||||||
|
try writer.write(hdr.value);
|
||||||
|
}
|
||||||
|
try writer.endObject();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
const testing = @import("../testing.zig");
|
const testing = @import("../testing.zig");
|
||||||
test "cdp.network setExtraHTTPHeaders" {
|
test "cdp.network setExtraHTTPHeaders" {
|
||||||
var ctx = testing.context();
|
var ctx = testing.context();
|
||||||
|
|||||||
@@ -148,12 +148,10 @@ fn navigate(cmd: anytype) !void {
|
|||||||
return error.SessionIdNotLoaded;
|
return error.SessionIdNotLoaded;
|
||||||
}
|
}
|
||||||
|
|
||||||
const url = try URL.parse(params.url, "https");
|
|
||||||
|
|
||||||
var page = bc.session.currentPage() orelse return error.PageNotLoaded;
|
var page = bc.session.currentPage() orelse return error.PageNotLoaded;
|
||||||
bc.loader_id = bc.cdp.loader_id_gen.next();
|
bc.loader_id = bc.cdp.loader_id_gen.next();
|
||||||
|
|
||||||
try page.navigate(url, .{
|
try page.navigate(params.url, .{
|
||||||
.reason = .address_bar,
|
.reason = .address_bar,
|
||||||
.cdp_id = cmd.input.id,
|
.cdp_id = cmd.input.id,
|
||||||
});
|
});
|
||||||
@@ -191,13 +189,13 @@ pub fn pageNavigate(arena: Allocator, bc: anytype, event: *const Notification.Pa
|
|||||||
.frameId = target_id,
|
.frameId = target_id,
|
||||||
.delay = 0,
|
.delay = 0,
|
||||||
.reason = reason,
|
.reason = reason,
|
||||||
.url = event.url.raw,
|
.url = event.url,
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
|
|
||||||
try cdp.sendEvent("Page.frameRequestedNavigation", .{
|
try cdp.sendEvent("Page.frameRequestedNavigation", .{
|
||||||
.frameId = target_id,
|
.frameId = target_id,
|
||||||
.reason = reason,
|
.reason = reason,
|
||||||
.url = event.url.raw,
|
.url = event.url,
|
||||||
.disposition = "currentTab",
|
.disposition = "currentTab",
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
}
|
}
|
||||||
@@ -205,7 +203,7 @@ pub fn pageNavigate(arena: Allocator, bc: anytype, event: *const Notification.Pa
|
|||||||
// frameStartedNavigating event
|
// frameStartedNavigating event
|
||||||
try cdp.sendEvent("Page.frameStartedNavigating", .{
|
try cdp.sendEvent("Page.frameStartedNavigating", .{
|
||||||
.frameId = target_id,
|
.frameId = target_id,
|
||||||
.url = event.url.raw,
|
.url = event.url,
|
||||||
.loaderId = loader_id,
|
.loaderId = loader_id,
|
||||||
.navigationType = "differentDocument",
|
.navigationType = "differentDocument",
|
||||||
}, .{ .session_id = session_id });
|
}, .{ .session_id = session_id });
|
||||||
@@ -308,7 +306,7 @@ pub fn pageNavigated(bc: anytype, event: *const Notification.PageNavigated) !voi
|
|||||||
.type = "Navigation",
|
.type = "Navigation",
|
||||||
.frame = Frame{
|
.frame = Frame{
|
||||||
.id = target_id,
|
.id = target_id,
|
||||||
.url = event.url.raw,
|
.url = event.url,
|
||||||
.loaderId = bc.loader_id,
|
.loaderId = bc.loader_id,
|
||||||
.securityOrigin = bc.security_origin,
|
.securityOrigin = bc.security_origin,
|
||||||
.secureContextType = bc.secure_context_type,
|
.secureContextType = bc.secure_context_type,
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ fn getBrowserContexts(cmd: anytype) !void {
|
|||||||
fn createBrowserContext(cmd: anytype) !void {
|
fn createBrowserContext(cmd: anytype) !void {
|
||||||
const params = try cmd.params(struct {
|
const params = try cmd.params(struct {
|
||||||
disposeOnDetach: bool = false,
|
disposeOnDetach: bool = false,
|
||||||
proxyServer: ?[]const u8 = null,
|
proxyServer: ?[:0]const u8 = null,
|
||||||
proxyBypassList: ?[]const u8 = null,
|
proxyBypassList: ?[]const u8 = null,
|
||||||
originsWithUniversalNetworkAccess: ?[]const []const u8 = null,
|
originsWithUniversalNetworkAccess: ?[]const []const u8 = null,
|
||||||
});
|
});
|
||||||
@@ -84,9 +84,8 @@ fn createBrowserContext(cmd: anytype) !void {
|
|||||||
if (params) |p| {
|
if (params) |p| {
|
||||||
if (p.proxyServer) |proxy| {
|
if (p.proxyServer) |proxy| {
|
||||||
// For now the http client is not in the browser context so we assume there is just 1.
|
// For now the http client is not in the browser context so we assume there is just 1.
|
||||||
bc.http_proxy_before = cmd.cdp.browser.http_client.http_proxy;
|
try cmd.cdp.browser.http_client.changeProxy(proxy);
|
||||||
const proxy_cp = try cmd.cdp.browser.http_client.allocator.dupe(u8, proxy);
|
bc.http_proxy_changed = true;
|
||||||
cmd.cdp.browser.http_client.http_proxy = try std.Uri.parse(proxy_cp);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
780
src/http/Client.zig
Normal file
780
src/http/Client.zig
Normal file
@@ -0,0 +1,780 @@
|
|||||||
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
|
//
|
||||||
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as
|
||||||
|
// published by the Free Software Foundation, either version 3 of the
|
||||||
|
// License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const log = @import("../log.zig");
|
||||||
|
const builtin = @import("builtin");
|
||||||
|
const Http = @import("Http.zig");
|
||||||
|
pub const Headers = Http.Headers;
|
||||||
|
const Notification = @import("../notification.zig").Notification;
|
||||||
|
const storage = @import("../browser/storage/storage.zig");
|
||||||
|
|
||||||
|
const c = Http.c;
|
||||||
|
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
const ArenaAllocator = std.heap.ArenaAllocator;
|
||||||
|
|
||||||
|
const errorCheck = Http.errorCheck;
|
||||||
|
const errorMCheck = Http.errorMCheck;
|
||||||
|
|
||||||
|
pub const Method = Http.Method;
|
||||||
|
|
||||||
|
// This is loosely tied to a browser Page. Loading all the <scripts>, doing
|
||||||
|
// XHR requests, and loading imports all happens through here. Sine the app
|
||||||
|
// currently supports 1 browser and 1 page at-a-time, we only have 1 Client and
|
||||||
|
// re-use it from page to page. This allows us better re-use of the various
|
||||||
|
// buffers/caches (including keepalive connections) that libcurl has.
|
||||||
|
//
|
||||||
|
// The app has other secondary http needs, like telemetry. While we want to
|
||||||
|
// share some things (namely the ca blob, and maybe some configuration
|
||||||
|
// (TODO: ??? should proxy settings be global ???)), we're able to do call
|
||||||
|
// client.abort() to abort the transfers being made by a page, without impacting
|
||||||
|
// those other http requests.
|
||||||
|
pub const Client = @This();
|
||||||
|
|
||||||
|
// count of active requests
|
||||||
|
active: usize,
|
||||||
|
|
||||||
|
// curl has 2 APIs: easy and multi. Multi is like a combination of some I/O block
|
||||||
|
// (e.g. epoll) and a bunch of pools. You add/remove easys to the multiple and
|
||||||
|
// then poll the multi.
|
||||||
|
multi: *c.CURLM,
|
||||||
|
|
||||||
|
// Our easy handles. Although the multi contains buffer pools and connections
|
||||||
|
// pools, re-using the easys is still recommended. This acts as our own pool
|
||||||
|
// of easys.
|
||||||
|
handles: Handles,
|
||||||
|
|
||||||
|
// Use to generate the next request ID
|
||||||
|
next_request_id: u64 = 0,
|
||||||
|
|
||||||
|
// When handles has no more available easys, requests get queued.
|
||||||
|
queue: TransferQueue,
|
||||||
|
|
||||||
|
// Memory pool for Queue nodes.
|
||||||
|
queue_node_pool: std.heap.MemoryPool(TransferQueue.Node),
|
||||||
|
|
||||||
|
// The main app allocator
|
||||||
|
allocator: Allocator,
|
||||||
|
|
||||||
|
// Once we have a handle/easy to process a request with, we create a Transfer
|
||||||
|
// which contains the Request as well as any state we need to process the
|
||||||
|
// request. These wil come and go with each request.
|
||||||
|
transfer_pool: std.heap.MemoryPool(Transfer),
|
||||||
|
|
||||||
|
// see ScriptManager.blockingGet
|
||||||
|
blocking: Handle,
|
||||||
|
|
||||||
|
// To notify registered subscribers of events, the browser sets/nulls this for us.
|
||||||
|
notification: ?*Notification = null,
|
||||||
|
|
||||||
|
// The only place this is meant to be used is in `makeRequest` BEFORE `perform`
|
||||||
|
// is called. It is used to generate our Cookie header. It can be used for other
|
||||||
|
// purposes, but keep in mind that, while single-threaded, calls like makeRequest
|
||||||
|
// can result in makeRequest being re-called (from a doneCallback).
|
||||||
|
arena: ArenaAllocator,
|
||||||
|
|
||||||
|
// only needed for CDP which can change the proxy and then restore it. When
|
||||||
|
// restoring, this originally-configured value is what it goes to.
|
||||||
|
http_proxy: ?[:0]const u8 = null,
|
||||||
|
|
||||||
|
const TransferQueue = std.DoublyLinkedList(*Transfer);
|
||||||
|
|
||||||
|
// Creates a heap-allocated Client together with all of its supporting
// state: both memory pools, the curl multi handle, the pooled easy handles
// and the dedicated blocking handle. On any failure, everything acquired
// so far is released through the errdefer chain, in reverse order.
pub fn init(allocator: Allocator, ca_blob: ?c.curl_blob, opts: Http.Opts) !*Client {
    var transfers = std.heap.MemoryPool(Transfer).init(allocator);
    errdefer transfers.deinit();

    var queue_nodes = std.heap.MemoryPool(TransferQueue.Node).init(allocator);
    errdefer queue_nodes.deinit();

    const client = try allocator.create(Client);
    errdefer allocator.destroy(client);

    const multi = c.curl_multi_init() orelse return error.FailedToInitializeMulti;
    errdefer _ = c.curl_multi_cleanup(multi);

    // bound concurrent connections per host
    try errorMCheck(c.curl_multi_setopt(multi, c.CURLMOPT_MAX_HOST_CONNECTIONS, @as(c_long, opts.max_host_open)));

    var handle_pool = try Handles.init(allocator, client, ca_blob, &opts);
    errdefer handle_pool.deinit(allocator);

    var blocking_handle = try Handle.init(client, ca_blob, &opts);
    errdefer blocking_handle.deinit();

    client.* = .{
        .queue = .{},
        .active = 0,
        .multi = multi,
        .handles = handle_pool,
        .blocking = blocking_handle,
        .allocator = allocator,
        .http_proxy = opts.http_proxy,
        .transfer_pool = transfers,
        .queue_node_pool = queue_nodes,
        .arena = ArenaAllocator.init(allocator),
    };

    return client;
}
|
||||||
|
|
||||||
|
// Tears the client down in dependency order: abort all in-flight and
// queued transfers first (so nothing references an easy handle anymore),
// then destroy the easy handles, then the multi they were attached to,
// and finally the client's own memory.
pub fn deinit(self: *Client) void {
    // cancels every active transfer and drains the pending queue
    self.abort();
    self.blocking.deinit();
    self.handles.deinit(self.allocator);

    // must come after all easy handles have been cleaned up
    _ = c.curl_multi_cleanup(self.multi);

    self.transfer_pool.deinit();
    self.queue_node_pool.deinit();
    self.arena.deinit();
    self.allocator.destroy(self);
}
|
||||||
|
|
||||||
|
// Aborts every in-flight transfer and discards all queued (not yet
// started) transfers. After this returns there are no active requests and
// every pooled easy handle is back in the available list.
pub fn abort(self: *Client) void {
    // Each successful transfer.abort() removes its node from `in_use`, so
    // repeatedly taking `first` visits every entry exactly once.
    while (self.handles.in_use.first) |node| {
        var transfer = Transfer.fromEasy(node.data.conn.easy) catch |err| {
            log.err(.http, "get private info", .{ .err = err, .source = "abort" });
            // Fix: the original `continue` here looped forever — on failure
            // the node was never removed from `in_use`, so `first` kept
            // yielding the same entry. Bail out instead; the debug
            // assertions below will flag the inconsistent state.
            break;
        };
        transfer.abort();
    }
    std.debug.assert(self.active == 0);

    // free the queue nodes of transfers that never started
    var n = self.queue.first;
    while (n) |node| {
        n = node.next;
        self.queue_node_pool.destroy(node);
    }
    self.queue = .{};

    // Maybe a bit of overkill.
    // We can remove some (all?) of these once we're confident it's right.
    std.debug.assert(self.handles.in_use.first == null);
    std.debug.assert(self.handles.available.len == self.handles.handles.len);
    if (builtin.mode == .Debug) {
        var running: c_int = undefined;
        std.debug.assert(c.curl_multi_perform(self.multi, &running) == c.CURLE_OK);
        std.debug.assert(running == 0);
    }
}
|
||||||
|
|
||||||
|
// One iteration of the event loop: starts as many queued transfers as
// there are idle easy handles, then lets libcurl make progress, waiting up
// to `timeout_ms` for socket activity.
pub fn tick(self: *Client, timeout_ms: usize) !void {
    while (self.handles.hasAvailable()) {
        const node = self.queue.popFirst() orelse break;
        const transfer = node.data;
        self.queue_node_pool.destroy(node);

        // guaranteed non-null: hasAvailable() was just checked and nothing
        // in between could have consumed a handle
        const handle = self.handles.getFreeHandle().?;
        try self.makeRequest(handle, transfer);
    }

    try self.perform(@intCast(timeout_ms));
}
|
||||||
|
|
||||||
|
// Starts a new request. Subscribers (if any) are told about it and given
// the chance to intercept; an intercepted request stays parked until the
// interceptor calls process() to resume it or transfer.abort() to drop it.
pub fn request(self: *Client, req: Request) !void {
    const transfer = try self.makeTransfer(req);

    if (self.notification) |notif| {
        notif.dispatch(.http_request_start, &.{ .transfer = transfer });

        var intercepted = false;
        notif.dispatch(.http_request_intercept, &.{ .transfer = transfer, .wait_for_interception = &intercepted });
        if (intercepted) {
            // The user was sent an invitation to intercept this request.
            return;
        }
    }

    return self.process(transfer);
}
|
||||||
|
|
||||||
|
// Above, request will not process if there's an interception request. In
// such cases, the interceptor is expected to call process to continue the
// transfer, or transfer.abort() to abort it. Runs the transfer immediately
// when an easy handle is free, otherwise parks it on the queue for tick().
pub fn process(self: *Client, transfer: *Transfer) !void {
    const handle = self.handles.getFreeHandle() orelse {
        // no capacity right now; tick() will pick this up later
        const node = try self.queue_node_pool.create();
        node.data = transfer;
        self.queue.append(node);
        return;
    };
    return self.makeRequest(handle, transfer);
}
|
||||||
|
|
||||||
|
// See ScriptManager.blockingGet. Bypasses the pool entirely and runs the
// request on the dedicated blocking handle.
pub fn blockingRequest(self: *Client, req: Request) !void {
    const transfer = try self.makeTransfer(req);
    return self.makeRequest(&self.blocking, transfer);
}
|
||||||
|
|
||||||
|
// Wraps a Request into a pooled Transfer with a fresh id. Takes ownership
// of req.headers: released here on failure, by the transfer's deinit on
// success.
fn makeTransfer(self: *Client, req: Request) !*Transfer {
    errdefer req.headers.deinit();

    // parsed up-front because cookie handling needs the structured URI
    const uri = std.Uri.parse(req.url) catch |err| {
        log.warn(.http, "invalid url", .{ .err = err, .url = req.url });
        return err;
    };

    const transfer = try self.transfer_pool.create();
    errdefer self.transfer_pool.destroy(transfer);

    self.next_request_id += 1;
    transfer.* = .{
        .id = self.next_request_id,
        .uri = uri,
        .req = req,
        .ctx = req.ctx,
        .client = self,
    };
    return transfer;
}
|
||||||
|
|
||||||
|
// Notifies subscribers and the request owner that the transfer failed.
// Guarded so each transfer only ever reports failure once.
fn requestFailed(self: *Client, transfer: *Transfer, err: anyerror) void {
    // A second failure for the same transfer shouldn't happen; crash in
    // debug builds, noop it in release.
    std.debug.assert(!transfer._notified_fail);
    if (transfer._notified_fail) {
        return;
    }
    transfer._notified_fail = true;

    if (self.notification) |notif| {
        notif.dispatch(.http_request_fail, &.{ .transfer = transfer, .err = err });
    }

    transfer.req.error_callback(transfer.ctx, err);
}
|
||||||
|
|
||||||
|
// Restrictive since it only works while there are no inflight requests. In
// some cases the libcurl documentation is clear that changing settings on
// an inflight connection is undefined. It says nothing about CURLOPT_PROXY,
// but better safe than sorry.
// For now this restriction is fine: it's only called by CDP on
// createBrowserContext, at which point an active connection would likely
// indicate a bug (a previous abort failed?). If we ever need to call this
// at arbitrary times, it's worth digging into libcurl to see whether the
// option can be changed at any point in the easy's lifecycle.
pub fn changeProxy(self: *Client, proxy: [:0]const u8) !void {
    try self.ensureNoActiveConnection();

    // apply to every pooled easy as well as the dedicated blocking easy
    for (self.handles.handles) |handle| {
        try errorCheck(c.curl_easy_setopt(handle.conn.easy, c.CURLOPT_PROXY, proxy.ptr));
    }
    try errorCheck(c.curl_easy_setopt(self.blocking.conn.easy, c.CURLOPT_PROXY, proxy.ptr));
}
|
||||||
|
|
||||||
|
// Reverts every easy handle to the proxy this client was created with (or
// to none). Same no-inflight restriction as changeProxy; should be fine
// since this is only called on BrowserContext deinit.
pub fn restoreOriginalProxy(self: *Client) !void {
    try self.ensureNoActiveConnection();

    const original = if (self.http_proxy) |p| p.ptr else null;
    for (self.handles.handles) |handle| {
        try errorCheck(c.curl_easy_setopt(handle.conn.easy, c.CURLOPT_PROXY, original));
    }
    try errorCheck(c.curl_easy_setopt(self.blocking.conn.easy, c.CURLOPT_PROXY, original));
}
|
||||||
|
|
||||||
|
// Binds a transfer to an easy handle, configures the easy with the
// request's url/method/body/headers, and hands it to the multi. Cleanup
// ownership is subtle — see the comment before curl_multi_add_handle.
fn makeRequest(self: *Client, handle: *Handle, transfer: *Transfer) !void {
    const conn = handle.conn;
    const easy = conn.easy;
    const req = &transfer.req;

    {
        transfer._handle = handle;
        // only covers this block; after curl_multi_add_handle succeeds,
        // perform() owns the failure path instead
        errdefer transfer.deinit();

        try conn.setURL(req.url);
        try conn.setMethod(req.method);
        if (req.body) |b| {
            try conn.setBody(b);
        }

        var header_list = req.headers;
        try conn.secretHeaders(&header_list); // Add headers that must be hidden from intercepts
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list.headers));
        // stash the transfer on the easy so callbacks can recover it (fromEasy)
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PRIVATE, transfer));
    }

    // As soon as this call succeeds, our "perform" loop is responsible for
    // cleanup. That's why the code above is in its own block: if anything
    // fails BEFORE `curl_multi_add_handle` succeeds, we still clean up
    // here; if things fail after, we expect perform() to pick up the
    // failure and clean up.
    try errorMCheck(c.curl_multi_add_handle(self.multi, easy));

    if (req.start_callback) |cb| {
        cb(transfer) catch |err| {
            // the start callback failed before any transfer progress was
            // made; detach the easy again and surface the error
            try errorMCheck(c.curl_multi_remove_handle(self.multi, easy));
            transfer.deinit();
            return err;
        };
    }

    self.active += 1;
    // non-blocking kick so the transfer starts right away
    return self.perform(0);
}
|
||||||
|
|
||||||
|
// Drives libcurl: lets it move data, optionally waits up to `timeout_ms`
// for socket activity, then reaps completed transfers and fires their
// done/error callbacks.
fn perform(self: *Client, timeout_ms: c_int) !void {
    const multi = self.multi;

    var running: c_int = undefined;
    try errorMCheck(c.curl_multi_perform(multi, &running));

    // only block when something is in flight and the caller allows waiting
    if (running > 0 and timeout_ms > 0) {
        try errorMCheck(c.curl_multi_poll(multi, null, 0, timeout_ms, null));
    }

    var messages_count: c_int = 0;
    while (c.curl_multi_info_read(multi, &messages_count)) |msg_| {
        const msg: *c.CURLMsg = @ptrCast(msg_);
        // This is the only possible message type from CURL for now.
        std.debug.assert(msg.msg == c.CURLMSG_DONE);

        const easy = msg.easy_handle.?;
        const transfer = try Transfer.fromEasy(easy);

        // release it ASAP so that it's available; some done_callbacks
        // will load more resources.
        self.endTransfer(transfer);

        defer transfer.deinit();

        if (errorCheck(msg.data.result)) {
            transfer.req.done_callback(transfer.ctx) catch |err| {
                // NOTE(review): the original comment claimed the transfer
                // isn't valid here, yet it is still passed to
                // requestFailed below — confirm the intended lifetime.
                log.err(.http, "done_callback", .{ .err = err });
                self.requestFailed(transfer, err);
            };
        } else |err| {
            self.requestFailed(transfer, err);
        }
    }
}
|
||||||
|
|
||||||
|
// Detaches a transfer from the multi and returns its easy handle to the
// pool. A failed removal is unrecoverable — libcurl gives us no way to
// retry it — so it is fatal.
fn endTransfer(self: *Client, transfer: *Transfer) void {
    const handle = transfer._handle.?;

    errorMCheck(c.curl_multi_remove_handle(self.multi, handle.conn.easy)) catch |err| {
        log.fatal(.http, "Failed to remove handle", .{ .err = err });
    };

    transfer._handle = null;
    self.handles.release(handle);
    self.active -= 1;
}
|
||||||
|
|
||||||
|
// Guard used by the proxy-changing functions, which are only safe to call
// while no transfer is in flight.
fn ensureNoActiveConnection(self: *const Client) !void {
    if (self.active != 0) {
        return error.InflightConnection;
    }
}
|
||||||
|
|
||||||
|
// Pool of easy handles. Idle handles sit on `available`; handles bound to
// an active transfer sit on `in_use`.
const Handles = struct {
    handles: []Handle,
    in_use: HandleList,
    available: HandleList,

    const HandleList = std.DoublyLinkedList(*Handle);

    // pointer to opts is not stable, don't hold a reference to it!
    fn init(allocator: Allocator, client: *Client, ca_blob: ?c.curl_blob, opts: *const Http.Opts) !Handles {
        // always create at least one handle, even when configured for zero
        const count = if (opts.max_concurrent == 0) 1 else opts.max_concurrent;

        const handles = try allocator.alloc(Handle, count);
        errdefer allocator.free(handles);

        // Fix: if a later Handle.init fails, the easys created so far must
        // be cleaned up too (previously they leaked).
        var initialized: usize = 0;
        errdefer for (handles[0..initialized]) |*h| h.deinit();

        var available: HandleList = .{};
        for (handles) |*handle| {
            handle.* = try Handle.init(client, ca_blob, opts);
            initialized += 1;
            handle.node = .{ .data = handle };
            available.append(&handle.node.?);
        }

        return .{
            .in_use = .{},
            .handles = handles,
            .available = available,
        };
    }

    fn deinit(self: *Handles, allocator: Allocator) void {
        for (self.handles) |*h| {
            h.deinit();
        }
        allocator.free(self.handles);
    }

    // true when at least one easy handle is idle
    fn hasAvailable(self: *const Handles) bool {
        return self.available.first != null;
    }

    // Moves an idle handle onto the in_use list and returns it, or null
    // when every handle is busy.
    fn getFreeHandle(self: *Handles) ?*Handle {
        if (self.available.popFirst()) |node| {
            node.prev = null;
            node.next = null;
            self.in_use.append(node);
            return node.data;
        }
        return null;
    }

    // Returns a handle to the available list.
    fn release(self: *Handles, handle: *Handle) void {
        // client.blocking is a handle without a node; it never appears in
        // the in_use or available lists.
        if (handle.node == null) return;
        // Fix: take a pointer into the stored node rather than the address
        // of an `orelse` rvalue copy, so the list links the real node.
        const node = &handle.node.?;

        self.in_use.remove(node);
        node.prev = null;
        node.next = null;
        self.available.append(node);
    }
};
|
||||||
|
|
||||||
|
// wraps a c.CURL (an easy handle)
const Handle = struct {
    client: *Client,
    conn: Http.Connection,
    // intrusive list node; null for the dedicated blocking handle, which
    // is never tracked by the Handles pool
    node: ?Handles.HandleList.Node,

    // pointer to opts is not stable, don't hold a reference to it!
    fn init(client: *Client, ca_blob: ?c.curl_blob, opts: *const Http.Opts) !Handle {
        const conn = try Http.Connection.init(ca_blob, opts);
        errdefer conn.deinit();

        const easy = conn.easy;

        // callbacks: each receives the easy itself as its user-data so
        // Transfer.fromEasy can recover the transfer from CURLINFO_PRIVATE
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HEADERDATA, easy));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HEADERFUNCTION, Transfer.headerCallback));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_WRITEDATA, easy));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_WRITEFUNCTION, Transfer.dataCallback));

        return .{
            .conn = conn,
            .node = null,
            .client = client,
        };
    }

    fn deinit(self: *const Handle) void {
        self.conn.deinit();
    }
};
|
||||||
|
|
||||||
|
// Cookie context captured for a request, used to build its Cookie header.
pub const RequestCookie = struct {
    is_http: bool,
    is_navigation: bool,
    origin: *const std.Uri,
    jar: *@import("../browser/storage/cookie.zig").Jar,

    // Appends a "Cookie: ..." header to `headers` for the cookies in the
    // jar that match `url` (no header is added when none match). The
    // header string is allocated from `temp`.
    pub fn headersForRequest(self: *const RequestCookie, temp: Allocator, url: [:0]const u8, headers: *Headers) !void {
        const uri = std.Uri.parse(url) catch |err| {
            log.warn(.http, "invalid url", .{ .err = err, .url = url });
            return error.InvalidUrl;
        };

        var buf: std.ArrayListUnmanaged(u8) = .{};
        try self.jar.forRequest(&uri, buf.writer(temp), .{
            .is_http = self.is_http,
            .is_navigation = self.is_navigation,
            .origin_uri = self.origin,
            .prefix = "Cookie: ",
        });

        if (buf.items.len == 0) {
            // no cookie matched; nothing to add
            return;
        }
        try buf.append(temp, 0); // null terminate for curl
        try headers.add(@ptrCast(buf.items.ptr));
    }
};
|
||||||
|
|
||||||
|
// A single HTTP request as submitted by callers. The callbacks below are
// invoked by the owning Transfer as the request progresses.
pub const Request = struct {
    method: Method,
    // must stay valid for the duration of the request
    url: [:0]const u8,
    // owned by the request; released by the transfer's deinit (or by
    // makeTransfer on failure)
    headers: Headers,
    body: ?[]const u8 = null,
    // response Set-Cookie headers are recorded here
    cookie_jar: *storage.CookieJar,

    // arbitrary data that can be associated with this request
    ctx: *anyopaque = undefined,

    // invoked right after the transfer is handed to the multi
    start_callback: ?*const fn (transfer: *Transfer) anyerror!void = null,
    // invoked per response header line (the status line is not included)
    header_callback: ?*const fn (transfer: *Transfer, header: []const u8) anyerror!void = null,
    // invoked once the full header section has arrived
    header_done_callback: *const fn (transfer: *Transfer) anyerror!void,
    // invoked for each chunk of response body
    data_callback: *const fn (transfer: *Transfer, data: []const u8) anyerror!void,
    // invoked on successful completion
    done_callback: *const fn (ctx: *anyopaque) anyerror!void,
    // invoked on failure or abort
    error_callback: *const fn (ctx: *anyopaque, err: anyerror) void,
};
|
||||||
|
|
||||||
|
// A Request bound to the state needed to process it through libcurl.
pub const Transfer = struct {
    id: usize = 0,
    req: Request,
    uri: std.Uri, // used for setting/getting the cookie
    ctx: *anyopaque, // copied from req.ctx to make it easier for callback handlers
    client: *Client,
    _notified_fail: bool = false,

    // We'll store the response header here
    response_header: ?Header = null,

    // the pooled easy handle servicing this transfer; null while queued
    _handle: ?*Handle = null,

    // true while curl follows a 3xx; header/body callbacks are suppressed
    _redirecting: bool = false,

    fn deinit(self: *Transfer) void {
        self.req.headers.deinit();
        if (self._handle) |handle| {
            self.client.handles.release(handle);
        }
        self.client.transfer_pool.destroy(self);
    }

    pub fn format(self: *const Transfer, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void {
        const req = self.req;
        return writer.print("{s} {s}", .{ @tagName(req.method), req.url });
    }

    pub fn setBody(self: *Transfer, body: []const u8) !void {
        // Fix: Transfer has no `handle` field — the easy lives on the
        // assigned pool handle's connection (cf. responseHeaderIterator).
        const easy = self._handle.?.conn.easy;
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
    }

    pub fn addHeader(self: *Transfer, value: [:0]const u8) !void {
        // Fix: `_request_header_list` is not a field of Transfer. Append to
        // the request's header list, which makeRequest hands to curl via
        // CURLOPT_HTTPHEADER. (Assumes Headers.add takes a null-terminated
        // pointer, as in RequestCookie.headersForRequest — TODO confirm.)
        try self.req.headers.add(value.ptr);
    }

    pub fn updateURL(self: *Transfer, url: [:0]const u8) !void {
        // for cookies
        self.uri = try std.Uri.parse(url);

        // for the request itself
        self.req.url = url;
    }

    pub fn abort(self: *Transfer) void {
        self.client.requestFailed(self, error.Abort);
        if (self._handle != null) {
            self.client.endTransfer(self);
        }
        self.deinit();
    }

    fn headerCallback(buffer: [*]const u8, header_count: usize, buf_len: usize, data: *anyopaque) callconv(.c) usize {
        // libcurl should only ever emit 1 header at a time
        std.debug.assert(header_count == 1);

        const easy: *c.CURL = @alignCast(@ptrCast(data));
        var transfer = fromEasy(easy) catch |err| {
            log.err(.http, "get private info", .{ .err = err, .source = "header callback" });
            return 0;
        };

        std.debug.assert(std.mem.endsWith(u8, buffer[0..buf_len], "\r\n"));

        // strip the trailing \r\n
        const header = buffer[0 .. buf_len - 2];

        if (transfer.response_header == null) {
            // Expecting the status line: "HTTP/<version> <3-digit status>...".
            // Locating the space generalizes over HTTP/1.0, 1.1, 2 and 3
            // (the original hard-coded offsets only handled 1.x and 2, and
            // rejected short lines like "HTTP/2 200" outright).
            const space = if (std.mem.startsWith(u8, header, "HTTP/"))
                std.mem.indexOfScalarPos(u8, header, 5, ' ')
            else
                null;

            const version_end = space orelse {
                if (transfer._redirecting) {
                    // tolerate (and skip) non-status lines mid-redirect
                    return buf_len;
                }
                log.debug(.http, "invalid response line", .{ .line = header });
                return 0;
            };

            const status_start = version_end + 1;
            const status_end = status_start + 3;
            if (status_end > header.len) {
                log.debug(.http, "invalid status code", .{ .line = header });
                return 0;
            }
            const status = std.fmt.parseInt(u16, header[status_start..status_end], 10) catch {
                log.debug(.http, "invalid status code", .{ .line = header });
                return 0;
            };

            if (status >= 300 and status <= 399) {
                // curl follows the redirect itself; suppress callbacks
                // until the final response's status line arrives
                transfer._redirecting = true;
                return buf_len;
            }
            transfer._redirecting = false;

            // record the final (post-redirect) URL
            var url: [*c]u8 = undefined;
            errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_EFFECTIVE_URL, &url)) catch |err| {
                log.err(.http, "failed to get URL", .{ .err = err });
                return 0;
            };

            transfer.response_header = .{
                .url = url,
                .status = status,
            };
            return buf_len;
        }

        var hdr = &transfer.response_header.?;

        if (hdr._content_type_len == 0) {
            const CONTENT_TYPE_LEN = "content-type:".len;
            if (header.len > CONTENT_TYPE_LEN) {
                if (std.ascii.eqlIgnoreCase(header[0..CONTENT_TYPE_LEN], "content-type:")) {
                    const value = std.mem.trimLeft(u8, header[CONTENT_TYPE_LEN..], " ");
                    // truncate to the fixed-size buffer if necessary
                    const len = @min(value.len, hdr._content_type.len);
                    hdr._content_type_len = len;
                    @memcpy(hdr._content_type[0..len], value[0..len]);
                }
            }
        }

        {
            const SET_COOKIE_LEN = "set-cookie:".len;
            if (header.len > SET_COOKIE_LEN) {
                if (std.ascii.eqlIgnoreCase(header[0..SET_COOKIE_LEN], "set-cookie:")) {
                    const value = std.mem.trimLeft(u8, header[SET_COOKIE_LEN..], " ");
                    transfer.req.cookie_jar.populateFromResponse(&transfer.uri, value) catch |err| {
                        log.err(.http, "set cookie", .{ .err = err, .req = transfer });
                    };
                }
            }
        }

        if (buf_len == 2) {
            // blank line: the header section is complete
            if (getResponseHeader(easy, "content-type")) |value| {
                const len = @min(value.len, hdr._content_type.len);
                hdr._content_type_len = len;
                @memcpy(hdr._content_type[0..len], value[0..len]);
            }

            transfer.req.header_done_callback(transfer) catch |err| {
                log.err(.http, "header_done_callback", .{ .err = err, .req = transfer });
                // returning < buf_len terminates the request
                return 0;
            };

            if (transfer.client.notification) |notification| {
                notification.dispatch(.http_headers_done, &.{
                    .transfer = transfer,
                });
            }
        } else {
            if (transfer.req.header_callback) |cb| {
                cb(transfer, header) catch |err| {
                    log.err(.http, "header_callback", .{ .err = err, .req = transfer });
                    return 0;
                };
            }
        }
        return buf_len;
    }

    fn dataCallback(buffer: [*]const u8, chunk_count: usize, chunk_len: usize, data: *anyopaque) callconv(.c) usize {
        // libcurl should only ever emit 1 chunk at a time
        std.debug.assert(chunk_count == 1);

        const easy: *c.CURL = @alignCast(@ptrCast(data));
        var transfer = fromEasy(easy) catch |err| {
            log.err(.http, "get private info", .{ .err = err, .source = "body callback" });
            return c.CURL_WRITEFUNC_ERROR;
        };

        if (transfer._redirecting) {
            // body of an intermediate 3xx response; discard it
            return chunk_len;
        }

        transfer.req.data_callback(transfer, buffer[0..chunk_len]) catch |err| {
            log.err(.http, "data_callback", .{ .err = err, .req = transfer });
            return c.CURL_WRITEFUNC_ERROR;
        };
        return chunk_len;
    }

    // we assume that the caller is smart and only calling this after being
    // told that the header was ready.
    pub fn responseHeaderIterator(self: *Transfer) HeaderIterator {
        return .{ .easy = self._handle.?.conn.easy };
    }

    // pub because Page.printWaitAnalysis uses it
    pub fn fromEasy(easy: *c.CURL) !*Transfer {
        var private: *anyopaque = undefined;
        try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_PRIVATE, &private));
        return @alignCast(@ptrCast(private));
    }
};
|
||||||
|
|
||||||
|
// Minimal snapshot of a response: its status, final URL and content type,
// captured while headers stream in.
pub const Header = struct {
    status: u16,
    url: [*c]const u8,
    _content_type_len: usize = 0,
    _content_type: [64]u8 = undefined,

    // Returns the captured content-type value, or null when none was seen.
    pub fn contentType(self: *Header) ?[]u8 {
        return if (self._content_type_len == 0)
            null
        else
            self._content_type[0..self._content_type_len];
    }
};
|
||||||
|
|
||||||
|
// Iterates over the response headers libcurl has buffered on an easy handle.
const HeaderIterator = struct {
    easy: *c.CURL,
    prev: ?*c.curl_header = null,

    pub fn next(self: *HeaderIterator) ?struct { name: []const u8, value: []const u8 } {
        const raw = c.curl_easy_nextheader(self.easy, c.CURLH_HEADER, -1, self.prev) orelse return null;
        // curl resumes iteration from the previously returned entry
        self.prev = raw;
        return .{
            .name = std.mem.span(raw.*.name),
            .value = std.mem.span(raw.*.value),
        };
    }
};
|
||||||
|
|
||||||
|
// Looks up a single response header by (case-insensitive) name. Returns
// null when the header is absent; logs and returns null on any other
// lookup failure.
fn getResponseHeader(easy: *c.CURL, name: [:0]const u8) ?[]const u8 {
    var hdr: [*c]c.curl_header = null;
    // Fix: curl_easy_header returns a CURLHcode, not a CURLcode. The old
    // comparisons against CURLE_OK/CURLE_FAILED_INIT only worked because
    // those constants happen to share the values of CURLHE_OK (0) and
    // CURLHE_MISSING (2).
    const result = c.curl_easy_header(easy, name, 0, c.CURLH_HEADER, -1, &hdr);
    if (result == c.CURLHE_OK) {
        return std.mem.span(hdr.*.value);
    }

    if (result == c.CURLHE_MISSING) {
        // the header simply wasn't present in the response
        return null;
    }

    // errors.fromCode maps CURLcodes, so log the raw CURLHcode instead
    log.err(.http, "get response header", .{
        .name = name,
        .code = result,
    });
    return null;
}
|
||||||
378
src/http/Http.zig
Normal file
378
src/http/Http.zig
Normal file
@@ -0,0 +1,378 @@
|
|||||||
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
|
//
|
||||||
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as
|
||||||
|
// published by the Free Software Foundation, either version 3 of the
|
||||||
|
// License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
|
||||||
|
pub const c = @cImport({
|
||||||
|
@cInclude("curl/curl.h");
|
||||||
|
});
|
||||||
|
|
||||||
|
const Client = @import("Client.zig");
|
||||||
|
const errors = @import("errors.zig");
|
||||||
|
|
||||||
|
const Allocator = std.mem.Allocator;
|
||||||
|
const ArenaAllocator = std.heap.ArenaAllocator;
|
||||||
|
|
||||||
|
pub const ENABLE_DEBUG = false;
|
||||||
|
|
||||||
|
// Client.zig does the bulk of the work and is loosely tied to a browser Page.
|
||||||
|
// But we still need something above Client.zig for the "utility" http stuff
|
||||||
|
// we need to do, like telemetry. The most important thing we want from this
|
||||||
|
// is to be able to share the ca_blob, which can be quite large - loading it
|
||||||
|
// once for all http connections is a win.
|
||||||
|
const Http = @This();

// Options as adjusted by init() (e.g. proxy_bearer_token is replaced by the
// fully-rendered "Proxy-Authorization: Bearer ..." header).
opts: Opts,
// Shared client; Client.zig does the bulk of the transfer work.
client: *Client,
// Re-encoded system CA bundle; null when TLS host verification is disabled.
// Backed by `arena` (curl does not copy the blob data).
ca_blob: ?c.curl_blob,
// Owns the ca_blob PEM text and the rendered proxy header.
arena: ArenaAllocator,
|
||||||
|
|
||||||
|
/// Initializes libcurl's global state, optionally loads the system CA
/// bundle (only when host verification is on), and creates the shared
/// Client. Must be paired with deinit().
pub fn init(allocator: Allocator, opts: Opts) !Http {
    try errorCheck(c.curl_global_init(c.CURL_GLOBAL_SSL));
    errdefer c.curl_global_cleanup();

    if (comptime ENABLE_DEBUG) {
        std.debug.print("curl version: {s}\n\n", .{c.curl_version()});
    }

    var arena = ArenaAllocator.init(allocator);
    errdefer arena.deinit();
    const arena_allocator = arena.allocator();

    // Render the complete Proxy-Authorization header once, up front;
    // connections just append it to their header list.
    var adjusted_opts = opts;
    if (opts.proxy_bearer_token) |token| {
        adjusted_opts.proxy_bearer_token = try std.fmt.allocPrintZ(
            arena_allocator,
            "Proxy-Authorization: Bearer {s}",
            .{token},
        );
    }

    // Only pay for loading the (potentially large) CA bundle when we are
    // actually going to verify certificates.
    const ca_blob: ?c.curl_blob = if (opts.tls_verify_host)
        try loadCerts(allocator, arena_allocator)
    else
        null;

    const client = try Client.init(allocator, ca_blob, adjusted_opts);
    errdefer client.deinit();

    return .{
        .opts = adjusted_opts,
        .client = client,
        .ca_blob = ca_blob,
        .arena = arena,
    };
}
|
||||||
|
|
||||||
|
/// Releases everything created by init(). The order matters: the client
/// must be torn down while libcurl is still globally initialized, and the
/// arena (which backs ca_blob and the rendered proxy header) is freed last.
pub fn deinit(self: *Http) void {
    self.client.deinit();
    c.curl_global_cleanup();
    self.arena.deinit();
}
|
||||||
|
|
||||||
|
/// Creates a standalone blocking connection that shares this instance's CA
/// bundle and options. Caller owns the returned Connection and must call
/// its deinit().
pub fn newConnection(self: *Http) !Connection {
    return Connection.init(self.ca_blob, &self.opts);
}
|
||||||
|
|
||||||
|
// A single blocking easy-handle wrapper, used for "utility" requests
// (e.g. telemetry) outside the non-blocking Client.
pub const Connection = struct {
    easy: *c.CURL,
    opts: Connection.Opts,

    const Opts = struct {
        proxy_bearer_token: ?[:0]const u8,
    };

    // pointer to opts is not stable, don't hold a reference to it!
    pub fn init(ca_blob_: ?c.curl_blob, opts: *const Http.Opts) !Connection {
        const easy = c.curl_easy_init() orelse return error.FailedToInitializeEasy;
        errdefer _ = c.curl_easy_cleanup(easy);

        // timeouts
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_TIMEOUT_MS, @as(c_long, @intCast(opts.timeout_ms))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CONNECTTIMEOUT_MS, @as(c_long, @intCast(opts.connect_timeout_ms))));

        // redirect behavior
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_MAXREDIRS, @as(c_long, @intCast(opts.max_redirects))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_FOLLOWLOCATION, @as(c_long, 2)));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_REDIR_PROTOCOLS_STR, "HTTP,HTTPS")); // remove FTP and FTPS from the default

        // proxy
        if (opts.http_proxy) |proxy| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY, proxy.ptr));
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SUPPRESS_CONNECT_HEADERS, @as(c_long, 1)));
        }

        // tls
        if (ca_blob_) |ca_blob| {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CAINFO_BLOB, ca_blob));
            if (opts.http_proxy != null) {
                // Note, this can be different for the proxy and for the main
                // request. Might be something worth exposing as command
                // line arguments at some point.
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_CAINFO_BLOB, ca_blob));
            }
        } else {
            std.debug.assert(opts.tls_verify_host == false);

            // Verify peer checks that the cert is signed by a CA, verify host makes sure the
            // cert contains the server name.
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYHOST, @as(c_long, 0)));
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_SSL_VERIFYPEER, @as(c_long, 0)));

            if (opts.http_proxy != null) {
                // Note, this can be different for the proxy and for the main
                // request. Might be something worth exposing as command
                // line arguments at some point.
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYHOST, @as(c_long, 0)));
                try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_PROXY_SSL_VERIFYPEER, @as(c_long, 0)));
            }
        }

        // compression, don't remove this. CloudFront will send gzip content
        // even if we don't support it, and then it won't be decompressed.
        // empty string means: use whatever's available
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_ACCEPT_ENCODING, ""));

        // debug
        if (comptime Http.ENABLE_DEBUG) {
            try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_VERBOSE, @as(c_long, 1)));
        }

        return .{
            .easy = easy,
            .opts = .{
                .proxy_bearer_token = opts.proxy_bearer_token,
            },
        };
    }

    pub fn deinit(self: *const Connection) void {
        c.curl_easy_cleanup(self.easy);
    }

    pub fn setURL(self: *const Connection, url: [:0]const u8) !void {
        try errorCheck(c.curl_easy_setopt(self.easy, c.CURLOPT_URL, url.ptr));
    }

    pub fn setMethod(self: *const Connection, method: Method) !void {
        const easy = self.easy;
        switch (method) {
            .GET => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPGET, @as(c_long, 1))),
            // FIX: CURLOPT_HTTPPOST expects a `struct curl_httppost *`
            // (the deprecated multipart form API); CURLOPT_POST is the
            // documented long flag for a plain POST.
            .POST => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POST, @as(c_long, 1))),
            // FIX: HTTP method tokens are case-sensitive; curl sends the
            // CUSTOMREQUEST string verbatim, so use the uppercase forms.
            .PUT => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "PUT")),
            .DELETE => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "DELETE")),
            // FIX: per the CURLOPT_CUSTOMREQUEST docs, HEAD must be done via
            // CURLOPT_NOBODY — a custom "HEAD" string makes curl wait for a
            // response body that never arrives.
            .HEAD => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_NOBODY, @as(c_long, 1))),
            .OPTIONS => try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_CUSTOMREQUEST, "OPTIONS")),
        }
    }

    pub fn setBody(self: *const Connection, body: []const u8) !void {
        const easy = self.easy;
        // CURLOPT_POSTFIELDS does not copy `body`; the caller must keep it
        // alive until the transfer completes.
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDSIZE, @as(c_long, @intCast(body.len))));
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_POSTFIELDS, body.ptr));
    }

    // These are headers that may not be sent to the users for interception.
    pub fn secretHeaders(self: *const Connection, headers: *Headers) !void {
        if (self.opts.proxy_bearer_token) |hdr| {
            try headers.add(hdr);
        }
    }

    /// Performs the request synchronously and returns the HTTP status code,
    /// or 0 when curl reports a value outside the u16 range.
    pub fn request(self: *const Connection) !u16 {
        const easy = self.easy;

        var header_list = try Headers.init();
        defer header_list.deinit();
        try self.secretHeaders(&header_list);
        try errorCheck(c.curl_easy_setopt(easy, c.CURLOPT_HTTPHEADER, header_list.headers));

        try errorCheck(c.curl_easy_perform(easy));
        var http_code: c_long = undefined;
        try errorCheck(c.curl_easy_getinfo(easy, c.CURLINFO_RESPONSE_CODE, &http_code));
        if (http_code < 0 or http_code > std.math.maxInt(u16)) {
            return 0;
        }
        return @intCast(http_code);
    }
};
|
||||||
|
|
||||||
|
// Thin wrapper around a curl_slist of "Name: value" header lines.
pub const Headers = struct {
    headers: *c.curl_slist,

    /// Creates a header list pre-seeded with our User-Agent.
    pub fn init() !Headers {
        const list = c.curl_slist_append(null, "User-Agent: Lightpanda/1.0");
        if (list == null) {
            return error.OutOfMemory;
        }
        return .{ .headers = list };
    }

    pub fn deinit(self: *const Headers) void {
        c.curl_slist_free_all(self.headers);
    }

    /// Appends a "Name: value" line to the list.
    pub fn add(self: *Headers, header: [*c]const u8) !void {
        // Copies the value
        const appended = c.curl_slist_append(self.headers, header);
        if (appended == null) {
            return error.OutOfMemory;
        }
        self.headers = appended;
    }

    /// Materializes the list into a name -> value map. The returned slices
    /// point into the curl-owned list nodes; they are only valid while
    /// this Headers instance is alive.
    pub fn asHashMap(self: *const Headers, allocator: Allocator) !std.StringArrayHashMapUnmanaged([]const u8) {
        var map: std.StringArrayHashMapUnmanaged([]const u8) = .empty;
        try map.ensureTotalCapacity(allocator, self.count());

        var node: [*c]c.curl_slist = self.headers;
        while (node) |n| : (node = n.*.next) {
            const line = std.mem.span(@as([*:0]const u8, @ptrCast(n.*.data)));
            const parsed = parseHeader(line) orelse return error.InvalidHeader;
            map.putAssumeCapacity(parsed.name, parsed.value);
        }
        return map;
    }

    /// Splits "Name: value" at the first colon, trimming spaces and tabs
    /// from both parts. Returns null when no colon is present.
    pub fn parseHeader(header_str: []const u8) ?std.http.Header {
        const sep = std.mem.indexOfScalar(u8, header_str, ':') orelse return null;
        return .{
            .name = std.mem.trim(u8, header_str[0..sep], " \t"),
            .value = std.mem.trim(u8, header_str[sep + 1 ..], " \t"),
        };
    }

    /// Number of header lines currently in the list.
    pub fn count(self: *const Headers) usize {
        var total: usize = 0;
        var node: [*c]c.curl_slist = self.headers;
        while (node) |n| : (node = n.*.next) {
            total += 1;
        }
        return total;
    }
};
|
||||||
|
|
||||||
|
/// Converts a CURLcode into a Zig error; CURLE_OK is success.
pub fn errorCheck(code: c.CURLcode) errors.Error!void {
    if (code != c.CURLE_OK) {
        return errors.fromCode(code);
    }
}
|
||||||
|
|
||||||
|
/// Converts a multi-interface CURLMcode into a Zig error. Both CURLM_OK
/// and CURLM_CALL_MULTI_PERFORM are treated as success.
pub fn errorMCheck(code: c.CURLMcode) errors.Multi!void {
    switch (code) {
        c.CURLM_OK => {},
        // should we call client.perform() here?
        // or just wait until the next time we naturally call it?
        c.CURLM_CALL_MULTI_PERFORM => {},
        else => return errors.fromMCode(code),
    }
}
|
||||||
|
|
||||||
|
/// Configuration shared by the Client and by utility Connections.
pub const Opts = struct {
    // Total per-request timeout, in milliseconds.
    timeout_ms: u31,
    // Max open connections per host (consumed by Client.zig — TODO confirm).
    max_host_open: u8,
    // Max in-flight transfers (consumed by Client.zig — TODO confirm).
    max_concurrent: u8,
    // Connection-establishment timeout, in milliseconds.
    connect_timeout_ms: u31,
    max_redirects: u8 = 10,
    // When false, TLS peer/host verification is disabled entirely.
    tls_verify_host: bool = true,
    http_proxy: ?[:0]const u8 = null,
    // After Http.init this holds the fully-rendered
    // "Proxy-Authorization: Bearer <token>" header line, not the bare token.
    proxy_bearer_token: ?[:0]const u8 = null,
};
|
||||||
|
|
||||||
|
/// HTTP request methods accepted by Connection.setMethod.
pub const Method = enum {
    GET,
    PUT,
    POST,
    DELETE,
    HEAD,
    OPTIONS,
};
|
||||||
|
|
||||||
|
// TODO: on BSD / Linux, we could just read the PEM file directly.
|
||||||
|
// This whole rescan + decode is really just needed for MacOS. On Linux
|
||||||
|
// bundle.rescan does find the .pem file(s) which could be in a few different
|
||||||
|
// places, so it's still useful, just not efficient.
|
||||||
|
// Loads the system root certificates and re-encodes them into one in-memory
// PEM blob for CURLOPT_CAINFO_BLOB. The PEM text is allocated from `arena`
// and, because .flags = 0 (i.e. not CURL_BLOB_COPY), must outlive every
// handle that uses the returned blob.
fn loadCerts(allocator: Allocator, arena: Allocator) !c.curl_blob {
    var bundle: std.crypto.Certificate.Bundle = .{};
    try bundle.rescan(allocator);
    defer bundle.deinit(allocator);

    // Each map value is a byte offset into `bytes` where one DER cert starts.
    var it = bundle.map.valueIterator();
    const bytes = bundle.bytes.items;

    const encoder = std.base64.standard.Encoder;
    var arr: std.ArrayListUnmanaged(u8) = .empty;

    // Capacity estimate: base64 payload + BEGIN/END lines per certificate +
    // one newline per 64 encoded characters. Checked by the assert below.
    const encoded_size = encoder.calcSize(bytes.len);
    const buffer_size = encoded_size +
        (bundle.map.count() * 75) + // start / end per certificate + extra, just in case
        (encoded_size / 64) // newline per 64 characters
    ;
    try arr.ensureTotalCapacity(arena, buffer_size);
    var writer = arr.writer(arena);

    while (it.next()) |index| {
        // Parse just enough DER to know where this certificate ends.
        const cert = try std.crypto.Certificate.der.Element.parse(bytes, index.*);

        try writer.writeAll("-----BEGIN CERTIFICATE-----\n");
        // LineWriter wraps the base64 output at 64 columns, per PEM.
        var line_writer = LineWriter{ .inner = writer };
        try encoder.encodeWriter(&line_writer, bytes[index.*..cert.slice.end]);
        try writer.writeAll("\n-----END CERTIFICATE-----\n");
    }

    // Final encoding should not be larger than our initial size estimate
    std.debug.assert(buffer_size > arr.items.len);

    return .{
        .len = arr.items.len,
        .data = arr.items.ptr,
        .flags = 0,
    };
}
|
||||||
|
|
||||||
|
// Wraps lines @ 64 columns. A PEM is basically a base64 encoded DER (which is
// what Zig has), with lines wrapped at 64 characters and with a basic header
// and footer.
const LineWriter = struct {
    // Number of characters already emitted on the current (last) line.
    col: usize = 0,
    inner: std.ArrayListUnmanaged(u8).Writer,

    /// Writes `data`, inserting a newline whenever the current line reaches
    /// 64 characters. `col` tracks the partial last line so wrapping stays
    /// correct across multiple calls.
    pub fn writeAll(self: *LineWriter, data: []const u8) !void {
        var writer = self.inner;

        var col = self.col;
        // Room left on the current line.
        const len = 64 - col;

        var remain = data;
        if (remain.len > len) {
            // Fill and terminate the current (possibly partial) line first.
            col = 0;
            try writer.writeAll(data[0..len]);
            try writer.writeByte('\n');
            remain = data[len..];
        }

        while (remain.len > 64) {
            try writer.writeAll(remain[0..64]);
            try writer.writeByte('\n');
            // BUG FIX: advance within `remain`. The original re-sliced from
            // `data` (`remain = data[len..]`), which never shrank and made
            // this loop run forever for inputs spanning 2+ full lines.
            remain = remain[64..];
        }
        try writer.writeAll(remain);
        self.col = col + remain.len;
    }
};
|
||||||
3885
src/http/client.zig
3885
src/http/client.zig
File diff suppressed because it is too large
Load Diff
238
src/http/errors.zig
Normal file
238
src/http/errors.zig
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
// Copyright (C) 2023-2025 Lightpanda (Selecy SAS)
|
||||||
|
//
|
||||||
|
// Francis Bouvier <francis@lightpanda.io>
|
||||||
|
// Pierre Tachoire <pierre@lightpanda.io>
|
||||||
|
//
|
||||||
|
// This program is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Affero General Public License as
|
||||||
|
// published by the Free Software Foundation, either version 3 of the
|
||||||
|
// License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This program is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Affero General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Affero General Public License
|
||||||
|
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const c = @import("Http.zig").c;
|
||||||
|
|
||||||
|
/// Zig error set mirroring libcurl's CURLcode values, one error per code
/// (see fromCode below). `Unknown` is the catch-all for unmapped codes.
pub const Error = error{
    UnsupportedProtocol,
    FailedInit,
    UrlMalformat,
    NotBuiltIn,
    CouldntResolveProxy,
    CouldntResolveHost,
    CouldntConnect,
    WeirdServerReply,
    RemoteAccessDenied,
    FtpAcceptFailed,
    FtpWeirdPassReply,
    FtpAcceptTimeout,
    FtpWeirdPasvReply,
    FtpWeird227Format,
    FtpCantGetHost,
    Http2,
    FtpCouldntSetType,
    PartialFile,
    FtpCouldntRetrFile,
    QuoteError,
    HttpReturnedError,
    WriteError,
    UploadFailed,
    ReadError,
    OutOfMemory,
    OperationTimedout,
    FtpPortFailed,
    FtpCouldntUseRest,
    RangeError,
    SslConnectError,
    BadDownloadResume,
    FileCouldntReadFile,
    LdapCannotBind,
    LdapSearchFailed,
    AbortedByCallback,
    BadFunctionArgument,
    InterfaceFailed,
    TooManyRedirects,
    UnknownOption,
    SetoptOptionSyntax,
    GotNothing,
    SslEngineNotfound,
    SslEngineSetfailed,
    SendError,
    RecvError,
    SslCertproblem,
    SslCipher,
    PeerFailedVerification,
    BadContentEncoding,
    FilesizeExceeded,
    UseSslFailed,
    SendFailRewind,
    SslEngineInitfailed,
    LoginDenied,
    TftpNotfound,
    TftpPerm,
    RemoteDiskFull,
    TftpIllegal,
    TftpUnknownid,
    RemoteFileExists,
    TftpNosuchuser,
    SslCacertBadfile,
    RemoteFileNotFound,
    Ssh,
    SslShutdownFailed,
    Again,
    SslCrlBadfile,
    SslIssuerError,
    FtpPretFailed,
    RtspCseqError,
    RtspSessionError,
    FtpBadFileList,
    ChunkFailed,
    NoConnectionAvailable,
    SslPinnedpubkeynotmatch,
    SslInvalidcertstatus,
    Http2Stream,
    RecursiveApiCall,
    AuthError,
    Http3,
    QuicConnectError,
    Proxy,
    SslClientcert,
    UnrecoverablePoll,
    TooLarge,
    Unknown,
};
|
||||||
|
|
||||||
|
/// Maps a libcurl CURLcode onto the Error set. The caller must already have
/// filtered out CURLE_OK (asserted); codes without an arm map to Unknown.
pub fn fromCode(code: c.CURLcode) Error {
    std.debug.assert(code != c.CURLE_OK);

    return switch (code) {
        c.CURLE_UNSUPPORTED_PROTOCOL => Error.UnsupportedProtocol,
        c.CURLE_FAILED_INIT => Error.FailedInit,
        c.CURLE_URL_MALFORMAT => Error.UrlMalformat,
        c.CURLE_NOT_BUILT_IN => Error.NotBuiltIn,
        c.CURLE_COULDNT_RESOLVE_PROXY => Error.CouldntResolveProxy,
        c.CURLE_COULDNT_RESOLVE_HOST => Error.CouldntResolveHost,
        c.CURLE_COULDNT_CONNECT => Error.CouldntConnect,
        c.CURLE_WEIRD_SERVER_REPLY => Error.WeirdServerReply,
        c.CURLE_REMOTE_ACCESS_DENIED => Error.RemoteAccessDenied,
        c.CURLE_FTP_ACCEPT_FAILED => Error.FtpAcceptFailed,
        c.CURLE_FTP_WEIRD_PASS_REPLY => Error.FtpWeirdPassReply,
        c.CURLE_FTP_ACCEPT_TIMEOUT => Error.FtpAcceptTimeout,
        c.CURLE_FTP_WEIRD_PASV_REPLY => Error.FtpWeirdPasvReply,
        c.CURLE_FTP_WEIRD_227_FORMAT => Error.FtpWeird227Format,
        c.CURLE_FTP_CANT_GET_HOST => Error.FtpCantGetHost,
        c.CURLE_HTTP2 => Error.Http2,
        c.CURLE_FTP_COULDNT_SET_TYPE => Error.FtpCouldntSetType,
        c.CURLE_PARTIAL_FILE => Error.PartialFile,
        c.CURLE_FTP_COULDNT_RETR_FILE => Error.FtpCouldntRetrFile,
        c.CURLE_QUOTE_ERROR => Error.QuoteError,
        c.CURLE_HTTP_RETURNED_ERROR => Error.HttpReturnedError,
        c.CURLE_WRITE_ERROR => Error.WriteError,
        c.CURLE_UPLOAD_FAILED => Error.UploadFailed,
        c.CURLE_READ_ERROR => Error.ReadError,
        c.CURLE_OUT_OF_MEMORY => Error.OutOfMemory,
        c.CURLE_OPERATION_TIMEDOUT => Error.OperationTimedout,
        c.CURLE_FTP_PORT_FAILED => Error.FtpPortFailed,
        c.CURLE_FTP_COULDNT_USE_REST => Error.FtpCouldntUseRest,
        c.CURLE_RANGE_ERROR => Error.RangeError,
        c.CURLE_SSL_CONNECT_ERROR => Error.SslConnectError,
        c.CURLE_BAD_DOWNLOAD_RESUME => Error.BadDownloadResume,
        c.CURLE_FILE_COULDNT_READ_FILE => Error.FileCouldntReadFile,
        c.CURLE_LDAP_CANNOT_BIND => Error.LdapCannotBind,
        c.CURLE_LDAP_SEARCH_FAILED => Error.LdapSearchFailed,
        c.CURLE_ABORTED_BY_CALLBACK => Error.AbortedByCallback,
        c.CURLE_BAD_FUNCTION_ARGUMENT => Error.BadFunctionArgument,
        c.CURLE_INTERFACE_FAILED => Error.InterfaceFailed,
        c.CURLE_TOO_MANY_REDIRECTS => Error.TooManyRedirects,
        c.CURLE_UNKNOWN_OPTION => Error.UnknownOption,
        c.CURLE_SETOPT_OPTION_SYNTAX => Error.SetoptOptionSyntax,
        c.CURLE_GOT_NOTHING => Error.GotNothing,
        c.CURLE_SSL_ENGINE_NOTFOUND => Error.SslEngineNotfound,
        c.CURLE_SSL_ENGINE_SETFAILED => Error.SslEngineSetfailed,
        c.CURLE_SEND_ERROR => Error.SendError,
        c.CURLE_RECV_ERROR => Error.RecvError,
        c.CURLE_SSL_CERTPROBLEM => Error.SslCertproblem,
        c.CURLE_SSL_CIPHER => Error.SslCipher,
        c.CURLE_PEER_FAILED_VERIFICATION => Error.PeerFailedVerification,
        c.CURLE_BAD_CONTENT_ENCODING => Error.BadContentEncoding,
        c.CURLE_FILESIZE_EXCEEDED => Error.FilesizeExceeded,
        c.CURLE_USE_SSL_FAILED => Error.UseSslFailed,
        c.CURLE_SEND_FAIL_REWIND => Error.SendFailRewind,
        c.CURLE_SSL_ENGINE_INITFAILED => Error.SslEngineInitfailed,
        c.CURLE_LOGIN_DENIED => Error.LoginDenied,
        c.CURLE_TFTP_NOTFOUND => Error.TftpNotfound,
        c.CURLE_TFTP_PERM => Error.TftpPerm,
        c.CURLE_REMOTE_DISK_FULL => Error.RemoteDiskFull,
        c.CURLE_TFTP_ILLEGAL => Error.TftpIllegal,
        c.CURLE_TFTP_UNKNOWNID => Error.TftpUnknownid,
        c.CURLE_REMOTE_FILE_EXISTS => Error.RemoteFileExists,
        c.CURLE_TFTP_NOSUCHUSER => Error.TftpNosuchuser,
        c.CURLE_SSL_CACERT_BADFILE => Error.SslCacertBadfile,
        c.CURLE_REMOTE_FILE_NOT_FOUND => Error.RemoteFileNotFound,
        c.CURLE_SSH => Error.Ssh,
        c.CURLE_SSL_SHUTDOWN_FAILED => Error.SslShutdownFailed,
        c.CURLE_AGAIN => Error.Again,
        c.CURLE_SSL_CRL_BADFILE => Error.SslCrlBadfile,
        c.CURLE_SSL_ISSUER_ERROR => Error.SslIssuerError,
        c.CURLE_FTP_PRET_FAILED => Error.FtpPretFailed,
        c.CURLE_RTSP_CSEQ_ERROR => Error.RtspCseqError,
        c.CURLE_RTSP_SESSION_ERROR => Error.RtspSessionError,
        c.CURLE_FTP_BAD_FILE_LIST => Error.FtpBadFileList,
        c.CURLE_CHUNK_FAILED => Error.ChunkFailed,
        c.CURLE_NO_CONNECTION_AVAILABLE => Error.NoConnectionAvailable,
        c.CURLE_SSL_PINNEDPUBKEYNOTMATCH => Error.SslPinnedpubkeynotmatch,
        c.CURLE_SSL_INVALIDCERTSTATUS => Error.SslInvalidcertstatus,
        c.CURLE_HTTP2_STREAM => Error.Http2Stream,
        c.CURLE_RECURSIVE_API_CALL => Error.RecursiveApiCall,
        c.CURLE_AUTH_ERROR => Error.AuthError,
        c.CURLE_HTTP3 => Error.Http3,
        c.CURLE_QUIC_CONNECT_ERROR => Error.QuicConnectError,
        c.CURLE_PROXY => Error.Proxy,
        c.CURLE_SSL_CLIENTCERT => Error.SslClientcert,
        c.CURLE_UNRECOVERABLE_POLL => Error.UnrecoverablePoll,
        c.CURLE_TOO_LARGE => Error.TooLarge,
        else => Error.Unknown,
    };
}
|
||||||
|
|
||||||
|
/// Zig error set mirroring libcurl's multi-interface CURLMcode values
/// (see fromMCode below). `Unknown` is the catch-all.
pub const Multi = error{
    BadHandle,
    BadEasyHandle,
    OutOfMemory,
    InternalError,
    BadSocket,
    UnknownOption,
    AddedAlready,
    RecursiveApiCall,
    WakeupFailure,
    BadFunctionArgument,
    AbortedByCallback,
    UnrecoverablePoll,
    Unknown,
};
|
||||||
|
|
||||||
|
/// Maps a multi-interface CURLMcode onto the Multi error set. The caller
/// must already have filtered out CURLM_OK (asserted).
pub fn fromMCode(code: c.CURLMcode) Multi {
    std.debug.assert(code != c.CURLM_OK);

    return switch (code) {
        c.CURLM_BAD_HANDLE => Multi.BadHandle,
        c.CURLM_BAD_EASY_HANDLE => Multi.BadEasyHandle,
        c.CURLM_OUT_OF_MEMORY => Multi.OutOfMemory,
        c.CURLM_INTERNAL_ERROR => Multi.InternalError,
        c.CURLM_BAD_SOCKET => Multi.BadSocket,
        c.CURLM_UNKNOWN_OPTION => Multi.UnknownOption,
        c.CURLM_ADDED_ALREADY => Multi.AddedAlready,
        c.CURLM_RECURSIVE_API_CALL => Multi.RecursiveApiCall,
        c.CURLM_WAKEUP_FAILURE => Multi.WakeupFailure,
        c.CURLM_BAD_FUNCTION_ARGUMENT => Multi.BadFunctionArgument,
        c.CURLM_ABORTED_BY_CALLBACK => Multi.AbortedByCallback,
        c.CURLM_UNRECOVERABLE_POLL => Multi.UnrecoverablePoll,
        else => Multi.Unknown,
    };
}
|
||||||
23
src/log.zig
23
src/log.zig
@@ -215,7 +215,8 @@ fn logPrettyPrefix(comptime scope: Scope, level: Level, comptime msg: []const u8
|
|||||||
if (@mod(padding, 2) == 1) {
|
if (@mod(padding, 2) == 1) {
|
||||||
try writer.writeByte(' ');
|
try writer.writeByte(' ');
|
||||||
}
|
}
|
||||||
try writer.print(" \x1b[0m[+{d}ms]", .{elapsed()});
|
const el = elapsed();
|
||||||
|
try writer.print(" \x1b[0m[+{d}{s}]", .{ el.time, el.unit });
|
||||||
try writer.writeByte('\n');
|
try writer.writeByte('\n');
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -320,22 +321,22 @@ fn timestamp() i64 {
|
|||||||
return std.time.milliTimestamp();
|
return std.time.milliTimestamp();
|
||||||
}
|
}
|
||||||
|
|
||||||
var last_log: i64 = 0;
|
var first_log: i64 = 0;
|
||||||
fn elapsed() i64 {
|
fn elapsed() struct { time: f64, unit: []const u8 } {
|
||||||
const now = timestamp();
|
const now = timestamp();
|
||||||
|
|
||||||
last_log_lock.lock();
|
last_log_lock.lock();
|
||||||
const previous = last_log;
|
defer last_log_lock.unlock();
|
||||||
last_log = now;
|
|
||||||
last_log_lock.unlock();
|
|
||||||
|
|
||||||
if (previous == 0) {
|
if (first_log == 0) {
|
||||||
return 0;
|
first_log = now;
|
||||||
}
|
}
|
||||||
if (previous > now) {
|
|
||||||
return 0;
|
const e = now - first_log;
|
||||||
|
if (e < 10_000) {
|
||||||
|
return .{ .time = @floatFromInt(e), .unit = "ms" };
|
||||||
}
|
}
|
||||||
return now - previous;
|
return .{ .time = @as(f64, @floatFromInt(e)) / @as(f64, 1000), .unit = "s" };
|
||||||
}
|
}
|
||||||
|
|
||||||
const testing = @import("testing.zig");
|
const testing = @import("testing.zig");
|
||||||
|
|||||||
361
src/main.zig
361
src/main.zig
@@ -23,7 +23,7 @@ const Allocator = std.mem.Allocator;
|
|||||||
const log = @import("log.zig");
|
const log = @import("log.zig");
|
||||||
const server = @import("server.zig");
|
const server = @import("server.zig");
|
||||||
const App = @import("app.zig").App;
|
const App = @import("app.zig").App;
|
||||||
const http = @import("http/client.zig");
|
const Http = @import("http/Http.zig");
|
||||||
const Platform = @import("runtime/js.zig").Platform;
|
const Platform = @import("runtime/js.zig").Platform;
|
||||||
const Browser = @import("browser/browser.zig").Browser;
|
const Browser = @import("browser/browser.zig").Browser;
|
||||||
|
|
||||||
@@ -85,9 +85,12 @@ fn run(alloc: Allocator) !void {
|
|||||||
.run_mode = args.mode,
|
.run_mode = args.mode,
|
||||||
.platform = &platform,
|
.platform = &platform,
|
||||||
.http_proxy = args.httpProxy(),
|
.http_proxy = args.httpProxy(),
|
||||||
.proxy_type = args.proxyType(),
|
.proxy_bearer_token = args.proxyBearerToken(),
|
||||||
.proxy_auth = args.proxyAuth(),
|
|
||||||
.tls_verify_host = args.tlsVerifyHost(),
|
.tls_verify_host = args.tlsVerifyHost(),
|
||||||
|
.http_timeout_ms = args.httpTimeout(),
|
||||||
|
.http_connect_timeout_ms = args.httpConnectTiemout(),
|
||||||
|
.http_max_host_open = args.httpMaxHostOpen(),
|
||||||
|
.http_max_concurrent = args.httpMaxConcurrent(),
|
||||||
});
|
});
|
||||||
defer app.deinit();
|
defer app.deinit();
|
||||||
app.telemetry.record(.{ .run = {} });
|
app.telemetry.record(.{ .run = {} });
|
||||||
@@ -107,8 +110,8 @@ fn run(alloc: Allocator) !void {
|
|||||||
};
|
};
|
||||||
},
|
},
|
||||||
.fetch => |opts| {
|
.fetch => |opts| {
|
||||||
log.debug(.app, "startup", .{ .mode = "fetch", .dump = opts.dump, .url = opts.url });
|
const url = opts.url;
|
||||||
const url = try @import("url.zig").URL.parse(opts.url, null);
|
log.debug(.app, "startup", .{ .mode = "fetch", .dump = opts.dump, .url = url });
|
||||||
|
|
||||||
// browser
|
// browser
|
||||||
var browser = try Browser.init(app);
|
var browser = try Browser.init(app);
|
||||||
@@ -130,7 +133,7 @@ fn run(alloc: Allocator) !void {
|
|||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
try page.wait(std.time.ns_per_s * 3);
|
session.wait(5); // 5 seconds
|
||||||
|
|
||||||
// dump
|
// dump
|
||||||
if (opts.dump) {
|
if (opts.dump) {
|
||||||
@@ -156,23 +159,44 @@ const Command = struct {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn httpProxy(self: *const Command) ?std.Uri {
|
fn httpProxy(self: *const Command) ?[:0]const u8 {
|
||||||
return switch (self.mode) {
|
return switch (self.mode) {
|
||||||
inline .serve, .fetch => |opts| opts.common.http_proxy,
|
inline .serve, .fetch => |opts| opts.common.http_proxy,
|
||||||
else => unreachable,
|
else => unreachable,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn proxyType(self: *const Command) ?http.ProxyType {
|
fn proxyBearerToken(self: *const Command) ?[:0]const u8 {
|
||||||
return switch (self.mode) {
|
return switch (self.mode) {
|
||||||
inline .serve, .fetch => |opts| opts.common.proxy_type,
|
inline .serve, .fetch => |opts| opts.common.proxy_bearer_token,
|
||||||
else => unreachable,
|
else => unreachable,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn proxyAuth(self: *const Command) ?http.ProxyAuth {
|
fn httpMaxConcurrent(self: *const Command) ?u8 {
|
||||||
return switch (self.mode) {
|
return switch (self.mode) {
|
||||||
inline .serve, .fetch => |opts| opts.common.proxy_auth,
|
inline .serve, .fetch => |opts| opts.common.http_max_concurrent,
|
||||||
|
else => unreachable,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
fn httpMaxHostOpen(self: *const Command) ?u8 {
|
||||||
|
return switch (self.mode) {
|
||||||
|
inline .serve, .fetch => |opts| opts.common.http_max_host_open,
|
||||||
|
else => unreachable,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
fn httpConnectTiemout(self: *const Command) ?u31 {
|
||||||
|
return switch (self.mode) {
|
||||||
|
inline .serve, .fetch => |opts| opts.common.http_connect_timeout,
|
||||||
|
else => unreachable,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
fn httpTimeout(self: *const Command) ?u31 {
|
||||||
|
return switch (self.mode) {
|
||||||
|
inline .serve, .fetch => |opts| opts.common.http_timeout,
|
||||||
else => unreachable,
|
else => unreachable,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -221,9 +245,12 @@ const Command = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const Common = struct {
|
const Common = struct {
|
||||||
http_proxy: ?std.Uri = null,
|
proxy_bearer_token: ?[:0]const u8 = null,
|
||||||
proxy_type: ?http.ProxyType = null,
|
http_proxy: ?[:0]const u8 = null,
|
||||||
proxy_auth: ?http.ProxyAuth = null,
|
http_max_concurrent: ?u8 = null,
|
||||||
|
http_max_host_open: ?u8 = null,
|
||||||
|
http_timeout: ?u31 = null,
|
||||||
|
http_connect_timeout: ?u31 = null,
|
||||||
tls_verify_host: bool = true,
|
tls_verify_host: bool = true,
|
||||||
log_level: ?log.Level = null,
|
log_level: ?log.Level = null,
|
||||||
log_format: ?log.Format = null,
|
log_format: ?log.Format = null,
|
||||||
@@ -231,31 +258,39 @@ const Command = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
fn printUsageAndExit(self: *const Command, success: bool) void {
|
fn printUsageAndExit(self: *const Command, success: bool) void {
|
||||||
|
// MAX_HELP_LEN|
|
||||||
const common_options =
|
const common_options =
|
||||||
\\
|
\\
|
||||||
\\--insecure_disable_tls_host_verification
|
\\--insecure_disable_tls_host_verification
|
||||||
\\ Disables host verification on all HTTP requests.
|
\\ Disables host verification on all HTTP requests. This is an
|
||||||
\\ This is an advanced option which should only be
|
\\ advanced option which should only be set if you understand
|
||||||
\\ set if you understand and accept the risk of
|
\\ and accept the risk of disabling host verification.
|
||||||
\\ disabling host verification.
|
|
||||||
\\
|
\\
|
||||||
\\--http_proxy The HTTP proxy to use for all HTTP requests.
|
\\--http_proxy The HTTP proxy to use for all HTTP requests.
|
||||||
|
\\ A username:password can be included for basic authentication.
|
||||||
\\ Defaults to none.
|
\\ Defaults to none.
|
||||||
\\
|
\\
|
||||||
\\--proxy_type The type of proxy: connect, forward.
|
|
||||||
\\ 'connect' creates a tunnel through the proxy via
|
|
||||||
\\ and initial CONNECT request.
|
|
||||||
\\ 'forward' sends the full URL in the request target
|
|
||||||
\\ and expects the proxy to MITM the request.
|
|
||||||
\\ Defaults to connect when --http_proxy is set.
|
|
||||||
\\
|
|
||||||
\\--proxy_bearer_token
|
\\--proxy_bearer_token
|
||||||
\\ The token to send for bearer authentication with the proxy
|
\\ The <token> to send for bearer authentication with the proxy
|
||||||
\\ Proxy-Authorization: Bearer <token>
|
\\ Proxy-Authorization: Bearer <token>
|
||||||
\\
|
\\
|
||||||
\\--proxy_basic_auth
|
\\--http_max_concurrent
|
||||||
\\ The user:password to send for basic authentication with the proxy
|
\\ The maximum number of concurrent HTTP requests.
|
||||||
\\ Proxy-Authorization: Basic <base64(user:password)>
|
\\ Defaults to 10.
|
||||||
|
\\
|
||||||
|
\\--http_max_host_open
|
||||||
|
\\ The maximum number of open connection to a given host:port.
|
||||||
|
\\ Defaults to 4.
|
||||||
|
\\
|
||||||
|
\\--http_connect_timeout
|
||||||
|
\\ The time, in milliseconds, for establishing an HTTP connection
|
||||||
|
\\ before timing out. 0 means it never times out.
|
||||||
|
\\ Defaults to 0.
|
||||||
|
\\
|
||||||
|
\\--http_timeout
|
||||||
|
\\ The maximum time, in milliseconds, the transfer is allowed
|
||||||
|
\\ to complete. 0 means it never times out.
|
||||||
|
\\ Defaults to 10000.
|
||||||
\\
|
\\
|
||||||
\\--log_level The log level: debug, info, warn, error or fatal.
|
\\--log_level The log level: debug, info, warn, error or fatal.
|
||||||
\\ Defaults to
|
\\ Defaults to
|
||||||
@@ -266,9 +301,9 @@ const Command = struct {
|
|||||||
\\ Defaults to
|
\\ Defaults to
|
||||||
++ (if (builtin.mode == .Debug) " pretty." else " logfmt.") ++
|
++ (if (builtin.mode == .Debug) " pretty." else " logfmt.") ++
|
||||||
\\
|
\\
|
||||||
\\
|
|
||||||
;
|
;
|
||||||
|
|
||||||
|
// MAX_HELP_LEN|
|
||||||
const usage =
|
const usage =
|
||||||
\\usage: {s} command [options] [URL]
|
\\usage: {s} command [options] [URL]
|
||||||
\\
|
\\
|
||||||
@@ -521,48 +556,68 @@ fn parseCommonArg(
|
|||||||
log.fatal(.app, "missing argument value", .{ .arg = "--http_proxy" });
|
log.fatal(.app, "missing argument value", .{ .arg = "--http_proxy" });
|
||||||
return error.InvalidArgument;
|
return error.InvalidArgument;
|
||||||
};
|
};
|
||||||
common.http_proxy = try std.Uri.parse(try allocator.dupe(u8, str));
|
common.http_proxy = try allocator.dupeZ(u8, str);
|
||||||
if (common.http_proxy.?.host == null) {
|
|
||||||
log.fatal(.app, "invalid http proxy", .{ .arg = "--http_proxy", .hint = "missing scheme?" });
|
|
||||||
return error.InvalidArgument;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (std.mem.eql(u8, "--proxy_type", opt)) {
|
|
||||||
const str = args.next() orelse {
|
|
||||||
log.fatal(.app, "missing argument value", .{ .arg = "--proxy_type" });
|
|
||||||
return error.InvalidArgument;
|
|
||||||
};
|
|
||||||
common.proxy_type = std.meta.stringToEnum(http.ProxyType, str) orelse {
|
|
||||||
log.fatal(.app, "invalid option choice", .{ .arg = "--proxy_type", .value = str });
|
|
||||||
return error.InvalidArgument;
|
|
||||||
};
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (std.mem.eql(u8, "--proxy_bearer_token", opt)) {
|
if (std.mem.eql(u8, "--proxy_bearer_token", opt)) {
|
||||||
if (common.proxy_auth != null) {
|
|
||||||
log.fatal(.app, "proxy auth already set", .{ .arg = "--proxy_bearer_token" });
|
|
||||||
return error.InvalidArgument;
|
|
||||||
}
|
|
||||||
const str = args.next() orelse {
|
const str = args.next() orelse {
|
||||||
log.fatal(.app, "missing argument value", .{ .arg = "--proxy_bearer_token" });
|
log.fatal(.app, "missing argument value", .{ .arg = "--proxy_bearer_token" });
|
||||||
return error.InvalidArgument;
|
return error.InvalidArgument;
|
||||||
};
|
};
|
||||||
common.proxy_auth = .{ .bearer = .{ .token = str } };
|
common.proxy_bearer_token = try allocator.dupeZ(u8, str);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (std.mem.eql(u8, "--proxy_basic_auth", opt)) {
|
|
||||||
if (common.proxy_auth != null) {
|
if (std.mem.eql(u8, "--http_max_concurrent", opt)) {
|
||||||
log.fatal(.app, "proxy auth already set", .{ .arg = "--proxy_basic_auth" });
|
|
||||||
return error.InvalidArgument;
|
|
||||||
}
|
|
||||||
const str = args.next() orelse {
|
const str = args.next() orelse {
|
||||||
log.fatal(.app, "missing argument value", .{ .arg = "--proxy_basic_auth" });
|
log.fatal(.app, "missing argument value", .{ .arg = "--http_max_concurrent" });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
|
||||||
|
common.http_max_concurrent = std.fmt.parseInt(u8, str, 10) catch |err| {
|
||||||
|
log.fatal(.app, "invalid argument value", .{ .arg = "--http_max_concurrent", .err = err });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (std.mem.eql(u8, "--http_max_host_open", opt)) {
|
||||||
|
const str = args.next() orelse {
|
||||||
|
log.fatal(.app, "missing argument value", .{ .arg = "--http_max_host_open" });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
|
||||||
|
common.http_max_host_open = std.fmt.parseInt(u8, str, 10) catch |err| {
|
||||||
|
log.fatal(.app, "invalid argument value", .{ .arg = "--http_max_host_open", .err = err });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (std.mem.eql(u8, "--http_connect_timeout", opt)) {
|
||||||
|
const str = args.next() orelse {
|
||||||
|
log.fatal(.app, "missing argument value", .{ .arg = "--http_connect_timeout" });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
|
||||||
|
common.http_connect_timeout = std.fmt.parseInt(u31, str, 10) catch |err| {
|
||||||
|
log.fatal(.app, "invalid argument value", .{ .arg = "--http_connect_timeout", .err = err });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (std.mem.eql(u8, "--http_timeout", opt)) {
|
||||||
|
const str = args.next() orelse {
|
||||||
|
log.fatal(.app, "missing argument value", .{ .arg = "--http_timeout" });
|
||||||
|
return error.InvalidArgument;
|
||||||
|
};
|
||||||
|
|
||||||
|
common.http_timeout = std.fmt.parseInt(u31, str, 10) catch |err| {
|
||||||
|
log.fatal(.app, "invalid argument value", .{ .arg = "--http_timeout", .err = err });
|
||||||
return error.InvalidArgument;
|
return error.InvalidArgument;
|
||||||
};
|
};
|
||||||
common.proxy_auth = .{ .basic = .{ .user_pass = str } };
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -633,7 +688,7 @@ test "tests:beforeAll" {
|
|||||||
log.opts.level = .err;
|
log.opts.level = .err;
|
||||||
log.opts.format = .logfmt;
|
log.opts.format = .logfmt;
|
||||||
|
|
||||||
test_wg.startMany(3);
|
test_wg.startMany(2);
|
||||||
const platform = try Platform.init();
|
const platform = try Platform.init();
|
||||||
|
|
||||||
{
|
{
|
||||||
@@ -642,12 +697,6 @@ test "tests:beforeAll" {
|
|||||||
thread.detach();
|
thread.detach();
|
||||||
}
|
}
|
||||||
|
|
||||||
{
|
|
||||||
const address = try std.net.Address.parseIp("127.0.0.1", 9581);
|
|
||||||
const thread = try std.Thread.spawn(.{}, serveHTTPS, .{address});
|
|
||||||
thread.detach();
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
{
|
||||||
const address = try std.net.Address.parseIp("127.0.0.1", 9583);
|
const address = try std.net.Address.parseIp("127.0.0.1", 9583);
|
||||||
const thread = try std.Thread.spawn(.{}, serveCDP, .{ address, &platform });
|
const thread = try std.Thread.spawn(.{}, serveCDP, .{ address, &platform });
|
||||||
@@ -673,161 +722,42 @@ fn serveHTTP(address: std.net.Address) !void {
|
|||||||
test_wg.finish();
|
test_wg.finish();
|
||||||
|
|
||||||
var read_buffer: [1024]u8 = undefined;
|
var read_buffer: [1024]u8 = undefined;
|
||||||
ACCEPT: while (true) {
|
while (true) {
|
||||||
defer _ = arena.reset(.{ .free_all = {} });
|
|
||||||
const aa = arena.allocator();
|
|
||||||
|
|
||||||
var conn = try listener.accept();
|
var conn = try listener.accept();
|
||||||
defer conn.stream.close();
|
defer conn.stream.close();
|
||||||
var http_server = std.http.Server.init(conn, &read_buffer);
|
var http_server = std.http.Server.init(conn, &read_buffer);
|
||||||
var connect_headers: std.ArrayListUnmanaged(std.http.Header) = .{};
|
|
||||||
REQUEST: while (true) {
|
|
||||||
var request = http_server.receiveHead() catch |err| switch (err) {
|
|
||||||
error.HttpConnectionClosing => continue :ACCEPT,
|
|
||||||
else => {
|
|
||||||
std.debug.print("Test HTTP Server error: {}\n", .{err});
|
|
||||||
return err;
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
if (request.head.method == .CONNECT) {
|
var request = http_server.receiveHead() catch |err| switch (err) {
|
||||||
try request.respond("", .{ .status = .ok });
|
error.HttpConnectionClosing => continue,
|
||||||
|
else => {
|
||||||
// Proxy headers and destination headers are separated in the case of a CONNECT proxy
|
std.debug.print("Test HTTP Server error: {}\n", .{err});
|
||||||
// We store the CONNECT headers, then continue with the request for the destination
|
return err;
|
||||||
var it = request.iterateHeaders();
|
},
|
||||||
while (it.next()) |hdr| {
|
|
||||||
try connect_headers.append(aa, .{
|
|
||||||
.name = try std.fmt.allocPrint(aa, "__{s}", .{hdr.name}),
|
|
||||||
.value = try aa.dupe(u8, hdr.value),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
continue :REQUEST;
|
|
||||||
}
|
|
||||||
|
|
||||||
const path = request.head.target;
|
|
||||||
if (std.mem.eql(u8, path, "/loader")) {
|
|
||||||
try request.respond("Hello!", .{
|
|
||||||
.extra_headers = &.{.{ .name = "Connection", .value = "close" }},
|
|
||||||
});
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/simple")) {
|
|
||||||
try request.respond("", .{
|
|
||||||
.extra_headers = &.{.{ .name = "Connection", .value = "close" }},
|
|
||||||
});
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/redirect")) {
|
|
||||||
try request.respond("", .{
|
|
||||||
.status = .moved_permanently,
|
|
||||||
.extra_headers = &.{
|
|
||||||
.{ .name = "Connection", .value = "close" },
|
|
||||||
.{ .name = "LOCATION", .value = "../http_client/echo" },
|
|
||||||
},
|
|
||||||
});
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/redirect/secure")) {
|
|
||||||
try request.respond("", .{
|
|
||||||
.status = .moved_permanently,
|
|
||||||
.extra_headers = &.{ .{ .name = "Connection", .value = "close" }, .{ .name = "LOCATION", .value = "https://127.0.0.1:9581/http_client/body" } },
|
|
||||||
});
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/gzip")) {
|
|
||||||
const body = &.{ 0x1f, 0x8b, 0x08, 0x08, 0x01, 0xc6, 0x19, 0x68, 0x00, 0x03, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x00, 0x73, 0x54, 0xc8, 0x4b, 0x2d, 0x57, 0x48, 0x2a, 0xca, 0x2f, 0x2f, 0x4e, 0x2d, 0x52, 0x48, 0x2a, 0xcd, 0xcc, 0x29, 0x51, 0x48, 0xcb, 0x2f, 0x52, 0xc8, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xe6, 0x02, 0x00, 0xe7, 0xc3, 0x4b, 0x27, 0x21, 0x00, 0x00, 0x00 };
|
|
||||||
try request.respond(body, .{
|
|
||||||
.extra_headers = &.{ .{ .name = "Connection", .value = "close" }, .{ .name = "Content-Encoding", .value = "gzip" } },
|
|
||||||
});
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/echo")) {
|
|
||||||
var headers: std.ArrayListUnmanaged(std.http.Header) = .{};
|
|
||||||
|
|
||||||
var it = request.iterateHeaders();
|
|
||||||
while (it.next()) |hdr| {
|
|
||||||
try headers.append(aa, .{
|
|
||||||
.name = try std.fmt.allocPrint(aa, "_{s}", .{hdr.name}),
|
|
||||||
.value = hdr.value,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (connect_headers.items.len > 0) {
|
|
||||||
try headers.appendSlice(aa, connect_headers.items);
|
|
||||||
connect_headers.clearRetainingCapacity();
|
|
||||||
}
|
|
||||||
try headers.append(aa, .{ .name = "Connection", .value = "Close" });
|
|
||||||
|
|
||||||
try request.respond("over 9000!", .{
|
|
||||||
.status = .created,
|
|
||||||
.extra_headers = headers.items,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
continue :ACCEPT;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a lot of work for testing TLS, but the TLS (async) code is complicated
|
|
||||||
// This "server" is written specifically to test the client. It assumes the client
|
|
||||||
// isn't a jerk.
|
|
||||||
fn serveHTTPS(address: std.net.Address) !void {
|
|
||||||
const tls = @import("tls");
|
|
||||||
|
|
||||||
var listener = try address.listen(.{ .reuse_address = true });
|
|
||||||
defer listener.deinit();
|
|
||||||
|
|
||||||
test_wg.finish();
|
|
||||||
|
|
||||||
var seed: u64 = undefined;
|
|
||||||
std.posix.getrandom(std.mem.asBytes(&seed)) catch unreachable;
|
|
||||||
var r = std.Random.DefaultPrng.init(seed);
|
|
||||||
const rand = r.random();
|
|
||||||
|
|
||||||
var read_buffer: [1024]u8 = undefined;
|
|
||||||
while (true) {
|
|
||||||
const stream = blk: {
|
|
||||||
const conn = try listener.accept();
|
|
||||||
break :blk conn.stream;
|
|
||||||
};
|
};
|
||||||
defer stream.close();
|
|
||||||
|
|
||||||
var conn = try tls.server(stream, .{ .auth = null });
|
const path = request.head.target;
|
||||||
defer conn.close() catch {};
|
|
||||||
|
|
||||||
var pos: usize = 0;
|
if (std.mem.eql(u8, path, "/loader")) {
|
||||||
while (true) {
|
try request.respond("Hello!", .{
|
||||||
const n = try conn.read(read_buffer[pos..]);
|
.extra_headers = &.{.{ .name = "Connection", .value = "close" }},
|
||||||
if (n == 0) {
|
});
|
||||||
break;
|
} else if (std.mem.eql(u8, path, "/xhr")) {
|
||||||
}
|
try request.respond("1234567890" ** 10, .{
|
||||||
pos += n;
|
.extra_headers = &.{
|
||||||
const header_end = std.mem.indexOf(u8, read_buffer[0..pos], "\r\n\r\n") orelse {
|
.{ .name = "Content-Type", .value = "text/html; charset=utf-8" },
|
||||||
continue;
|
.{ .name = "Connection", .value = "Close" },
|
||||||
};
|
},
|
||||||
var it = std.mem.splitScalar(u8, read_buffer[0..header_end], ' ');
|
});
|
||||||
_ = it.next() orelse unreachable; // method
|
} else if (std.mem.eql(u8, path, "/xhr/json")) {
|
||||||
const path = it.next() orelse unreachable;
|
try request.respond("{\"over\":\"9000!!!\"}", .{
|
||||||
|
.extra_headers = &.{
|
||||||
var fragment = false;
|
.{ .name = "Content-Type", .value = "application/json" },
|
||||||
var response: []const u8 = undefined;
|
.{ .name = "Connection", .value = "Close" },
|
||||||
if (std.mem.eql(u8, path, "/http_client/simple")) {
|
},
|
||||||
fragment = true;
|
});
|
||||||
response = "HTTP/1.1 200 \r\nContent-Length: 0\r\nConnection: Close\r\n\r\n";
|
} else {
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/body")) {
|
// should not have an unknown path
|
||||||
fragment = true;
|
unreachable;
|
||||||
response = "HTTP/1.1 201 CREATED\r\nContent-Length: 20\r\nConnection: Close\r\n Another : HEaDer \r\n\r\n1234567890abcdefhijk";
|
|
||||||
} else if (std.mem.eql(u8, path, "/http_client/redirect/insecure")) {
|
|
||||||
fragment = true;
|
|
||||||
response = "HTTP/1.1 307 GOTO\r\nLocation: http://127.0.0.1:9582/http_client/redirect\r\nConnection: Close\r\n\r\n";
|
|
||||||
} else if (std.mem.eql(u8, path, "/xhr")) {
|
|
||||||
response = "HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\nContent-Length: 100\r\nConnection: Close\r\n\r\n" ++ ("1234567890" ** 10);
|
|
||||||
} else if (std.mem.eql(u8, path, "/xhr/json")) {
|
|
||||||
response = "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 18\r\nConnection: Close\r\n\r\n{\"over\":\"9000!!!\"}";
|
|
||||||
} else {
|
|
||||||
// should not have an unknown path
|
|
||||||
unreachable;
|
|
||||||
}
|
|
||||||
|
|
||||||
var unsent = response;
|
|
||||||
while (unsent.len > 0) {
|
|
||||||
const to_send = if (fragment) rand.intRangeAtMost(usize, 1, unsent.len) else unsent.len;
|
|
||||||
const sent = try conn.write(unsent[0..to_send]);
|
|
||||||
unsent = unsent[sent..];
|
|
||||||
std.time.sleep(std.time.ns_per_us * 5);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -838,6 +768,7 @@ fn serveCDP(address: std.net.Address, platform: *const Platform) !void {
|
|||||||
.run_mode = .serve,
|
.run_mode = .serve,
|
||||||
.tls_verify_host = false,
|
.tls_verify_host = false,
|
||||||
.platform = platform,
|
.platform = platform,
|
||||||
|
.http_max_concurrent = 2,
|
||||||
});
|
});
|
||||||
defer app.deinit();
|
defer app.deinit();
|
||||||
|
|
||||||
|
|||||||
@@ -170,7 +170,7 @@ fn run(
|
|||||||
var try_catch: Env.TryCatch = undefined;
|
var try_catch: Env.TryCatch = undefined;
|
||||||
try_catch.init(runner.page.main_context);
|
try_catch.init(runner.page.main_context);
|
||||||
defer try_catch.deinit();
|
defer try_catch.deinit();
|
||||||
try runner.page.loop.run(std.time.ns_per_ms * 200);
|
runner.page.wait(std.time.ns_per_ms * 200);
|
||||||
|
|
||||||
if (try_catch.hasCaught()) {
|
if (try_catch.hasCaught()) {
|
||||||
err_out.* = (try try_catch.err(arena)) orelse "unknwon error";
|
err_out.* = (try try_catch.err(arena)) orelse "unknwon error";
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ const std = @import("std");
|
|||||||
const log = @import("log.zig");
|
const log = @import("log.zig");
|
||||||
const URL = @import("url.zig").URL;
|
const URL = @import("url.zig").URL;
|
||||||
const page = @import("browser/page.zig");
|
const page = @import("browser/page.zig");
|
||||||
const http_client = @import("http/client.zig");
|
const Http = @import("http/Http.zig");
|
||||||
|
const Transfer = @import("http/Client.zig").Transfer;
|
||||||
|
|
||||||
const Allocator = std.mem.Allocator;
|
const Allocator = std.mem.Allocator;
|
||||||
|
|
||||||
@@ -61,7 +62,8 @@ pub const Notification = struct {
|
|||||||
page_navigated: List = .{},
|
page_navigated: List = .{},
|
||||||
http_request_fail: List = .{},
|
http_request_fail: List = .{},
|
||||||
http_request_start: List = .{},
|
http_request_start: List = .{},
|
||||||
http_request_complete: List = .{},
|
http_request_intercept: List = .{},
|
||||||
|
http_headers_done: List = .{},
|
||||||
notification_created: List = .{},
|
notification_created: List = .{},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -72,7 +74,8 @@ pub const Notification = struct {
|
|||||||
page_navigated: *const PageNavigated,
|
page_navigated: *const PageNavigated,
|
||||||
http_request_fail: *const RequestFail,
|
http_request_fail: *const RequestFail,
|
||||||
http_request_start: *const RequestStart,
|
http_request_start: *const RequestStart,
|
||||||
http_request_complete: *const RequestComplete,
|
http_request_intercept: *const RequestIntercept,
|
||||||
|
http_headers_done: *const ResponseHeadersDone,
|
||||||
notification_created: *Notification,
|
notification_created: *Notification,
|
||||||
};
|
};
|
||||||
const EventType = std.meta.FieldEnum(Events);
|
const EventType = std.meta.FieldEnum(Events);
|
||||||
@@ -81,35 +84,31 @@ pub const Notification = struct {
|
|||||||
|
|
||||||
pub const PageNavigate = struct {
|
pub const PageNavigate = struct {
|
||||||
timestamp: u32,
|
timestamp: u32,
|
||||||
url: *const URL,
|
url: []const u8,
|
||||||
opts: page.NavigateOpts,
|
opts: page.NavigateOpts,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const PageNavigated = struct {
|
pub const PageNavigated = struct {
|
||||||
timestamp: u32,
|
timestamp: u32,
|
||||||
url: *const URL,
|
url: []const u8,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const RequestStart = struct {
|
pub const RequestStart = struct {
|
||||||
arena: Allocator,
|
transfer: *Transfer,
|
||||||
id: usize,
|
};
|
||||||
url: *const std.Uri,
|
|
||||||
method: http_client.Request.Method,
|
pub const RequestIntercept = struct {
|
||||||
headers: *std.ArrayListUnmanaged(std.http.Header),
|
transfer: *Transfer,
|
||||||
has_body: bool,
|
wait_for_interception: *bool,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ResponseHeadersDone = struct {
|
||||||
|
transfer: *Transfer,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const RequestFail = struct {
|
pub const RequestFail = struct {
|
||||||
id: usize,
|
transfer: *Transfer,
|
||||||
url: *const std.Uri,
|
err: anyerror,
|
||||||
err: []const u8,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const RequestComplete = struct {
|
|
||||||
id: usize,
|
|
||||||
url: *const std.Uri,
|
|
||||||
status: u16,
|
|
||||||
headers: []http_client.Header,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub fn init(allocator: Allocator, parent: ?*Notification) !*Notification {
|
pub fn init(allocator: Allocator, parent: ?*Notification) !*Notification {
|
||||||
|
|||||||
@@ -668,7 +668,11 @@ pub fn Env(comptime State: type, comptime WebApis: type) type {
|
|||||||
|
|
||||||
const ModuleLoader = struct {
|
const ModuleLoader = struct {
|
||||||
ptr: *anyopaque,
|
ptr: *anyopaque,
|
||||||
func: *const fn (ptr: *anyopaque, specifier: []const u8) anyerror!?[]const u8,
|
func: *const fn (ptr: *anyopaque, url: [:0]const u8) anyerror!BlockingResult,
|
||||||
|
|
||||||
|
// Don't like having to reach into ../browser/ here. But can't think
|
||||||
|
// of a good way to fix this.
|
||||||
|
const BlockingResult = @import("../browser/ScriptManager.zig").BlockingResult;
|
||||||
};
|
};
|
||||||
|
|
||||||
// no init, started with executor.createJsContext()
|
// no init, started with executor.createJsContext()
|
||||||
@@ -1416,11 +1420,7 @@ pub fn Env(comptime State: type, comptime WebApis: type) type {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn _resolveModuleCallback(
|
fn _resolveModuleCallback(self: *JsContext, referrer: v8.Module, specifier: []const u8) !?*const v8.C_Module {
|
||||||
self: *JsContext,
|
|
||||||
referrer: v8.Module,
|
|
||||||
specifier: []const u8,
|
|
||||||
) !?*const v8.C_Module {
|
|
||||||
const referrer_path = self.module_identifier.get(referrer.getIdentityHash()) orelse {
|
const referrer_path = self.module_identifier.get(referrer.getIdentityHash()) orelse {
|
||||||
// Shouldn't be possible.
|
// Shouldn't be possible.
|
||||||
return error.UnknownModuleReferrer;
|
return error.UnknownModuleReferrer;
|
||||||
@@ -1430,29 +1430,32 @@ pub fn Env(comptime State: type, comptime WebApis: type) type {
|
|||||||
self.call_arena,
|
self.call_arena,
|
||||||
specifier,
|
specifier,
|
||||||
referrer_path,
|
referrer_path,
|
||||||
.{ .alloc = .if_needed },
|
.{ .alloc = .if_needed, .null_terminated = true },
|
||||||
);
|
);
|
||||||
|
|
||||||
if (self.module_cache.get(normalized_specifier)) |pm| {
|
if (self.module_cache.get(normalized_specifier)) |pm| {
|
||||||
return pm.handle;
|
return pm.handle;
|
||||||
}
|
}
|
||||||
|
|
||||||
const module_loader = self.module_loader;
|
const m: v8.Module = blk: {
|
||||||
const source = try module_loader.func(module_loader.ptr, normalized_specifier) orelse return null;
|
const module_loader = self.module_loader;
|
||||||
|
var fetch_result = try module_loader.func(module_loader.ptr, normalized_specifier);
|
||||||
|
defer fetch_result.deinit();
|
||||||
|
|
||||||
var try_catch: TryCatch = undefined;
|
var try_catch: TryCatch = undefined;
|
||||||
try_catch.init(self);
|
try_catch.init(self);
|
||||||
defer try_catch.deinit();
|
defer try_catch.deinit();
|
||||||
|
|
||||||
const m = compileModule(self.isolate, source, normalized_specifier) catch |err| {
|
break :blk compileModule(self.isolate, fetch_result.src(), normalized_specifier) catch |err| {
|
||||||
log.warn(.js, "compile resolved module", .{
|
log.warn(.js, "compile resolved module", .{
|
||||||
.specifier = specifier,
|
.specifier = specifier,
|
||||||
.stack = try_catch.stack(self.call_arena) catch null,
|
.stack = try_catch.stack(self.call_arena) catch null,
|
||||||
.src = try_catch.sourceLine(self.call_arena) catch "err",
|
.src = try_catch.sourceLine(self.call_arena) catch "err",
|
||||||
.line = try_catch.sourceLineNumber() orelse 0,
|
.line = try_catch.sourceLineNumber() orelse 0,
|
||||||
.exception = (try_catch.exception(self.call_arena) catch @errorName(err)) orelse @errorName(err),
|
.exception = (try_catch.exception(self.call_arena) catch @errorName(err)) orelse @errorName(err),
|
||||||
});
|
});
|
||||||
return null;
|
return null;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
// We were hoping to find the module in our cache, and thus used
|
// We were hoping to find the module in our cache, and thus used
|
||||||
@@ -1568,7 +1571,7 @@ pub fn Env(comptime State: type, comptime WebApis: type) type {
|
|||||||
context.context_arena,
|
context.context_arena,
|
||||||
specifier_str,
|
specifier_str,
|
||||||
resource_str,
|
resource_str,
|
||||||
.{ .alloc = .if_needed },
|
.{ .alloc = .if_needed, .null_terminated = true },
|
||||||
) catch unreachable;
|
) catch unreachable;
|
||||||
|
|
||||||
log.debug(.js, "dynamic import", .{
|
log.debug(.js, "dynamic import", .{
|
||||||
@@ -1590,41 +1593,41 @@ pub fn Env(comptime State: type, comptime WebApis: type) type {
|
|||||||
|
|
||||||
fn _dynamicModuleCallback(
|
fn _dynamicModuleCallback(
|
||||||
self: *JsContext,
|
self: *JsContext,
|
||||||
specifier: []const u8,
|
specifier: [:0]const u8,
|
||||||
resolver: *const v8.PromiseResolver,
|
resolver: *const v8.PromiseResolver,
|
||||||
) !void {
|
) !void {
|
||||||
const iso = self.isolate;
|
const iso = self.isolate;
|
||||||
const ctx = self.v8_context;
|
const ctx = self.v8_context;
|
||||||
|
|
||||||
const module_loader = self.module_loader;
|
|
||||||
const source = module_loader.func(module_loader.ptr, specifier) catch {
|
|
||||||
const error_msg = v8.String.initUtf8(iso, "Failed to load module");
|
|
||||||
_ = resolver.reject(ctx, error_msg.toValue());
|
|
||||||
return;
|
|
||||||
} orelse {
|
|
||||||
const error_msg = v8.String.initUtf8(iso, "Module source not available");
|
|
||||||
_ = resolver.reject(ctx, error_msg.toValue());
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
var try_catch: TryCatch = undefined;
|
var try_catch: TryCatch = undefined;
|
||||||
try_catch.init(self);
|
try_catch.init(self);
|
||||||
defer try_catch.deinit();
|
defer try_catch.deinit();
|
||||||
|
|
||||||
const maybe_promise = self.module(source, specifier, true) catch {
|
const maybe_promise: ?v8.Promise = blk: {
|
||||||
log.err(.js, "module compilation failed", .{
|
const module_loader = self.module_loader;
|
||||||
.specifier = specifier,
|
var fetch_result = module_loader.func(module_loader.ptr, specifier) catch {
|
||||||
.exception = try_catch.exception(self.call_arena) catch "unknown error",
|
const error_msg = v8.String.initUtf8(iso, "Failed to load module");
|
||||||
.stack = try_catch.stack(self.call_arena) catch null,
|
_ = resolver.reject(ctx, error_msg.toValue());
|
||||||
.line = try_catch.sourceLineNumber() orelse 0,
|
return;
|
||||||
});
|
};
|
||||||
const error_msg = if (try_catch.hasCaught()) blk: {
|
defer fetch_result.deinit();
|
||||||
const exception_str = try_catch.exception(self.call_arena) catch "Evaluation error";
|
|
||||||
break :blk v8.String.initUtf8(iso, exception_str orelse "Evaluation error");
|
break :blk self.module(fetch_result.src(), specifier, true) catch {
|
||||||
} else v8.String.initUtf8(iso, "Module evaluation failed");
|
log.err(.js, "module compilation failed", .{
|
||||||
_ = resolver.reject(ctx, error_msg.toValue());
|
.specifier = specifier,
|
||||||
return;
|
.exception = try_catch.exception(self.call_arena) catch "unknown error",
|
||||||
|
.stack = try_catch.stack(self.call_arena) catch null,
|
||||||
|
.line = try_catch.sourceLineNumber() orelse 0,
|
||||||
|
});
|
||||||
|
const error_msg = if (try_catch.hasCaught()) eblk: {
|
||||||
|
const exception_str = try_catch.exception(self.call_arena) catch "Evaluation error";
|
||||||
|
break :eblk v8.String.initUtf8(iso, exception_str orelse "Evaluation error");
|
||||||
|
} else v8.String.initUtf8(iso, "Module evaluation failed");
|
||||||
|
_ = resolver.reject(ctx, error_msg.toValue());
|
||||||
|
return;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
const new_module = self.module_cache.get(specifier).?.castToModule();
|
const new_module = self.module_cache.get(specifier).?.castToModule();
|
||||||
|
|
||||||
if (maybe_promise) |promise| {
|
if (maybe_promise) |promise| {
|
||||||
@@ -3815,7 +3818,11 @@ const NoopInspector = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const ErrorModuleLoader = struct {
|
const ErrorModuleLoader = struct {
|
||||||
pub fn fetchModuleSource(_: *anyopaque, _: []const u8) !?[]const u8 {
|
// Don't like having to reach into ../browser/ here. But can't think
|
||||||
|
// of a good way to fix this.
|
||||||
|
const BlockingResult = @import("../browser/ScriptManager.zig").BlockingResult;
|
||||||
|
|
||||||
|
pub fn fetchModuleSource(_: *anyopaque, _: [:0]const u8) !BlockingResult {
|
||||||
return error.NoModuleLoadConfigured;
|
return error.NoModuleLoadConfigured;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -121,6 +121,10 @@ pub const Loop = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn hasPendingTimeout(self: *Self) bool {
|
||||||
|
return self.pending_timeout_count > 0;
|
||||||
|
}
|
||||||
|
|
||||||
// JS callbacks APIs
|
// JS callbacks APIs
|
||||||
// -----------------
|
// -----------------
|
||||||
|
|
||||||
|
|||||||
@@ -48,8 +48,9 @@ const MAX_MESSAGE_SIZE = 512 * 1024 + 14;
|
|||||||
|
|
||||||
const Server = struct {
|
const Server = struct {
|
||||||
app: *App,
|
app: *App,
|
||||||
allocator: Allocator,
|
|
||||||
loop: *Loop,
|
loop: *Loop,
|
||||||
|
allocator: Allocator,
|
||||||
|
client: ?*Client = null,
|
||||||
|
|
||||||
// internal fields
|
// internal fields
|
||||||
listener: posix.socket_t,
|
listener: posix.socket_t,
|
||||||
@@ -96,6 +97,7 @@ const Server = struct {
|
|||||||
const client = try self.allocator.create(Client);
|
const client = try self.allocator.create(Client);
|
||||||
client.* = Client.init(socket, self);
|
client.* = Client.init(socket, self);
|
||||||
client.start();
|
client.start();
|
||||||
|
self.client = client;
|
||||||
|
|
||||||
if (log.enabled(.app, .info)) {
|
if (log.enabled(.app, .info)) {
|
||||||
var address: std.net.Address = undefined;
|
var address: std.net.Address = undefined;
|
||||||
@@ -107,6 +109,7 @@ const Server = struct {
|
|||||||
|
|
||||||
fn releaseClient(self: *Server, client: *Client) void {
|
fn releaseClient(self: *Server, client: *Client) void {
|
||||||
self.allocator.destroy(client);
|
self.allocator.destroy(client);
|
||||||
|
self.client = null;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -163,9 +166,7 @@ pub const Client = struct {
|
|||||||
|
|
||||||
const SendQueue = std.DoublyLinkedList(Outgoing);
|
const SendQueue = std.DoublyLinkedList(Outgoing);
|
||||||
|
|
||||||
const Self = @This();
|
fn init(socket: posix.socket_t, server: *Server) Client {
|
||||||
|
|
||||||
fn init(socket: posix.socket_t, server: *Server) Self {
|
|
||||||
return .{
|
return .{
|
||||||
.cdp = null,
|
.cdp = null,
|
||||||
.mode = .http,
|
.mode = .http,
|
||||||
@@ -185,7 +186,7 @@ pub const Client = struct {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn maybeDeinit(self: *Self) void {
|
fn maybeDeinit(self: *Client) void {
|
||||||
if (self.read_pending or self.write_pending) {
|
if (self.read_pending or self.write_pending) {
|
||||||
// We cannot do anything as long as we still have these pending
|
// We cannot do anything as long as we still have these pending
|
||||||
// They should not be pending for long as we're only here after
|
// They should not be pending for long as we're only here after
|
||||||
@@ -222,7 +223,7 @@ pub const Client = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn close(self: *Self) void {
|
fn close(self: *Client) void {
|
||||||
log.info(.app, "client disconnected", .{});
|
log.info(.app, "client disconnected", .{});
|
||||||
self.connected = false;
|
self.connected = false;
|
||||||
// recv only, because we might have pending writes we'd like to get
|
// recv only, because we might have pending writes we'd like to get
|
||||||
@@ -231,14 +232,14 @@ pub const Client = struct {
|
|||||||
self.maybeDeinit();
|
self.maybeDeinit();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn start(self: *Self) void {
|
fn start(self: *Client) void {
|
||||||
self.queueRead();
|
self.queueRead();
|
||||||
self.queueTimeout();
|
self.queueTimeout();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queueRead(self: *Self) void {
|
fn queueRead(self: *Client) void {
|
||||||
self.server.loop.io.recv(
|
self.server.loop.io.recv(
|
||||||
*Self,
|
*Client,
|
||||||
self,
|
self,
|
||||||
callbackRead,
|
callbackRead,
|
||||||
&self.read_completion,
|
&self.read_completion,
|
||||||
@@ -248,7 +249,7 @@ pub const Client = struct {
|
|||||||
self.read_pending = true;
|
self.read_pending = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn callbackRead(self: *Self, _: *Completion, result: RecvError!usize) void {
|
fn callbackRead(self: *Client, _: *Completion, result: RecvError!usize) void {
|
||||||
self.read_pending = false;
|
self.read_pending = false;
|
||||||
if (self.connected == false) {
|
if (self.connected == false) {
|
||||||
self.maybeDeinit();
|
self.maybeDeinit();
|
||||||
@@ -277,11 +278,11 @@ pub const Client = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn readBuf(self: *Self) []u8 {
|
fn readBuf(self: *Client) []u8 {
|
||||||
return self.reader.readBuf();
|
return self.reader.readBuf();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn processData(self: *Self, len: usize) !bool {
|
fn processData(self: *Client, len: usize) !bool {
|
||||||
self.last_active = now();
|
self.last_active = now();
|
||||||
self.reader.len += len;
|
self.reader.len += len;
|
||||||
|
|
||||||
@@ -294,7 +295,7 @@ pub const Client = struct {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn processHTTPRequest(self: *Self) !void {
|
fn processHTTPRequest(self: *Client) !void {
|
||||||
std.debug.assert(self.reader.pos == 0);
|
std.debug.assert(self.reader.pos == 0);
|
||||||
const request = self.reader.buf[0..self.reader.len];
|
const request = self.reader.buf[0..self.reader.len];
|
||||||
|
|
||||||
@@ -330,7 +331,7 @@ pub const Client = struct {
|
|||||||
self.reader.len = 0;
|
self.reader.len = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn handleHTTPRequest(self: *Self, request: []u8) !void {
|
fn handleHTTPRequest(self: *Client, request: []u8) !void {
|
||||||
if (request.len < 18) {
|
if (request.len < 18) {
|
||||||
// 18 is [generously] the smallest acceptable HTTP request
|
// 18 is [generously] the smallest acceptable HTTP request
|
||||||
return error.InvalidRequest;
|
return error.InvalidRequest;
|
||||||
@@ -365,7 +366,7 @@ pub const Client = struct {
|
|||||||
return error.NotFound;
|
return error.NotFound;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn upgradeConnection(self: *Self, request: []u8) !void {
|
fn upgradeConnection(self: *Client, request: []u8) !void {
|
||||||
// our caller already confirmed that we have a trailing \r\n\r\n
|
// our caller already confirmed that we have a trailing \r\n\r\n
|
||||||
const request_line_end = std.mem.indexOfScalar(u8, request, '\r') orelse unreachable;
|
const request_line_end = std.mem.indexOfScalar(u8, request, '\r') orelse unreachable;
|
||||||
const request_line = request[0..request_line_end];
|
const request_line = request[0..request_line_end];
|
||||||
@@ -462,7 +463,7 @@ pub const Client = struct {
|
|||||||
return self.send(arena, response);
|
return self.send(arena, response);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn writeHTTPErrorResponse(self: *Self, comptime status: u16, comptime body: []const u8) void {
|
fn writeHTTPErrorResponse(self: *Client, comptime status: u16, comptime body: []const u8) void {
|
||||||
const response = std.fmt.comptimePrint(
|
const response = std.fmt.comptimePrint(
|
||||||
"HTTP/1.1 {d} \r\nConnection: Close\r\nContent-Length: {d}\r\n\r\n{s}",
|
"HTTP/1.1 {d} \r\nConnection: Close\r\nContent-Length: {d}\r\n\r\n{s}",
|
||||||
.{ status, body.len, body },
|
.{ status, body.len, body },
|
||||||
@@ -473,7 +474,7 @@ pub const Client = struct {
|
|||||||
self.send(null, response) catch {};
|
self.send(null, response) catch {};
|
||||||
}
|
}
|
||||||
|
|
||||||
fn processWebsocketMessage(self: *Self) !bool {
|
fn processWebsocketMessage(self: *Client) !bool {
|
||||||
errdefer self.close();
|
errdefer self.close();
|
||||||
|
|
||||||
var reader = &self.reader;
|
var reader = &self.reader;
|
||||||
@@ -517,7 +518,7 @@ pub const Client = struct {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sendPong(self: *Self, data: []const u8) !void {
|
fn sendPong(self: *Client, data: []const u8) !void {
|
||||||
if (data.len == 0) {
|
if (data.len == 0) {
|
||||||
return self.send(null, &EMPTY_PONG);
|
return self.send(null, &EMPTY_PONG);
|
||||||
}
|
}
|
||||||
@@ -539,7 +540,7 @@ pub const Client = struct {
|
|||||||
// writev, so we need to get creative. We'll JSON serialize to a
|
// writev, so we need to get creative. We'll JSON serialize to a
|
||||||
// buffer, where the first 10 bytes are reserved. We can then backfill
|
// buffer, where the first 10 bytes are reserved. We can then backfill
|
||||||
// the header and send the slice.
|
// the header and send the slice.
|
||||||
pub fn sendJSON(self: *Self, message: anytype, opts: std.json.StringifyOptions) !void {
|
pub fn sendJSON(self: *Client, message: anytype, opts: std.json.StringifyOptions) !void {
|
||||||
var arena = ArenaAllocator.init(self.server.allocator);
|
var arena = ArenaAllocator.init(self.server.allocator);
|
||||||
errdefer arena.deinit();
|
errdefer arena.deinit();
|
||||||
|
|
||||||
@@ -557,7 +558,7 @@ pub const Client = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn sendJSONRaw(
|
pub fn sendJSONRaw(
|
||||||
self: *Self,
|
self: *Client,
|
||||||
arena: ArenaAllocator,
|
arena: ArenaAllocator,
|
||||||
buf: std.ArrayListUnmanaged(u8),
|
buf: std.ArrayListUnmanaged(u8),
|
||||||
) !void {
|
) !void {
|
||||||
@@ -567,9 +568,9 @@ pub const Client = struct {
|
|||||||
return self.send(arena, framed);
|
return self.send(arena, framed);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queueTimeout(self: *Self) void {
|
fn queueTimeout(self: *Client) void {
|
||||||
self.server.loop.io.timeout(
|
self.server.loop.io.timeout(
|
||||||
*Self,
|
*Client,
|
||||||
self,
|
self,
|
||||||
callbackTimeout,
|
callbackTimeout,
|
||||||
&self.timeout_completion,
|
&self.timeout_completion,
|
||||||
@@ -578,7 +579,7 @@ pub const Client = struct {
|
|||||||
self.timeout_pending = true;
|
self.timeout_pending = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn callbackTimeout(self: *Self, _: *Completion, result: TimeoutError!void) void {
|
fn callbackTimeout(self: *Client, _: *Completion, result: TimeoutError!void) void {
|
||||||
self.timeout_pending = false;
|
self.timeout_pending = false;
|
||||||
if (self.connected == false) {
|
if (self.connected == false) {
|
||||||
if (self.read_pending == false and self.write_pending == false) {
|
if (self.read_pending == false and self.write_pending == false) {
|
||||||
@@ -614,7 +615,7 @@ pub const Client = struct {
|
|||||||
self.queueTimeout();
|
self.queueTimeout();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn send(self: *Self, arena: ?ArenaAllocator, data: []const u8) !void {
|
fn send(self: *Client, arena: ?ArenaAllocator, data: []const u8) !void {
|
||||||
const node = try self.send_queue_node_pool.create();
|
const node = try self.send_queue_node_pool.create();
|
||||||
errdefer self.send_queue_node_pool.destroy(node);
|
errdefer self.send_queue_node_pool.destroy(node);
|
||||||
|
|
||||||
@@ -632,7 +633,7 @@ pub const Client = struct {
|
|||||||
self.queueSend();
|
self.queueSend();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queueSend(self: *Self) void {
|
fn queueSend(self: *Client) void {
|
||||||
if (self.connected == false) {
|
if (self.connected == false) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -643,7 +644,7 @@ pub const Client = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
self.server.loop.io.send(
|
self.server.loop.io.send(
|
||||||
*Self,
|
*Client,
|
||||||
self,
|
self,
|
||||||
sendCallback,
|
sendCallback,
|
||||||
&self.write_completion,
|
&self.write_completion,
|
||||||
@@ -653,7 +654,7 @@ pub const Client = struct {
|
|||||||
self.write_pending = true;
|
self.write_pending = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sendCallback(self: *Self, _: *Completion, result: SendError!usize) void {
|
fn sendCallback(self: *Client, _: *Completion, result: SendError!usize) void {
|
||||||
self.write_pending = false;
|
self.write_pending = false;
|
||||||
if (self.connected == false) {
|
if (self.connected == false) {
|
||||||
self.maybeDeinit();
|
self.maybeDeinit();
|
||||||
@@ -1052,7 +1053,23 @@ pub fn run(
|
|||||||
// infinite loop on I/O events, either:
|
// infinite loop on I/O events, either:
|
||||||
// - cmd from incoming connection on server socket
|
// - cmd from incoming connection on server socket
|
||||||
// - JS callbacks events from scripts
|
// - JS callbacks events from scripts
|
||||||
|
// var http_client = app.http_client;
|
||||||
while (true) {
|
while (true) {
|
||||||
|
// @newhttp. This is a hack. We used to just have 1 loop, so we could
|
||||||
|
// sleep it it "forever" and any activity (message to this server,
|
||||||
|
// JS callback, http data) would wake it up.
|
||||||
|
// Now we have 2 loops. If we block on one, the other won't get woken
|
||||||
|
// up. We don't block "forever" but even 10ms adds a bunch of latency
|
||||||
|
// since this is called in a loop.
|
||||||
|
// Hopefully this is temporary and we can remove the io loop and then
|
||||||
|
// only have 1 loop. But, until then, we need to check both loops and
|
||||||
|
// pay some blocking penalty.
|
||||||
|
if (server.client) |client| {
|
||||||
|
if (client.cdp) |*cdp| {
|
||||||
|
cdp.pageWait();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
try loop.io.run_for_ns(10 * std.time.ns_per_ms);
|
try loop.io.run_for_ns(10 * std.time.ns_per_ms);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,26 +7,31 @@ const Allocator = std.mem.Allocator;
|
|||||||
|
|
||||||
const log = @import("../log.zig");
|
const log = @import("../log.zig");
|
||||||
const App = @import("../app.zig").App;
|
const App = @import("../app.zig").App;
|
||||||
|
const Http = @import("../http/Http.zig");
|
||||||
const telemetry = @import("telemetry.zig");
|
const telemetry = @import("telemetry.zig");
|
||||||
const HttpClient = @import("../http/client.zig").Client;
|
|
||||||
|
|
||||||
const URL = "https://telemetry.lightpanda.io";
|
const URL = "https://telemetry.lightpanda.io";
|
||||||
const MAX_BATCH_SIZE = 20;
|
const MAX_BATCH_SIZE = 20;
|
||||||
|
|
||||||
pub const LightPanda = struct {
|
pub const LightPanda = struct {
|
||||||
uri: std.Uri,
|
|
||||||
pending: List,
|
pending: List,
|
||||||
running: bool,
|
running: bool,
|
||||||
thread: ?std.Thread,
|
thread: ?std.Thread,
|
||||||
allocator: Allocator,
|
allocator: Allocator,
|
||||||
mutex: std.Thread.Mutex,
|
mutex: std.Thread.Mutex,
|
||||||
cond: Thread.Condition,
|
cond: Thread.Condition,
|
||||||
client: *HttpClient,
|
connection: Http.Connection,
|
||||||
node_pool: std.heap.MemoryPool(List.Node),
|
node_pool: std.heap.MemoryPool(List.Node),
|
||||||
|
|
||||||
const List = std.DoublyLinkedList(LightPandaEvent);
|
const List = std.DoublyLinkedList(LightPandaEvent);
|
||||||
|
|
||||||
pub fn init(app: *App) LightPanda {
|
pub fn init(app: *App) !LightPanda {
|
||||||
|
const connection = try app.http.newConnection();
|
||||||
|
errdefer connection.deinit();
|
||||||
|
|
||||||
|
try connection.setURL(URL);
|
||||||
|
try connection.setMethod(.POST);
|
||||||
|
|
||||||
const allocator = app.allocator;
|
const allocator = app.allocator;
|
||||||
return .{
|
return .{
|
||||||
.cond = .{},
|
.cond = .{},
|
||||||
@@ -35,8 +40,7 @@ pub const LightPanda = struct {
|
|||||||
.thread = null,
|
.thread = null,
|
||||||
.running = true,
|
.running = true,
|
||||||
.allocator = allocator,
|
.allocator = allocator,
|
||||||
.client = &app.http_client,
|
.connection = connection,
|
||||||
.uri = std.Uri.parse(URL) catch unreachable,
|
|
||||||
.node_pool = std.heap.MemoryPool(List.Node).init(allocator),
|
.node_pool = std.heap.MemoryPool(List.Node).init(allocator),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -50,6 +54,7 @@ pub const LightPanda = struct {
|
|||||||
thread.join();
|
thread.join();
|
||||||
}
|
}
|
||||||
self.node_pool.deinit();
|
self.node_pool.deinit();
|
||||||
|
self.connection.deinit();
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn send(self: *LightPanda, iid: ?[]const u8, run_mode: App.RunMode, raw_event: telemetry.Event) !void {
|
pub fn send(self: *LightPanda, iid: ?[]const u8, run_mode: App.RunMode, raw_event: telemetry.Event) !void {
|
||||||
@@ -102,15 +107,11 @@ pub const LightPanda = struct {
|
|||||||
try writer.writeByte('\n');
|
try writer.writeByte('\n');
|
||||||
}
|
}
|
||||||
|
|
||||||
var req = try self.client.request(.POST, &self.uri);
|
try self.connection.setBody(arr.items);
|
||||||
defer req.deinit();
|
const status = try self.connection.request();
|
||||||
req.body = arr.items;
|
|
||||||
|
|
||||||
// drain the response
|
if (status != 200) {
|
||||||
var res = try req.sendSync(.{});
|
log.warn(.telemetry, "server error", .{ .status = status });
|
||||||
while (try res.next()) |_| {}
|
|
||||||
if (res.header.status != 200) {
|
|
||||||
log.warn(.telemetry, "server error", .{ .status = res.header.status });
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -29,16 +29,19 @@ fn TelemetryT(comptime P: type) type {
|
|||||||
|
|
||||||
const Self = @This();
|
const Self = @This();
|
||||||
|
|
||||||
pub fn init(app: *App, run_mode: App.RunMode) Self {
|
pub fn init(app: *App, run_mode: App.RunMode) !Self {
|
||||||
const disabled = std.process.hasEnvVarConstant("LIGHTPANDA_DISABLE_TELEMETRY");
|
const disabled = std.process.hasEnvVarConstant("LIGHTPANDA_DISABLE_TELEMETRY");
|
||||||
if (builtin.mode != .Debug and builtin.is_test == false) {
|
if (builtin.mode != .Debug and builtin.is_test == false) {
|
||||||
log.info(.telemetry, "telemetry status", .{ .disabled = disabled });
|
log.info(.telemetry, "telemetry status", .{ .disabled = disabled });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const provider = try P.init(app);
|
||||||
|
errdefer provider.deinit();
|
||||||
|
|
||||||
return .{
|
return .{
|
||||||
.disabled = disabled,
|
.disabled = disabled,
|
||||||
.run_mode = run_mode,
|
.run_mode = run_mode,
|
||||||
.provider = P.init(app),
|
.provider = provider,
|
||||||
.iid = if (disabled) null else getOrCreateId(app.app_dir_path),
|
.iid = if (disabled) null else getOrCreateId(app.app_dir_path),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
@@ -79,7 +82,7 @@ fn TelemetryT(comptime P: type) type {
|
|||||||
const self: *Self = @alignCast(@ptrCast(ctx));
|
const self: *Self = @alignCast(@ptrCast(ctx));
|
||||||
self.record(.{ .navigate = .{
|
self.record(.{ .navigate = .{
|
||||||
.proxy = false,
|
.proxy = false,
|
||||||
.tls = std.ascii.eqlIgnoreCase(data.url.scheme(), "https"),
|
.tls = std.ascii.startsWithIgnoreCase(data.url, "https://"),
|
||||||
} });
|
} });
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -134,7 +137,7 @@ pub const Event = union(enum) {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const NoopProvider = struct {
|
const NoopProvider = struct {
|
||||||
fn init(_: *App) NoopProvider {
|
fn init(_: *App) !NoopProvider {
|
||||||
return .{};
|
return .{};
|
||||||
}
|
}
|
||||||
fn deinit(_: NoopProvider) void {}
|
fn deinit(_: NoopProvider) void {}
|
||||||
@@ -150,7 +153,7 @@ test "telemetry: disabled by environment" {
|
|||||||
defer _ = unsetenv(@constCast("LIGHTPANDA_DISABLE_TELEMETRY"));
|
defer _ = unsetenv(@constCast("LIGHTPANDA_DISABLE_TELEMETRY"));
|
||||||
|
|
||||||
const FailingProvider = struct {
|
const FailingProvider = struct {
|
||||||
fn init(_: *App) @This() {
|
fn init(_: *App) !@This() {
|
||||||
return .{};
|
return .{};
|
||||||
}
|
}
|
||||||
fn deinit(_: @This()) void {}
|
fn deinit(_: @This()) void {}
|
||||||
@@ -159,7 +162,7 @@ test "telemetry: disabled by environment" {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
var telemetry = TelemetryT(FailingProvider).init(undefined, .serve);
|
var telemetry = try TelemetryT(FailingProvider).init(undefined, .serve);
|
||||||
defer telemetry.deinit();
|
defer telemetry.deinit();
|
||||||
telemetry.record(.{ .run = {} });
|
telemetry.record(.{ .run = {} });
|
||||||
}
|
}
|
||||||
@@ -186,7 +189,7 @@ test "telemetry: sends event to provider" {
|
|||||||
var app = testing.createApp(.{});
|
var app = testing.createApp(.{});
|
||||||
defer app.deinit();
|
defer app.deinit();
|
||||||
|
|
||||||
var telemetry = TelemetryT(MockProvider).init(app, .serve);
|
var telemetry = try TelemetryT(MockProvider).init(app, .serve);
|
||||||
defer telemetry.deinit();
|
defer telemetry.deinit();
|
||||||
const mock = &telemetry.provider;
|
const mock = &telemetry.provider;
|
||||||
|
|
||||||
@@ -206,7 +209,7 @@ const MockProvider = struct {
|
|||||||
allocator: Allocator,
|
allocator: Allocator,
|
||||||
events: std.ArrayListUnmanaged(Event),
|
events: std.ArrayListUnmanaged(Event),
|
||||||
|
|
||||||
fn init(app: *App) @This() {
|
fn init(app: *App) !@This() {
|
||||||
return .{
|
return .{
|
||||||
.iid = null,
|
.iid = null,
|
||||||
.run_mode = null,
|
.run_mode = null,
|
||||||
|
|||||||
@@ -406,8 +406,9 @@ pub const JsRunner = struct {
|
|||||||
.url = try page.url.toWebApi(page.arena),
|
.url = try page.url.toWebApi(page.arena),
|
||||||
});
|
});
|
||||||
|
|
||||||
var html = std.io.fixedBufferStream(opts.html);
|
const html_doc = try parser.documentHTMLParseFromStr(opts.html);
|
||||||
try page.loadHTMLDoc(html.reader(), "UTF-8");
|
try page.setDocument(html_doc);
|
||||||
|
page.mode = .{ .parsed = {} };
|
||||||
|
|
||||||
return .{
|
return .{
|
||||||
.app = app,
|
.app = app,
|
||||||
@@ -441,7 +442,7 @@ pub const JsRunner = struct {
|
|||||||
}
|
}
|
||||||
return err;
|
return err;
|
||||||
};
|
};
|
||||||
try self.page.loop.run(std.time.ns_per_ms * 200);
|
self.page.session.wait(1);
|
||||||
@import("root").js_runner_duration += std.time.Instant.since(try std.time.Instant.now(), start);
|
@import("root").js_runner_duration += std.time.Instant.since(try std.time.Instant.now(), start);
|
||||||
|
|
||||||
if (case.@"1") |expected| {
|
if (case.@"1") |expected| {
|
||||||
|
|||||||
153
src/url.zig
153
src/url.zig
@@ -87,6 +87,7 @@ pub const URL = struct {
|
|||||||
|
|
||||||
const StitchOpts = struct {
|
const StitchOpts = struct {
|
||||||
alloc: AllocWhen = .always,
|
alloc: AllocWhen = .always,
|
||||||
|
null_terminated: bool = false,
|
||||||
|
|
||||||
const AllocWhen = enum {
|
const AllocWhen = enum {
|
||||||
always,
|
always,
|
||||||
@@ -102,9 +103,13 @@ pub const URL = struct {
|
|||||||
allocator: Allocator,
|
allocator: Allocator,
|
||||||
path: []const u8,
|
path: []const u8,
|
||||||
base: []const u8,
|
base: []const u8,
|
||||||
opts: StitchOpts,
|
comptime opts: StitchOpts,
|
||||||
) ![]const u8 {
|
) !StitchReturn(opts) {
|
||||||
if (base.len == 0 or isComleteHTTPUrl(path)) {
|
if (base.len == 0 or isComleteHTTPUrl(path)) {
|
||||||
|
if (comptime opts.null_terminated) {
|
||||||
|
return allocator.dupeZ(u8, path);
|
||||||
|
}
|
||||||
|
|
||||||
if (opts.alloc == .always) {
|
if (opts.alloc == .always) {
|
||||||
return allocator.dupe(u8, path);
|
return allocator.dupe(u8, path);
|
||||||
}
|
}
|
||||||
@@ -112,6 +117,10 @@ pub const URL = struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (path.len == 0) {
|
if (path.len == 0) {
|
||||||
|
if (comptime opts.null_terminated) {
|
||||||
|
return allocator.dupeZ(u8, base);
|
||||||
|
}
|
||||||
|
|
||||||
if (opts.alloc == .always) {
|
if (opts.alloc == .always) {
|
||||||
return allocator.dupe(u8, base);
|
return allocator.dupe(u8, base);
|
||||||
}
|
}
|
||||||
@@ -126,6 +135,9 @@ pub const URL = struct {
|
|||||||
|
|
||||||
if (path[0] == '/') {
|
if (path[0] == '/') {
|
||||||
const pos = std.mem.indexOfScalarPos(u8, base, protocol_end, '/') orelse base.len;
|
const pos = std.mem.indexOfScalarPos(u8, base, protocol_end, '/') orelse base.len;
|
||||||
|
if (comptime opts.null_terminated) {
|
||||||
|
return std.fmt.allocPrintZ(allocator, "{s}{s}", .{ base[0..pos], path });
|
||||||
|
}
|
||||||
return std.fmt.allocPrint(allocator, "{s}{s}", .{ base[0..pos], path });
|
return std.fmt.allocPrint(allocator, "{s}{s}", .{ base[0..pos], path });
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -134,17 +146,22 @@ pub const URL = struct {
|
|||||||
normalized_base = base[0 .. pos + protocol_end];
|
normalized_base = base[0 .. pos + protocol_end];
|
||||||
}
|
}
|
||||||
|
|
||||||
var out = try std.fmt.allocPrint(allocator, "{s}/{s}", .{
|
// that extra spacelets us handle opts.null_terminated. If we end up
|
||||||
|
// not trimming anything, it ensures that we have 1 extra byte to store
|
||||||
|
// our null terminator.
|
||||||
|
var out = try std.fmt.allocPrint(allocator, "{s}/{s}" ++ if (comptime opts.null_terminated) " " else "", .{
|
||||||
normalized_base,
|
normalized_base,
|
||||||
path,
|
path,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const end = if (comptime opts.null_terminated) out.len - 1 else out.len;
|
||||||
|
|
||||||
// Strip out ./ and ../. This is done in-place, because doing so can
|
// Strip out ./ and ../. This is done in-place, because doing so can
|
||||||
// only ever make `out` smaller. After this, `out` cannot be freed by
|
// only ever make `out` smaller. After this, `out` cannot be freed by
|
||||||
// an allocator, which is ok, because we expect allocator to be an arena.
|
// an allocator, which is ok, because we expect allocator to be an arena.
|
||||||
var in_i: usize = 0;
|
var in_i: usize = 0;
|
||||||
var out_i: usize = 0;
|
var out_i: usize = 0;
|
||||||
while (in_i < out.len) {
|
while (in_i < end) {
|
||||||
if (std.mem.startsWith(u8, out[in_i..], "./")) {
|
if (std.mem.startsWith(u8, out[in_i..], "./")) {
|
||||||
in_i += 2;
|
in_i += 2;
|
||||||
continue;
|
continue;
|
||||||
@@ -165,9 +182,19 @@ pub const URL = struct {
|
|||||||
in_i += 1;
|
in_i += 1;
|
||||||
out_i += 1;
|
out_i += 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (comptime opts.null_terminated) {
|
||||||
|
// we always have an extra space
|
||||||
|
out[out_i] = 0;
|
||||||
|
return out[0..out_i :0];
|
||||||
|
}
|
||||||
return out[0..out_i];
|
return out[0..out_i];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn StitchReturn(comptime opts: StitchOpts) type {
|
||||||
|
return if (opts.null_terminated) [:0]const u8 else []const u8;
|
||||||
|
}
|
||||||
|
|
||||||
pub fn concatQueryString(arena: Allocator, url: []const u8, query_string: []const u8) ![]const u8 {
|
pub fn concatQueryString(arena: Allocator, url: []const u8, query_string: []const u8) ![]const u8 {
|
||||||
std.debug.assert(url.len != 0);
|
std.debug.assert(url.len != 0);
|
||||||
|
|
||||||
@@ -362,6 +389,124 @@ test "URL: stitch" {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
test "URL: stitch null terminated" {
|
||||||
|
defer testing.reset();
|
||||||
|
|
||||||
|
const Case = struct {
|
||||||
|
base: []const u8,
|
||||||
|
path: []const u8,
|
||||||
|
expected: []const u8,
|
||||||
|
};
|
||||||
|
|
||||||
|
const cases = [_]Case{
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/xyz/abc/123",
|
||||||
|
.path = "something.js",
|
||||||
|
.expected = "https://lightpanda.io/xyz/abc/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/xyz/abc/123",
|
||||||
|
.path = "/something.js",
|
||||||
|
.expected = "https://lightpanda.io/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/",
|
||||||
|
.path = "something.js",
|
||||||
|
.expected = "https://lightpanda.io/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/",
|
||||||
|
.path = "/something.js",
|
||||||
|
.expected = "https://lightpanda.io/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io",
|
||||||
|
.path = "something.js",
|
||||||
|
.expected = "https://lightpanda.io/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io",
|
||||||
|
.path = "abc/something.js",
|
||||||
|
.expected = "https://lightpanda.io/abc/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/nested",
|
||||||
|
.path = "abc/something.js",
|
||||||
|
.expected = "https://lightpanda.io/abc/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/nested/",
|
||||||
|
.path = "abc/something.js",
|
||||||
|
.expected = "https://lightpanda.io/nested/abc/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/nested/",
|
||||||
|
.path = "/abc/something.js",
|
||||||
|
.expected = "https://lightpanda.io/abc/something.js",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/nested/",
|
||||||
|
.path = "http://www.github.com/lightpanda-io/",
|
||||||
|
.expected = "http://www.github.com/lightpanda-io/",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/nested/",
|
||||||
|
.path = "",
|
||||||
|
.expected = "https://lightpanda.io/nested/",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/abc/aaa",
|
||||||
|
.path = "./hello/./world",
|
||||||
|
.expected = "https://lightpanda.io/abc/hello/world",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/abc/aaa/",
|
||||||
|
.path = "../hello",
|
||||||
|
.expected = "https://lightpanda.io/abc/hello",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/abc/aaa",
|
||||||
|
.path = "../hello",
|
||||||
|
.expected = "https://lightpanda.io/hello",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "https://lightpanda.io/abc/aaa/",
|
||||||
|
.path = "./.././.././hello",
|
||||||
|
.expected = "https://lightpanda.io/hello",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "some/page",
|
||||||
|
.path = "hello",
|
||||||
|
.expected = "some/hello",
|
||||||
|
},
|
||||||
|
.{
|
||||||
|
.base = "some/page/",
|
||||||
|
.path = "hello",
|
||||||
|
.expected = "some/page/hello",
|
||||||
|
},
|
||||||
|
|
||||||
|
.{
|
||||||
|
.base = "some/page/other",
|
||||||
|
.path = ".././hello",
|
||||||
|
.expected = "some/hello",
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
for (cases) |case| {
|
||||||
|
const result = try stitch(testing.arena_allocator, case.path, case.base, .{ .null_terminated = true });
|
||||||
|
try testing.expectString(case.expected, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
try testing.expectError(
|
||||||
|
error.InvalidURL,
|
||||||
|
stitch(testing.arena_allocator, "../hello", "https://lightpanda.io/", .{ .null_terminated = true }),
|
||||||
|
);
|
||||||
|
try testing.expectError(
|
||||||
|
error.InvalidURL,
|
||||||
|
stitch(testing.arena_allocator, "../hello", "http://lightpanda.io/", .{ .null_terminated = true }),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
test "URL: concatQueryString" {
|
test "URL: concatQueryString" {
|
||||||
defer testing.reset();
|
defer testing.reset();
|
||||||
const arena = testing.arena_allocator;
|
const arena = testing.arena_allocator;
|
||||||
|
|||||||
1
vendor/curl
vendored
Submodule
1
vendor/curl
vendored
Submodule
Submodule vendor/curl added at 6845533e24
5
vendor/lightpanda/nghttp2/nghttp2ver.h
vendored
Normal file
5
vendor/lightpanda/nghttp2/nghttp2ver.h
vendored
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
#ifndef NGHTTP2VER_H
|
||||||
|
#define NGHTTP2VER_H
|
||||||
|
#define NGHTTP2_VERSION "1.66"
|
||||||
|
#define NGHTTP2_VERSION_NUM 0x014300
|
||||||
|
#endif /* NGHTTP2VER_H */
|
||||||
1
vendor/mbedtls
vendored
Submodule
1
vendor/mbedtls
vendored
Submodule
Submodule vendor/mbedtls added at c765c831e5
1
vendor/nghttp2
vendored
Submodule
1
vendor/nghttp2
vendored
Submodule
Submodule vendor/nghttp2 added at ac22e0efe3
1
vendor/zlib
vendored
Submodule
1
vendor/zlib
vendored
Submodule
Submodule vendor/zlib added at 51b7f2abda
Reference in New Issue
Block a user