mirror of
https://github.com/lightpanda-io/browser.git
synced 2025-10-30 15:41:48 +00:00
http: add full async client
This commit is contained in:
318
src/http/async/std/http.zig
Normal file
318
src/http/async/std/http.zig
Normal file
@@ -0,0 +1,318 @@
|
||||
pub const Client = @import("http/Client.zig");
|
||||
pub const Server = @import("http/Server.zig");
|
||||
pub const protocol = @import("http/protocol.zig");
|
||||
pub const HeadParser = std.http.HeadParser;
|
||||
pub const ChunkParser = std.http.ChunkParser;
|
||||
pub const HeaderIterator = std.http.HeaderIterator;
|
||||
|
||||
/// HTTP protocol versions this implementation can speak/emit.
pub const Version = enum {
    @"HTTP/1.0",
    @"HTTP/1.1",
};
|
||||
|
||||
/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
///
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
///
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
///
/// The enum tag is the method name's bytes packed into a `u64`, which lets
/// nonstandard methods be represented via the non-exhaustive `_` arm without
/// any allocation.
pub const Method = enum(u64) {
    GET = parse("GET"),
    HEAD = parse("HEAD"),
    POST = parse("POST"),
    PUT = parse("PUT"),
    DELETE = parse("DELETE"),
    CONNECT = parse("CONNECT"),
    OPTIONS = parse("OPTIONS"),
    TRACE = parse("TRACE"),
    PATCH = parse("PATCH"),

    _,

    /// Converts `s` into a value that may be used as a `Method` tag.
    /// Only the first 8 bytes (`@sizeOf(u64)`) are significant; longer names
    /// are silently truncated, so two methods sharing an 8-byte prefix would
    /// collide.
    pub fn parse(s: []const u8) u64 {
        var x: u64 = 0;
        // Copy at most @sizeOf(u64) bytes; the rest of `x` stays zero so
        // shorter names are zero-padded.
        const len = @min(s.len, @sizeOf(@TypeOf(x)));
        @memcpy(std.mem.asBytes(&x)[0..len], s[0..len]);
        return x;
    }

    /// Writes the method name (the non-zero prefix of the packed tag bytes)
    /// to `w`, which must provide `writeAll`.
    pub fn write(self: Method, w: anytype) !void {
        const bytes = std.mem.asBytes(&@intFromEnum(self));
        // Stop at the first zero byte: zero padding was added by `parse`.
        const str = std.mem.sliceTo(bytes, 0);
        try w.writeAll(str);
    }

    /// Returns true if a request of this method is allowed to have a body
    /// Actual behavior from servers may vary and should still be checked
    pub fn requestHasBody(self: Method) bool {
        return switch (self) {
            .POST, .PUT, .PATCH => true,
            .GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
            // Unknown/custom methods: permissively assume a body is allowed.
            else => true,
        };
    }

    /// Returns true if a response to this method is allowed to have a body
    /// Actual behavior from clients may vary and should still be checked
    pub fn responseHasBody(self: Method) bool {
        return switch (self) {
            .GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
            .HEAD, .PUT, .TRACE => false,
            // Unknown/custom methods: permissively assume a body is allowed.
            else => true,
        };
    }

    /// An HTTP method is safe if it doesn't alter the state of the server.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
    pub fn safe(self: Method) bool {
        return switch (self) {
            .GET, .HEAD, .OPTIONS, .TRACE => true,
            .POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
            // Unknown/custom methods: conservatively treat as unsafe.
            else => false,
        };
    }

    /// An HTTP method is idempotent if an identical request can be made once or several times in a row with the same effect while leaving the server in the same state.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
    pub fn idempotent(self: Method) bool {
        return switch (self) {
            .GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
            .CONNECT, .POST, .PATCH => false,
            // Unknown/custom methods: conservatively treat as non-idempotent.
            else => false,
        };
    }

    /// A cacheable response is an HTTP response that can be cached, that is stored to be retrieved and used later, saving a new request to the server.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
    pub fn cacheable(self: Method) bool {
        return switch (self) {
            .GET, .HEAD => true,
            .POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
            // Unknown/custom methods: conservatively treat as non-cacheable.
            else => false,
        };
    }
};
|
||||
|
||||
/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
///
/// Non-exhaustive (`_`): any other code in 0..1023 can be stored via
/// `@enumFromInt`; such codes have no canonical reason phrase here.
pub const Status = enum(u10) {
    @"continue" = 100, // RFC7231, Section 6.2.1
    switching_protocols = 101, // RFC7231, Section 6.2.2
    processing = 102, // RFC2518
    early_hints = 103, // RFC8297

    ok = 200, // RFC7231, Section 6.3.1
    created = 201, // RFC7231, Section 6.3.2
    accepted = 202, // RFC7231, Section 6.3.3
    non_authoritative_info = 203, // RFC7231, Section 6.3.4
    no_content = 204, // RFC7231, Section 6.3.5
    reset_content = 205, // RFC7231, Section 6.3.6
    partial_content = 206, // RFC7233, Section 4.1
    multi_status = 207, // RFC4918
    already_reported = 208, // RFC5842
    im_used = 226, // RFC3229

    multiple_choice = 300, // RFC7231, Section 6.4.1
    moved_permanently = 301, // RFC7231, Section 6.4.2
    found = 302, // RFC7231, Section 6.4.3
    see_other = 303, // RFC7231, Section 6.4.4
    not_modified = 304, // RFC7232, Section 4.1
    use_proxy = 305, // RFC7231, Section 6.4.5
    temporary_redirect = 307, // RFC7231, Section 6.4.7
    permanent_redirect = 308, // RFC7538

    bad_request = 400, // RFC7231, Section 6.5.1
    unauthorized = 401, // RFC7235, Section 3.1
    payment_required = 402, // RFC7231, Section 6.5.2
    forbidden = 403, // RFC7231, Section 6.5.3
    not_found = 404, // RFC7231, Section 6.5.4
    method_not_allowed = 405, // RFC7231, Section 6.5.5
    not_acceptable = 406, // RFC7231, Section 6.5.6
    proxy_auth_required = 407, // RFC7235, Section 3.2
    request_timeout = 408, // RFC7231, Section 6.5.7
    conflict = 409, // RFC7231, Section 6.5.8
    gone = 410, // RFC7231, Section 6.5.9
    length_required = 411, // RFC7231, Section 6.5.10
    precondition_failed = 412, // RFC7232, Section 4.2][RFC8144, Section 3.2
    payload_too_large = 413, // RFC7231, Section 6.5.11
    uri_too_long = 414, // RFC7231, Section 6.5.12
    unsupported_media_type = 415, // RFC7231, Section 6.5.13][RFC7694, Section 3
    range_not_satisfiable = 416, // RFC7233, Section 4.4
    expectation_failed = 417, // RFC7231, Section 6.5.14
    teapot = 418, // RFC 7168, 2.3.3
    misdirected_request = 421, // RFC7540, Section 9.1.2
    unprocessable_entity = 422, // RFC4918
    locked = 423, // RFC4918
    failed_dependency = 424, // RFC4918
    too_early = 425, // RFC8470
    upgrade_required = 426, // RFC7231, Section 6.5.15
    precondition_required = 428, // RFC6585
    too_many_requests = 429, // RFC6585
    request_header_fields_too_large = 431, // RFC6585
    unavailable_for_legal_reasons = 451, // RFC7725

    internal_server_error = 500, // RFC7231, Section 6.6.1
    not_implemented = 501, // RFC7231, Section 6.6.2
    bad_gateway = 502, // RFC7231, Section 6.6.3
    service_unavailable = 503, // RFC7231, Section 6.6.4
    gateway_timeout = 504, // RFC7231, Section 6.6.5
    http_version_not_supported = 505, // RFC7231, Section 6.6.6
    variant_also_negotiates = 506, // RFC2295
    insufficient_storage = 507, // RFC4918
    loop_detected = 508, // RFC5842
    not_extended = 510, // RFC2774
    network_authentication_required = 511, // RFC6585

    _,

    /// Returns the canonical reason phrase for a known status code, or
    /// `null` for codes not enumerated above.
    pub fn phrase(self: Status) ?[]const u8 {
        return switch (self) {
            // 1xx statuses
            .@"continue" => "Continue",
            .switching_protocols => "Switching Protocols",
            .processing => "Processing",
            .early_hints => "Early Hints",

            // 2xx statuses
            .ok => "OK",
            .created => "Created",
            .accepted => "Accepted",
            .non_authoritative_info => "Non-Authoritative Information",
            .no_content => "No Content",
            .reset_content => "Reset Content",
            .partial_content => "Partial Content",
            .multi_status => "Multi-Status",
            .already_reported => "Already Reported",
            .im_used => "IM Used",

            // 3xx statuses
            .multiple_choice => "Multiple Choice",
            .moved_permanently => "Moved Permanently",
            .found => "Found",
            .see_other => "See Other",
            .not_modified => "Not Modified",
            .use_proxy => "Use Proxy",
            .temporary_redirect => "Temporary Redirect",
            .permanent_redirect => "Permanent Redirect",

            // 4xx statuses
            .bad_request => "Bad Request",
            .unauthorized => "Unauthorized",
            .payment_required => "Payment Required",
            .forbidden => "Forbidden",
            .not_found => "Not Found",
            .method_not_allowed => "Method Not Allowed",
            .not_acceptable => "Not Acceptable",
            .proxy_auth_required => "Proxy Authentication Required",
            .request_timeout => "Request Timeout",
            .conflict => "Conflict",
            .gone => "Gone",
            .length_required => "Length Required",
            .precondition_failed => "Precondition Failed",
            .payload_too_large => "Payload Too Large",
            .uri_too_long => "URI Too Long",
            .unsupported_media_type => "Unsupported Media Type",
            .range_not_satisfiable => "Range Not Satisfiable",
            .expectation_failed => "Expectation Failed",
            .teapot => "I'm a teapot",
            .misdirected_request => "Misdirected Request",
            .unprocessable_entity => "Unprocessable Entity",
            .locked => "Locked",
            .failed_dependency => "Failed Dependency",
            .too_early => "Too Early",
            .upgrade_required => "Upgrade Required",
            .precondition_required => "Precondition Required",
            .too_many_requests => "Too Many Requests",
            .request_header_fields_too_large => "Request Header Fields Too Large",
            .unavailable_for_legal_reasons => "Unavailable For Legal Reasons",

            // 5xx statuses
            .internal_server_error => "Internal Server Error",
            .not_implemented => "Not Implemented",
            .bad_gateway => "Bad Gateway",
            .service_unavailable => "Service Unavailable",
            .gateway_timeout => "Gateway Timeout",
            .http_version_not_supported => "HTTP Version Not Supported",
            .variant_also_negotiates => "Variant Also Negotiates",
            .insufficient_storage => "Insufficient Storage",
            .loop_detected => "Loop Detected",
            .not_extended => "Not Extended",
            .network_authentication_required => "Network Authentication Required",

            else => return null,
        };
    }

    /// The five standard response classes, keyed by the hundreds digit of
    /// the status code.
    pub const Class = enum {
        informational,
        success,
        redirect,
        client_error,
        server_error,
    };

    /// Classifies the numeric status code; anything outside 100..499 is
    /// reported as `.server_error` (including nonstandard codes < 100).
    pub fn class(self: Status) Class {
        return switch (@intFromEnum(self)) {
            100...199 => .informational,
            200...299 => .success,
            300...399 => .redirect,
            400...499 => .client_error,
            else => .server_error,
        };
    }

    test {
        try std.testing.expectEqualStrings("OK", Status.ok.phrase().?);
        try std.testing.expectEqualStrings("Not Found", Status.not_found.phrase().?);
    }

    test {
        try std.testing.expectEqual(Status.Class.success, Status.ok.class());
        try std.testing.expectEqual(Status.Class.client_error, Status.not_found.class());
    }
};
|
||||
|
||||
/// Body framing mechanism from the Transfer-Encoding header.
pub const TransferEncoding = enum {
    chunked,
    none,
    // compression is intentionally omitted here, as std.http.Client stores it as content-encoding
};
|
||||
|
||||
/// Recognized Content-Encoding header values (body compression schemes);
/// `@"x-compress"`/`@"x-gzip"` are the legacy aliases of compress/gzip.
pub const ContentEncoding = enum {
    identity,
    compress,
    @"x-compress",
    deflate,
    gzip,
    @"x-gzip",
    zstd,
};
|
||||
|
||||
/// Connection header semantics: reuse the connection or close after the
/// current exchange.
pub const Connection = enum {
    keep_alive,
    close,
};
|
||||
|
||||
/// A single HTTP header as a borrowed name/value pair; the struct does not
/// own the backing memory.
pub const Header = struct {
    name: []const u8,
    value: []const u8,
};
|
||||
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("std");
|
||||
|
||||
// Reference the public namespaces so their nested `test` blocks are
// compiled and run by `zig test`.
test {
    _ = Client;
    _ = Method;
    _ = Server;
    _ = Status;
}
|
||||
2545
src/http/async/std/http/Client.zig
Normal file
2545
src/http/async/std/http/Client.zig
Normal file
File diff suppressed because it is too large
Load Diff
1148
src/http/async/std/http/Server.zig
Normal file
1148
src/http/async/std/http/Server.zig
Normal file
File diff suppressed because it is too large
Load Diff
447
src/http/async/std/http/protocol.zig
Normal file
447
src/http/async/std/http/protocol.zig
Normal file
@@ -0,0 +1,447 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
|
||||
const assert = std.debug.assert;
|
||||
const use_vectors = builtin.zig_backend != .stage2_x86_64;
|
||||
|
||||
/// Parser state shared by header/trailer scanning and chunked-body decoding.
pub const State = enum {
    invalid,

    // Begin header and trailer parsing states.

    start,
    seen_n,
    seen_r,
    seen_rn,
    seen_rnr,
    finished,

    // Begin transfer-encoding: chunked parsing states.

    chunk_head_size,
    chunk_head_ext,
    chunk_head_r,
    chunk_data,
    chunk_data_suffix,
    chunk_data_suffix_r,

    /// Returns true if the parser is in a content state (ie. not waiting for more headers).
    pub fn isContent(self: State) bool {
        // Exhaustive switch: adding a new state forces this function to be
        // updated.
        return switch (self) {
            .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => false,
            .finished, .chunk_head_size, .chunk_head_ext, .chunk_head_r, .chunk_data, .chunk_data_suffix, .chunk_data_suffix_r => true,
        };
    }
};
|
||||
|
||||
/// Incremental parser for HTTP headers, trailers, and chunked bodies.
/// Wraps `std.http.HeadParser`/`std.http.ChunkParser`, accumulating raw
/// header bytes into a caller-provided buffer.
pub const HeadersParser = struct {
    state: State = .start,
    /// A fixed buffer of len `max_header_bytes`.
    /// Pointers into this buffer are not stable until after a message is complete.
    header_bytes_buffer: []u8,
    header_bytes_len: u32,
    next_chunk_length: u64,
    /// `false`: headers. `true`: trailers.
    done: bool,

    /// Initializes the parser with a provided buffer `buf`.
    pub fn init(buf: []u8) HeadersParser {
        return .{
            .header_bytes_buffer = buf,
            .header_bytes_len = 0,
            .done = false,
            .next_chunk_length = 0,
        };
    }

    /// Reinitialize the parser.
    /// Asserts the parser is in the "done" state.
    pub fn reset(hp: *HeadersParser) void {
        assert(hp.done);
        hp.* = .{
            .state = .start,
            .header_bytes_buffer = hp.header_bytes_buffer,
            .header_bytes_len = 0,
            .done = false,
            .next_chunk_length = 0,
        };
    }

    /// Returns the header bytes accumulated so far.
    pub fn get(hp: HeadersParser) []u8 {
        return hp.header_bytes_buffer[0..hp.header_bytes_len];
    }

    /// Feeds `bytes` to a `std.http.HeadParser` seeded from our state and
    /// returns how many bytes it consumed; our state is updated to match.
    /// Asserts (via `unreachable`) that we are in a header-scanning state.
    pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
        var hp: std.http.HeadParser = .{
            // Translate our State into the equivalent HeadParser state.
            .state = switch (r.state) {
                .start => .start,
                .seen_n => .seen_n,
                .seen_r => .seen_r,
                .seen_rn => .seen_rn,
                .seen_rnr => .seen_rnr,
                .finished => .finished,
                else => unreachable,
            },
        };
        const result = hp.feed(bytes);
        // Translate back; this switch is exhaustive over HeadParser.State.
        r.state = switch (hp.state) {
            .start => .start,
            .seen_n => .seen_n,
            .seen_r => .seen_r,
            .seen_rn => .seen_rn,
            .seen_rnr => .seen_rnr,
            .finished => .finished,
        };
        return @intCast(result);
    }

    /// Feeds `bytes` to a `std.http.ChunkParser` seeded from our state and
    /// chunk length, returning how many bytes it consumed; our state and
    /// `next_chunk_length` are updated to match.
    pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
        var cp: std.http.ChunkParser = .{
            .state = switch (r.state) {
                .chunk_head_size => .head_size,
                .chunk_head_ext => .head_ext,
                .chunk_head_r => .head_r,
                .chunk_data => .data,
                .chunk_data_suffix => .data_suffix,
                .chunk_data_suffix_r => .data_suffix_r,
                .invalid => .invalid,
                else => unreachable,
            },
            .chunk_len = r.next_chunk_length,
        };
        const result = cp.feed(bytes);
        r.state = switch (cp.state) {
            .head_size => .chunk_head_size,
            .head_ext => .chunk_head_ext,
            .head_r => .chunk_head_r,
            .data => .chunk_data,
            .data_suffix => .chunk_data_suffix,
            .data_suffix_r => .chunk_data_suffix_r,
            .invalid => .invalid,
        };
        r.next_chunk_length = cp.chunk_len;
        return @intCast(result);
    }

    /// Returns whether or not the parser has finished parsing a complete
    /// message. A message is only complete after the entire body has been read
    /// and any trailing headers have been parsed.
    pub fn isComplete(r: *HeadersParser) bool {
        return r.done and r.state == .finished;
    }

    pub const CheckCompleteHeadError = error{HttpHeadersOversize};

    /// Pushes `in` into the parser. Returns the number of bytes consumed by
    /// the header. Any header bytes are appended to `header_bytes_buffer`.
    /// Returns `error.HttpHeadersOversize` if the accumulated headers would
    /// exceed the buffer.
    pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32 {
        // Already past the headers: consume nothing.
        if (hp.state.isContent()) return 0;

        const i = hp.findHeadersEnd(in);
        const data = in[0..i];
        if (hp.header_bytes_len + data.len > hp.header_bytes_buffer.len)
            return error.HttpHeadersOversize;

        @memcpy(hp.header_bytes_buffer[hp.header_bytes_len..][0..data.len], data);
        hp.header_bytes_len += @intCast(data.len);

        return i;
    }

    pub const ReadError = error{
        HttpChunkInvalid,
    };

    /// Reads the body of the message into `buffer`. Returns the number of
    /// bytes placed in the buffer.
    ///
    /// If `skip` is true, the buffer will be unused and the body will be skipped.
    ///
    /// See `std.http.Client.Connection` for an example of `conn`.
    pub fn read(r: *HeadersParser, conn: anytype, buffer: []u8, skip: bool) !usize {
        assert(r.state.isContent());
        if (r.done) return 0;

        var out_index: usize = 0;
        while (true) {
            switch (r.state) {
                .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => unreachable,
                // Non-chunked body: `next_chunk_length` holds remaining
                // content length.
                .finished => {
                    const data_avail = r.next_chunk_length;

                    if (skip) {
                        try conn.fill();

                        const nread = @min(conn.peek().len, data_avail);
                        conn.drop(@intCast(nread));
                        r.next_chunk_length -= nread;

                        if (r.next_chunk_length == 0 or nread == 0) r.done = true;

                        return out_index;
                    } else if (out_index < buffer.len) {
                        const out_avail = buffer.len - out_index;

                        const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
                        const nread = try conn.read(buffer[0..can_read]);
                        r.next_chunk_length -= nread;

                        if (r.next_chunk_length == 0 or nread == 0) r.done = true;

                        return nread;
                    } else {
                        return out_index;
                    }
                },
                // Between chunk payloads: parse chunk size line / suffix.
                .chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
                    try conn.fill();

                    const i = r.findChunkedLen(conn.peek());
                    conn.drop(@intCast(i));

                    switch (r.state) {
                        .invalid => return error.HttpChunkInvalid,
                        .chunk_data => if (r.next_chunk_length == 0) {
                            // Zero-length chunk terminates the body.
                            if (std.mem.eql(u8, conn.peek(), "\r\n")) {
                                r.state = .finished;
                                conn.drop(2);
                            } else {
                                // The trailer section is formatted identically
                                // to the header section.
                                r.state = .seen_rn;
                            }
                            r.done = true;

                            return out_index;
                        },
                        else => return out_index,
                    }

                    // Nonzero chunk header parsed: fall through to read its
                    // data on the next loop iteration.
                    continue;
                },
                // Inside a chunk payload.
                .chunk_data => {
                    const data_avail = r.next_chunk_length;
                    const out_avail = buffer.len - out_index;

                    if (skip) {
                        try conn.fill();

                        const nread = @min(conn.peek().len, data_avail);
                        conn.drop(@intCast(nread));
                        r.next_chunk_length -= nread;
                    } else if (out_avail > 0) {
                        const can_read: usize = @intCast(@min(data_avail, out_avail));
                        const nread = try conn.read(buffer[out_index..][0..can_read]);
                        r.next_chunk_length -= nread;
                        out_index += nread;
                    }

                    if (r.next_chunk_length == 0) {
                        r.state = .chunk_data_suffix;
                        continue;
                    }

                    return out_index;
                },
            }
        }
    }
};
|
||||
|
||||
/// Reinterprets two bytes as a native-endian u16.
inline fn int16(bytes: *const [2]u8) u16 {
    return @bitCast(bytes.*);
}
|
||||
|
||||
/// Reinterprets three bytes as a native-endian u24.
inline fn int24(bytes: *const [3]u8) u24 {
    return @bitCast(bytes.*);
}
|
||||
|
||||
/// Reinterprets four bytes as a native-endian u32.
inline fn int32(bytes: *const [4]u8) u32 {
    return @bitCast(bytes.*);
}
|
||||
|
||||
/// Extracts the trailing (last-in-memory-order) bytes of `x` as a `T`:
/// on little-endian hosts those are the high bits, on big-endian the low
/// bits, so the result is independent of host endianness for byte data.
/// Uses the file-level `builtin` import rather than re-importing it inline,
/// for consistency with the rest of this file.
inline fn intShift(comptime T: type, x: anytype) T {
    switch (builtin.cpu.arch.endian()) {
        .little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
        .big => return @as(T, @truncate(x)),
    }
}
|
||||
|
||||
/// A buffered (and peekable) Connection.
/// Test-only stand-in for `std.http.Client.Connection`: wraps a
/// FixedBufferStream and provides the fill/peek/drop/read interface that
/// `HeadersParser.read` expects of `conn`.
const MockBufferedConnection = struct {
    pub const buffer_size = 0x2000;

    conn: std.io.FixedBufferStream([]const u8),
    buf: [buffer_size]u8 = undefined,
    start: u16 = 0,
    end: u16 = 0,

    /// Refills the internal buffer from the underlying stream if it is
    /// empty; errors with EndOfStream when the source is exhausted.
    pub fn fill(conn: *MockBufferedConnection) ReadError!void {
        if (conn.end != conn.start) return;

        const nread = try conn.conn.read(conn.buf[0..]);
        if (nread == 0) return error.EndOfStream;
        conn.start = 0;
        conn.end = @as(u16, @truncate(nread));
    }

    /// Returns the buffered-but-unconsumed bytes without consuming them.
    pub fn peek(conn: *MockBufferedConnection) []const u8 {
        return conn.buf[conn.start..conn.end];
    }

    /// Consumes `num` buffered bytes. No bounds check: callers must not
    /// drop more than `peek().len`.
    pub fn drop(conn: *MockBufferedConnection, num: u16) void {
        conn.start += num;
    }

    /// Reads at least `len` bytes into `buffer` (blocking on `fill`),
    /// bypassing the buffer for reads larger than it.
    pub fn readAtLeast(conn: *MockBufferedConnection, buffer: []u8, len: usize) ReadError!usize {
        var out_index: u16 = 0;
        while (out_index < len) {
            const available = conn.end - conn.start;
            const left = buffer.len - out_index;

            if (available > 0) {
                const can_read = @as(u16, @truncate(@min(available, left)));

                @memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]);
                out_index += can_read;
                conn.start += can_read;

                continue;
            }

            if (left > conn.buf.len) {
                // skip the buffer if the output is large enough
                return conn.conn.read(buffer[out_index..]);
            }

            try conn.fill();
        }

        return out_index;
    }

    /// Reads at least one byte into `buffer`.
    pub fn read(conn: *MockBufferedConnection, buffer: []u8) ReadError!usize {
        return conn.readAtLeast(buffer, 1);
    }

    pub const ReadError = std.io.FixedBufferStream([]const u8).ReadError || error{EndOfStream};
    pub const Reader = std.io.Reader(*MockBufferedConnection, ReadError, read);

    pub fn reader(conn: *MockBufferedConnection) Reader {
        return Reader{ .context = conn };
    }

    /// Writes pass straight through to the underlying stream (unbuffered).
    pub fn writeAll(conn: *MockBufferedConnection, buffer: []const u8) WriteError!void {
        return conn.conn.writeAll(buffer);
    }

    pub fn write(conn: *MockBufferedConnection, buffer: []const u8) WriteError!usize {
        return conn.conn.write(buffer);
    }

    pub const WriteError = std.io.FixedBufferStream([]const u8).WriteError;
    pub const Writer = std.io.Writer(*MockBufferedConnection, WriteError, write);

    pub fn writer(conn: *MockBufferedConnection) Writer {
        return Writer{ .context = conn };
    }
};
|
||||
|
||||
// Exercises the Content-Length (non-chunked) read path.
test "HeadersParser.read length" {
    // mock BufferedConnection for read
    var headers_buf: [256]u8 = undefined;

    var r = HeadersParser.init(&headers_buf);
    const data = "GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\nHello";

    var conn: MockBufferedConnection = .{
        .conn = std.io.fixedBufferStream(data),
    };

    while (true) { // read headers
        try conn.fill();

        const nchecked = try r.checkCompleteHead(conn.peek());
        conn.drop(@intCast(nchecked));

        if (r.state.isContent()) break;
    }

    var buf: [8]u8 = undefined;

    // Content length is normally set from the parsed headers by the caller.
    r.next_chunk_length = 5;
    const len = try r.read(&conn, &buf, false);
    try std.testing.expectEqual(@as(usize, 5), len);
    try std.testing.expectEqualStrings("Hello", buf[0..len]);

    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.get());
}

// Exercises the chunked-body read path (no trailers).
test "HeadersParser.read chunked" {
    // mock BufferedConnection for read

    var headers_buf: [256]u8 = undefined;
    var r = HeadersParser.init(&headers_buf);
    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\n\r\n";

    var conn: MockBufferedConnection = .{
        .conn = std.io.fixedBufferStream(data),
    };

    while (true) { // read headers
        try conn.fill();

        const nchecked = try r.checkCompleteHead(conn.peek());
        conn.drop(@intCast(nchecked));

        if (r.state.isContent()) break;
    }
    var buf: [8]u8 = undefined;

    // Chunked transfer is normally selected by the caller after header parse.
    r.state = .chunk_head_size;
    const len = try r.read(&conn, &buf, false);
    try std.testing.expectEqual(@as(usize, 5), len);
    try std.testing.expectEqualStrings("Hello", buf[0..len]);

    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.get());
}

// Exercises chunked body followed by a trailer section, which is parsed
// with the same header machinery and appended to `get()`.
test "HeadersParser.read chunked trailer" {
    // mock BufferedConnection for read

    var headers_buf: [256]u8 = undefined;
    var r = HeadersParser.init(&headers_buf);
    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\nContent-Type: text/plain\r\n\r\n";

    var conn: MockBufferedConnection = .{
        .conn = std.io.fixedBufferStream(data),
    };

    while (true) { // read headers
        try conn.fill();

        const nchecked = try r.checkCompleteHead(conn.peek());
        conn.drop(@intCast(nchecked));

        if (r.state.isContent()) break;
    }
    var buf: [8]u8 = undefined;

    r.state = .chunk_head_size;
    const len = try r.read(&conn, &buf, false);
    try std.testing.expectEqual(@as(usize, 5), len);
    try std.testing.expectEqualStrings("Hello", buf[0..len]);

    while (true) { // read trailers (same format as headers)
        try conn.fill();

        const nchecked = try r.checkCompleteHead(conn.peek());
        conn.drop(@intCast(nchecked));

        if (r.state.isContent()) break;
    }

    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.get());
}
|
||||
2050
src/http/async/std/net.zig
Normal file
2050
src/http/async/std/net.zig
Normal file
File diff suppressed because it is too large
Load Diff
335
src/http/async/std/net/test.zig
Normal file
335
src/http/async/std/net/test.zig
Normal file
@@ -0,0 +1,335 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const net = std.net;
|
||||
const mem = std.mem;
|
||||
const testing = std.testing;
|
||||
|
||||
// Ensures Address parsing/formatting works fully at comptime.
test "parse and render IP addresses at comptime" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;
    comptime {
        var ipAddrBuffer: [16]u8 = undefined;
        // Parses IPv6 at comptime
        const ipv6addr = net.Address.parseIp("::1", 0) catch unreachable;
        // `const`: never mutated (an unmutated `var` is a compile error on
        // current Zig).
        const ipv6 = std.fmt.bufPrint(ipAddrBuffer[0..], "{}", .{ipv6addr}) catch unreachable;
        try std.testing.expect(std.mem.eql(u8, "::1", ipv6[1 .. ipv6.len - 3]));

        // Parses IPv4 at comptime
        const ipv4addr = net.Address.parseIp("127.0.0.1", 0) catch unreachable;
        const ipv4 = std.fmt.bufPrint(ipAddrBuffer[0..], "{}", .{ipv4addr}) catch unreachable;
        try std.testing.expect(std.mem.eql(u8, "127.0.0.1", ipv4[0 .. ipv4.len - 2]));

        // Returns error for invalid IP addresses at comptime
        try testing.expectError(error.InvalidIPAddressFormat, net.Address.parseIp("::123.123.123.123", 0));
        try testing.expectError(error.InvalidIPAddressFormat, net.Address.parseIp("127.01.0.1", 0));
        try testing.expectError(error.InvalidIPAddressFormat, net.Address.resolveIp("::123.123.123.123", 0));
        try testing.expectError(error.InvalidIPAddressFormat, net.Address.resolveIp("127.01.0.1", 0));
    }
}
|
||||
|
||||
// Round-trips a table of IPv6 strings through parse/format and checks the
// canonical rendering; also probes the error paths of parseIp6/resolveIp6.
test "parse and render IPv6 addresses" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    var buffer: [100]u8 = undefined;
    const ips = [_][]const u8{
        "FF01:0:0:0:0:0:0:FB",
        "FF01::Fb",
        "::1",
        "::",
        "1::",
        "2001:db8::",
        "::1234:5678",
        "2001:db8::1234:5678",
        "FF01::FB%1234",
        "::ffff:123.5.123.5",
    };
    const printed = [_][]const u8{
        "ff01::fb",
        "ff01::fb",
        "::1",
        "::",
        "1::",
        "2001:db8::",
        "::1234:5678",
        "2001:db8::1234:5678",
        "ff01::fb",
        "::ffff:123.5.123.5",
    };
    for (ips, 0..) |ip, i| {
        const addr = net.Address.parseIp6(ip, 0) catch unreachable;
        // `const`: never mutated (an unmutated `var` is a compile error on
        // current Zig).
        const newIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
        try std.testing.expect(std.mem.eql(u8, printed[i], newIp[1 .. newIp.len - 3]));

        if (builtin.os.tag == .linux) {
            const addr_via_resolve = net.Address.resolveIp6(ip, 0) catch unreachable;
            const newResolvedIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr_via_resolve}) catch unreachable;
            try std.testing.expect(std.mem.eql(u8, printed[i], newResolvedIp[1 .. newResolvedIp.len - 3]));
        }
    }

    try testing.expectError(error.InvalidCharacter, net.Address.parseIp6(":::", 0));
    try testing.expectError(error.Overflow, net.Address.parseIp6("FF001::FB", 0));
    try testing.expectError(error.InvalidCharacter, net.Address.parseIp6("FF01::Fb:zig", 0));
    try testing.expectError(error.InvalidEnd, net.Address.parseIp6("FF01:0:0:0:0:0:0:FB:", 0));
    try testing.expectError(error.Incomplete, net.Address.parseIp6("FF01:", 0));
    try testing.expectError(error.InvalidIpv4Mapping, net.Address.parseIp6("::123.123.123.123", 0));
    try testing.expectError(error.Incomplete, net.Address.parseIp6("1", 0));
    // TODO Make this test pass on other operating systems.
    if (builtin.os.tag == .linux or comptime builtin.os.tag.isDarwin()) {
        try testing.expectError(error.Incomplete, net.Address.resolveIp6("ff01::fb%", 0));
        try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%wlp3s0s0s0s0s0s0s0s0", 0));
        try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%12345678901234", 0));
    }
}
|
||||
|
||||
// A syntactically valid scope id that names no interface should fail at
// resolution time, not parse time.
test "invalid but parseable IPv6 scope ids" {
    if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) {
        // Currently, resolveIp6 with alphanumerical scope IDs only works on Linux.
        // TODO Make this test pass on other operating systems.
        return error.SkipZigTest;
    }

    try testing.expectError(error.InterfaceNotFound, net.Address.resolveIp6("ff01::fb%123s45678901234", 0));
}
|
||||
|
||||
// Round-trips a table of IPv4 strings through parse/format and probes the
// error paths of parseIp4.
test "parse and render IPv4 addresses" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    var buffer: [18]u8 = undefined;
    for ([_][]const u8{
        "0.0.0.0",
        "255.255.255.255",
        "1.2.3.4",
        "123.255.0.91",
        "127.0.0.1",
    }) |ip| {
        const addr = net.Address.parseIp4(ip, 0) catch unreachable;
        // `const`: never mutated (an unmutated `var` is a compile error on
        // current Zig).
        const newIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
        try std.testing.expect(std.mem.eql(u8, ip, newIp[0 .. newIp.len - 2]));
    }

    try testing.expectError(error.Overflow, net.Address.parseIp4("256.0.0.1", 0));
    try testing.expectError(error.InvalidCharacter, net.Address.parseIp4("x.0.0.1", 0));
    try testing.expectError(error.InvalidEnd, net.Address.parseIp4("127.0.0.1.1", 0));
    try testing.expectError(error.Incomplete, net.Address.parseIp4("127.0.0.", 0));
    try testing.expectError(error.InvalidCharacter, net.Address.parseIp4("100..0.1", 0));
    try testing.expectError(error.NonCanonical, net.Address.parseIp4("127.01.0.1", 0));
}
|
||||
|
||||
test "parse and render UNIX addresses" {
    // UNIX-domain sockets are unavailable on WASI and some other targets.
    if (builtin.os.tag == .wasi) return error.SkipZigTest;
    if (!net.has_unix_sockets) return error.SkipZigTest;

    // A UNIX address renders as its filesystem path, verbatim.
    const path = "/tmp/testpath";
    const addr = net.Address.initUnix(path) catch unreachable;
    var render_buf: [14]u8 = undefined;
    const rendered = std.fmt.bufPrint(render_buf[0..], "{}", .{addr}) catch unreachable;
    try std.testing.expectEqualSlices(u8, path, rendered);

    // A path longer than the sockaddr path field must be rejected.
    const oversized_path = [_]u8{'a'} ** 200;
    try testing.expectError(error.NameTooLong, net.Address.initUnix(oversized_path[0..]));
}
|
||||
|
||||
// Verifies the system resolver: "localhost" must resolve to a loopback
// address, and a public name must either resolve or fail with an error we
// can attribute to a missing Internet connection (then the test is skipped).
test "resolve DNS" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    // Winsock needs explicit startup/teardown on Windows.
    if (builtin.os.tag == .windows) {
        _ = try std.os.windows.WSAStartup(2, 2);
    }
    defer {
        if (builtin.os.tag == .windows) {
            std.os.windows.WSACleanup() catch unreachable;
        }
    }

    // Resolve localhost, this should not fail.
    {
        const localhost_v4 = try net.Address.parseIp("127.0.0.1", 80);
        // Fix: the IPv6 loopback address is ::1 (RFC 4291), not ::2. With the
        // wrong constant this loop panics on hosts where "localhost" resolves
        // only to the IPv6 loopback.
        const localhost_v6 = try net.Address.parseIp("::1", 80);

        const result = try net.getAddressList(testing.allocator, "localhost", 80);
        defer result.deinit();
        // for/else: panic only when no returned address is a loopback.
        for (result.addrs) |addr| {
            if (addr.eql(localhost_v4) or addr.eql(localhost_v6)) break;
        } else @panic("unexpected address for localhost");
    }

    {
        // The tests are required to work even when there is no Internet connection,
        // so some of these errors we must accept and skip the test.
        const result = net.getAddressList(testing.allocator, "example.com", 80) catch |err| switch (err) {
            error.UnknownHostName => return error.SkipZigTest,
            error.TemporaryNameServerFailure => return error.SkipZigTest,
            else => return err,
        };
        result.deinit();
    }
}
|
||||
|
||||
// End-to-end smoke test: a background thread connects to a freshly bound
// TCP port and the main thread reads back the bytes it sent.
test "listen on a port, send bytes, receive bytes" {
    if (builtin.single_threaded) return error.SkipZigTest;
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    // Winsock needs explicit startup/teardown on Windows.
    if (builtin.os.tag == .windows) {
        _ = try std.os.windows.WSAStartup(2, 2);
    }
    defer {
        if (builtin.os.tag == .windows) {
            std.os.windows.WSACleanup() catch unreachable;
        }
    }

    // Try only the IPv4 variant as some CI builders have no IPv6 localhost
    // configured. Port 0 lets the OS pick a free port.
    const localhost = try net.Address.parseIp("127.0.0.1", 0);

    var server = try localhost.listen(.{});
    defer server.deinit();

    const Client = struct {
        fn run(server_address: net.Address) !void {
            const conn = try net.tcpConnectToAddress(server_address);
            defer conn.close();

            _ = try conn.writer().writeAll("Hello world!");
        }
    };

    const client_thread = try std.Thread.spawn(.{}, Client.run, .{server.listen_address});
    defer client_thread.join();

    var accepted = try server.accept();
    defer accepted.stream.close();
    var recv_buf: [16]u8 = undefined;
    const bytes_read = try accepted.stream.reader().read(&recv_buf);

    try testing.expectEqual(@as(usize, 12), bytes_read);
    try testing.expectEqualSlices(u8, "Hello world!", recv_buf[0..bytes_read]);
}
|
||||
|
||||
// Binding a second listener to an already-bound address:port must succeed
// when both sockets set reuse_port.
test "listen on an in use port" {
    if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) {
        // TODO build abstractions for other operating systems
        return error.SkipZigTest;
    }

    // Let the OS pick a free port for the first listener.
    const localhost = try net.Address.parseIp("127.0.0.1", 0);

    var first_listener = try localhost.listen(.{ .reuse_port = true });
    defer first_listener.deinit();

    // Re-bind the exact same address the OS assigned to the first listener.
    var second_listener = try first_listener.listen_address.listen(.{ .reuse_port = true });
    defer second_listener.deinit();
}
|
||||
|
||||
// Connects to `name`:`port` (resolving the host via `allocator`) and checks
// that the peer greets us with exactly "hello from server\n".
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    const connection = try net.tcpConnectToHost(allocator, name, port);
    defer connection.close();

    var recv_buf: [100]u8 = undefined;
    const bytes_read = try connection.read(&recv_buf);
    try testing.expect(mem.eql(u8, recv_buf[0..bytes_read], "hello from server\n"));
}
|
||||
|
||||
// Connects directly to `addr` and checks the peer's greeting line.
fn testClient(addr: net.Address) anyerror!void {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    const connection = try net.tcpConnectToAddress(addr);
    defer connection.close();

    var recv_buf: [100]u8 = undefined;
    const bytes_read = try connection.read(&recv_buf);
    try testing.expect(mem.eql(u8, recv_buf[0..bytes_read], "hello from server\n"));
}
|
||||
|
||||
// Accepts one connection on `server` and writes a single greeting line.
// NOTE(review): the accepted stream is not closed here, matching the
// original behavior — presumably the peer closes it; confirm with callers.
fn testServer(server: *net.Server) anyerror!void {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    var connection = try server.accept();

    try connection.stream.writer().writeAll("hello from server\n");
}
|
||||
|
||||
// End-to-end smoke test over a UNIX-domain socket: a background thread
// connects to a freshly created socket path and the main thread reads back
// the bytes it sent.
test "listen on a unix socket, send bytes, receive bytes" {
    if (builtin.single_threaded) return error.SkipZigTest;
    if (!net.has_unix_sockets) return error.SkipZigTest;

    // Winsock needs explicit startup/teardown on Windows.
    if (builtin.os.tag == .windows) {
        _ = try std.os.windows.WSAStartup(2, 2);
    }
    defer {
        if (builtin.os.tag == .windows) {
            std.os.windows.WSACleanup() catch unreachable;
        }
    }

    const socket_path = try generateFileName("socket.unix");
    defer testing.allocator.free(socket_path);

    const socket_addr = try net.Address.initUnix(socket_path);
    // Best-effort removal of the socket file so repeated runs start clean.
    defer std.fs.cwd().deleteFile(socket_path) catch {};

    var server = try socket_addr.listen(.{});
    defer server.deinit();

    const Client = struct {
        fn run(path: []const u8) !void {
            const conn = try net.connectUnixSocket(path);
            defer conn.close();

            _ = try conn.writer().writeAll("Hello world!");
        }
    };

    const client_thread = try std.Thread.spawn(.{}, Client.run, .{socket_path});
    defer client_thread.join();

    var accepted = try server.accept();
    defer accepted.stream.close();
    var recv_buf: [16]u8 = undefined;
    const bytes_read = try accepted.stream.reader().read(&recv_buf);

    try testing.expectEqual(@as(usize, 12), bytes_read);
    try testing.expectEqualSlices(u8, "Hello world!", recv_buf[0..bytes_read]);
}
|
||||
|
||||
// Builds a unique temporary file name: `random_bytes_count` random bytes,
// base64-encoded, joined to `base_name` with a '-' separator.
// Caller owns the returned slice and must free it with `testing.allocator`.
fn generateFileName(base_name: []const u8) ![]const u8 {
    const random_bytes_count = 12;
    const sub_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
    // Fix: size the buffer from random_bytes_count instead of repeating the
    // magic number 12, so the two can never drift apart.
    var random_bytes: [random_bytes_count]u8 = undefined;
    std.crypto.random.bytes(&random_bytes);
    var sub_path: [sub_path_len]u8 = undefined;
    _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
    return std.fmt.allocPrint(testing.allocator, "{s}-{s}", .{ sub_path[0..], base_name });
}
|
||||
|
||||
// A listener opened with force_nonblocking must return error.WouldBlock from
// accept() when no client is pending, then accept normally once one connects.
test "non-blocking tcp server" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;
    if (true) {
        // https://github.com/ziglang/zig/issues/18315
        return error.SkipZigTest;
    }

    const localhost = try net.Address.parseIp("127.0.0.1", 0);
    // Fix: listen() returns an error union; without `try` the result cannot
    // be used as a Server (deinit/accept below would not type-check).
    var server = try localhost.listen(.{ .force_nonblocking = true });
    defer server.deinit();

    // No client yet: a non-blocking accept must fail with WouldBlock.
    const accept_err = server.accept();
    try testing.expectError(error.WouldBlock, accept_err);

    const socket_file = try net.tcpConnectToAddress(server.listen_address);
    defer socket_file.close();

    // A client is now pending, so accept succeeds.
    var client = try server.accept();
    defer client.stream.close();
    const stream = client.stream.writer();
    try stream.print("hello from server\n", .{});

    var buf: [100]u8 = undefined;
    const len = try socket_file.read(&buf);
    const msg = buf[0..len];
    try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
|
||||
Reference in New Issue
Block a user