mirror of
https://github.com/lightpanda-io/browser.git
synced 2026-03-22 04:34:44 +00:00
157 lines · 5.5 KiB · Zig
// Copyright (C) 2023-2026 Lightpanda (Selecy SAS)
//
// Francis Bouvier <francis@lightpanda.io>
// Pierre Tachoire <pierre@lightpanda.io>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
const std = @import("std");
const builtin = @import("builtin");

const lp = @import("lightpanda");
const SigHandler = @import("Sighandler.zig");

const Allocator = std.mem.Allocator;

const log = lp.log;
const App = lp.App;
const Config = lp.Config;
pub const panic = lp.crash_handler.panic;
|
|
|
|
/// Process entry point: picks the allocators, then delegates all real work
/// to `run`. Any error from `run` is logged and turns into exit code 1.
pub fn main() !void {
    // Allocator strategy:
    //   Debug   -> DebugAllocator, so leaks are detected at shutdown
    //   Release -> libc allocator
    var debug_allocator: std.heap.DebugAllocator(.{ .stack_trace_frames = 10 }) = .init;
    const gpa = if (builtin.mode == .Debug) debug_allocator.allocator() else std.heap.c_allocator;

    // Declared before the arena's defer so it runs *after* the arena is
    // freed (defers are LIFO): exit non-zero when leaks were found.
    defer if (builtin.mode == .Debug) {
        if (debug_allocator.detectLeaks()) std.posix.exit(1);
    };

    // Arena holding allocations scoped to the lifetime of main().
    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    run(gpa, arena) catch |err| {
        log.fatal(.app, "exit", .{ .err = err });
        std.posix.exit(1);
    };
}
/// Parses CLI arguments, applies log overrides, boots the App, and
/// dispatches on the selected mode (serve / fetch / mcp). `allocator` is
/// handed to long-lived subsystems (App, MCP server); `main_arena` owns
/// main()-scoped allocations and is freed wholesale by the caller.
fn run(allocator: Allocator, main_arena: Allocator) !void {
    const args = try Config.parseArgs(main_arena);
    defer args.deinit(main_arena);

    // Modes that finish before any subsystem is initialized.
    switch (args.mode) {
        .help => {
            // NOTE(review): despite the "AndExit" name this is followed by a
            // return, so presumably it can come back — confirm in Config.
            args.printUsageAndExit(args.mode.help);
            return std.process.cleanExit();
        },
        .version => {
            std.debug.print("{s}\n", .{lp.build_config.git_commit});
            return std.process.cleanExit();
        },
        else => {},
    }

    // Apply CLI log overrides before anything else can emit log lines.
    if (args.logLevel()) |ll| {
        log.opts.level = ll;
    }
    if (args.logFormat()) |lf| {
        log.opts.format = lf;
    }
    if (args.logFilterScopes()) |lfs| {
        log.opts.filter_scopes = lfs;
    }

    // NOTE(review): original comment said "_app is global to handle graceful
    // shutdown", but `app` is a local here — the comment looks stale.
    var app = try App.init(allocator, &args);

    defer app.deinit();
    app.telemetry.record(.{ .run = {} });

    switch (args.mode) {
        .serve => |opts| {
            // The signal handler is arena-allocated so it outlives this
            // scope until main()'s arena is torn down.
            const sighandler = try main_arena.create(SigHandler);
            sighandler.* = .{ .arena = main_arena };
            try sighandler.install();

            log.debug(.app, "startup", .{ .mode = "serve", .snapshot = app.snapshot.fromEmbedded() });
            const address = std.net.Address.parseIp(opts.host, opts.port) catch |err| {
                log.fatal(.app, "invalid server address", .{ .err = err, .host = opts.host, .port = opts.port });
                return args.printUsageAndExit(false);
            };

            var server = lp.Server.init(app, address) catch |err| {
                if (err == error.AddressInUse) {
                    // Friendlier diagnostic for the most common bind failure.
                    log.fatal(.app, "address already in use", .{
                        .host = opts.host,
                        .port = opts.port,
                        .hint = "Another process is already listening on this address. " ++
                            "Stop the other process or use --port to choose a different port.",
                    });
                } else {
                    log.fatal(.app, "server run error", .{ .err = err });
                }
                return err;
            };
            defer server.deinit();

            // Registers Network.stop as the signal callback (presumably
            // SIGINT/SIGTERM — confirm in Sighandler.zig), then blocks in
            // the network event loop until stopped.
            try sighandler.on(lp.Network.stop, .{&app.network});
            app.network.run();
        },
        .fetch => |opts| {
            const url = opts.url;
            log.debug(.app, "startup", .{ .mode = "fetch", .dump_mode = opts.dump_mode, .url = url, .snapshot = app.snapshot.fromEmbedded() });

            var fetch_opts = lp.FetchOpts{
                // Fixed 5s wait before the dump; not CLI-configurable here.
                .wait_ms = 5000,
                .dump_mode = opts.dump_mode,
                .dump = .{
                    .strip = opts.strip,
                    .with_base = opts.with_base,
                    .with_frames = opts.with_frames,
                },
            };

            // Unbuffered stdout writer, attached only when a dump mode was
            // requested; otherwise fetch_opts.writer keeps its default.
            var stdout = std.fs.File.stdout();
            var writer = stdout.writer(&.{});
            if (opts.dump_mode != null) {
                fetch_opts.writer = &writer.interface;
            }

            lp.fetch(app, url, fetch_opts) catch |err| {
                log.fatal(.app, "fetch error", .{ .err = err, .url = url });
                return err;
            };
        },
        .mcp => {
            // NOTE(review): this info line is emitted before the format
            // override below, so it may use the pre-override format.
            log.info(.mcp, "starting server", .{});

            // Stdout carries MCP responses, so force logfmt for log output
            // (presumably routed to stderr — confirm in lp.log).
            log.opts.format = .logfmt;

            var stdout = std.fs.File.stdout().writer(&.{});

            var mcp_server: *lp.mcp.Server = try .init(allocator, app, &stdout.interface);
            defer mcp_server.deinit();

            // Requests are read from stdin through a fixed 64 KiB buffer.
            var stdin_buf: [64 * 1024]u8 = undefined;
            var stdin = std.fs.File.stdin().reader(&stdin_buf);

            try lp.mcp.router.processRequests(mcp_server, &stdin.interface);
        },
        // .help and .version already returned in the first switch.
        else => unreachable,
    }
}