Use a ring buffer for the telemetry events buffer

This commit is contained in:
Nikolay Govorov
2026-03-16 23:22:25 +00:00
parent b14ae02548
commit 5fb561dc9c
4 changed files with 155 additions and 106 deletions

View File

@@ -11,84 +11,97 @@ const telemetry = @import("telemetry.zig");
const Runtime = @import("../network/Runtime.zig");
const Connection = @import("../network/http.zig").Connection;
const URL = "https://telemetry.lightpanda.io";
const BATCH_SIZE = 20;
const BUFFER_SIZE = BATCH_SIZE * 2;
// const URL = "https://telemetry.lightpanda.io";
const URL = "http://localhost:9876";
const BUFFER_SIZE = 1024;
const LightPanda = @This();
allocator: Allocator,
runtime: *Runtime,
/// Protects concurrent producers in send().
mutex: std.Thread.Mutex = .{},
pcount: usize = 0,
pending: [BUFFER_SIZE]LightPandaEvent = undefined,
iid: ?[36]u8 = null,
run_mode: Config.RunMode = .serve,
pub fn init(app: *App) !LightPanda {
return .{
head: std.atomic.Value(usize) = .init(0),
tail: std.atomic.Value(usize) = .init(0),
dropped: std.atomic.Value(usize) = .init(0),
buffer: [BUFFER_SIZE]telemetry.Event = undefined,
pub fn init(self: *LightPanda, app: *App, iid: ?[36]u8, run_mode: Config.RunMode) !void {
self.* = .{
.allocator = app.allocator,
.runtime = &app.network,
};
}
pub fn deinit(self: *LightPanda) void {
self.flush();
}
pub fn send(self: *LightPanda, iid: ?[]const u8, run_mode: Config.RunMode, raw_event: telemetry.Event) !void {
const pending_count = blk: {
self.mutex.lock();
defer self.mutex.unlock();
if (self.pcount == BUFFER_SIZE) {
log.err(.telemetry, "telemetry buffer exhausted", .{});
return;
}
self.pending[self.pcount] = .{
.iid = iid,
.mode = run_mode,
.event = raw_event,
};
self.pcount += 1;
break :blk self.pcount;
.iid = iid,
.run_mode = run_mode,
};
if (pending_count >= BATCH_SIZE) {
self.flush();
self.runtime.onTick(@ptrCast(self), flushCallback);
}
pub fn deinit(_: *LightPanda) void {}
pub fn send(self: *LightPanda, raw_event: telemetry.Event) !void {
self.mutex.lock();
defer self.mutex.unlock();
const t = self.tail.load(.monotonic);
const h = self.head.load(.acquire);
if (t - h >= BUFFER_SIZE) {
_ = self.dropped.fetchAdd(1, .monotonic);
return;
}
self.buffer[t % BUFFER_SIZE] = raw_event;
self.tail.store(t + 1, .release);
}
pub fn flush(self: *LightPanda) void {
fn flushCallback(ctx: *anyopaque) void {
const self: *LightPanda = @ptrCast(@alignCast(ctx));
self.postEvent() catch |err| {
log.warn(.telemetry, "flush error", .{ .err = err });
};
}
fn postEvent(self: *LightPanda) !void {
const h = self.head.load(.monotonic);
const t = self.tail.load(.acquire);
const dropped = self.dropped.swap(0, .monotonic);
if (h == t and dropped == 0) return;
errdefer _ = self.dropped.fetchAdd(dropped, .monotonic);
var writer = std.Io.Writer.Allocating.init(self.allocator);
defer writer.deinit();
self.mutex.lock();
defer self.mutex.unlock();
const iid: ?[]const u8 = if (self.iid) |*id| id else null;
const events = self.pending[0..self.pcount];
if (events.len == 0) return;
for (events) |*event| {
try std.json.Stringify.value(event, .{ .emit_null_optional_fields = false }, &writer.writer);
for (h..t) |i| {
const wrapped = LightPandaEvent{ .iid = iid, .mode = self.run_mode, .event = self.buffer[i % BUFFER_SIZE] };
try std.json.Stringify.value(&wrapped, .{ .emit_null_optional_fields = false }, &writer.writer);
try writer.writer.writeByte('\n');
}
const conn = self.runtime.getConnection() orelse return;
if (dropped > 0) {
const wrapped = LightPandaEvent{ .iid = iid, .mode = self.run_mode, .event = .{ .buffer_overflow = .{ .dropped = dropped } } };
try std.json.Stringify.value(&wrapped, .{ .emit_null_optional_fields = false }, &writer.writer);
try writer.writer.writeByte('\n');
}
const conn = self.runtime.getConnection() orelse {
_ = self.dropped.fetchAdd(dropped, .monotonic);
return;
};
errdefer self.runtime.releaseConnection(conn);
try conn.setURL(URL);
try conn.setMethod(.POST);
try conn.setBody(writer.written());
self.pcount = 0;
self.head.store(t, .release);
self.runtime.submitRequest(conn);
}