From 0861703ddce8c46b732cfb773aabe9daa9c5da48 Mon Sep 17 00:00:00 2001
From: Robby Zambito
Date: Sat, 10 Jan 2026 16:08:23 -0500
Subject: Sleep to go faster

The problem was that I was basically flushing twice for every message
when doing request reply. Sleeping briefly gives the sender the
opportunity to finish writing a full message to the queue, which we
then check for before flushing.

This brings request-reply latency in benchmarks down from roughly 90ms
to 200us.
---
 src/Server/Client.zig | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'src/Server/Client.zig')

diff --git a/src/Server/Client.zig b/src/Server/Client.zig
index 77034fd..6ad1804 100644
--- a/src/Server/Client.zig
+++ b/src/Server/Client.zig
@@ -40,6 +40,16 @@ pub fn start(self: *Client, io: std.Io) !void {
     std.debug.assert(self.to_client.end == 0);
     while (true) {
         self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1);
+        // Wait 1 nanosecond to see if more data is in the queue.
+        // If there is, add it to the write buffer before sending it.
+        // The reason for this is because if we send the first chunk as soon as we get it,
+        // we will likely be sending a partial message, which will end up being way slower.
+        try io.sleep(.fromNanoseconds(1), .awake);
+        self.to_client.end += try self.recv_queue.get(
+            io,
+            self.to_client.buffer[self.to_client.end..],
+            0,
+        );
         try self.to_client.flush();
     }
 }
--
cgit