diff options
| author | Robby Zambito <contact@robbyzambito.me> | 2026-01-10 16:08:23 -0500 |
|---|---|---|
| committer | Robby Zambito <contact@robbyzambito.me> | 2026-01-10 16:42:43 -0500 |
| commit | 0861703ddce8c46b732cfb773aabe9daa9c5da48 (patch) | |
| tree | 7b40b541547da898546a1f2e62f86dadf1dd441e | |
| parent | 99ea7556581a678684e30202a8eee654a001588a (diff) | |
Sleep to go faster
The problem was that we were effectively flushing twice for every message
when doing request-reply.
This gives the sender the opportunity to finish writing a full message
to the queue, which we then check for before flushing.
This reduces request-reply latency in benchmarks from roughly 90 ms to
about 200 µs.
| -rw-r--r-- | src/Server.zig | 6 | ||||
| -rw-r--r-- | src/Server/Client.zig | 10 |
2 files changed, 15 insertions, 1 deletions
diff --git a/src/Server.zig b/src/Server.zig index e20f5c0..49f54e2 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -47,6 +47,7 @@ const Subscription = struct { // would put an invalid set series of bytes in the receivers queue. _ = try self.queue.putUncancelable(io, chunk, chunk.len); } + try io.checkCancel(); } }; @@ -382,7 +383,10 @@ fn publishMessage( ) catch unreachable; msg_chunks.appendBounded(msg.payload) catch unreachable; - try subscription.send(io, msg_chunks.items[0..chunk_count]); + subscription.send(io, msg_chunks.items[0..chunk_count]) catch |err| switch (err) { + error.Closed => {}, + error.Canceled => |e| return e, + }; } } diff --git a/src/Server/Client.zig b/src/Server/Client.zig index 77034fd..6ad1804 100644 --- a/src/Server/Client.zig +++ b/src/Server/Client.zig @@ -40,6 +40,16 @@ pub fn start(self: *Client, io: std.Io) !void { std.debug.assert(self.to_client.end == 0); while (true) { self.to_client.end = try self.recv_queue.get(io, self.to_client.buffer, 1); + // Wait 1 nanosecond to see if more data is in the queue. + // If there is, add it to the write buffer before sending it. + // The reason for this is because if we send the first chunk as soon as we get it, + // we will likely be sending a partial message, which will end up being way slower. + try io.sleep(.fromNanoseconds(1), .awake); + self.to_client.end += try self.recv_queue.get( + io, + self.to_client.buffer[self.to_client.end..], + 0, + ); try self.to_client.flush(); } } |
