summaryrefslogtreecommitdiff
path: root/src/server/Server.zig
diff options
context:
space:
mode:
authorRobby Zambito <contact@robbyzambito.me>2026-01-03 05:33:13 +0000
committerRobby Zambito <contact@robbyzambito.me>2026-01-03 05:33:56 +0000
commitbd9829f6842f0c989389aa4ce9784ab6e3cb4ee5 (patch)
tree95da64a0a288fa3128e7e91bbb292701a56c3a77 /src/server/Server.zig
parenta4ec798521bda5564e2dc96c2184100116b54d28 (diff)
Organize things
Making it easier to use the server as a library
Diffstat (limited to 'src/server/Server.zig')
-rw-r--r--src/server/Server.zig305
1 files changed, 305 insertions, 0 deletions
diff --git a/src/server/Server.zig b/src/server/Server.zig
new file mode 100644
index 0000000..ebbac19
--- /dev/null
+++ b/src/server/Server.zig
@@ -0,0 +1,305 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
+const AutoHashMapUnmanaged = std.AutoHashMapUnmanaged;
+
+const Io = std.Io;
+const Group = Io.Group;
+const IpAddress = std.Io.net.IpAddress;
+const Mutex = Io.Mutex;
+const Queue = Io.Queue;
+const Stream = std.Io.net.Stream;
+
+const message_parser = @import("./message_parser.zig");
+pub const MessageType = message_parser.MessageType;
+pub const Message = message_parser.Message;
+const ServerInfo = Message.ServerInfo;
+pub const Client = @import("./Client.zig");
+const Server = @This();
+
/// A single client subscription: which subject pattern it listens on,
/// which client owns it, and the client-chosen subscription id.
/// `subject` and `sid` are heap copies owned by the subscription
/// (duplicated in `subscribe`) and released by `deinit`.
pub const Subscription = struct {
    subject: []const u8,
    client_id: usize,
    sid: []const u8,

    /// Free the owned `subject` and `sid` copies.
    fn deinit(self: Subscription, allocator: Allocator) void {
        allocator.free(self.sid);
        allocator.free(self.subject);
    }
};
+
+const eql = std.mem.eql;
+const log = std.log;
+const panic = std.debug.panic;
+
/// Static server metadata; sent to every client as the initial INFO message.
info: ServerInfo,
/// Connected clients keyed by the id assigned in `start`. Values point at
/// `Client` values that live on their `handleConnection` task's frame.
clients: AutoHashMapUnmanaged(usize, *Client) = .empty,

/// Guards `subscriptions`; `removeClient` and `publishMessage` also touch
/// `clients` while holding it. NOTE(review): `addClient` and `start` access
/// `clients` without it — confirm the Io implementation serializes those.
subs_lock: Mutex = .init,
/// Flat list of all active subscriptions; scanned linearly on every publish.
subscriptions: ArrayList(Subscription) = .empty,
+
/// Free all subscriptions and the client table. Presumably called after
/// `start` has stopped and its client tasks are done — TODO confirm; the
/// `Client` values behind the map pointers are owned by their connection
/// frames and are not deinitialized here.
/// Uses `lockUncancelable` so teardown completes even under cancellation.
pub fn deinit(server: *Server, io: Io, alloc: Allocator) void {
    server.subs_lock.lockUncancelable(io);
    defer server.subs_lock.unlock(io);
    for (server.subscriptions.items) |sub| {
        sub.deinit(alloc);
    }
    server.subscriptions.deinit(alloc);
    server.clients.deinit(alloc);
}
+
/// Bind the listening socket described by `server.info` and accept clients
/// forever; only returns on error (listen/accept failure). Each accepted
/// connection is handled by a concurrent `handleConnectionInfallible` task
/// tracked in `client_group`, which is canceled when this function unwinds.
pub fn start(server: *Server, io: Io, gpa: Allocator) !void {
    var tcp_server = try IpAddress.listen(try IpAddress.parse(
        server.info.host,
        server.info.port,
    ), io, .{ .reuse_address = true });
    defer tcp_server.deinit(io);
    log.debug("Server headers: {s}", .{if (server.info.headers) "true" else "false"});
    log.debug("Server max payload: {d}", .{server.info.max_payload});
    log.info("Server ID: {s}", .{server.info.server_id});
    log.info("Server name: {s}", .{server.info.server_name});
    log.info("Server listening on {s}:{d}", .{ server.info.host, server.info.port });

    var client_group: Group = .init;
    defer client_group.cancel(io);

    // Wrapping increment plus the `contains` check lets ids be reused once
    // their previous owner has disconnected.
    // NOTE(review): reads `clients` without `subs_lock` while connection tasks
    // mutate it — confirm the chosen Io implementation serializes access.
    var id: usize = 0;
    while (true) : (id +%= 1) {
        if (server.clients.contains(id)) continue;
        log.debug("Accepting next client", .{});
        const stream = try tcp_server.accept(io);
        log.debug("Accepted connection {d}", .{id});
        // If the handler task cannot be spawned, close the stream here since
        // no one else owns it yet.
        _ = client_group.concurrent(io, handleConnectionInfallible, .{ server, gpa, io, id, stream }) catch {
            log.err("Could not start concurrent handler for {d}", .{id});
            stream.close(io);
        };
    }
}
+
/// Register `client` under `id` so publishes can route to it.
/// NOTE(review): mutates `clients` without holding `subs_lock`, while
/// `removeClient`/`publishMessage` access the map under the lock — confirm
/// the Io implementation serializes these, otherwise this is a data race.
fn addClient(server: *Server, allocator: Allocator, id: usize, client: *Client) !void {
    return server.clients.put(allocator, id, client);
}
+
/// Drop client `id` from the routing table and reap every subscription it
/// owned. Holds `subs_lock` for the whole operation so `publishMessage`
/// never observes a subscription whose client entry is already gone; uses
/// `lockUncancelable` because removal must complete even under cancellation.
fn removeClient(server: *Server, io: Io, allocator: Allocator, id: usize) void {
    server.subs_lock.lockUncancelable(io);
    defer server.subs_lock.unlock(io);
    if (!server.clients.remove(id)) return;
    // Walk backwards so swapRemove only disturbs indices already visited.
    var i = server.subscriptions.items.len;
    while (i > 0) {
        i -= 1;
        const sub = server.subscriptions.items[i];
        if (sub.client_id != id) continue;
        sub.deinit(allocator);
        _ = server.subscriptions.swapRemove(i);
    }
}
+
/// `void` adapter around `handleConnection` for `Group.concurrent`, which
/// cannot propagate errors; any failure is logged instead of returned.
fn handleConnectionInfallible(server: *Server, server_allocator: Allocator, io: Io, id: usize, stream: Stream) void {
    if (handleConnection(server, server_allocator, io, id, stream)) |_| {} else |err| {
        log.err("Failed processing client {d}: {any}", .{ id, err });
    }
}
+
/// Per-connection task for client `id`: owns the stream, the outbound message
/// queue, and the `Client` state, and runs the server side of the protocol
/// until the peer disconnects (EndOfStream) or an error occurs.
/// Teardown relies on LIFO defer order: cancel the writer task, deregister the
/// client, deinit the client, drain+close the queue, then close the stream.
fn handleConnection(server: *Server, server_allocator: Allocator, io: Io, id: usize, stream: Stream) !void {
    defer stream.close(io);

    // TODO: use a client allocator for things that should only live for as long as the client?
    // I had this before, but it seemed to have made lifetimes harder to track.
    // Messages made sense to parse using a client allocator, but that makes it hard to free
    // messages when done processing them (usually outside the client process, ie: publish).

    // Set up client writer
    // TODO: how many bytes can fit in a network write syscall? cat /proc/sys/net/core/wmem_max
    var w_buffer: [1024 * 16]u8 = undefined;
    var writer = stream.writer(io, &w_buffer);
    const out = &writer.interface;

    // Set up client reader
    // TODO: how many bytes can fit in a network read syscall? cat /proc/sys/net/core/rmem_max
    var r_buffer: [1024 * 16]u8 = undefined;
    var reader = stream.reader(io, &r_buffer);
    const in = &reader.interface;

    // Set up buffer queue
    // Outbound queue: handlers below enqueue messages; the Client task started
    // further down presumably drains it to `out` — see Client.start.
    var qbuf: [8]Message = undefined;
    var queue: Queue(Message) = .init(&qbuf);
    defer {
        // Runs after client_task has been canceled (LIFO): close the queue,
        // then free the payload of any msg/hmsg the writer never flushed.
        // Other variants (.info, .pong, ...) carry no owned allocations here.
        queue.close(io);
        while (queue.getOne(io)) |msg| {
            switch (msg) {
                .msg => |m| m.deinit(server_allocator),
                .hmsg => |h| h.deinit(server_allocator),
                else => {},
            }
        } else |_| {}
    }

    // Create client
    var client: Client = .init(null, &queue, in, out);
    defer client.deinit(server_allocator);

    try server.addClient(server_allocator, id, &client);
    defer server.removeClient(io, server_allocator, id);

    // Do initial handshake with client
    try queue.putOne(io, .{ .info = server.info });

    var client_task = try io.concurrent(Client.start, .{ &client, io, server_allocator });
    defer client_task.cancel(io) catch {};

    // Messages are owned by the server after they are received from the client
    while (client.next(server_allocator)) |msg| {
        switch (msg) {
            .ping => {
                // Respond to ping with pong.
                try client.send(io, .pong);
            },
            .@"pub", .hpub => {
                // The inbound publish is freed here after fan-out; per-subscriber
                // copies are made inside publishMessage (toMsg/toHMsg).
                defer switch (msg) {
                    .@"pub" => |pb| pb.deinit(server_allocator),
                    .hpub => |hp| hp.deinit(server_allocator),
                    else => unreachable,
                };
                try server.publishMessage(io, server_allocator, &client, msg);
            },
            .sub => |sub| {
                // subscribe() dupes subject/sid, so the message can be freed here.
                defer sub.deinit(server_allocator);
                try server.subscribe(io, server_allocator, id, sub);
            },
            .unsub => |unsub| {
                defer unsub.deinit(server_allocator);
                try server.unsubscribe(io, server_allocator, id, unsub);
            },
            .connect => |connect| {
                // Replace earlier CONNECT options. `connect` is not freed here:
                // ownership moves to the client — presumably released by
                // client.deinit; confirm in Client.
                if (client.connect) |*current| {
                    current.deinit(server_allocator);
                }
                client.connect = connect;
            },
            else => |e| {
                panic("Unimplemented message: {any}\n", .{e});
            },
        }
    } else |err| switch (err) {
        error.EndOfStream => {
            // Normal disconnect path — not an error.
            log.debug("Client {d} disconnected", .{id});
        },
        else => {
            return err;
        },
    }
}
+
/// Whether a subscription pattern matches a published subject under NATS
/// wildcard rules: tokens are '.'-separated; "*" matches exactly one token;
/// ">" matches one or more remaining tokens and ends the comparison.
/// A match requires the pattern to consume the subject exactly (no trailing
/// subject tokens), unless ">" was reached.
fn subjectMatches(sub_subject: []const u8, pub_subject: []const u8) bool {
    // TODO: assert that sub_subject and pub_subject are valid.
    var pattern_tokens = std.mem.splitScalar(u8, sub_subject, '.');
    var subject_tokens = std.mem.splitScalar(u8, pub_subject, '.');

    while (pattern_tokens.next()) |pattern| {
        // ">" still requires at least one subject token to be present.
        const token = subject_tokens.next() orelse return false;

        if (std.mem.eql(u8, pattern, ">")) return true;

        const is_single_wildcard = std.mem.eql(u8, pattern, "*");
        if (!is_single_wildcard and !std.mem.eql(u8, pattern, token)) {
            return false;
        }
    }

    // Every pattern token matched; succeed only if the subject is exhausted too.
    return subject_tokens.next() == null;
}
+
// Exercises NATS wildcard semantics: '*' matches exactly one token,
// '>' matches one or more trailing tokens, and a pattern must consume
// the whole subject.
test subjectMatches {
    const expect = std.testing.expect;
    try expect(subjectMatches("foo", "foo"));
    try expect(!subjectMatches("foo", "bar"));

    try expect(subjectMatches("foo.*", "foo.bar"));
    try expect(!subjectMatches("foo.*", "foo"));
    try expect(!subjectMatches("foo.>", "foo"));

    // the wildcard subscriptions foo.*.quux and foo.> both match foo.bar.quux, but only the latter matches foo.bar.baz.
    try expect(subjectMatches("foo.*.quux", "foo.bar.quux"));
    try expect(subjectMatches("foo.>", "foo.bar.quux"));
    try expect(!subjectMatches("foo.*.quux", "foo.bar.baz"));
    try expect(subjectMatches("foo.>", "foo.bar.baz"));
}
+
/// Fan a PUB/HPUB out to every matching subscription, then acknowledge a
/// verbose publisher with +OK. Per-subscriber copies are produced by
/// toMsg/toHMsg; per-send failures other than cancellation are swallowed so
/// one dead subscriber cannot fail the whole publish.
/// NOTE(review): `subs_lock` is held across all sends — a slow consumer
/// blocks every other publish/subscribe for the duration; confirm acceptable.
fn publishMessage(server: *Server, io: Io, alloc: Allocator, source_client: *Client, msg: Message) !void {
    // On any error escaping this function, warn a verbose publisher.
    // NOTE(review): the fixed "Slow Consumer" text is also emitted for
    // non-queue errors (e.g. OOM from toMsg/toHMsg) — confirm intended.
    errdefer {
        if (source_client.connect) |c| {
            if (c.verbose) {
                source_client.send(io, .{ .@"-err" = "Slow Consumer" }) catch {};
            }
        }
    }
    const subject = switch (msg) {
        .@"pub" => |pb| pb.subject,
        .hpub => |hp| hp.@"pub".subject,
        else => unreachable,
    };
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    for (server.subscriptions.items) |subscription| {
        if (subjectMatches(subscription.subject, subject)) {
            // Subscription may outlive its client briefly; skip orphans.
            const client = server.clients.get(subscription.client_id) orelse {
                log.debug("Trying to publish to a client that no longer exists: {d}\n", .{subscription.client_id});
                continue;
            };

            switch (msg) {
                // Cancellation must propagate; any other send error is treated
                // as that one subscriber's problem and ignored.
                .@"pub" => |pb| client.send(io, .{
                    .msg = try pb.toMsg(alloc, subscription.sid),
                }) catch |err| switch (err) {
                    error.Canceled => return err,
                    else => {},
                },
                .hpub => |hp| client.send(io, .{ .hmsg = try hp.toHMsg(
                    alloc,
                    subscription.sid,
                ) }) catch |err| switch (err) {
                    error.Canceled => return err,
                    else => {},
                },
                else => unreachable,
            }
        }
    }
    // Verbose clients expect +OK after each accepted publish.
    if (source_client.connect) |c| {
        if (c.verbose) {
            source_client.send(io, .@"+ok") catch {};
        }
    }
}
+
/// Record a SUB from client `id`. Subject and sid are duplicated so the
/// subscription outlives the client-owned message buffers; the copies are
/// freed when the subscription is removed (unsubscribe/removeClient/deinit).
fn subscribe(server: *Server, io: Io, gpa: Allocator, id: usize, msg: Message.Sub) !void {
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    // errdefers keep the error path leak-free if a later allocation fails.
    const owned_subject = try gpa.dupe(u8, msg.subject);
    errdefer gpa.free(owned_subject);
    const owned_sid = try gpa.dupe(u8, msg.sid);
    errdefer gpa.free(owned_sid);
    const subscription: Subscription = .{
        .subject = owned_subject,
        .client_id = id,
        .sid = owned_sid,
    };
    try server.subscriptions.append(gpa, subscription);
}
+
/// Remove every subscription of client `id` whose sid matches the UNSUB.
/// NOTE(review): if `Message.Unsub` carries an auto-unsubscribe message
/// count it is ignored here — confirm against the protocol expectations.
fn unsubscribe(server: *Server, io: Io, gpa: Allocator, id: usize, msg: Message.Unsub) !void {
    try server.subs_lock.lock(io);
    defer server.subs_lock.unlock(io);
    // Walk backwards so swapRemove only disturbs indices already visited.
    var i = server.subscriptions.items.len;
    while (i > 0) {
        i -= 1;
        const sub = server.subscriptions.items[i];
        if (sub.client_id != id or !eql(u8, sub.sid, msg.sid)) continue;
        sub.deinit(gpa);
        _ = server.subscriptions.swapRemove(i);
    }
}
+
/// Default server id for library embedders that do not supply their own.
pub const default_id = "server-id-123";
/// Default human-readable server name reported in the INFO handshake.
pub const default_name = "Zits Server";