Diffstat (limited to 'src/Server.zig')
 src/Server.zig | 422 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 422 insertions(+), 0 deletions(-)
diff --git a/src/Server.zig b/src/Server.zig
new file mode 100644
index 0000000..e7d00b1
--- /dev/null
+++ b/src/Server.zig
@@ -0,0 +1,422 @@
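+//! A NATS-style publish/subscribe server: accepts TCP clients, tracks
+//! their subscriptions, and routes published messages to matching
+//! subscribers.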
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const ArrayList = std.ArrayList;
+const AutoHashMapUnmanaged = std.AutoHashMapUnmanaged;
+
+const Io = std.Io;
+const Dir = Io.Dir;
+const Group = Io.Group;
+const IpAddress = std.Io.net.IpAddress;
+const Mutex = Io.Mutex;
+const Queue = Io.Queue;
+const Stream = std.Io.net.Stream;
+
+pub const Client = @import("./Server/Client.zig");
+
+const message_parser = @import("./Server/message_parser.zig");
+
+pub const MessageType = message_parser.MessageType;
+pub const Message = message_parser.Message;
+const ServerInfo = Message.ServerInfo;
+
+const Msgs = Client.Msgs;
+const Server = @This();
+
+const builtin = @import("builtin");
+
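+/// A single subscription: a subject pattern owned by one client,
+/// identified by a client-chosen sid, optionally part of a queue group,
+/// delivering matched messages into that client's queue.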
+pub const Subscription = struct {
+ subject: []const u8,
+ client_id: usize,
+ sid: []const u8,
+ queue_group: ?[]const u8,
+ queue: *Queue(Msgs),
+ // Allocator used for messages placed in this subscription's queue.
+ alloc: Allocator,
+
+ fn deinit(self: Subscription, alloc: Allocator) void {
+ alloc.free(self.subject);
+ alloc.free(self.sid);
+ if (self.queue_group) |g| alloc.free(g);
+ }
+};
+
+const eql = std.mem.eql;
+const log = std.log.scoped(.zits);
+const panic = std.debug.panic;
+
+info: ServerInfo,
+clients: AutoHashMapUnmanaged(usize, *Client) = .empty,
+
+subs_lock: Mutex = .init,
+subscriptions: ArrayList(Subscription) = .empty,
+
+pub fn deinit(server: *Server, io: Io, alloc: Allocator) void {
+ server.subs_lock.lockUncancelable(io);
+ defer server.subs_lock.unlock(io);
+ for (server.subscriptions.items) |sub| {
+ sub.deinit(alloc);
+ }
+ // TODO drain subscription queues
+ server.subscriptions.deinit(alloc);
+ server.clients.deinit(alloc);
+}
+
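+/// Binds to the configured host and port, then accepts connections in a
+/// loop, spawning one concurrent handler per client.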
+pub fn start(server: *Server, io: Io, gpa: Allocator) !void {
+ var tcp_server = try IpAddress.listen(try IpAddress.parse(
+ server.info.host,
+ server.info.port,
+ ), io, .{});
+ defer tcp_server.deinit(io);
+ log.debug("Server headers: {s}", .{if (server.info.headers) "true" else "false"});
+ log.debug("Server max payload: {d}", .{server.info.max_payload});
+ log.info("Server ID: {s}", .{server.info.server_id});
+ log.info("Server name: {s}", .{server.info.server_name});
+ log.info("Server listening on {s}:{d}", .{ server.info.host, server.info.port });
+
+ var client_group: Group = .init;
+ defer client_group.cancel(io);
+
+ const read_buffer_size, const write_buffer_size = getBufferSizes(io);
+ log.debug("read buf: {d} write buf: {d}", .{ read_buffer_size, write_buffer_size });
+
+ var id: usize = 0;
+ while (true) : (id +%= 1) {
+ if (server.clients.contains(id)) continue;
+ log.debug("Accepting next client", .{});
+ const stream = try tcp_server.accept(io);
+ log.debug("Accepted connection {d}", .{id});
+ _ = client_group.concurrent(io, handleConnectionInfallible, .{
+ server,
+ gpa,
+ io,
+ id,
+ stream,
+ read_buffer_size,
+ write_buffer_size,
+ }) catch {
+ log.err("Could not start concurrent handler for {d}", .{id});
+ stream.close(io);
+ };
+ }
+}
+
+fn addClient(server: *Server, allocator: Allocator, id: usize, client: *Client) !void {
+ try server.clients.put(allocator, id, client);
+}
+
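+/// Drops the client and frees any subscriptions it still owns.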
+fn removeClient(server: *Server, io: Io, allocator: Allocator, id: usize) void {
+ server.subs_lock.lockUncancelable(io);
+ defer server.subs_lock.unlock(io);
+ if (server.clients.remove(id)) {
+ const len = server.subscriptions.items.len;
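+ // Walk backwards so swapRemove never skips the element swapped into place.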
+ for (0..len) |from_end| {
+ const i = len - from_end - 1;
+ const sub = server.subscriptions.items[i];
+ if (sub.client_id == id) {
+ sub.deinit(allocator);
+ _ = server.subscriptions.swapRemove(i);
+ }
+ }
+ }
+}
+
+fn handleConnectionInfallible(
+ server: *Server,
+ server_allocator: Allocator,
+ io: Io,
+ id: usize,
+ stream: Stream,
+ r_buf_size: usize,
+ w_buf_size: usize,
+) !void {
+ handleConnection(server, server_allocator, io, id, stream, r_buf_size, w_buf_size) catch |err| switch (err) {
+ error.Canceled => return error.Canceled,
+ else => log.err("Failed processing client {d}: {any}", .{ id, err }),
+ };
+}
+
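+/// Runs one connection end to end: sets up buffers and queues, registers
+/// the client, sends the initial INFO, then dispatches parsed messages
+/// until the client disconnects.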
+fn handleConnection(
+ server: *Server,
+ server_allocator: Allocator,
+ io: Io,
+ id: usize,
+ stream: Stream,
+ r_buf_size: usize,
+ w_buf_size: usize,
+) !void {
+ defer stream.close(io);
+
+ var dba: std.heap.DebugAllocator(.{}) = .init;
+ dba.backing_allocator = server_allocator;
+ defer _ = dba.deinit();
+ const alloc = if (builtin.mode == .Debug or builtin.mode == .ReleaseSafe)
+ dba.allocator()
+ else
+ server_allocator;
+
+ // Set up client writer
+ const w_buffer: []u8 = try alloc.alloc(u8, w_buf_size);
+ defer alloc.free(w_buffer);
+ var writer = stream.writer(io, w_buffer);
+ const out = &writer.interface;
+
+ // Set up client reader
+ const r_buffer: []u8 = try alloc.alloc(u8, r_buf_size);
+ defer alloc.free(r_buffer);
+ var reader = stream.reader(io, r_buffer);
+ const in = &reader.interface;
+
+ // Set up buffer queue
+ const qbuf: []Message = try alloc.alloc(Message, 16);
+ defer alloc.free(qbuf);
+ var recv_queue: Queue(Message) = .init(qbuf);
+ defer recv_queue.close(io);
+
+ const mbuf: []Msgs = try alloc.alloc(Msgs, w_buf_size / @sizeOf(Msgs));
+ defer alloc.free(mbuf);
+ var msgs_queue: Queue(Msgs) = .init(mbuf);
+ defer {
+ msgs_queue.close(io);
+ while (msgs_queue.getOne(io)) |msg| {
+ switch (msg) {
+ .MSG => |m| m.deinit(alloc),
+ .HMSG => |h| h.deinit(alloc),
+ }
+ } else |_| {}
+ }
+
+ // Create client
+ var client: Client = .init(null, alloc, &recv_queue, &msgs_queue, in, out);
+ defer client.deinit(server_allocator);
+
+ try server.addClient(server_allocator, id, &client);
+ defer server.removeClient(io, server_allocator, id);
+
+ // Do initial handshake with client
+ // try recv_queue.putOne(io, .PONG);
+ try recv_queue.putOne(io, .{ .INFO = server.info });
+
+ var client_task = try io.concurrent(Client.start, .{ &client, io });
+ defer client_task.cancel(io) catch {};
+
+ // Messages are owned by the server after they are received from the client
+ while (client.next(server_allocator)) |msg| {
+ switch (msg) {
+ .PING => {
+ // Respond to ping with pong.
+ try client.send(io, .PONG);
+ },
+ .PUB => |pb| {
+ @branchHint(.likely);
+ defer pb.deinit(server_allocator);
+ try server.publishMessage(io, server_allocator, &client, msg);
+ },
+ .HPUB => |hp| {
+ @branchHint(.likely);
+ defer hp.deinit(server_allocator);
+ try server.publishMessage(io, server_allocator, &client, msg);
+ },
+ .SUB => |sub| {
+ defer sub.deinit(server_allocator);
+ try server.subscribe(io, server_allocator, client, id, sub);
+ },
+ .UNSUB => |unsub| {
+ defer unsub.deinit(server_allocator);
+ try server.unsubscribe(io, server_allocator, id, unsub);
+ },
+ .CONNECT => |connect| {
+ if (client.connect) |*current| {
+ current.deinit(server_allocator);
+ }
+ client.connect = connect;
+ },
+ else => |e| {
+ panic("Unimplemented message: {any}\n", .{e});
+ },
+ }
+ } else |err| switch (err) {
+ error.EndOfStream, error.ReadFailed => {
+ log.debug("Client {d} disconnected", .{id});
+ return error.Canceled;
+ },
+ else => {
+ return err;
+ },
+ }
+}
+
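+/// NATS-style wildcard matching over dot-separated subjects: `*` matches
+/// exactly one token, `>` matches one or more trailing tokens.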
+fn subjectMatches(sub_subject: []const u8, pub_subject: []const u8) bool {
+ // TODO: assert that sub_subject and pub_subject are valid.
+ var sub_iter = std.mem.splitScalar(u8, sub_subject, '.');
+ var pub_iter = std.mem.splitScalar(u8, pub_subject, '.');
+
+ while (sub_iter.next()) |st| {
+ const pt = pub_iter.next() orelse return false;
+
+ if (eql(u8, st, ">")) return true;
+
+ if (!eql(u8, st, "*") and !eql(u8, st, pt)) {
+ return false;
+ }
+ }
+
+ return pub_iter.next() == null;
+}
+
+test subjectMatches {
+ const expect = std.testing.expect;
+ try expect(subjectMatches("foo", "foo"));
+ try expect(!subjectMatches("foo", "bar"));
+
+ try expect(subjectMatches("foo.*", "foo.bar"));
+ try expect(!subjectMatches("foo.*", "foo"));
+ try expect(!subjectMatches("foo.>", "foo"));
+
+ // The wildcard subscriptions foo.*.quux and foo.> both match foo.bar.quux, but only the latter matches foo.bar.baz.
+ try expect(subjectMatches("foo.*.quux", "foo.bar.quux"));
+ try expect(subjectMatches("foo.>", "foo.bar.quux"));
+ try expect(!subjectMatches("foo.*.quux", "foo.bar.baz"));
+ try expect(subjectMatches("foo.>", "foo.bar.baz"));
+}
+
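+/// Delivers a PUB/HPUB to every matching subscription. A queue group
+/// receives the message at most once, and the chosen subscription is
+/// rotated to the back of the list so group members take turns.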
+fn publishMessage(
+ server: *Server,
+ io: Io,
+ alloc: Allocator,
+ source_client: *Client,
+ msg: Message,
+) !void {
+ defer if (source_client.connect) |c| {
+ if (c.verbose) {
+ source_client.send(io, .@"+OK") catch {};
+ }
+ };
+
+ const subject = switch (msg) {
+ .PUB => |pb| pb.subject,
+ .HPUB => |hp| hp.@"pub".subject,
+ else => unreachable,
+ };
+ try server.subs_lock.lock(io);
+ defer server.subs_lock.unlock(io);
+ var published_queue_groups: ArrayList([]const u8) = .empty;
+ defer published_queue_groups.deinit(alloc);
+ var published_queue_sub_idxs: ArrayList(usize) = .empty;
+ defer published_queue_sub_idxs.deinit(alloc);
+
+ subs: for (0..server.subscriptions.items.len) |i| {
+ const subscription = server.subscriptions.items[i];
+ if (subjectMatches(subscription.subject, subject)) {
+ if (subscription.queue_group) |sg| {
+ for (published_queue_groups.items) |g| {
+ if (eql(u8, g, sg)) {
+ continue :subs;
+ }
+ }
+ // Record the group so this message isn't delivered to it again.
+ try published_queue_groups.append(alloc, sg);
+ // Move this index to the end of the subscription list,
+ // to prioritize other subscriptions in the queue next time.
+ try published_queue_sub_idxs.append(alloc, i);
+ }
+ switch (msg) {
+ .PUB => |pb| {
+ try subscription.queue.putOne(io, .{
+ .MSG = try pb.toMsg(subscription.alloc, subscription.sid),
+ });
+ },
+ .HPUB => |hp| {
+ try subscription.queue.putOne(io, .{
+ .HMSG = try hp.toHMsg(subscription.alloc, subscription.sid),
+ });
+ },
+ else => unreachable,
+ }
+ }
+ }
+
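+ // Rotate the picked queue subscriptions to the back, removing from the
+ // highest index first so the smaller recorded indices stay valid.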
+ for (0..published_queue_sub_idxs.items.len) |from_end| {
+ const i = published_queue_sub_idxs.items.len - from_end - 1;
+ server.subscriptions.appendAssumeCapacity(server.subscriptions.orderedRemove(i));
+ }
+}
+
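+/// Records a subscription, duplicating subject, sid, and queue group so
+/// they outlive the parsed SUB message.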
+fn subscribe(
+ server: *Server,
+ io: Io,
+ gpa: Allocator,
+ client: Client,
+ id: usize,
+ msg: Message.Sub,
+) !void {
+ try server.subs_lock.lock(io);
+ defer server.subs_lock.unlock(io);
+ const subject = try gpa.dupe(u8, msg.subject);
+ errdefer gpa.free(subject);
+ const sid = try gpa.dupe(u8, msg.sid);
+ errdefer gpa.free(sid);
+ const queue_group = if (msg.queue_group) |q| try gpa.dupe(u8, q) else null;
+ errdefer if (queue_group) |q| gpa.free(q);
+ try server.subscriptions.append(gpa, .{
+ .subject = subject,
+ .client_id = id,
+ .sid = sid,
+ .queue_group = queue_group,
+ .queue = client.msg_queue,
+ .alloc = client.alloc,
+ });
+}
+
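+/// Removes every subscription owned by this client whose sid matches.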
+fn unsubscribe(
+ server: *Server,
+ io: Io,
+ gpa: Allocator,
+ id: usize,
+ msg: Message.Unsub,
+) !void {
+ try server.subs_lock.lock(io);
+ defer server.subs_lock.unlock(io);
+ const len = server.subscriptions.items.len;
+ for (0..len) |from_end| {
+ const i = len - from_end - 1;
+ const sub = server.subscriptions.items[i];
+ if (sub.client_id == id and eql(u8, sub.sid, msg.sid)) {
+ sub.deinit(gpa);
+ _ = server.subscriptions.swapRemove(i);
+ }
+ }
+}
+
+const parseUnsigned = std.fmt.parseUnsigned;
+
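+/// Reads the kernel socket buffer limits from /proc/sys/net/core (Linux
+/// only), falling back to 4 KiB for both when unavailable.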
+fn getBufferSizes(io: Io) struct { usize, usize } {
+ const default_size = 4 * 1024;
+ const default = .{ default_size, default_size };
+
+ const dir = Dir.openDirAbsolute(io, "/proc/sys/net/core", .{}) catch {
+ log.warn("couldn't open /proc/sys/net/core", .{});
+ return default;
+ };
+ // Release the handle on return (assumes Dir.close(io), mirroring Stream.close).
+ defer dir.close(io);
+
+ var buf: [64]u8 = undefined;
+
+ const rmem_max = readBufferSize(io, dir, "rmem_max", &buf, default_size);
+ const wmem_max = readBufferSize(io, dir, "wmem_max", &buf, default_size);
+
+ return .{ rmem_max, wmem_max };
+}
+
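+/// Reads a single numeric sysctl-style file and parses it, returning
+/// `default` on any failure.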
+fn readBufferSize(io: Io, dir: anytype, filename: []const u8, buf: []u8, default: usize) usize {
+ const bytes = dir.readFile(io, filename, buf) catch |err| {
+ log.err("couldn't read {s}: {any}", .{ filename, err });
+ return default;
+ };
+
+ // Strip the trailing newline, guarding against an empty read.
+ const trimmed = if (bytes.len > 0 and bytes[bytes.len - 1] == '\n')
+ bytes[0 .. bytes.len - 1]
+ else
+ bytes;
+
+ return parseUnsigned(usize, trimmed, 10) catch |err| {
+ log.err("couldn't parse {s}: {any}", .{ trimmed, err });
+ return default;
+ };
+}
+
+pub const default_id = "server-id-123";
+pub const default_name = "Zits Server";