~novakane/zelbar

3e5a59799e865e32ac418e51f4248b7d5c012f15 — Hugo Machet 9 months ago 7c57bad
Backend: Rewrite BufferPool
2 files changed, 112 insertions(+), 86 deletions(-)

M src/Backend.zig
M src/Bar.zig
M src/Backend.zig => src/Backend.zig +96 -81
@@ 363,7 363,6 @@ const Surface = struct {
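                    // These sizes are in buffer pixels (surface size × integer
                    // scale); they pair with the setBufferScale call made when
                    // a buffer is attached.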
                    ctx.backend.width = w * @intCast(u31, ctx.backend.scale);
                    ctx.backend.height = h * @intCast(u31, ctx.backend.scale);

                    ctx.backend.schedule_and_commit() catch return;
                    ctx.bar.render("") catch return;
                }
            },


@@ 376,6 375,98 @@ const Surface = struct {
    }
};

// TODO: Credit source when published
const BufferPool = struct {
    /// The number of buffers per surface we consider the reasonable upper
    /// limit. Some compositors sometimes triple-buffer, so three seems to be
    /// ok. Note that we can absolutely work with higher buffer numbers if
    /// needed, however we consider that to be an anomaly and therefore do not
    /// want to keep all those extra buffers around if we can avoid it, so as
    /// not to incur unnecessary memory overhead.
    const max_buffer_multiplicity = 3;

    /// The buffers. This is a linked list and not an array list, because we
    /// need stable pointers for the listener of the wl_buffer object (see the
    /// listener sketch after this struct).
    buffers: std.TailQueue(Buffer) = .{},

    /// Deinit the buffer pool, destroying all buffers and freeing all memory.
    pub fn deinit(self: *BufferPool) void {
        var it = self.buffers.first;
        while (it) |node| {
            // We need to get the next node before destroying the current one.
            it = node.next;
            node.data.deinit();
            ctx.gpa.destroy(node);
        }
    }

    /// Get a buffer of the specified dimensions. If possible an idle buffer
    /// is reused, otherwise a new one is created.
    pub fn next_buffer(self: *BufferPool, width: u31, height: u31) !*Buffer {
        log.debug("Next buffer: {}x{}; Total buffers: {}", .{ width, height, self.buffers.len });
        const buffer = (try self.find_suitable_buffer(width, height)) orelse
            try self.new_buffer(width, height);
        // Cull only after a buffer has been picked, and never cull the buffer
        // we are about to return: the caller has not marked it busy yet.
        if (self.buffers.len > max_buffer_multiplicity) {
            self.cull_buffers(buffer);
        }
        return buffer;
    }

    fn find_suitable_buffer(self: *BufferPool, width: u31, height: u31) !?*Buffer {
        var it = self.buffers.first;
        var first_unbusy_buffer_node: ?*std.TailQueue(Buffer).Node = null;
        while (it) |node| : (it = node.next) {
            if (node.data.busy) continue;
            if (node.data.width == width and node.data.height == height) {
                return &node.data;
            } else if (first_unbusy_buffer_node == null) {
                first_unbusy_buffer_node = node;
            }
        }

        // No buffer has matching dimensions, however we do have an unbusy
        // buffer which we can just re-init.
        if (first_unbusy_buffer_node) |node| {
            node.data.deinit();
            // If re-initialisation fails, drop the node so the pool never
            // holds a dead buffer.
            errdefer {
                self.buffers.remove(node);
                ctx.gpa.destroy(node);
            }
            try node.data.init(width, height);
            return &node.data;
        }

        return null;
    }

    fn new_buffer(self: *BufferPool, width: u31, height: u31) !*Buffer {
        log.debug("New buffer: {}x{}", .{ width, height });
        const node = try ctx.gpa.create(std.TailQueue(Buffer).Node);
        errdefer ctx.gpa.destroy(node);
        // gpa.create() returns undefined memory, so set the field defaults
        // before init() touches anything.
        node.data = .{};
        try node.data.init(width, height);
        self.buffers.append(node);
        return &node.data;
    }

    fn cull_buffers(self: *BufferPool, keep: *const Buffer) void {
        log.debug("Culling buffers.", .{});
        var overhead = self.buffers.len - max_buffer_multiplicity;
        var it = self.buffers.first;
        while (it) |node| {
            if (overhead == 0) break;
            // We need to get the next node before destroying the current one.
            it = node.next;
            if (!node.data.busy and &node.data != keep) {
                node.data.deinit();
                self.buffers.remove(node);
                ctx.gpa.destroy(node);
                overhead -= 1;
            }
        }
        log.debug(" -> new buffer count: {}", .{self.buffers.len});
    }
};
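
// A sketch, not part of this diff: the wl_buffer release listener that the
// stable-pointer note in BufferPool refers to, assuming zig-wayland's
// generated setListener API (the actual listener lives elsewhere in this
// file). The compositor hands the registered *Buffer pointer back on
// release, which is why a Buffer node must never move in memory. It would
// be registered in Buffer.init along the lines of:
//     wl_buffer.setListener(*Buffer, buffer_listener_sketch, buffer);
fn buffer_listener_sketch(_: *wl.Buffer, event: wl.Buffer.Event, buffer: *Buffer) void {
    switch (event) {
        // The compositor is done reading the buffer; mark it idle so
        // BufferPool.next_buffer() can hand it out again.
        .release => buffer.busy = false,
    }
}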

const Buffer = struct {
    wl_buffer: ?*wl.Buffer = null,
    data: ?[]align(std.mem.page_size) u8 = null,


@@ 386,7 477,7 @@ const Buffer = struct {

    busy: bool = false,

    fn init(buffer: *Buffer, shm: *wl.Shm, width: u31, height: u31) !void {
    fn init(buffer: *Buffer, width: u31, height: u31) !void {
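        // Assuming a 4-byte pixel format (e.g. 32-bit ARGB): the stride is
        // width * 4 bytes, so a 1920 px wide bar has a stride of 7680 bytes.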
        const stride = width << 2;
        const size: u31 = stride * height;



@@ 406,6 497,7 @@ const Buffer = struct {
        errdefer os.munmap(data);

        // Create a Wayland shm buffer for the same memory file.
        const shm = ctx.backend.shm orelse return error.NoShm;
        const pool = try shm.createPool(fd, size);
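        // Destroying the wl_shm pool right away is fine: per the Wayland
        // protocol, buffers created from a pool keep the backing memory
        // alive on the compositor side.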
        defer pool.destroy();



@@ 464,8 556,7 @@ seats: std.SinglyLinkedList(Seat) = .{},

surface: ?Surface = null,

pool: std.AutoHashMapUnmanaged(u32, Buffer) = .{},
next_buffer: ?*Buffer = null,
pool: BufferPool = .{},

width: u31 = 0,
height: u31 = 0,


@@ 520,15 611,11 @@ pub fn init(backend: *Backend, cfg: Bar.Config) !bool {
    backend.surface = Surface{};
    try backend.surface.?.init();

    try backend.init_pool_buffer(2);

    return backend.display.roundtrip() == .SUCCESS;
}

pub fn deinit(backend: *Backend) void {
    var it = backend.pool.valueIterator();
    while (it.next()) |buf| buf.deinit();
    backend.pool.deinit(ctx.gpa);
    backend.pool.deinit();

    if (backend.surface) |*s| {
        s.deinit();


@@ 554,78 641,6 @@ pub fn deinit(backend: *Backend) void {
    backend.* = undefined;
}

pub fn schedule_and_commit(backend: *Backend) !void {
    const surface = backend.surface.?;
    if (!surface.configured) {
        log.warn("surface is not configured", .{});
        return;
    }

    if (backend.next_buffer) |pending| {
        if (ctx.bar.render_scheduled) {
            surface.wl_surface.setBufferScale(backend.scale);
            surface.wl_surface.attach(pending.wl_buffer, 0, 0);
            surface.wl_surface.damageBuffer(0, 0, pending.width, pending.height);
            surface.wl_surface.commit();

            ctx.bar.render_scheduled = false;

            backend.next_buffer = try backend.get_next_buffer(
                backend.shm.?,
                backend.width,
                backend.height,
            );

            if (backend.next_buffer) |next| {
                assert(next.busy);
                ctx.bar.image = next.pixman_image;
            } else {
                log.err("no buffer available", .{});
                return;
            }
        }
    } else {
        backend.next_buffer = try backend.get_next_buffer(
            backend.shm.?,
            backend.width,
            backend.height,
        );

        if (backend.next_buffer) |buf| {
            assert(buf.busy);
            ctx.bar.image = buf.pixman_image;
        } else {
            log.err("no buffer available", .{});
            return;
        }
    }
}

fn init_pool_buffer(backend: *Backend, comptime n: usize) !void {
    var key: u32 = 0;
    while (key < n) : (key += 1) {
        try backend.pool.putNoClobber(ctx.gpa, key, Buffer{});
    }
}

fn get_next_buffer(backend: Backend, shm: *wl.Shm, width: u31, height: u31) !?*Buffer {
    var it = backend.pool.valueIterator();
    while (it.next()) |buf| {
        if (buf.busy) continue;
        if (buf.wl_buffer == null) try buf.init(shm, width, height);

        if (buf.width != width or buf.height != height) {
            buf.deinit();
            try buf.init(shm, width, height);
        }

        buf.busy = true;
        return buf;
    }

    return null;
}

/// ref: https://github.com/ifreund/waylock/blob/master/src/Lock.zig#L184
/// This function does the following:
///  1. Dispatch buffered Wayland events to their listener callbacks.

M src/Bar.zig => src/Bar.zig +16 -5
@@ 65,8 65,6 @@ image: ?*pixman.Image = null,
/// received in the layer_surface configure event is used.
req_width: bool = false,

render_scheduled: bool = false,

pub fn new(config: Config) !Bar {
    return Bar{
        .config = config,


@@ 81,6 79,10 @@ pub fn deinit(bar: *Bar) void {
}

pub fn render(bar: *Bar, input: []const u8) !void {
    const buffer = try ctx.backend.pool.next_buffer(ctx.backend.width, ctx.backend.height);
    std.debug.assert(!buffer.busy);
    bar.image = buffer.pixman_image;

    const w = if (bar.req_width) bar.config.width else @intCast(u16, ctx.backend.width);

    bar.config.border.check_size(w, bar.config.height) catch |err| switch (err) {


@@ 101,7 103,7 @@ pub fn render(bar: *Bar, input: []const u8) !void {
    };

    // Render the background and the borders of the bar.
    const pix = bar.image orelse return error.PixmanImageEmpty;
    const pix = buffer.pixman_image orelse return error.PixmanImageEmpty;
    const bg = bar.config.colors.get(.bg).?;
    try decorations.draw_rectangle(pix, @intCast(i16, x), 0, w, bar.config.height, bg);
    try bar.config.border.draw(pix, @intCast(i16, x), 0, w, bar.config.height);


@@ 109,6 111,15 @@ pub fn render(bar: *Bar, input: []const u8) !void {
    // Parse and render the raw input if any.
    try data.expose(input, x, w);

    bar.render_scheduled = true;
    try ctx.backend.schedule_and_commit();
    const surface = ctx.backend.surface.?;
    if (!surface.configured) {
        log.warn("surface is not configured", .{});
        return;
    }
    surface.wl_surface.setBufferScale(ctx.backend.scale);
    surface.wl_surface.attach(buffer.wl_buffer, 0, 0);
    surface.wl_surface.damageBuffer(0, 0, buffer.width, buffer.height);
    surface.wl_surface.commit();

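    // The compositor now holds a reference to the attached buffer; its
    // wl_buffer release event will mark it idle again for reuse by the pool.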
    buffer.busy = true;
}