// This file is part of Forge, the foundation library for Forge tools.
//
// Copyright (C) 2021 Ryan Chan <rycwo@posteo.net>
// SPDX-License-Identifier: GPL-3.0-only
//
// This Source Code Form is subject to the terms of the GNU General Public
// License v3.0 only. You should have received a copy of the license along with
// this program. If not, see <https://www.gnu.org/licenses/>.

#include "forge/memory_pool.h"

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

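// Free blocks are tracked in-place: each span of contiguous free blocks
// stores a free_item in its first block. next is the position of the next
// free span (zero meaning the header, closing the cycle) and count is the
// number of blocks in the span.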
struct free_item {
	int32_t next;
	int32_t count;
};

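// Fetches the block at the given position. Block 0 is the free-list header;
// data blocks occupy positions 1 through pool->count.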
static inline unsigned char*
get_block(struct fg_memory_pool const* pool, int pos) {
	assert(pos >= 0 && pos <= pool->count);
	return &pool->mem[pool->block_size * pos];
}

void
fg_alloc_memory_pool(
		struct fg_memory_pool* pool,
		struct fg_allocator const* alloc,
		int32_t count,
		int32_t block_size) {
	assert(count >= 1);
	assert((size_t)block_size >= sizeof(struct free_item));

	// Allocate an additional block as the zero-th element to use as the
	// header to track the beginning of a (cyclical) free list.
	pool->mem = (unsigned char*)alloc->alloc((count + 1) * block_size);

	pool->ver = (int32_t*)alloc->alloc(sizeof(int32_t) * count);
	memset(pool->ver, 0, sizeof(int32_t) * count);

	pool->alloc = *alloc;
	pool->count = count;
	pool->block_size = block_size;

	struct free_item* head = (struct free_item*)get_block(pool, 0);
	head->next = 0;
	head->count = count + 1;
}
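
// For illustration, after fg_alloc_memory_pool with count = 3 the pool looks
// like this (block 0 is the header, H):
//
//	[H: next=0, count=4][free][free][free]
//
// The header's count covers itself plus every allocatable block, and its
// next of zero points back at itself, closing the cyclical list.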

void
fg_free_memory_pool(struct fg_memory_pool* pool) {
	pool->alloc.free(pool->mem);
	pool->alloc.free(pool->ver);
	pool->count = 0;
	pool->block_size = 0;
}

struct fg_handle
fg_alloc_memory_pool_block(struct fg_memory_pool* pool) {
	struct free_item* head = (struct free_item*)get_block(pool, 0);
	// The header points back to itself (next == 0) and covers only itself
	// (count == 1) when there are no free blocks left
	if (!head->next && head->count <= 1)
		return (struct fg_handle){0, 0};

	// Blocks are always carved from the tail of a free span. This keeps the
	// span's start index stable, so only its count needs adjusting.
	int id = 0;

	// Header is handled specially as it has a minimum count of one
	if (head->count > 1) {
		head->count--;
		id = head->count;
	}
	else {
		struct free_item* next = (struct free_item*)get_block(pool, head->next);
		assert(next->count >= 1);

		if (next->count == 1) {
			id = head->next;
			head->next = next->next;
		}
		else {
			next->count--;
			id = head->next + next->count;
		}
	}
	return (struct fg_handle){id, pool->ver[id - 1]};
}
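
// Continuing the count = 3 illustration: successive calls carve blocks from
// the tail of the header's span, yielding ids 3, 2, then 1. A fourth call
// finds the pool exhausted and returns the null handle {0, 0}.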

void
fg_free_memory_pool_block(
		struct fg_memory_pool* pool,
		struct fg_handle handle) {
	assert(fg_handle_valid(pool, handle));
	struct free_item* head = (struct free_item*)get_block(pool, 0);
	struct free_item* next = (struct free_item*)get_block(pool, head->next);

	// If the block directly follows the end of either the header's span or
	// the next free item's span, we can simply extend that span. Otherwise we
	// naively insert a new item at the front of the free list.
	//
	// This means the free list is not guaranteed to be ordered, and the pool
	// is prone to fragmentation with use.

	if (handle.id == head->count)
		head->count++;
	else if (handle.id == (head->next + next->count))
		next->count++;
	else {
		struct free_item* new = (struct free_item*)get_block(pool, handle.id);
		new->next = head->next;
		new->count = 1;
		head->next = handle.id;
	}

	// Bumping the generation version invalidates any outstanding handles to
	// this block, so a stale handle can never be used to access a reused
	// block.
	pool->ver[handle.id - 1]++;
}
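
// Continuing the count = 3 illustration with all three blocks allocated
// (head->count == 1, head->next == 0): freeing id 2 matches neither adjacency
// case, so block 2 becomes a one-block free item and head->next is set to 2.
// Freeing id 1 next extends the header's span (head->count becomes 2), yet
// the two adjacent free spans stay unmerged: the fragmentation noted above.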

unsigned char*
fg_access_memory_pool_block(
		struct fg_memory_pool const* pool,
		struct fg_handle handle) {
	assert(fg_handle_valid(pool, handle));
	return get_block(pool, handle.id);
}

bool
fg_handle_valid(struct fg_memory_pool const* pool, struct fg_handle handle) {
	// Note that ids are offset by one because block 0 is reserved for the
	// free-list header
	return handle.id > 0
		&& handle.id <= pool->count
		&& handle.ver == pool->ver[handle.id - 1];
}

void
fg_compact_memory_pool(struct fg_memory_pool* pool) {
	// TODO: Existing block handles will be invalid unless an additional array
	// is used to map the new indices during the compaction. This would add
	// another (constant) lookup for every operation!
	//
	// Maybe we should consider making a separate memory pool implementation
	// that guarantees contiguous layout (using swap-with-last removal)
	// without handles.
}
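
// A minimal usage sketch (illustrative only). It assumes the fg_allocator
// alloc/free function-pointer fields used above have signatures compatible
// with the C standard library's malloc and free:
//
//	struct fg_allocator alloc = {.alloc = malloc, .free = free};
//	struct fg_memory_pool pool;
//	fg_alloc_memory_pool(&pool, &alloc, 64, 32);
//
//	struct fg_handle handle = fg_alloc_memory_pool_block(&pool);
//	if (fg_handle_valid(&pool, handle)) {
//		unsigned char* block = fg_access_memory_pool_block(&pool, handle);
//		memset(block, 0, pool.block_size);
//		fg_free_memory_pool_block(&pool, handle);
//	}
//
//	fg_free_memory_pool(&pool);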