#include <sys/bitmap.h>
#include <sys/console.h>
#include <sys/kernel.h>
#include <sys/memory.h>
#include <sys/paging.h>
#define DEBUG
#include <debug.h>
#define KMEM_ARENA_START 0x200000
#define KMEM_ARENA_END 0x400000
// First non-reserved phys memory address
#define PHYS_RESERVED 0x200000
// One firmware-provided physical memory range descriptor.
// Layout resembles a BIOS INT 15h/E820 entry (64-bit base and length
// split into low/high 32-bit halves) — NOTE(review): E820 uses a 32-bit
// type field; here type is 16 bits plus explicit padding. Confirm against
// the bootloader code that fills this in.
struct memory_map_entry {
uint32_t base_low;     // physical start address, low 32 bits
uint32_t base_high;    // physical start address, high 32 bits
uint32_t length_low;   // range length in bytes, low 32 bits
uint32_t length_high;  // range length in bytes, high 32 bits
uint16_t type;         // 1 = usable RAM (see memory_usable())
uint16_t pad;          // explicit padding to keep the struct 20 bytes
} __attribute__ ((__packed__));
// Global allocator state: the kernel arena free list plus the
// physical-page usage bitmap built by kmem_detect().
struct kmem {
void *free; // first free page in the kernel arena (ascending linked list)
uint32_t phys_pages_total;     // total page frames up to the highest usable address
uint32_t phys_pages_available; // page frames marked usable by kmem_detect()
struct bitmap *phys_pages;     // bit set = frame in use / reserved, clear = free
};
// Intrusive free-list node stored in the first bytes of each free page;
// links free pages in ascending address order (kmem_free_pages relies on this).
struct free_info {
struct free_info *next; // start of next free page, NULL at list end
};
// Allocator state for the kernel arena and physical page bitmap.
static struct kmem kmem_state;
// Boot-time bump allocator over the low-memory window [0x10000, 0x80000);
// an integer literal is not a valid pointer initializer in ISO C, so the
// addresses are cast explicitly.
static void *lowmem_next = (void *)0x10000;
static const void *lowmem_max = (void *)0x80000; // first byte past the window
// Bump-allocate `size` bytes from the low-memory window.
// Returns NULL when the window is exhausted; memory is never freed.
static void *
lowmem_alloc(size_t size) {
// `lowmem_max` is the first byte past the window, so an allocation that
// ends exactly at `lowmem_max` is still valid — use `>` rather than the
// previous `>=`, which rejected the exact-fit case (off by one).
if (lowmem_next + size > lowmem_max) { return NULL; }
void *p = lowmem_next;
lowmem_next += size;
return p;
}
// A memory-map entry is usable when it describes non-empty type-1 RAM,
// lies entirely below 4 GiB, does not wrap around the 32-bit address
// space, and extends past the reserved low region.
static int
memory_usable(struct memory_map_entry *m) {
if (m->type != 1) { return 0; }
if (m->base_high != 0 || m->length_high != 0) { return 0; }
if (m->length_low == 0) { return 0; }
if (UINT32_ADD_OVERFLOW(m->base_low, m->length_low)) { return 0; }
return m->base_low + m->length_low > PHYS_RESERVED;
}
void kmem_detect(uint32_t count, struct memory_map_entry *mmap);
// Initialize the kernel page allocator: size up physical memory, then
// thread every page of the arena [KMEM_ARENA_START, KMEM_ARENA_END)
// into an ascending singly-linked free list.
void kmem_init(uint32_t memory_map_entry_count, struct memory_map_entry *entries) {
kmem_detect(memory_map_entry_count, entries);
// Build the list front-to-back with a tail pointer, so the head ends
// up at KMEM_ARENA_START and each node links to the next higher page.
struct free_info *head = NULL;
struct free_info **tail = &head;
for (void *page = (void *)KMEM_ARENA_START;
     page < (void *)KMEM_ARENA_END;
     page += PAGE_SIZE) {
struct free_info *node = (struct free_info *)page;
*tail = node;
tail = &node->next;
}
*tail = NULL; // terminate the final page's link
kmem_state.free = head;
}
// Scan the firmware memory map, size the physical page bitmap, and mark
// every usable page frame above PHYS_RESERVED as free.
// Pass 1 finds the highest usable physical address; pass 2 clears bitmap
// bits for fully-usable pages inside each usable entry.
void
kmem_detect(uint32_t count, struct memory_map_entry *mmap) {
uint32_t highest = 0;
// uint32_t loop counter: comparing a signed int against the uint32_t
// `count` mixed signed/unsigned arithmetic.
for (uint32_t i = 0; i < count; ++i) {
if (!memory_usable(mmap + i)) {
khexprint("=========== SKIP mmap entry", i);
khexprint("  base_low", mmap[i].base_low);
khexprint("  base_high", mmap[i].base_high);
khexprint("  length_low", mmap[i].length_low);
khexprint("  length_high", mmap[i].length_high);
khexprint("  type", mmap[i].type);
continue;
}
if ((mmap[i].base_low + mmap[i].length_low) > highest) {
highest = mmap[i].base_low + mmap[i].length_low;
}
}
kmem_state.phys_pages_total = highest / PAGE_SIZE;
// Hoist the size computation; the old code computed it twice.
size_t bm_size = bitmap_required_size(kmem_state.phys_pages_total);
void *bm_mem = lowmem_alloc(bm_size);
if (bm_mem == NULL) {
// Without the bitmap no physical page tracking is possible.
panic("kmem_detect: out of low memory for physical page bitmap");
}
struct bitmap *bm = kmem_state.phys_pages =
bitmap_create(bm_mem, bm_size, kmem_state.phys_pages_total);
// Initially, mark all memory as unusable; usable ranges are cleared below.
bitmap_range_update(bm, 0, kmem_state.phys_pages_total, 1);
uint32_t available_count = 0;
for (uint32_t i = 0; i < count; ++i) {
if (!memory_usable(mmap + i)) {
continue;
}
uint32_t start = mmap[i].base_low;
uint32_t end = mmap[i].base_low + mmap[i].length_low;
if (end < PHYS_RESERVED) {
continue; // entirely inside the reserved low region
}
if (start < PHYS_RESERVED) {
start = PHYS_RESERVED; // clip the reserved prefix
}
khexprint("=========== USE mmap entry", i);
khexprint("  base_low", mmap[i].base_low);
khexprint("  base_high", mmap[i].base_high);
khexprint("  length_low", mmap[i].length_low);
khexprint("  length_high", mmap[i].length_high);
khexprint("  type", mmap[i].type);
khexprint("  actual_start", start);
khexprint("  actual_end", end);
// First completely usable page (round start up)...
uint32_t first_page = (start + PAGE_SIZE - 1) / PAGE_SIZE;
// ...and first page that is not completely usable (round end down).
uint32_t last_page = end / PAGE_SIZE;
// Mark the range as unused (available).
bitmap_range_update(bm, first_page, last_page, 0);
available_count += (last_page - first_page);
}
// Bug fix: this count was previously computed but never stored, leaving
// phys_pages_available permanently zero.
kmem_state.phys_pages_available = available_count;
khexprint("============ Memory detection complete\n  total memory",
available_count*PAGE_SIZE);
}
// Claim the first clear (free) physical page frame in the bitmap and
// mark it in-use; returns the frame index.
// NOTE(review): the result of bitmap_find_clear is not validated — its
// behavior when no frame is free is defined elsewhere; confirm a sentinel
// exists before relying on exhaustion handling here.
uint32_t
phys_aquire_page(void) {
uint32_t frame = bitmap_find_clear(kmem_state.phys_pages);
bitmap_set(kmem_state.phys_pages, frame);
return frame;
}
// Mark the given physical page frame as free again in the bitmap.
// No validation: releasing an already-free frame is a silent no-op.
void
phys_release_page(uint32_t page) {
bitmap_clear(kmem_state.phys_pages, page);
}
// Pop one page off the kernel arena free list.
// Returns the page's start address, or NULL when the arena is exhausted.
// Only single-page allocations are supported; anything else panics.
void *kmem_alloc_pages(size_t pages) {
if (pages != 1) {
panic("can only alloc one page at a time");
}
struct free_info *fi = (struct free_info *)kmem_state.free;
if (fi == NULL) {
// Bug fix: the old code dereferenced the NULL list head here.
return NULL;
}
kmem_state.free = fi->next;
return fi;
}
// Return one page to the kernel arena free list, keeping the list sorted
// by ascending address. Only single-page frees are supported.
// No double-free detection: freeing a page twice corrupts the list.
void kmem_free_pages(void *start, size_t pages) {
if (pages != 1) {
panic("can only free one page at a time");
}
struct free_info *fi = (struct free_info *)start;
if (kmem_state.free == NULL || start < kmem_state.free) {
// Bug fix: the empty-list case previously fell into the search
// branch and dereferenced a NULL head. An empty list and an
// insert-at-front both make this page the new head.
fi->next = kmem_state.free;
kmem_state.free = start;
return;
}
// Walk to the last free page that precedes `start`, then splice in.
struct free_info *prev = (struct free_info *)kmem_state.free;
while (prev->next && (void *)prev->next < start) {
prev = prev->next;
}
fi->next = prev->next;
prev->next = fi;
}
// Allocate at least `size` bytes of kernel memory, rounded up to whole
// pages. Returns NULL for size 0 or when the arena is exhausted.
// NOTE: the page allocator currently panics on multi-page requests, so
// sizes above PAGE_SIZE are effectively unsupported.
void *kmalloc(size_t size) {
if (size == 0) {
// Bug fix: size 0 previously produced a 0-page request, which
// tripped the panic in kmem_alloc_pages.
return NULL;
}
size_t pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
return kmem_alloc_pages(pages);
}
// Allocate `size` bytes aligned to `alignment`.
// Pages returned by kmem_alloc_pages start on page boundaries, so any
// alignment that evenly divides PAGE_SIZE is automatically satisfied.
// Bug fix: the old code ignored `alignment` entirely and could silently
// return misaligned memory; unsupported alignments now panic instead.
void *kmalloc_align(size_t alignment, size_t size) {
if (alignment == 0 || PAGE_SIZE % alignment != 0) {
panic("kmalloc_align: unsupported alignment");
}
if (size == 0) {
return NULL;
}
size_t pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
return kmem_alloc_pages(pages);
}
// Release a kmalloc allocation back to the arena.
// NULL is accepted and ignored, matching free() semantics.
// NOTE(review): always frees exactly one page — correct while
// kmem_alloc_pages only hands out single pages, but this must be
// revisited if multi-page allocation is ever implemented.
void kfree(void *p) {
if (p == NULL) return;
// TODO: implement
kmem_free_pages(p, 1);
}