~cypheon/xos

xos/kmem.c -rw-r--r-- 5.8 KiB
567db604 — Johann Rudloff Implement scanning and allocation of physical memory. 7 years ago
                                                                                
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
#include <sys/bitmap.h>
#include <sys/console.h>
#include <sys/kernel.h>
#include <sys/memory.h>
#include <sys/paging.h>

#define DEBUG
#include <debug.h>

#define KMEM_ARENA_START 0x200000
#define KMEM_ARENA_END 0x400000

// First non-reserved phys memory address
#define PHYS_RESERVED 0x200000

// One entry of the physical memory map passed to kmem_init()/kmem_detect().
// 64-bit base and length are split into 32-bit halves; the packed layout
// looks like a BIOS INT 15h/E820 entry — TODO confirm against the
// bootloader that fills this in.
struct memory_map_entry {
  uint32_t base_low;    // physical base address, low 32 bits
  uint32_t base_high;   // physical base address, high 32 bits
  uint32_t length_low;  // region length in bytes, low 32 bits
  uint32_t length_high; // region length in bytes, high 32 bits
  uint16_t type;        // region type; 1 == usable RAM (see memory_usable())
  uint16_t pad;         // explicit padding; struct is packed, so no implicit padding
} __attribute__ ((__packed__));

// Global allocator state: the kernel-arena free list plus the physical
// page-frame accounting produced by kmem_detect().
struct kmem {
  void *free; // first free page
  uint32_t phys_pages_total;     // page frames from 0 up to the highest usable address
  uint32_t phys_pages_available; // usable frames above PHYS_RESERVED counted by kmem_detect()
  struct bitmap *phys_pages;     // one bit per frame: set = in use / unusable, clear = free
};

// Header stored in-place at the start of each free kernel-arena page,
// forming a singly-linked free list sorted by ascending address.
struct free_info {
  struct free_info *next; // start of next free page
};

// Single global instance of the allocator state for this kernel.
static struct kmem kmem_state;

// Trivial bump allocator over the low-memory window [0x10000, 0x80000),
// used for early boot-time allocations (e.g. the phys-page bitmap).
// Memory obtained here is never freed.
// Note: the initializers used to be bare integers (`= 0x10000`), which is
// an ISO C constraint violation; they now carry explicit pointer casts.
static char *lowmem_next = (char *)0x10000;
static const char *lowmem_max = (const char *)0x80000;

// Returns a pointer to `size` bytes of low memory, or NULL when the
// window is exhausted. No alignment guarantee beyond byte granularity.
static void *
lowmem_alloc(size_t size) {
  // Compare against the remaining space instead of computing
  // `lowmem_next + size`, which could overflow the pointer (UB). This
  // also fixes an off-by-one: an allocation ending exactly at
  // lowmem_max is valid and used to be rejected.
  if (size > (size_t)(lowmem_max - lowmem_next)) { return NULL; }
  void *p = lowmem_next;
  lowmem_next += size;
  return p;
}

// A memory map entry is usable iff it is type 1 (usable RAM), lies
// entirely below 4 GiB (both high words zero), has a non-zero length
// that does not wrap the 32-bit address space, and extends past the
// reserved low region.
static int
memory_usable(struct memory_map_entry *m) {
  if (m->type != 1) { return 0; }
  if (m->base_high != 0 || m->length_high != 0) { return 0; }
  if (m->length_low == 0) { return 0; }
  if (UINT32_ADD_OVERFLOW(m->base_low, m->length_low)) { return 0; }
  return (m->base_low + m->length_low) > PHYS_RESERVED;
}

void kmem_detect(uint32_t count, struct memory_map_entry *mmap);

// Initialize kernel memory management: scan the physical memory map,
// then build the free-page list for the kernel arena
// [KMEM_ARENA_START, KMEM_ARENA_END).
void kmem_init(uint32_t memory_map_entry_count, struct memory_map_entry *entries) {
  kmem_detect(memory_map_entry_count, entries);

  // Walk the arena backwards so the resulting singly-linked free list
  // comes out sorted by ascending address (kmem_free_pages keeps the
  // list address-ordered and expects that invariant).
  // The loop runs over integer addresses: arithmetic on `void *`, as
  // the previous version did, is a GNU extension rather than ISO C.
  uint32_t page = KMEM_ARENA_END - PAGE_SIZE;
  struct free_info *previous = (struct free_info *)0;

  while (page >= KMEM_ARENA_START) {
    struct free_info *free = (struct free_info *)page;
    free->next = previous;
    previous = free;
    page -= PAGE_SIZE;
  }

  kmem_state.free = previous;
}

// Scan the boot-time memory map: size the physical-page bitmap from the
// highest usable address, then mark every fully-usable page above
// PHYS_RESERVED as free. Fills kmem_state.phys_pages_total,
// .phys_pages_available and .phys_pages.
void
kmem_detect(uint32_t count, struct memory_map_entry *mmap) {
  // Pass 1: find the highest usable physical address to size the bitmap.
  // Loop index is uint32_t to match `count` (was a signed/unsigned
  // comparison before).
  uint32_t highest = 0;
  for (uint32_t i = 0; i < count; ++i) {
    if (!memory_usable(mmap + i)) {
      khexprint("=========== SKIP mmap entry", i);
      khexprint("  base_low", mmap[i].base_low);
      khexprint("  base_high", mmap[i].base_high);
      khexprint("  length_low", mmap[i].length_low);
      khexprint("  length_high", mmap[i].length_high);
      khexprint("  type", mmap[i].type);
      continue;
    }
    if ((mmap[i].base_low + mmap[i].length_low) > highest) {
      highest = mmap[i].base_low + mmap[i].length_low;
    }
  }
  kmem_state.phys_pages_total = highest / PAGE_SIZE;

  size_t bm_size = bitmap_required_size(kmem_state.phys_pages_total);
  void *bm_mem = lowmem_alloc(bm_size);
  if (bm_mem == NULL) {
    // Previously passed NULL straight into bitmap_create on exhaustion.
    panic("kmem_detect: out of low memory for phys page bitmap");
  }
  struct bitmap *bm = kmem_state.phys_pages = bitmap_create(bm_mem,
      bm_size, kmem_state.phys_pages_total);

  // initially, mark all memory as unusable
  bitmap_range_update(bm, 0, kmem_state.phys_pages_total, 1);

  // Pass 2: clear the bitmap bits for every fully-usable page.
  uint32_t available_count = 0;
  for (uint32_t i = 0; i < count; ++i) {
    if (!memory_usable(mmap + i)) {
      continue;
    }
    uint32_t start = mmap[i].base_low;
    uint32_t end = mmap[i].base_low + mmap[i].length_low;
    if (end < PHYS_RESERVED) {
      continue;
    }
    if (start < PHYS_RESERVED) {
      // Clamp: memory below PHYS_RESERVED stays marked unusable.
      start = PHYS_RESERVED;
    }

    khexprint("=========== USE mmap entry", i);
    khexprint("  base_low", mmap[i].base_low);
    khexprint("  base_high", mmap[i].base_high);
    khexprint("  length_low", mmap[i].length_low);
    khexprint("  length_high", mmap[i].length_high);
    khexprint("  type", mmap[i].type);
    khexprint("  actual_start", start);
    khexprint("  actual_end", end);

    // first completely usable page
    uint32_t first_page = (start + PAGE_SIZE - 1) / PAGE_SIZE;

    // first page that is not completely usable
    uint32_t last_page = end / PAGE_SIZE;

    // mark memory as unused(available)
    bitmap_range_update(bm, first_page, last_page, 0);
    available_count += (last_page - first_page);
  }
  // Record the result; this was computed but never stored before, so
  // phys_pages_available stayed zero.
  kmem_state.phys_pages_available = available_count;
  khexprint("============ Memory detection complete\n  total memory",
      available_count*PAGE_SIZE);
}

// Allocate one physical page frame: locate a clear (free) bit in the
// physical-page bitmap, mark it as used, and return the frame index.
// NOTE(review): the value bitmap_find_clear returns when no bit is
// clear is not checked here — confirm its failure sentinel.
uint32_t
phys_aquire_page(void) {
  uint32_t frame = bitmap_find_clear(kmem_state.phys_pages);
  bitmap_set(kmem_state.phys_pages, frame);
  return frame;
}

// Return physical page frame `page` to the pool by clearing its
// in-use bit in the bitmap. No double-free detection.
void
phys_release_page(uint32_t page) {
  bitmap_clear(kmem_state.phys_pages, page);
}

// Allocate `pages` pages from the kernel-arena free list. Only
// single-page allocations are supported; any other count panics.
// Returns the page's start address, or NULL when the arena is empty.
void *kmem_alloc_pages(size_t pages) {
  if (pages != 1) {
    panic("can only alloc one page at a time");
  }

  struct free_info *fi = (struct free_info *)kmem_state.free;
  if (fi == NULL) {
    // Free list exhausted. The previous version dereferenced the NULL
    // head here (fi->next).
    return NULL;
  }
  kmem_state.free = fi->next;

  return fi;
}

// Return `pages` pages starting at `start` to the free list, keeping it
// sorted by ascending address. Only single-page frees are supported.
// No double-free detection: freeing a page twice corrupts the list.
void kmem_free_pages(void *start, size_t pages) {
  if (pages != 1) {
    panic("can only free one page at a time");
  }

  struct free_info *fi = (struct free_info *)start;
  if (kmem_state.free == NULL || start < kmem_state.free) {
    // Empty list, or the page precedes the current head: it becomes
    // the new head. (The empty-list case previously fell into the
    // else-branch and dereferenced the NULL head.)
    fi->next = kmem_state.free;
    kmem_state.free = start;
  } else {
    // Find the last free page that precedes `start` and splice in.
    struct free_info *free = (struct free_info *)kmem_state.free;
    while (free->next && (void *)free->next < start) {
      free = free->next;
    }
    fi->next = free->next;
    free->next = fi;
  }
}

// Allocate at least `size` bytes of kernel memory, rounded up to whole
// pages. The backing allocator only supports exactly one page, so any
// size outside (0, PAGE_SIZE] currently panics inside
// kmem_alloc_pages(). (Dead commented-out bump-allocator code removed.)
void *kmalloc(size_t size) {
  size_t pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
  return kmem_alloc_pages(pages);
}

// Allocate `size` bytes aligned to `alignment`. Page allocations are
// naturally PAGE_SIZE-aligned, which satisfies any alignment that
// divides PAGE_SIZE. The `alignment` argument used to be ignored
// entirely; now an unsatisfiable (or zero, which would make the modulo
// UB) alignment panics instead of silently returning a possibly
// misaligned pointer. (Dead commented-out padding code removed.)
void *kmalloc_align(size_t alignment, size_t size) {
  if (alignment == 0 || PAGE_SIZE % alignment != 0) {
    panic("kmalloc_align: unsupported alignment");
  }
  size_t pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
  return kmem_alloc_pages(pages);
}

// Free a pointer previously returned by kmalloc()/kmalloc_align().
// Freeing NULL is a no-op, mirroring free(3) semantics.
void kfree(void *p) {
  if (!p) {
    return;
  }

  // TODO: implement size tracking; for now every allocation is assumed
  // to be exactly one page.
  kmem_free_pages(p, 1);
}