[Krafton Jungle] PintOS 2.0.0
크래프톤 정글 PintOS
 
Loading...
Searching...
No Matches
palloc.c File Reference
#include "threads/palloc.h"
#include <bitmap.h>
#include <debug.h>
#include <inttypes.h>
#include <round.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "threads/init.h"
#include "threads/loader.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
Include dependency graph for palloc.c:

Classes

struct  pool
 
struct  multiboot_info
 
struct  e820_entry
 
struct  area
 

Macros

#define BASE_MEM_THRESHOLD   0x100000
 
#define USABLE   1
 
#define ACPI_RECLAIMABLE   3
 
#define APPEND_HILO(hi, lo)   (((uint64_t) ((hi)) << 32) + (lo))
 

Functions

static void init_pool (struct pool *p, void **bm_base, uint64_t start, uint64_t end)
 
static bool page_from_pool (const struct pool *, void *page)
 
static void resolve_area_info (struct area *base_mem, struct area *ext_mem)
 
static void populate_pools (struct area *base_mem, struct area *ext_mem)
 
uint64_t palloc_init (void)
 
void * palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
 
void * palloc_get_page (enum palloc_flags flags)
 
void palloc_free_multiple (void *pages, size_t page_cnt)
 
void palloc_free_page (void *page)
 

Variables

static struct pool kernel_pool user_pool
 
size_t user_page_limit = SIZE_MAX
 

Macro Definition Documentation

◆ ACPI_RECLAIMABLE

#define ACPI_RECLAIMABLE   3

◆ APPEND_HILO

#define APPEND_HILO (   hi,
  lo 
)    (((uint64_t) ((hi)) << 32) + (lo))

◆ BASE_MEM_THRESHOLD

#define BASE_MEM_THRESHOLD   0x100000

◆ USABLE

#define USABLE   1

Function Documentation

◆ init_pool()

static void init_pool ( struct pool p,
void **  bm_base,
uint64_t  start,
uint64_t  end 
)
static
339 {
340 /* We'll put the pool's used_map at its base.
341 Calculate the space needed for the bitmap
342 and subtract it from the pool's size. */
343 uint64_t pgcnt = (end - start) / PGSIZE;
344 size_t bm_pages = DIV_ROUND_UP (bitmap_buf_size (pgcnt), PGSIZE) * PGSIZE;
345
346 lock_init(&p->lock);
347 p->used_map = bitmap_create_in_buf (pgcnt, *bm_base, bm_pages);
348 p->base = (void *) start;
349
350 // Mark all to unusable.
351 bitmap_set_all(p->used_map, true);
352
353 *bm_base += bm_pages;
354}
struct bitmap * bitmap_create_in_buf(size_t bit_cnt, void *, size_t byte_cnt)
size_t bitmap_buf_size(size_t bit_cnt)
Definition: bitmap.c:105
void bitmap_set_all(struct bitmap *, bool)
Definition: bitmap.c:191
#define DIV_ROUND_UP(X, STEP)
Definition: round.h:10
unsigned long long int uint64_t
Definition: stdint.h:29
struct bitmap * used_map
Definition: palloc.c:32
uint8_t * base
Definition: palloc.c:33
struct lock lock
Definition: palloc.c:31
void lock_init(struct lock *)
Definition: synch.c:186
#define PGSIZE
Definition: vaddr.h:20
Here is the call graph for this function:
Here is the caller graph for this function:

◆ page_from_pool()

static bool page_from_pool ( const struct pool pool,
void *  page 
)
static
359 {
360 size_t page_no = pg_no (page);
361 size_t start_page = pg_no (pool->base);
362 size_t end_page = start_page + bitmap_size (pool->used_map);
363 return page_no >= start_page && page_no < end_page;
364}
size_t bitmap_size(const struct bitmap *)
Definition: bitmap.c:124
Definition: vm.h:51
Definition: palloc.c:30
#define pg_no(va)
Definition: vaddr.h:26
Here is the call graph for this function:
Here is the caller graph for this function:

◆ palloc_free_multiple()

void palloc_free_multiple ( void *  pages,
size_t  page_cnt 
)
307 {
308 struct pool *pool;
309 size_t page_idx;
310
311 ASSERT (pg_ofs (pages) == 0);
312 if (pages == NULL || page_cnt == 0)
313 return;
314
315 if (page_from_pool (&kernel_pool, pages))
316 pool = &kernel_pool;
317 else if (page_from_pool (&user_pool, pages))
318 pool = &user_pool;
319 else
320 NOT_REACHED ();
321
322 page_idx = pg_no (pages) - pg_no (pool->base);
323
324#ifndef NDEBUG
325 memset (pages, 0xcc, PGSIZE * page_cnt);
326#endif
327 ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
328 bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
329}
void bitmap_set_multiple(struct bitmap *, size_t start, size_t cnt, bool)
Definition: bitmap.c:199
bool bitmap_all(const struct bitmap *, size_t start, size_t cnt)
Definition: bitmap.c:260
#define ASSERT(CONDITION)
Definition: debug.h:30
#define NOT_REACHED()
Definition: debug.h:34
static bool page_from_pool(const struct pool *, void *page)
Definition: palloc.c:359
static struct pool kernel_pool user_pool
Definition: palloc.c:37
#define NULL
Definition: stddef.h:4
void * memset(void *, int, size_t)
Definition: string.c:258
#define pg_ofs(va)
Definition: vaddr.h:24
Here is the call graph for this function:
Here is the caller graph for this function:

◆ palloc_free_page()

/* Frees the single page at PAGE.
   NOTE(review): the documentation extraction dropped this
   function's body line; restored as the call to
   palloc_free_multiple() shown in its cross-reference list,
   mirroring palloc_get_page()'s count-of-1 wrapper — confirm
   against the original source. */
void
palloc_free_page (void *page) {
	palloc_free_multiple (page, 1);
}
void palloc_free_multiple(void *pages, size_t page_cnt)
Definition: palloc.c:307
Here is the call graph for this function:
Here is the caller graph for this function:

◆ palloc_get_multiple()

void * palloc_get_multiple ( enum palloc_flags  flags,
size_t  page_cnt 
)
263 {
264 struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
265
267 size_t page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
269 void *pages;
270
271 if (page_idx != BITMAP_ERROR)
272 pages = pool->base + PGSIZE * page_idx;
273 else
274 pages = NULL;
275
276 if (pages) {
277 if (flags & PAL_ZERO)
278 memset (pages, 0, PGSIZE * page_cnt);
279 } else {
280 if (flags & PAL_ASSERT)
281 PANIC ("palloc_get: out of pages");
282 }
283
284 return pages;
285}
size_t bitmap_scan_and_flip(struct bitmap *, size_t start, size_t cnt, bool)
Definition: bitmap.c:293
#define BITMAP_ERROR
Definition: bitmap.h:36
#define PANIC(...)
Definition: debug.h:14
@ PAL_ZERO
Definition: palloc.h:10
@ PAL_USER
Definition: palloc.h:11
@ PAL_ASSERT
Definition: palloc.h:9
void lock_release(struct lock *)
Definition: synch.c:243
void lock_acquire(struct lock *)
Definition: synch.c:202
Here is the call graph for this function:
Here is the caller graph for this function:

◆ palloc_get_page()

/* Obtains one free page and returns its address, or NULL if none
   is available.  Equivalent to palloc_get_multiple() with a page
   count of 1; FLAGS has the same meaning. */
void *
palloc_get_page (enum palloc_flags flags) {
	return palloc_get_multiple (flags, 1);
}
void * palloc_get_multiple(enum palloc_flags flags, size_t page_cnt)
Definition: palloc.c:263
Here is the call graph for this function:
Here is the caller graph for this function:

◆ palloc_init()

uint64_t palloc_init ( void  )
239 {
240 /* End of the kernel as recorded by the linker.
241 See kernel.lds.S. */
242 extern char _end;
243 struct area base_mem = { .size = 0 };
244 struct area ext_mem = { .size = 0 };
245
246 resolve_area_info (&base_mem, &ext_mem);
247 printf ("Pintos booting with: \n");
248 printf ("\tbase_mem: 0x%llx ~ 0x%llx (Usable: %'llu kB)\n",
249 base_mem.start, base_mem.end, base_mem.size / 1024);
250 printf ("\text_mem: 0x%llx ~ 0x%llx (Usable: %'llu kB)\n",
251 ext_mem.start, ext_mem.end, ext_mem.size / 1024);
252 populate_pools (&base_mem, &ext_mem);
253 return ext_mem.end;
254}
static void resolve_area_info(struct area *base_mem, struct area *ext_mem)
Definition: palloc.c:80
static void populate_pools(struct area *base_mem, struct area *ext_mem)
Definition: palloc.c:123
int printf(const char *,...) PRINTF_FORMAT(1
Definition: palloc.c:67
uint64_t start
Definition: palloc.c:68
uint64_t size
Definition: palloc.c:70
uint64_t end
Definition: palloc.c:69
Here is the call graph for this function:
Here is the caller graph for this function:

◆ populate_pools()

static void populate_pools ( struct area base_mem,
struct area ext_mem 
)
static
123 {
124 extern char _end;
125 void *free_start = pg_round_up (&_end);
126
127 uint64_t total_pages = (base_mem->size + ext_mem->size) / PGSIZE;
128 uint64_t user_pages = total_pages / 2 > user_page_limit ?
129 user_page_limit : total_pages / 2;
130 uint64_t kern_pages = total_pages - user_pages;
131
132 // Parse E820 map to claim the memory region for each pool.
133 enum { KERN_START, KERN, USER_START, USER } state = KERN_START;
134 uint64_t rem = kern_pages;
135 uint64_t region_start = 0, end = 0, start, size, size_in_pg;
136
137 struct multiboot_info *mb_info = ptov (MULTIBOOT_INFO);
138 struct e820_entry *entries = ptov (mb_info->mmap_base);
139
140 uint32_t i;
141 for (i = 0; i < mb_info->mmap_len / sizeof (struct e820_entry); i++) {
142 struct e820_entry *entry = &entries[i];
143 if (entry->type == ACPI_RECLAIMABLE || entry->type == USABLE) {
144 start = (uint64_t) ptov (APPEND_HILO (entry->mem_hi, entry->mem_lo));
145 size = APPEND_HILO (entry->len_hi, entry->len_lo);
146 end = start + size;
147 size_in_pg = size / PGSIZE;
148
149 if (state == KERN_START) {
150 region_start = start;
151 state = KERN;
152 }
153
154 switch (state) {
155 case KERN:
156 if (rem > size_in_pg) {
157 rem -= size_in_pg;
158 break;
159 }
160 // generate kernel pool
161 init_pool (&kernel_pool,
162 &free_start, region_start, start + rem * PGSIZE);
163 // Transition to the next state
164 if (rem == size_in_pg) {
165 rem = user_pages;
166 state = USER_START;
167 } else {
168 region_start = start + rem * PGSIZE;
169 rem = user_pages - size_in_pg + rem;
170 state = USER;
171 }
172 break;
173 case USER_START:
174 region_start = start;
175 state = USER;
176 break;
177 case USER:
178 if (rem > size_in_pg) {
179 rem -= size_in_pg;
180 break;
181 }
182 ASSERT (rem == size);
183 break;
184 default:
185 NOT_REACHED ();
186 }
187 }
188 }
189
190 // generate the user pool
191 init_pool(&user_pool, &free_start, region_start, end);
192
193 // Iterate over the e820_entry. Setup the usable.
194 uint64_t usable_bound = (uint64_t) free_start;
195 struct pool *pool;
196 void *pool_end;
197 size_t page_idx, page_cnt;
198
199 for (i = 0; i < mb_info->mmap_len / sizeof (struct e820_entry); i++) {
200 struct e820_entry *entry = &entries[i];
201 if (entry->type == ACPI_RECLAIMABLE || entry->type == USABLE) {
202 uint64_t start = (uint64_t)
203 ptov (APPEND_HILO (entry->mem_hi, entry->mem_lo));
204 uint64_t size = APPEND_HILO (entry->len_hi, entry->len_lo);
205 uint64_t end = start + size;
206
207 // TODO: add 0x1000 ~ 0x200000, This is not a matter for now.
208 // All the pages are unuable
209 if (end < usable_bound)
210 continue;
211
212 start = (uint64_t)
213 pg_round_up (start >= usable_bound ? start : usable_bound);
214split:
215 if (page_from_pool (&kernel_pool, (void *) start))
216 pool = &kernel_pool;
217 else if (page_from_pool (&user_pool, (void *) start))
218 pool = &user_pool;
219 else
220 NOT_REACHED ();
221
222 pool_end = pool->base + bitmap_size (pool->used_map) * PGSIZE;
223 page_idx = pg_no (start) - pg_no (pool->base);
224 if ((uint64_t) pool_end < end) {
225 page_cnt = ((uint64_t) pool_end - start) / PGSIZE;
226 bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
227 start = (uint64_t) pool_end;
228 goto split;
229 } else {
230 page_cnt = ((uint64_t) end - start) / PGSIZE;
231 bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
232 }
233 }
234 }
235}
#define MULTIBOOT_INFO
Definition: loader.h:15
uint16_t size
Definition: mmu.h:0
static void init_pool(struct pool *p, void **bm_base, uint64_t start, uint64_t end)
Definition: palloc.c:339
#define APPEND_HILO(hi, lo)
Definition: palloc.c:76
#define ACPI_RECLAIMABLE
Definition: palloc.c:75
#define USABLE
Definition: palloc.c:74
size_t user_page_limit
Definition: palloc.c:40
unsigned int uint32_t
Definition: stdint.h:26
Definition: palloc.c:57
uint32_t len_hi
Definition: palloc.c:62
uint32_t mem_lo
Definition: palloc.c:59
uint32_t len_lo
Definition: palloc.c:61
uint32_t type
Definition: palloc.c:63
uint32_t mem_hi
Definition: palloc.c:60
Definition: palloc.c:47
uint32_t mmap_len
Definition: palloc.c:52
uint32_t mmap_base
Definition: palloc.c:53
#define pg_round_up(va)
Definition: vaddr.h:29
#define ptov(paddr)
Definition: vaddr.h:49
Here is the call graph for this function:
Here is the caller graph for this function:

◆ resolve_area_info()

static void resolve_area_info ( struct area base_mem,
struct area ext_mem 
)
static
80 {
81 struct multiboot_info *mb_info = ptov (MULTIBOOT_INFO);
82 struct e820_entry *entries = ptov (mb_info->mmap_base);
83 uint32_t i;
84
85 for (i = 0; i < mb_info->mmap_len / sizeof (struct e820_entry); i++) {
86 struct e820_entry *entry = &entries[i];
87 if (entry->type == ACPI_RECLAIMABLE || entry->type == USABLE) {
88 uint64_t start = APPEND_HILO (entry->mem_hi, entry->mem_lo);
89 uint64_t size = APPEND_HILO (entry->len_hi, entry->len_lo);
90 uint64_t end = start + size;
91 printf("%llx ~ %llx %d\n", start, end, entry->type);
92
93 struct area *area = start < BASE_MEM_THRESHOLD ? base_mem : ext_mem;
94
95 // First entry that belong to this area.
96 if (area->size == 0) {
97 *area = (struct area) {
98 .start = start,
99 .end = end,
100 .size = size,
101 };
102 } else { // otherwise
103 // Extend start
104 if (area->start > start)
105 area->start = start;
106 // Extend end
107 if (area->end < end)
108 area->end = end;
109 // Extend size
110 area->size += size;
111 }
112 }
113 }
114}
#define BASE_MEM_THRESHOLD
Definition: palloc.c:73
Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ user_page_limit

size_t user_page_limit = SIZE_MAX

◆ user_pool

struct pool kernel_pool user_pool
static