[pmm] add a few more convenience routines

Add a routine to just allocate a single page and a fast path for
allocating a single kvaddr page.
Travis Geiselbrecht
2020-05-10 16:43:49 -07:00
parent d6bba37cec
commit 7cc7d79e74
2 changed files with 24 additions and 1 deletion


@@ -129,6 +129,9 @@ status_t pmm_add_arena(pmm_arena_t *arena) __NONNULL((1));
 */
size_t pmm_alloc_pages(uint count, struct list_node *list) __NONNULL((2));
/* Allocate a single page */
vm_page_t *pmm_alloc_page(void);
/* Allocate a specific range of physical pages, adding to the tail of the passed list.
 * The list must be initialized.
 * Returns the number of pages allocated.


@@ -137,6 +137,19 @@ done:
    return allocated;
}
vm_page_t *pmm_alloc_page(void) {
    struct list_node list = LIST_INITIAL_VALUE(list);

    size_t ret = pmm_alloc_pages(1, &list);
    if (ret == 0) {
        return NULL;
    }

    DEBUG_ASSERT(ret == 1);

    return list_peek_head_type(&list, vm_page_t, node);
}
size_t pmm_alloc_range(paddr_t address, uint count, struct list_node *list) {
LTRACEF("address 0x%lx, count %u\n", address, count);
@@ -228,8 +241,15 @@ size_t pmm_free_page(vm_page_t *page) {
void *pmm_alloc_kpages(uint count, struct list_node *list) {
    LTRACEF("count %u\n", count);

    // XXX do fast path for single page
    /* fast path for single page */
    if (count == 1) {
        vm_page_t *p = pmm_alloc_page();
        if (!p) {
            return NULL;
        }

        return paddr_to_kvaddr(vm_page_to_paddr(p));
    }

    paddr_t pa;
    size_t alloc_count = pmm_alloc_contiguous(count, PAGE_SIZE_SHIFT, &pa, list);
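
Finally, a sketch of what the new fast path amounts to for a caller wanting one zeroed kernel-virtual page: it strings together the same three calls the fast path uses. The wrapper name and the zeroing step are assumptions for illustration, not part of the commit.

#include <string.h>
#include <kernel/vm.h>

/* Illustrative single-page kernel-virtual allocation: with this commit,
 * pmm_alloc_kpages(1, ...) short-circuits to essentially this sequence
 * instead of going through pmm_alloc_contiguous(). */
static void *alloc_zeroed_kpage(void) {
    vm_page_t *p = pmm_alloc_page();
    if (!p) {
        return NULL;
    }

    void *va = paddr_to_kvaddr(vm_page_to_paddr(p));
    memset(va, 0, PAGE_SIZE);       /* the pmm is not assumed to zero pages */
    return va;
}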