[kernel][vm] add support for multiple user address spaces

Generically add support for more than one user address space,
attach address spaces to threads, and context switch between them
as thread swaps happen.

Add support for ARM and ARM64, and stub out support for x86 platforms.
Author: Travis Geiselbrecht
Date:   2016-02-14 12:46:24 -08:00
Parent: a4ca0a6e00
Commit: 9a5cd5c529

17 changed files with 783 additions and 106 deletions
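
At a glance, the shape of the change is that every arch_mmu_* operation now takes an explicit arch_aspace_t handle instead of implicitly operating on a single global address space. A rough summary of the new signatures, paraphrased from the call sites in the diff below (return types are assumed, since this excerpt only shows callers):

    /* paraphrased from call sites below; return types are assumed */
    status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size, uint flags);
    void     arch_mmu_destroy_aspace(arch_aspace_t *aspace);
    int      arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count, uint flags);
    int      arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count);
    status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags);
    void     arch_mmu_context_switch(arch_aspace_t *aspace); /* NULL switches to kernel-only */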

kernel/vm/vmm.c

@@ -39,18 +39,24 @@ vmm_aspace_t _kernel_aspace;
 static void dump_aspace(const vmm_aspace_t *a);
 static void dump_region(const vmm_region_t *r);
 
-void vmm_init(void)
+void vmm_init_preheap(void)
 {
     /* initialize the kernel address space */
     strlcpy(_kernel_aspace.name, "kernel", sizeof(_kernel_aspace.name));
-    _kernel_aspace.base = KERNEL_ASPACE_BASE,
-    _kernel_aspace.size = KERNEL_ASPACE_SIZE,
-    _kernel_aspace.flags = VMM_FLAG_ASPACE_KERNEL;
+    _kernel_aspace.base = KERNEL_ASPACE_BASE;
+    _kernel_aspace.size = KERNEL_ASPACE_SIZE;
+    _kernel_aspace.flags = VMM_ASPACE_FLAG_KERNEL;
     list_initialize(&_kernel_aspace.region_list);
+    arch_mmu_init_aspace(&_kernel_aspace.arch_aspace, KERNEL_ASPACE_BASE, KERNEL_ASPACE_SIZE, ARCH_ASPACE_FLAG_KERNEL);
 
     list_add_head(&aspace_list, &_kernel_aspace.node);
 }
 
+void vmm_init(void)
+{
+}
+
 static inline bool is_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr)
 {
     return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
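
The vmm_init()/vmm_init_preheap() split matters for ordering: the kernel aspace has to exist before the kernel heap comes online, since heap virtual space is carved out of the kernel aspace, while anything that needs dynamic allocation has to wait until after. A minimal sketch of the intended boot ordering (heap_init() is an assumed name, not shown in this diff):

    vmm_init_preheap();  /* statically set up the kernel aspace; no allocation */
    heap_init();         /* heap can now carve its virtual range from the kernel aspace */
    vmm_init();          /* post-heap phase (currently empty) */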
@@ -105,7 +111,7 @@ static vmm_region_t *alloc_region_struct(const char *name, vaddr_t base, size_t
 {
     DEBUG_ASSERT(name);
 
-    vmm_region_t *r = malloc(sizeof(vmm_region_t));
+    vmm_region_t *r = calloc(1, sizeof(vmm_region_t));
     if (!r)
         return NULL;
@@ -169,7 +175,7 @@ static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r)
  *
  * Arch can override this to impose it's own restrictions.
  */
-__WEAK vaddr_t arch_mmu_pick_spot(vaddr_t base, uint prev_region_arch_mmu_flags,
+__WEAK vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace, vaddr_t base, uint prev_region_arch_mmu_flags,
                                   vaddr_t end, uint next_region_arch_mmu_flags,
                                   vaddr_t align, size_t size, uint arch_mmu_flags)
 {
@@ -205,7 +211,7 @@ static inline bool check_gap(vmm_aspace_t *aspace,
         gap_end = aspace->base + aspace->size - 1;
     }
 
-    *pva = arch_mmu_pick_spot(gap_beg, prev ? prev->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
+    *pva = arch_mmu_pick_spot(&aspace->arch_aspace, gap_beg, prev ? prev->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                               gap_end, next ? next->arch_mmu_flags : ARCH_MMU_FLAG_INVALID,
                               align, size, arch_mmu_flags);
     if (*pva < gap_beg)
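
Threading the aspace through arch_mmu_pick_spot() lets an architecture consult per-aspace state when placing a region. A hypothetical override (the ALIGN macro use and the 1MB section policy are illustrative, not from this commit; the caller above validates the returned spot against the gap):

    vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace, vaddr_t base, uint prev_region_arch_mmu_flags,
                               vaddr_t end, uint next_region_arch_mmu_flags,
                               vaddr_t align, size_t size, uint arch_mmu_flags)
    {
        vaddr_t spot = ALIGN(base, align);

        /* e.g. don't share a 1MB section with a differently-mapped neighbor,
           so the arch can use section mappings aggressively */
        if (prev_region_arch_mmu_flags != arch_mmu_flags)
            spot = ALIGN(spot, 1024 * 1024);

        return spot;
    }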
@@ -332,7 +338,7 @@ status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size,
     /* lookup how it's already mapped */
     uint arch_mmu_flags = 0;
-    arch_mmu_query(vaddr, NULL, &arch_mmu_flags);
+    arch_mmu_query(&aspace->arch_aspace, vaddr, NULL, &arch_mmu_flags);
 
     /* build a new region structure */
     vmm_region_t *r = alloc_region(aspace, name, size, vaddr, 0, VMM_FLAG_VALLOC_SPECIFIC, VMM_REGION_FLAG_RESERVED, arch_mmu_flags);
@@ -388,7 +394,7 @@ status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size,
     *ptr = (void *)r->base;
 
     /* map all of the pages */
-    int err = arch_mmu_map(r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
+    int err = arch_mmu_map(&aspace->arch_aspace, r->base, paddr, size / PAGE_SIZE, arch_mmu_flags);
     LTRACEF("arch_mmu_map returns %d\n", err);
 
     ret = NO_ERROR;
@@ -455,7 +461,7 @@ status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t siz
     *ptr = (void *)r->base;
 
     /* map all of the pages */
-    arch_mmu_map(r->base, pa, size / PAGE_SIZE, arch_mmu_flags);
+    arch_mmu_map(&aspace->arch_aspace, r->base, pa, size / PAGE_SIZE, arch_mmu_flags);
     // XXX deal with error mapping here
 
     vm_page_t *p;
@@ -542,7 +548,7 @@ status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **p
         paddr_t pa = page_to_address(p);
         DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));
 
-        arch_mmu_map(va, pa, 1, arch_mmu_flags);
+        arch_mmu_map(&aspace->arch_aspace, va, pa, 1, arch_mmu_flags);
         // XXX deal with error mapping here
 
         list_add_tail(&r->page_list, &p->node);
@@ -592,7 +598,7 @@ status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t vaddr)
     list_delete(&r->node);
 
     /* unmap it */
-    arch_mmu_unmap(r->base, r->size / PAGE_SIZE);
+    arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE);
 
     mutex_release(&vmm_lock);
@@ -607,7 +613,9 @@ status_t vmm_free_region(vmm_aspace_t *aspace, vaddr_t vaddr)
 status_t vmm_create_aspace(vmm_aspace_t **_aspace, const char *name, uint flags)
 {
-    vmm_aspace_t *aspace = malloc(sizeof(vmm_aspace_t));
+    status_t err;
+
+    vmm_aspace_t *aspace = calloc(1, sizeof(vmm_aspace_t));
     if (!aspace)
         return ERR_NO_MEMORY;
@@ -616,7 +624,9 @@ status_t vmm_create_aspace(vmm_aspace_t **_aspace, const char *name, uint flags)
     else
         strlcpy(aspace->name, "unnamed", sizeof(aspace->name));
 
-    if (flags & VMM_FLAG_ASPACE_KERNEL) {
+    aspace->flags = flags;
+
+    if (aspace->flags & VMM_ASPACE_FLAG_KERNEL) {
         aspace->base = KERNEL_ASPACE_BASE;
         aspace->size = KERNEL_ASPACE_SIZE;
     } else {
@@ -624,6 +634,14 @@ status_t vmm_create_aspace(vmm_aspace_t **_aspace, const char *name, uint flags)
         aspace->size = USER_ASPACE_SIZE;
     }
 
+    /* initialize the arch specific component to our address space */
+    err = arch_mmu_init_aspace(&aspace->arch_aspace, aspace->base, aspace->size,
+                               (aspace->flags & VMM_ASPACE_FLAG_KERNEL) ? ARCH_ASPACE_FLAG_KERNEL : 0);
+    if (err < 0) {
+        free(aspace);
+        return err;
+    }
+
     list_clear_node(&aspace->node);
     list_initialize(&aspace->region_list);
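
Putting the pieces together, creating a user aspace and attaching it to a thread now looks roughly like the create_test_aspace console command added further down (a sketch with error handling trimmed):

    vmm_aspace_t *as;
    status_t err = vmm_create_aspace(&as, "test", 0);  /* flags 0 = user aspace */
    if (err >= 0) {
        get_current_thread()->aspace = as;
        /* the new translation tables are actually loaded on the next context switch */
    }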
@@ -655,7 +673,7 @@ status_t vmm_free_aspace(vmm_aspace_t *aspace)
         list_add_tail(&region_list, &r->node);
 
         /* unmap it */
-        arch_mmu_unmap(r->base, r->size / PAGE_SIZE);
+        arch_mmu_unmap(&aspace->arch_aspace, r->base, r->size / PAGE_SIZE);
     }
 
     mutex_release(&vmm_lock);
@@ -668,12 +686,29 @@ status_t vmm_free_aspace(vmm_aspace_t *aspace)
         free(r);
     }
 
+    /* make sure the current thread does not map the aspace */
+    thread_t *current_thread = get_current_thread();
+    if (current_thread->aspace == aspace) {
+        THREAD_LOCK(state);
+        current_thread->aspace = NULL;
+        vmm_context_switch(aspace, NULL);
+        THREAD_UNLOCK(state);
+    }
+
+    /* destroy the arch portion of the aspace */
+    arch_mmu_destroy_aspace(&aspace->arch_aspace);
+
     /* free the aspace */
     free(aspace);
 
     return NO_ERROR;
 }
 
+void vmm_context_switch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace)
+{
+    arch_mmu_context_switch(newaspace ? &newaspace->arch_aspace : NULL);
+}
+
 static void dump_region(const vmm_region_t *r)
 {
     printf("\tregion %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x mmu_flags 0x%x\n",
@@ -703,10 +738,18 @@ usage:
printf("%s alloc <size> <align_pow2>\n", argv[0].str);
printf("%s alloc_physical <paddr> <size> <align_pow2>\n", argv[0].str);
printf("%s alloc_contig <size> <align_pow2>\n", argv[0].str);
printf("%s free_region <address>\n", argv[0].str);
printf("%s create_aspace\n", argv[0].str);
printf("%s create_test_aspace\n", argv[0].str);
printf("%s free_aspace <address>\n", argv[0].str);
printf("%s set_test_aspace <address>\n", argv[0].str);
return ERR_GENERIC;
}
static vmm_aspace_t *test_aspace;
if (!test_aspace)
test_aspace = vmm_get_kernel_aspace();
if (!strcmp(argv[1].str, "aspaces")) {
vmm_aspace_t *a;
list_for_every_entry(&aspace_list, a, vmm_aspace_t, node) {
@@ -716,24 +759,59 @@ usage:
         if (argc < 4) goto notenoughargs;
         void *ptr = (void *)0x99;
-        status_t err = vmm_alloc(vmm_get_kernel_aspace(), "alloc test", argv[2].u, &ptr, argv[3].u, 0, 0);
+        status_t err = vmm_alloc(test_aspace, "alloc test", argv[2].u, &ptr, argv[3].u, 0, 0);
         printf("vmm_alloc returns %d, ptr %p\n", err, ptr);
     } else if (!strcmp(argv[1].str, "alloc_physical")) {
         if (argc < 4) goto notenoughargs;
         void *ptr = (void *)0x99;
-        status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "physical test", argv[3].u, &ptr, argv[4].u, argv[2].u, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
+        status_t err = vmm_alloc_physical(test_aspace, "physical test", argv[3].u, &ptr, argv[4].u, argv[2].u, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
         printf("vmm_alloc_physical returns %d, ptr %p\n", err, ptr);
     } else if (!strcmp(argv[1].str, "alloc_contig")) {
         if (argc < 4) goto notenoughargs;
         void *ptr = (void *)0x99;
-        status_t err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "contig test", argv[2].u, &ptr, argv[3].u, 0, 0);
+        status_t err = vmm_alloc_contiguous(test_aspace, "contig test", argv[2].u, &ptr, argv[3].u, 0, 0);
         printf("vmm_alloc_contig returns %d, ptr %p\n", err, ptr);
+    } else if (!strcmp(argv[1].str, "free_region")) {
+        if (argc < 2) goto notenoughargs;
+        status_t err = vmm_free_region(test_aspace, (vaddr_t)argv[2].u);
+        printf("vmm_free_region returns %d\n", err);
+    } else if (!strcmp(argv[1].str, "create_aspace")) {
+        vmm_aspace_t *aspace;
+        status_t err = vmm_create_aspace(&aspace, "test", 0);
+        printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
+    } else if (!strcmp(argv[1].str, "create_test_aspace")) {
+        vmm_aspace_t *aspace;
+        status_t err = vmm_create_aspace(&aspace, "test", 0);
+        printf("vmm_create_aspace returns %d, aspace %p\n", err, aspace);
+        if (err < 0)
+            return err;
+        test_aspace = aspace;
+
+        get_current_thread()->aspace = aspace;
+        thread_sleep(1); // XXX hack to force it to reschedule and thus load the aspace
+    } else if (!strcmp(argv[1].str, "free_aspace")) {
+        if (argc < 2) goto notenoughargs;
+        vmm_aspace_t *aspace = (void *)argv[2].u;
+        if (test_aspace == aspace)
+            test_aspace = NULL;
+        if (get_current_thread()->aspace == aspace) {
+            get_current_thread()->aspace = NULL;
+            thread_sleep(1); // hack
+        }
+        status_t err = vmm_free_aspace(aspace);
+        printf("vmm_free_aspace returns %d\n", err);
+    } else if (!strcmp(argv[1].str, "set_test_aspace")) {
+        if (argc < 2) goto notenoughargs;
+        test_aspace = (void *)argv[2].u;
+
+        get_current_thread()->aspace = test_aspace;
+        thread_sleep(1); // XXX hack to force it to reschedule and thus load the aspace
     } else {
         printf("unknown command\n");
         goto usage;