[kernel][vm] first stab at a virtual memory manager
Add kernel/vm code to handle memory management in large, page-aligned regions of virtual address space. It is composed of a pmm (physical memory manager), a vmm (virtual memory manager), and architecture-specific code to manage the mmu. Add ARMv7 paging code.
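As a rough illustration of how the layers are meant to compose (this sketch is not part of the commit: the helper map_and_verify() and its arguments are invented for the example, while arch_mmu_map() and arch_mmu_query() are the arch interfaces the new kernel/vm code calls below):

/* Illustration only, not part of this commit: a hypothetical helper showing
 * how the arch mmu layer is driven. map_and_verify() is an invented name;
 * arch_mmu_map()/arch_mmu_query() are the interfaces used in kernel/vm/vm.c. */
#include <arch/mmu.h>
#include <err.h>
#include <sys/types.h>

static status_t map_and_verify(vaddr_t va, paddr_t pa, uint count, uint flags)
{
    /* ask the arch code to install 'count' page mappings at va -> pa */
    int err = arch_mmu_map(va, pa, count, flags);
    if (err < 0)
        return err;

    /* read the first page back out of the mmu to confirm the translation */
    paddr_t check_pa;
    uint check_flags;
    status_t qerr = arch_mmu_query(va, &check_pa, &check_flags);
    if (qerr < 0)
        return qerr;

    return (check_pa == pa) ? NO_ERROR : ERR_GENERIC;
}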
kernel/vm/vm.c (Normal file, 162 lines added)
@@ -0,0 +1,162 @@
/*
 * Copyright (c) 2014 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <kernel/vm.h>
#include "vm_priv.h"

#include <trace.h>
#include <err.h>
#include <string.h>
#include <lk/init.h>
#include <lib/console.h>
#include <arch/mmu.h>

#define LOCAL_TRACE 1

extern int _start;
extern int _end;

/* mark the physical pages backing a range of virtual addresses as in use.
 * allocate the physical pages and throw them away */
static void mark_pages_in_use(vaddr_t va, size_t len)
{
    LTRACEF("va 0x%lx, len 0x%zx\n", va, len);
    for (size_t offset = 0; offset < len; offset += PAGE_SIZE) {
        uint flags;
        paddr_t pa;

        status_t err = arch_mmu_query(va + offset, &pa, &flags);
        if (err >= 0) {
            //LTRACEF("va 0x%x, pa 0x%x, flags 0x%x, err %d\n", va + offset, pa, flags, err);

            /* allocate the range, throw the results away */
            struct list_node list;
            pmm_alloc_range(pa, 1, &list);
        }
    }
}

static void vm_init_preheap(uint level)
{
    LTRACE_ENTRY;

    /* mark all of the kernel pages in use */
    LTRACEF("marking all kernel pages as used\n");
    mark_pages_in_use((vaddr_t)&_start, ((uintptr_t)&_end - (uintptr_t)&_start));

    /* mark the physical pages used by the boot time allocator */
    if (boot_alloc_end != boot_alloc_start) {
        LTRACEF("marking boot alloc used from 0x%lx to 0x%lx\n", boot_alloc_start, boot_alloc_end);

        // XXX handle last partial page?
        mark_pages_in_use(boot_alloc_start, boot_alloc_end - boot_alloc_start);
    }
}

static void vm_init_postheap(uint level)
{
    LTRACE_ENTRY;

    vmm_init();

    /* create vmm regions to cover what is already there from the initial mapping table */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY)) {
            vmm_reserve_space(vmm_get_kernel_aspace(), map->name, map->size, map->virt);
        }

        map++;
    }
}

void *paddr_to_kvaddr(paddr_t pa)
{
    /* slow path to do reverse lookup */
    struct mmu_initial_mapping *map = mmu_initial_mappings;
    while (map->size > 0) {
        if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY) &&
                pa >= map->phys &&
                pa <= map->phys + map->size) {
            return (void *)(map->virt + (pa - map->phys));
        }
        map++;
    }
    return NULL;
}

static int cmd_vm(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage:\n");
        printf("%s phys2virt <address>\n", argv[0].str);
        printf("%s virt2phys <address>\n", argv[0].str);
        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
        printf("%s unmap <virt> <count>\n", argv[0].str);
        return ERR_GENERIC;
    }

    if (!strcmp(argv[1].str, "phys2virt")) {
        if (argc < 3) goto notenoughargs;

        void *ptr = paddr_to_kvaddr(argv[2].u);
        printf("paddr_to_kvaddr returns %p\n", ptr);
    } else if (!strcmp(argv[1].str, "virt2phys")) {
        if (argc < 3) goto notenoughargs;

        paddr_t pa;
        uint flags;
        status_t err = arch_mmu_query(argv[2].u, &pa, &flags);
        printf("arch_mmu_query returns %d\n", err);
        if (err >= 0) {
            printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
        }
    } else if (!strcmp(argv[1].str, "map")) {
        if (argc < 6) goto notenoughargs;

        int err = arch_mmu_map(argv[3].u, argv[2].u, argv[4].u, argv[5].u);
        printf("arch_mmu_map returns %d\n", err);
    } else if (!strcmp(argv[1].str, "unmap")) {
        if (argc < 4) goto notenoughargs;

        int err = arch_mmu_unmap(argv[2].u, argv[3].u);
        printf("arch_mmu_unmap returns %d\n", err);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return NO_ERROR;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
{ "vm", "vm commands", &cmd_vm },
#endif
STATIC_COMMAND_END(vm);

LK_INIT_HOOK(vm_preheap, &vm_init_preheap, LK_INIT_LEVEL_HEAP - 1);
LK_INIT_HOOK(vm, &vm_init_postheap, LK_INIT_LEVEL_VM);
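
For context only (not part of the commit): a minimal sketch of how a later init hook might consume paddr_to_kvaddr() once the vm hook above has run. The driver name, hook level offset, and physical base are made up for the example.

/* Illustration only: a hypothetical consumer of paddr_to_kvaddr(),
 * registered one level after LK_INIT_LEVEL_VM so the hooks above have
 * already run. The device name and physical base are invented. */
#include <kernel/vm.h>
#include <lk/init.h>
#include <trace.h>

#define EXAMPLE_DEV_PHYS_BASE 0x48000000 /* hypothetical peripheral base */

static void example_dev_init(uint level)
{
    /* translate the peripheral's physical base through the initial
     * mapping table that paddr_to_kvaddr() walks */
    void *regs = paddr_to_kvaddr(EXAMPLE_DEV_PHYS_BASE);
    if (!regs) {
        TRACEF("physical base not covered by a permanent initial mapping\n");
        return;
    }

    /* regs now points at the device registers in kernel virtual space */
}

LK_INIT_HOOK(example_dev, &example_dev_init, LK_INIT_LEVEL_VM + 1);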