[arch][riscv] add simple support for context switching user space aspaces

Pretty inefficient but probably works well enough.
Travis Geiselbrecht
2021-04-09 02:00:57 -07:00
parent 790916d14e
commit a3713e8b39
3 changed files with 19 additions and 5 deletions
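For orientation before the diffs: a hedged sketch of where the new arch_mmu_context_switch() path would get exercised. The vmm_context_switch_sketch name, the vmm_aspace_t type, and its arch_aspace field are assumptions based on LK conventions and are not part of this commit; passing NULL means "kernel address space only", matching the arch-level handling below.

    // Hypothetical caller sketch (not from this commit): the generic VM layer
    // invokes the arch hook when switching between threads whose address
    // spaces differ. NULL selects the kernel-only address space.
    static void vmm_context_switch_sketch(vmm_aspace_t *oldspace, vmm_aspace_t *newaspace) {
        if (newaspace != oldspace) {
            arch_mmu_context_switch(newaspace ? &newaspace->arch_aspace : NULL);
        }
    }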


@@ -59,6 +59,9 @@
 #define RISCV_CSR_XSTATUS_IE (1u << (RISCV_XMODE_OFFSET + 0))
 #define RISCV_CSR_XSTATUS_PIE (1u << (RISCV_XMODE_OFFSET + 4))
+#define RISCV_CSR_XSTATUS_SPP (1u << 8)
+#define RISCV_CSR_XSTATUS_SUM (1u << 18)
+#define RISCV_CSR_XSTATUS_MXR (1u << 19)
 #define RISCV_CSR_XIE_SIE (1u << (RISCV_XMODE_OFFSET + 0))
 #define RISCV_CSR_XIE_TIE (1u << (RISCV_XMODE_OFFSET + 4))
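The three added status bits follow the RISC-V privileged spec: SPP records the privilege level the hart was in before a trap, SUM (permit Supervisor User Memory access) lets supervisor-mode code load/store through user (U=1) mappings, and MXR makes execute-only pages readable to loads. A minimal sketch of the usual SUM pattern, bracketing a direct user-memory access; the copy_from_user_sketch name is hypothetical, and a riscv_csr_clear counterpart to the riscv_csr_set macro used later in this commit is assumed (the commit itself simply leaves SUM set globally for now):

    // Sketch only: enable SUM around a supervisor-mode access to user memory
    // so the load doesn't fault on a U=1 page, then drop it again.
    static int copy_from_user_sketch(void *dst, const void *user_src, size_t len) {
        riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_SUM);
        memcpy(dst, user_src, len);    // would trap without SUM set
        riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_SUM);
        return 0;
    }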


@@ -98,8 +98,8 @@ uintptr_t constexpr page_mask_per_level(uint level) {
 constexpr uint kernel_start_index = vaddr_to_index(KERNEL_ASPACE_BASE, RISCV_MMU_PT_LEVELS - 1);
 constexpr uint kernel_end_index = vaddr_to_index(KERNEL_ASPACE_BASE + KERNEL_ASPACE_SIZE - 1UL, RISCV_MMU_PT_LEVELS - 1);
-static_assert(kernel_end_index >= kernel_start_index && kernel_end_index < RISCV_MMU_PT_ENTRIES);
-static_assert(kernel_end_index - kernel_start_index + 1 == RISCV_MMU_KERNEL_PT_ENTRIES);
+static_assert(kernel_end_index >= kernel_start_index && kernel_end_index < RISCV_MMU_PT_ENTRIES, "");
+static_assert(kernel_end_index - kernel_start_index + 1 == RISCV_MMU_KERNEL_PT_ENTRIES, "");

 void riscv_set_satp(uint asid, paddr_t pt) {
     ulong satp;
@@ -576,15 +576,26 @@ int arch_mmu_unmap(arch_aspace_t *aspace, const vaddr_t _vaddr, const uint _coun
 void arch_mmu_context_switch(arch_aspace_t *aspace) {
     LTRACEF("aspace %p\n", aspace);

-    DEBUG_ASSERT(aspace->magic == RISCV_ASPACE_MAGIC);
+    DEBUG_ASSERT(!aspace || aspace->magic == RISCV_ASPACE_MAGIC);

-    PANIC_UNIMPLEMENTED;
+    if (!aspace) {
+        // switch to the kernel address space
+        riscv_set_satp(0, kernel_aspace->pt_phys);
+    } else {
+        riscv_set_satp(0, aspace->pt_phys);
+    }
+
+    // TODO: deal with TLB flushes.
+    // for now, riscv_set_satp() does a full local TLB dump
 }

 extern "C"
 void riscv_mmu_init_secondaries() {
     // switch to the proper kernel pgtable, with the trampoline parts unmapped
     riscv_set_satp(0, kernel_pgtable_phys);
+
+    // set the SUM bit so we can access user space directly (for now)
+    riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_SUM);
 }

 // called once on the boot cpu during very early (single threaded) init
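riscv_set_satp()'s body is outside this hunk; for reference, here is a sketch of what writing satp for Sv39 involves per the privileged spec, where MODE lives in bits 63:60 (8 = Sv39), the ASID in bits 59:44, and the root table's physical page number in bits 43:0. The helper name is hypothetical, and the unconditional sfence.vma corresponds to the "full local TLB dump" the TODO above refers to:

    // Sketch (not the LK implementation): compose an Sv39 satp value, install
    // it, and flush the whole local TLB, since nothing per-ASID is tracked yet
    // (the callers above always pass asid 0).
    static inline void set_satp_sketch(unsigned int asid, unsigned long pt_phys) {
        unsigned long satp = (8UL << 60)                    // MODE = 8 (Sv39)
                           | ((unsigned long)asid << 44)    // ASID field
                           | (pt_phys >> 12);               // PPN of the root page table
        __asm__ volatile("csrw satp, %0" :: "r"(satp) : "memory");
        __asm__ volatile("sfence.vma zero, zero" ::: "memory");  // full local flush
    }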


@@ -203,7 +203,7 @@ size_t pmm_free(struct list_node *list) {
     mutex_acquire(&lock);

-    uint count = 0;
+    size_t count = 0;
     while (!list_is_empty(list)) {
         vm_page_t *page = list_remove_head_type(list, vm_page_t, node);
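The counter widening matches the size_t return type visible in the hunk header, so the freed-page count can't truncate on LP64 builds. A hedged usage sketch follows; pmm_alloc_pages() and its exact signature are an assumption here, only pmm_free(list) itself appears in this diff:

    // Hypothetical caller sketch: gather physical pages on a list, then hand
    // the whole list back; pmm_free() reports how many pages it freed.
    struct list_node pages = LIST_INITIAL_VALUE(pages);
    size_t allocated = pmm_alloc_pages(16, &pages);   // assumed allocation API
    size_t freed = pmm_free(&pages);
    DEBUG_ASSERT(freed == allocated);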