[arch][x86] save the translated kernel pgdir physical address

This avoids recomputing it on every context switch back to
the kernel aspace.
This commit is contained in:
Travis Geiselbrecht
2025-04-13 22:43:28 -07:00
parent a04776ba78
commit 0ac0911404
5 changed files with 32 additions and 13 deletions

View File

@@ -55,7 +55,8 @@ vaddr_t arch_mmu_pick_spot(arch_aspace_t *aspace,
vaddr_t end, uint next_region_arch_mmu_flags,
vaddr_t align, size_t size, uint arch_mmu_flags) __NONNULL((1));
/* load a new user address space context.
/*
* load a new user address space context.
* aspace argument NULL should unload user space.
*/
void arch_mmu_context_switch(arch_aspace_t *aspace);

View File

@@ -37,6 +37,7 @@
map_addr_t kernel_pt[NO_OF_PT_ENTRIES][4] __ALIGNED(PAGE_SIZE);
#endif
map_addr_t kernel_pd[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
paddr_t kernel_pd_phys;
static inline paddr_t get_pfn_from_pde(map_addr_t pde) {
return pde & X86_4MB_PAGE_FRAME;
@@ -472,7 +473,7 @@ status_t arch_mmu_init_aspace(arch_aspace_t * const aspace, const vaddr_t base,
aspace->base = base;
aspace->size = size;
aspace->cr3 = kernel_pd;
aspace->cr3_phys = vaddr_to_paddr(aspace->cr3);
aspace->cr3_phys = kernel_pd_phys;
} else {
DEBUG_ASSERT(base == USER_ASPACE_BASE);
DEBUG_ASSERT(size == USER_ASPACE_SIZE);
@@ -499,6 +500,16 @@ status_t arch_mmu_init_aspace(arch_aspace_t * const aspace, const vaddr_t base,
}
status_t arch_mmu_destroy_aspace(arch_aspace_t * const aspace) {
// Tear down a user address space and release its top-level page table.
// The kernel aspace is permanent and must never be destroyed.
// TODO: assert that we're not active on any cpus
if (aspace->flags & ARCH_ASPACE_FLAG_KERNEL) {
// can't destroy the kernel aspace
// panic() presumably does not return; the return below is a defensive fallback
panic("attempt to destroy kernel aspace\n");
return ERR_NOT_ALLOWED;
}
// free the page table
// NOTE(review): assumes aspace->cr3 is a single kernel page allocated from the pmm — confirm against arch_mmu_init_aspace
pmm_free_kpages(aspace->cr3, 1);
return NO_ERROR;
}
@@ -512,8 +523,7 @@ void arch_mmu_context_switch(arch_aspace_t * const aspace) {
cr3 = aspace->cr3_phys;
} else {
// TODO save copy of this
cr3 = vaddr_to_paddr(kernel_pd);
cr3 = kernel_pd_phys;
}
if (TRACE_CONTEXT_SWITCH) {
TRACEF("cr3 %#llx\n", cr3);

View File

@@ -155,6 +155,9 @@ paging_setup:
movl $PHYS(kernel_pd), %eax
mov %eax, %cr3
/* save a copy of the address of the kernel page directory */
movl %eax, PHYS(kernel_pd_phys)
/* Enable paging; from this point on paging is active */
mov %cr0, %eax
btsl $(31), %eax

View File

@@ -42,6 +42,9 @@ map_addr_t kernel_pml4[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
map_addr_t kernel_pdp[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE); /* temporary */
map_addr_t kernel_pte[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
/* saved physical address of the kernel_pml4 table */
paddr_t kernel_pml4_phys;
/* top level pdp needed to map the -512GB..0 space */
map_addr_t kernel_pdp_high[NO_OF_PT_ENTRIES] __ALIGNED(PAGE_SIZE);
@@ -666,7 +669,7 @@ void x86_mmu_early_init(void) {
kernel_pml4[0] = 0;
/* tlb flush */
x86_set_cr3(x86_get_cr3());
x86_set_cr3(kernel_pml4_phys);
}
void x86_mmu_init(void) {
@@ -697,7 +700,7 @@ status_t arch_mmu_init_aspace(arch_aspace_t * const aspace, const vaddr_t base,
aspace->base = base;
aspace->size = size;
aspace->cr3 = kernel_pml4;
aspace->cr3_phys = vaddr_to_paddr(aspace->cr3);
aspace->cr3_phys = kernel_pml4_phys;
} else {
DEBUG_ASSERT(base == USER_ASPACE_BASE);
DEBUG_ASSERT(size == USER_ASPACE_SIZE);
@@ -737,18 +740,17 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
return NO_ERROR;
}
void arch_mmu_context_switch(arch_aspace_t *aspace) {
void arch_mmu_context_switch(arch_aspace_t *new_aspace) {
if (TRACE_CONTEXT_SWITCH)
TRACEF("aspace %p\n", aspace);
TRACEF("aspace %p\n", new_aspace);
uint64_t cr3;
if (aspace) {
DEBUG_ASSERT((aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0);
if (new_aspace) {
DEBUG_ASSERT((new_aspace->flags & ARCH_ASPACE_FLAG_KERNEL) == 0);
cr3 = aspace->cr3_phys;
cr3 = new_aspace->cr3_phys;
} else {
// TODO save copy of this
cr3 = vaddr_to_paddr(kernel_pml4);
cr3 = kernel_pml4_phys;
}
if (TRACE_CONTEXT_SWITCH) {
TRACEF("cr3 %#llx\n", cr3);

View File

@@ -103,6 +103,9 @@ paging_setup:
movl $PHYS(kernel_pml4), %eax
mov %eax, %cr3
/* save it into a global variable that is used by the kernel */
movl %eax, PHYS(kernel_pml4_phys)
/* Long Mode enabled at this point */
movl $MSR_EFER ,%ecx
rdmsr