[arch][arm/arm64][mmu] Add missing barriers/tlbinv/pagetableinit.

Change-Id: Ia9f6bd89f981213aa2086a1a96cb95167df55ff4
This commit is contained in:
Arve Hjønnevåg
2016-05-26 21:26:53 -07:00
committed by Travis Geiselbrecht
parent 060236b7ed
commit e3939c9a3f
2 changed files with 41 additions and 7 deletions

View File

@@ -34,6 +34,7 @@
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <kernel/vm.h>
#include <lk/init.h>
#define LOCAL_TRACE 0
#define TRACE_CONTEXT_SWITCH 0
@@ -224,6 +225,13 @@ void arm_mmu_init(void)
uint32_t n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
DEBUG_ASSERT(n <= 7);
/*
* TODO: support other sizes, arch_mmu_init_aspace below allocates a
* page table for a 1GB user address space, so ttbcr should match.
*/
DEBUG_ASSERT(n <= 2);
n = 2;
uint32_t ttbcr = (1<<4) | n; /* disable TTBCR0 and set the split between TTBR0 and TTBR1 */
arm_write_ttbr1(arm_read_ttbr0());
@@ -235,6 +243,28 @@ void arm_mmu_init(void)
#endif
}
/*
 * Per-secondary-CPU MMU setup, run via the LK init hook below.
 *
 * Mirrors the TTBR0/TTBR1 user/kernel split that arm_mmu_init() establishes
 * on the boot CPU: secondary CPUs come up with only TTBR0 programmed, so the
 * kernel page table base must be copied into TTBR1 before TTBCR is switched
 * to split translation.
 *
 * @level  init level passed by the LK init framework; unused here.
 *
 * NOTE: statement order matters — TTBR1 must hold the kernel tables before
 * the TTBCR split takes effect, and the TLB must be invalidated afterwards
 * so no stale pre-split translations remain.
 */
static void arm_secondary_mmu_init(uint level)
{
uint32_t cur_ttbr0;
cur_ttbr0 = arm_read_ttbr0();
/* push out kernel mappings to ttbr1 */
arm_write_ttbr1(cur_ttbr0);
/*
 * TODO: support other sizes, arch_mmu_init_aspace below allocates a
 * page table for a 1GB user address space, so ttbcr should match.
 */
/* setup a user-kernel split */
/* TTBCR.N = 2: TTBR0 translates the low 1GB (user), TTBR1 the rest
 * (kernel) — matches the n = 2 forced in arm_mmu_init() on the boot CPU. */
arm_write_ttbcr(2);
/* drop any translations cached before the split was configured */
arm_invalidate_tlb_global();
}
/* Run on every secondary CPU at early arch-init time (not on the boot CPU). */
LK_INIT_HOOK_FLAGS(archarmmmu, arm_secondary_mmu_init,
LK_INIT_LEVEL_ARCH_EARLY, LK_INIT_FLAG_SECONDARY_CPUS);
void arch_disable_mmu(void)
{
arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // mmu disabled
@@ -259,6 +289,7 @@ void arch_mmu_context_switch(arch_aspace_t *aspace)
LTRACEF("ttbr 0x%x, ttbcr 0x%x\n", ttbr, ttbcr);
arm_write_ttbr0(ttbr);
arm_write_ttbcr(ttbcr);
arm_invalidate_tlb_global();
}
status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, uint *flags)
@@ -738,6 +769,10 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_virt = va;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
/* zero the top level translation table */
/* XXX remove when PMM starts returning pre-zeroed pages */
memset(aspace->tt_virt, 0, PAGE_SIZE);
}
LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);

View File

@@ -531,7 +531,7 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
aspace->tt_virt, MMU_ARM64_GLOBAL_ASID);
} else {
ret = arm64_mmu_map(vaddr, paddr, count * PAGE_SIZE,
mmu_flags_to_pte_attr(flags),
mmu_flags_to_pte_attr(flags) | MMU_PTE_ATTR_NON_GLOBAL,
0, MMU_USER_SIZE_SHIFT,
MMU_USER_TOP_SHIFT, MMU_USER_PAGE_SIZE_SHIFT,
aspace->tt_virt, MMU_ARM64_USER_ASID);
@@ -595,13 +595,14 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_virt = arm64_kernel_translation_table;
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
} else {
size_t page_table_size = MMU_USER_PAGE_TABLE_ENTRIES_TOP * sizeof(pte_t);
//DEBUG_ASSERT(base >= 0);
DEBUG_ASSERT(base + size <= 1UL << MMU_USER_SIZE_SHIFT);
aspace->base = base;
aspace->size = size;
pte_t *va = pmm_alloc_kpages(1, NULL);
pte_t *va = memalign(page_table_size, page_table_size);
if (!va)
return ERR_NO_MEMORY;
@@ -609,8 +610,7 @@ status_t arch_mmu_init_aspace(arch_aspace_t *aspace, vaddr_t base, size_t size,
aspace->tt_phys = vaddr_to_paddr(aspace->tt_virt);
/* zero the top level translation table */
/* XXX remove when PMM starts returning pre-zeroed pages */
memset(aspace->tt_virt, 0, PAGE_SIZE);
memset(aspace->tt_virt, 0, page_table_size);
}
LTRACEF("tt_phys 0x%lx tt_virt %p\n", aspace->tt_phys, aspace->tt_virt);
@@ -627,9 +627,7 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace)
// XXX make sure it's not mapped
vm_page_t *page = paddr_to_vm_page(aspace->tt_phys);
DEBUG_ASSERT(page);
pmm_free_page(page);
free(aspace->tt_virt);
return NO_ERROR;
}
@@ -651,6 +649,7 @@ void arch_mmu_context_switch(arch_aspace_t *aspace)
if (TRACE_CONTEXT_SWITCH)
TRACEF("ttbr 0x%llx, tcr 0x%llx\n", ttbr, tcr);
ARM64_TLBI(aside1, (uint64_t)MMU_ARM64_USER_ASID << 48);
DSB;
} else {
tcr = MMU_TCR_FLAGS_KERNEL;