[arch][tests] add a way to query some arch mmu features

Allow asking the arch layer whether it supports NX (no-execute) or NS (non-secure)
mappings, and have the arch mmu test code adjust its expectations accordingly.
Also tweak the tests to pass on the arm32 mmu, whose map/unmap routines do not
return exactly NO_ERROR like the other arches; the tests now accept any
non-negative return from arch_mmu_map.
Author: Travis Geiselbrecht
Date:   2022-10-21 00:00:49 -07:00
parent f1dad5f4c8
commit d5451cc8e6
7 changed files with 53 additions and 14 deletions
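
Before the per-file diffs, a minimal sketch (not part of this commit) of how generic
code could consult the new queries before requesting an NX or NS mapping. The helper
name and the use of ERR_NOT_SUPPORTED are illustrative assumptions, not APIs added here:

    #include <arch/mmu.h>   // new arch_mmu_supports_*() queries and ARCH_MMU_FLAG_* bits
    #include <lk/err.h>     // NO_ERROR, ERR_NOT_SUPPORTED

    // Hypothetical helper: reject mapping flags the current arch cannot honor,
    // based on the two feature queries introduced by this commit.
    static status_t check_arch_map_flags(uint flags) {
        if ((flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE) && !arch_mmu_supports_nx_mappings())
            return ERR_NOT_SUPPORTED;
        if ((flags & ARCH_MMU_FLAG_NS) && !arch_mmu_supports_ns_mappings())
            return ERR_NOT_SUPPORTED;
        return NO_ERROR;
    }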


@@ -716,4 +716,7 @@ status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace) {
     return NO_ERROR;
 }
 
+bool arch_mmu_supports_nx_mappings(void) { return true; }
+bool arch_mmu_supports_ns_mappings(void) { return true; }
+
 #endif // ARCH_HAS_MMU


@@ -655,3 +655,5 @@ void arch_mmu_context_switch(arch_aspace_t *aspace) {
     ARM64_WRITE_SYSREG(tcr_el1, tcr);
 }
 
+bool arch_mmu_supports_nx_mappings(void) { return true; }
+bool arch_mmu_supports_ns_mappings(void) { return true; }


@@ -10,14 +10,16 @@
 #if ARCH_HAS_MMU
 
 #include <arch.h>
-#include <sys/types.h>
 #include <lk/compiler.h>
+#include <stdbool.h>
+#include <sys/types.h>
 
 /* to bring in definition of arch_aspace */
 #include <arch/aspace.h>
 
 __BEGIN_CDECLS
 
+/* flags to pass to the arch_mmu_map and arch_mmu_query routines */
 #define ARCH_MMU_FLAG_CACHED (0U<<0)
 #define ARCH_MMU_FLAG_UNCACHED (1U<<0)
 #define ARCH_MMU_FLAG_UNCACHED_DEVICE (2U<<0) /* only exists on some arches, otherwise UNCACHED */
@@ -25,9 +27,13 @@ __BEGIN_CDECLS
 #define ARCH_MMU_FLAG_PERM_USER (1U<<2)
 #define ARCH_MMU_FLAG_PERM_RO (1U<<3)
-#define ARCH_MMU_FLAG_PERM_NO_EXECUTE (1U<<4)
-#define ARCH_MMU_FLAG_NS (1U<<5) /* NON-SECURE */
-#define ARCH_MMU_FLAG_INVALID (1U<<7) /* indicates that flags are not specified */
+#define ARCH_MMU_FLAG_PERM_NO_EXECUTE (1U<<4) /* supported on most, but not all arches */
+#define ARCH_MMU_FLAG_NS (1U<<5) /* supported on some arches */
+#define ARCH_MMU_FLAG_INVALID (1U<<6) /* indicates that flags are not specified */
+
+/* arch level query of some features at the mapping/query level */
+bool arch_mmu_supports_nx_mappings(void);
+bool arch_mmu_supports_ns_mappings(void);
 
 /* forward declare the per-address space arch-specific context object */
 typedef struct arch_aspace arch_aspace_t;


@@ -399,15 +399,20 @@ int arch_mmu_map(arch_aspace_t *aspace, const vaddr_t _vaddr, paddr_t paddr, uin
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->magic == RISCV_ASPACE_MAGIC);
 
-    if (count == 0) {
-        return NO_ERROR;
+    if (flags & ARCH_MMU_FLAG_NS) {
+        return ERR_INVALID_ARGS;
     }
 
     // trim the vaddr to the aspace
     if (_vaddr < aspace->base || _vaddr > aspace->base + aspace->size - 1) {
         return ERR_OUT_OF_RANGE;
     }
     // TODO: make sure _vaddr + count * PAGE_SIZE is within the address space
 
+    if (count == 0) {
+        return NO_ERROR;
+    }
+
     // construct a local callback for the walker routine that
     // a) tells the walker to build a page table if it's not present
     // b) fills in a terminal page table entry with a page and tells the walker to start over
@@ -591,6 +596,9 @@ void arch_mmu_context_switch(arch_aspace_t *aspace) {
     // for now, riscv_set_satp() does a full local TLB dump
 }
 
+bool arch_mmu_supports_nx_mappings(void) { return true; }
+bool arch_mmu_supports_ns_mappings(void) { return false; }
+
 extern "C"
 void riscv_mmu_init_secondaries() {
     // switch to the proper kernel pgtable, with the trampoline parts unmapped
@@ -634,6 +642,4 @@ void riscv_mmu_init() {
     printf("RISCV: MMU ASID mask %#lx\n", riscv_asid_mask);
 }
-
-
 
 #endif


@@ -49,7 +49,7 @@ static bool map_user_pages(void) {
     vm_page_t *p;
     list_for_every_entry(&pages, p, vm_page_t, node) {
         err = arch_mmu_map(&as, va, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
-        EXPECT_EQ(NO_ERROR, err, "map page");
+        EXPECT_LE(NO_ERROR, err, "map page");
         va += PAGE_SIZE;
     }
@@ -112,13 +112,23 @@ static bool map_query_pages(void) {
     // try mapping pages in the kernel address space with various permissions and read them back via arch query
     EXPECT_TRUE(map_region_query_result(kaspace, 0), "0");
     EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO), "1");
-    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_NO_EXECUTE), "2");
-    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "3");
+    if (arch_mmu_supports_nx_mappings()) {
+        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_NO_EXECUTE), "2");
+        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "3");
+    } else {
+        EXPECT_FALSE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_NO_EXECUTE), "2");
+        EXPECT_FALSE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "3");
+    }
     EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER), "4");
     EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO), "5");
-    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "6");
-    EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "7");
+    if (arch_mmu_supports_nx_mappings()) {
+        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "6");
+        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "7");
+    } else {
+        EXPECT_FALSE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "6");
+        EXPECT_FALSE(map_region_query_result(kaspace, ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO | ARCH_MMU_FLAG_PERM_NO_EXECUTE), "7");
+    }
 
     END_TEST;
 }
@@ -142,7 +152,7 @@ static bool context_switch(void) {
     // map it
     err = arch_mmu_map(&as, USER_ASPACE_BASE, vm_page_to_paddr(p), 1, ARCH_MMU_FLAG_PERM_USER);
-    ASSERT_EQ(NO_ERROR, err, "map");
+    ASSERT_LE(NO_ERROR, err, "map");
 
     // write a known value to the kvaddr portion of the page
     volatile int *kv = static_cast<volatile int *>(paddr_to_kvaddr(vm_page_to_paddr(p)));
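
The commit gates the no-execute test cases on arch_mmu_supports_nx_mappings(); a similar
guard (hypothetical, not part of this commit) could gate ARCH_MMU_FLAG_NS cases on
arch_mmu_supports_ns_mappings(), reusing the existing map_region_query_result() helper
and kaspace variable from the test above:

    // Hypothetical NS-gated case in the same style as the NX guards, placed inside
    // map_query_pages(); "ns" as the test label is an illustrative choice.
    if (arch_mmu_supports_ns_mappings()) {
        EXPECT_TRUE(map_region_query_result(kaspace, ARCH_MMU_FLAG_NS), "ns");
    } else {
        EXPECT_FALSE(map_region_query_result(kaspace, ARCH_MMU_FLAG_NS), "ns");
    }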


@@ -572,6 +572,9 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
     DEBUG_ASSERT(aspace);
 
+    if (flags & (ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_NS))
+        return ERR_INVALID_ARGS;
+
     if ((!IS_ALIGNED(paddr, PAGE_SIZE)) || (!IS_ALIGNED(vaddr, PAGE_SIZE)))
         return ERR_INVALID_ARGS;
@@ -588,6 +591,9 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
     return (x86_mmu_map_range(X86_PHYS_TO_VIRT(current_cr3_val), &range, flags));
 }
 
+bool arch_mmu_supports_nx_mappings(void) { return false; }
+bool arch_mmu_supports_ns_mappings(void) { return false; }
+
 void x86_mmu_early_init(void) {
     volatile uint32_t cr0;


@@ -654,6 +654,9 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
     LTRACEF("aspace %p, vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", aspace, vaddr, paddr, count, flags);
 
+    if (flags & ARCH_MMU_FLAG_NS)
+        return ERR_INVALID_ARGS;
+
     if ((!x86_mmu_check_paddr(paddr)))
         return ERR_INVALID_ARGS;
@@ -673,6 +676,9 @@ int arch_mmu_map(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t paddr, uint count
     return (x86_mmu_map_range(X86_PHYS_TO_VIRT(current_cr3_val), &range, flags));
 }
 
+bool arch_mmu_supports_nx_mappings(void) { return true; }
+bool arch_mmu_supports_ns_mappings(void) { return false; }
+
 void x86_mmu_early_init(void) {
     volatile uint64_t efer_msr, cr0, cr4;