[arch][x86-64] MMU fixes - arch_mmu_query, page walking, kernel ld script

Shreyas Nagaraj authored 2014-10-20 04:14:33 -04:00; committed by Travis Geiselbrecht
parent e2a45c32b7 | commit 2ff04720f7
10 changed files with 66 additions and 175 deletions

View File

@@ -32,7 +32,6 @@ void clock_tests(void);
 void float_tests(void);
 void benchmarks(void);
 int fibo(int argc, const cmd_args *argv);
-int x86_mmu_tests(void);
 #endif

View File

@@ -16,7 +16,6 @@ MODULE_SRCS += \
 	$(LOCAL_DIR)/float_test_vec.c \
 	$(LOCAL_DIR)/fibo.c \
-	$(LOCAL_DIR)/mem_tests.c \
-	$(LOCAL_DIR)/x86_mmu_tests.c
+	$(LOCAL_DIR)/mem_tests.c
 
 MODULE_COMPILEFLAGS += -Wno-format

View File

@@ -33,9 +33,6 @@ STATIC_COMMAND("printf_tests", "test printf", (console_cmd)&printf_tests)
 STATIC_COMMAND("printf_tests_float", "test printf with floating point", (console_cmd)&printf_tests_float)
 STATIC_COMMAND("thread_tests", "test the scheduler", (console_cmd)&thread_tests)
 STATIC_COMMAND("clock_tests", "test clocks", (console_cmd)&clock_tests)
-#ifdef ARCH_X86_64
-STATIC_COMMAND("x86_mmu_tests", "x86 64 Mapping tests", (console_cmd)&x86_mmu_tests)
-#endif
 #if ARM_WITH_VFP
 STATIC_COMMAND("float_tests", "floating point test", (console_cmd)&float_tests)
 #endif

View File

@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <app/tests.h>
-#include <stdio.h>
-#include <arch/mmu.h>
-#include <err.h>
-#include <debug.h>
-#include <arch/x86/mmu.h>
-#include <err.h>
-
-#define test_value1 (0xdeadbeef)
-#define test_value2 (0xdeadcafe)
-
-/* testing the ARCH independent map & query routine */
-int arch_mmu_map_test(vaddr_t vaddr, paddr_t paddr, uint count, uint map_flags, uint skip_unmap)
-{
-	vaddr_t *test_vaddr_first, *test_vaddr_last;
-	paddr_t out_paddr;
-	uint ret_flags;
-	status_t ret;
-
-	/* Do the Mapping */
-	ret = arch_mmu_map(vaddr, paddr, count, map_flags);
-	if(ret)
-		return ret;
-	printf("\nAdd Mapping => Vaddr:%llx Paddr:%llx pages=%d flags:%llx\n", vaddr, paddr, count, map_flags);
-
-	if(count > 0) {
-		/* Check the mapping */
-		ret = arch_mmu_query((vaddr_t)vaddr, &out_paddr, &ret_flags);
-		if(ret)
-			return ret;
-		if(out_paddr != paddr)
-			return 1;
-		if(map_flags != ret_flags)
-			return 2;
-		printf("\nQuery of the existing mapping - successfull (ret Paddr:%llx) (ret Flags:%llx)\n", out_paddr, ret_flags);
-
-		/* Write and Read test */
-		if (ret_flags & X86_MMU_PG_RW) {
-			/* first page */
-			test_vaddr_first = (vaddr_t *)vaddr;
-			*test_vaddr_first = test_value1;
-			printf("\nReading MAPPED addr => Vaddr:%llx Value=%llx\n", test_vaddr_first, *test_vaddr_first);
-
-			/* last page */
-			test_vaddr_last = (vaddr_t *)(vaddr + ((count-1)*PAGE_SIZE));
-			*test_vaddr_last = test_value2;
-			printf("\nReading MAPPED addr => Vaddr:%llx Value=%llx\n", test_vaddr_last, *test_vaddr_last);
-		}
-		else
-			printf("\n Can't write onto these addresses (NO RW permission) - Will cause FAULT\n");
-	}
-
-	if (skip_unmap) {
-		/* Unmap */
-		ret = arch_mmu_unmap((vaddr_t)vaddr, count);
-		if(ret != (int)count)
-			return 3;
-		printf("\nRemove Mapping => Vaddr:%llx pages=%d\n", vaddr, count);
-
-		/* Check the mapping again - mappnig should NOT be found now */
-		ret = arch_mmu_query((vaddr_t)vaddr, &out_paddr, &ret_flags);
-		if(ret != ERR_NOT_FOUND)
-			return 4;
-	}
-	return NO_ERROR;
-}
-
-int x86_mmu_tests(void)
-{
-	int return_status;
-
-	/* Test Case # 1 */
-	return_status = arch_mmu_map_test((0x17efe000),(0x17efe000),512, X86_MMU_PG_RW | X86_MMU_PG_P, 0);
-	if(!return_status)
-		printf("\n\n---- x86 MMU Test result:SUCCESS ----\n\n");
-	else
-		printf("\n\n ----- x86 MMU Test result:FAILURE (Return status:%d) ----\n", return_status);
-
-	/* Test Case # 2 */
-	return_status = arch_mmu_map_test((0x17efe000),(0x17efe000), 0, X86_MMU_PG_RW | X86_MMU_PG_P, 0);
-	if(!return_status)
-		printf("\n\n ----- x86 MMU Test result:SUCCESS ---- \n\n");
-	else
-		printf("\n\n ----- x86 MMU Test result:FAILURE (Return status:%d) ----\n", return_status);
-
-	/* Test Case # 3 */
-	return_status = arch_mmu_map_test((0x7fffe0000),(0x17ffe000), 256, X86_MMU_PG_RW | X86_MMU_PG_P, 0);
-	if(!return_status)
-		printf("\n\n ----- x86 MMU Test result:SUCCESS ---- \n\n");
-	else
-		printf("\n\n ----- x86 MMU Test result:FAILURE (Return status:%d) ----\n", return_status);
-
-	/* Test case # 4 */
-	return_status = arch_mmu_map_test((0x7fffe0000),(0x17ffe000), 1024, X86_MMU_PG_P, 1);
-	if(!return_status)
-		printf("\n\n ----- x86 MMU Test result:SUCCESS ---- \n\n");
-	else
-		printf("\n\n ----- x86 MMU Test result:FAILURE (Return status:%d) ----\n", return_status);
-
-	return_status = arch_mmu_map_test((0x7fffe0000),(0x17ffe000), 1024, X86_MMU_PG_RW | X86_MMU_PG_P, 0);
-	if(!return_status)
-		printf("\n\n ----- x86 MMU Test result:SUCCESS ---- \n\n");
-	else
-		printf("\n\n ----- x86 MMU Test result:FAILURE (Return status:%d) ----\n", return_status);
-
-	return 0;
-}
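The deleted test passed raw x86 PTE bits (X86_MMU_PG_RW | X86_MMU_PG_P) through the arch-independent API and compared them verbatim against the queried flags; the flag translation introduced in mmu.c below makes that comparison meaningless, hence the removal. A minimal arch-neutral equivalent would look roughly like this (hypothetical sketch, not part of this commit; it relies only on the arch_mmu_* behavior exercised above):

#include <arch/mmu.h>
#include <err.h>

/* hypothetical arch-neutral smoke test; not part of this commit */
static int generic_mmu_map_test(vaddr_t va, paddr_t pa, uint count)
{
	paddr_t out_pa;
	uint out_flags;

	if (arch_mmu_map(va, pa, count, 0))	/* no PERM_* flags: writable kernel mapping */
		return 1;
	if (arch_mmu_query(va, &out_pa, &out_flags) || out_pa != pa)
		return 2;
	if (arch_mmu_unmap(va, count) != (int)count)
		return 3;
	/* the translation must be gone after the unmap */
	return (arch_mmu_query(va, &out_pa, &out_flags) == ERR_NOT_FOUND) ? NO_ERROR : 4;
}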

View File

@@ -37,7 +37,7 @@ __BEGIN_CDECLS
 #define X86_8BYTE_MASK 0xFFFFFFFF
 #define X86_CPUID_ADDR_WIDTH 0x80000008
 
-void x86_mmu_init(void);
+void arch_mmu_init(void);
 
 struct x86_iframe {
 	uint64_t pivot;	// stack switch pivot

View File

@@ -42,6 +42,9 @@ void x86_mmu_init(void);
 #define X86_PHY_ADDR_MASK (0x000ffffffffffffful)
 #define X86_FLAGS_MASK (0x8000000000000ffful)
 #define X86_PTE_NOT_PRESENT (0xFFFFFFFFFFFFFFFEul)
+#define X86_2MB_PAGE_FRAME (0x000fffffffe00000ul)
+#define PAGE_OFFSET_MASK_4KB (0x0000000000000ffful)
+#define PAGE_OFFSET_MASK_2MB (0x00000000001ffffful)
 #define PAGE_SIZE 4096
 #define PAGING_LEVELS 4
@@ -71,7 +74,6 @@ struct map_range {
 	uint32_t size;
 };
 
-status_t x86_mmu_add_mapping (addr_t pml4, paddr_t paddr, vaddr_t vaddr, uint64_t flags);
 status_t x86_mmu_map_range (addr_t pml4, struct map_range *range, uint64_t flags);
 status_t x86_mmu_check_mapping (addr_t pml4, paddr_t paddr,
 		vaddr_t vaddr, uint64_t in_flags,
View File

@@ -59,7 +59,7 @@ INCLUDE "arch/shared_data_sections.ld"
 	__data_end = .;
 
-	.bss : ALIGN(8) {
+	.bss : ALIGN(4096) {
 		__bss_start = .;
 		*(.bss*)
 		*(.gnu.linkonce.b.*)
View File

@@ -44,7 +44,7 @@ extern uint32_t g_addr_width;
  * @brief check if the address is valid
  *
  */
-bool x86_mmu_check_map_addr(addr_t address)
+static bool x86_mmu_check_map_addr(addr_t address)
 {
 	uint64_t addr = (uint64_t)address;
@@ -107,7 +107,15 @@ static inline uint64_t get_pfn_from_pte(uint64_t pte)
 	return X86_PHYS_TO_VIRT(pfn);
 }
 
-void map_zero_page(addr_t *ptr)
+static inline uint64_t get_pfn_from_pde(uint64_t pde)
+{
+	uint64_t pfn;
+
+	pfn = (pde & X86_2MB_PAGE_FRAME);
+	return X86_PHYS_TO_VIRT(pfn);
+}
+
+static void map_zero_page(addr_t *ptr)
 {
 	if(ptr)
 		memset(ptr, 0, PAGE_SIZE);
@@ -120,14 +128,13 @@ void map_zero_page(addr_t *ptr)
  * 4KB pages.
  *
  */
-status_t x86_mmu_page_walking(addr_t pml4, vaddr_t vaddr, uint32_t *ret_level,
+static status_t x86_mmu_page_walking(addr_t pml4, vaddr_t vaddr, uint32_t *ret_level,
 		uint64_t *existing_flags, uint64_t *last_valid_entry)
 {
 	uint64_t pml4e, pdpe, pde, pte;
 
 	DEBUG_ASSERT(pml4);
-	if((!ret_level) || (!last_valid_entry) || (!existing_flags) ||
-		(!x86_mmu_check_map_addr(vaddr))) {
+	if((!ret_level) || (!last_valid_entry) || (!existing_flags)) {
 		return ERR_INVALID_ARGS;
 	}
@@ -154,6 +161,15 @@ status_t x86_mmu_page_walking(addr_t pml4, vaddr_t vaddr, uint32_t *ret_level,
 		return ERR_NOT_FOUND;
 	}
 
+	/* 2 MB pages */
+	if (pde & X86_MMU_PG_PS) {
+		/* Getting the Page frame & adding the 4KB page offset from the vaddr */
+		*last_valid_entry = get_pfn_from_pde(pde) + ((uint64_t)vaddr & PAGE_OFFSET_MASK_2MB);
+		*existing_flags = (X86_PHYS_TO_VIRT(pde)) & X86_FLAGS_MASK;
+		goto last;
+	}
+
+	/* 4 KB pages */
 	pte = get_pt_entry_from_pt_table(vaddr, pde);
 	if ((pte & X86_MMU_PG_P) == 0) {
 		*ret_level = PT_L;
@@ -161,9 +177,12 @@ status_t x86_mmu_page_walking(addr_t pml4, vaddr_t vaddr, uint32_t *ret_level,
 		return ERR_NOT_FOUND;
 	}
 
-	*last_valid_entry = get_pfn_from_pte(pte);
-	*ret_level = PF_L;
+	/* Getting the Page frame & adding the 4KB page offset from the vaddr */
+	*last_valid_entry = get_pfn_from_pte(pte) + ((uint64_t)vaddr & PAGE_OFFSET_MASK_4KB);
 	*existing_flags = (X86_PHYS_TO_VIRT(pte)) & X86_FLAGS_MASK;
+
+last:
+	*ret_level = PF_L;
 	return NO_ERROR;
 }
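With the PS bit checked, the walk terminates at the PDE for a 2 MB page and at the PTE for a 4 KB page, and both exits now fold the in-page offset from the vaddr back into the result instead of returning a bare frame. The address arithmetic in isolation (standalone sketch, not part of the diff; X86_MMU_PG_PS is the architectural bit 7, and the real code additionally maps the frame through X86_PHYS_TO_VIRT before adding the offset):

#include <stdint.h>

#define X86_MMU_PG_PS        (0x80ul)	/* architectural "page size" bit in a PDE */
#define X86_PHY_ADDR_MASK    (0x000ffffffffffffful)
#define X86_2MB_PAGE_FRAME   (0x000fffffffe00000ul)
#define PAGE_OFFSET_MASK_4KB (0x0000000000000ffful)
#define PAGE_OFFSET_MASK_2MB (0x00000000001ffffful)

/* terminal-entry arithmetic: pde is always valid here; pte is consulted
   only when the PDE points at a page table rather than a 2 MB page */
static uint64_t translated_paddr(uint64_t vaddr, uint64_t pde, uint64_t pte)
{
	if (pde & X86_MMU_PG_PS)	/* 2 MB page: frame bits 51..21 come from the PDE */
		return (pde & X86_2MB_PAGE_FRAME) + (vaddr & PAGE_OFFSET_MASK_2MB);

	/* 4 KB page: frame bits 51..12 come from the PTE */
	return (pte & X86_PHY_ADDR_MASK & ~PAGE_OFFSET_MASK_4KB)
	       + (vaddr & PAGE_OFFSET_MASK_4KB);
}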
@@ -206,7 +225,7 @@ status_t x86_mmu_check_mapping(addr_t pml4, paddr_t paddr,
 	return ERR_NOT_FOUND;
 }
 
-void update_pt_entry(vaddr_t vaddr, paddr_t paddr, uint64_t flags, uint64_t pde)
+static void update_pt_entry(vaddr_t vaddr, paddr_t paddr, uint64_t flags, uint64_t pde)
 {
 	uint32_t pt_index;
@@ -216,7 +235,7 @@ void update_pt_entry(vaddr_t vaddr, paddr_t paddr, uint64_t flags, uint64_t pde)
 	pt_table[pt_index] |= flags;
 }
 
-void update_pd_entry(vaddr_t vaddr, uint64_t pdpe, addr_t *m)
+static void update_pd_entry(vaddr_t vaddr, uint64_t pdpe, addr_t *m)
 {
 	uint32_t pd_index;
@@ -226,7 +245,7 @@ void update_pd_entry(vaddr_t vaddr, uint64_t pdpe, addr_t *m)
 	pd_table[pd_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
 }
 
-void update_pdp_entry(vaddr_t vaddr, uint64_t pml4e, addr_t *m)
+static void update_pdp_entry(vaddr_t vaddr, uint64_t pml4e, addr_t *m)
 {
 	uint32_t pdp_index;
@@ -236,7 +255,7 @@ void update_pdp_entry(vaddr_t vaddr, uint64_t pml4e, addr_t *m)
 	pdp_table[pdp_index] |= X86_MMU_PG_P | X86_MMU_PG_RW;
 }
 
-void update_pml4_entry(vaddr_t vaddr, addr_t pml4_addr, addr_t *m)
+static void update_pml4_entry(vaddr_t vaddr, addr_t pml4_addr, addr_t *m)
 {
 	uint32_t pml4_index;
 	uint64_t *pml4_table = (uint64_t *)(pml4_addr);
@@ -249,7 +268,7 @@ void update_pml4_entry(vaddr_t vaddr, addr_t pml4_addr, addr_t *m)
 /**
  * @brief Allocating a new page table
  */
-addr_t *_map_alloc(size_t size)
+static addr_t *_map_alloc(size_t size)
 {
 	addr_t *page_ptr = memalign(PAGE_SIZE, size);
 	if(page_ptr)
@@ -268,7 +287,7 @@ addr_t *_map_alloc(size_t size)
  * 4KB pages.
  *
  */
-status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
+static status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
 		vaddr_t vaddr, uint64_t flags)
 {
 	uint32_t pd_new = 0, pdp_new = 0;
@@ -351,7 +370,7 @@ status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
  * @brief x86-64 MMU unmap an entry in the page tables recursively and clear out tables
 *
 */
-void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry)
+static void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry)
 {
 	uint32_t offset = 0, next_level_offset = 0;
 	vaddr_t *table, *next_table_addr, value;
@@ -417,7 +436,7 @@ void x86_mmu_unmap_entry(vaddr_t vaddr, int level, vaddr_t table_entry)
 	}
 }
 
-int x86_mmu_unmap(addr_t pml4, vaddr_t vaddr, uint count)
+static int x86_mmu_unmap(addr_t pml4, vaddr_t vaddr, uint count)
 {
 	int unmapped = 0;
 	vaddr_t next_aligned_v_addr;
@@ -501,7 +520,7 @@ status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
 	uint64_t ret_flags;
 	status_t stat;
 
-	if(!paddr || !flags || (!x86_mmu_check_map_addr(vaddr)))
+	if(!paddr || !flags)
 		return ERR_INVALID_ARGS;
 
 	DEBUG_ASSERT(x86_get_cr3());
@@ -512,8 +531,17 @@ status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags)
 		return stat;
 
 	*paddr = (paddr_t)(last_valid_entry);
-	// Re-visit: typecasting 64 bit value to 32 - NX bit lost
-	*flags = (uint)ret_flags;
+
+	/* converting x86 arch specific flags to arch mmu flags */
+	*flags = 0;
+	if(!(ret_flags & X86_MMU_PG_RW))
+		*flags |= ARCH_MMU_FLAG_PERM_RO;
+	if(ret_flags & X86_MMU_PG_U)
+		*flags |= ARCH_MMU_FLAG_PERM_USER;
+	if(ret_flags & X86_MMU_PG_NX)
+		*flags |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
+
 	return NO_ERROR;
 }
@@ -522,6 +550,7 @@ int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
 {
 	addr_t current_cr3_val;
 	struct map_range range;
+	uint64_t arch_flags = X86_MMU_PG_P;
 
 	if((!x86_mmu_check_map_addr(paddr)) || (!x86_mmu_check_map_addr(vaddr)))
 		return ERR_INVALID_ARGS;
@@ -536,15 +565,24 @@ int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
 	range.start_paddr = paddr;
 	range.size = count * PAGE_SIZE;
 
-	// Re-visit: Can't set the x86-64 NX Bit here
-	return(x86_mmu_map_range(current_cr3_val, &range, flags));
+	/* converting arch mmu flags to x86 arch specific flags */
+	if(!(flags & ARCH_MMU_FLAG_PERM_RO))
+		arch_flags |= X86_MMU_PG_RW;
+	if(flags & ARCH_MMU_FLAG_PERM_USER)
+		arch_flags |= X86_MMU_PG_U;
+	if(flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)
+		arch_flags |= X86_MMU_PG_NX;
+
+	return(x86_mmu_map_range(current_cr3_val, &range, arch_flags));
 }
 
 /**
  * @brief x86-64 MMU basic initialization
  *
  */
-void x86_mmu_init(void)
+void arch_mmu_init(void)
 {
 	uint64_t efer_msr, cr0;
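The two translation blocks are inverses over the three permission bits, so a query after a map reproduces the caller's flags, and NX in particular now survives the round trip (the old code cast the 64-bit flags to uint, losing bit 63, per the removed Re-visit comments). A standalone check of that property; the ARCH_MMU_FLAG_* values here are assumed for illustration, while the X86_MMU_PG_* values are the architectural bit positions:

#include <stdint.h>
#include <assert.h>

/* x86 PTE bits: RW = bit 1, U = bit 2, NX = bit 63 */
#define X86_MMU_PG_RW (1ull << 1)
#define X86_MMU_PG_U  (1ull << 2)
#define X86_MMU_PG_NX (1ull << 63)

/* LK arch-mmu permission flags (assumed values for this sketch) */
#define ARCH_MMU_FLAG_PERM_RO         (1u << 0)
#define ARCH_MMU_FLAG_PERM_USER       (1u << 1)
#define ARCH_MMU_FLAG_PERM_NO_EXECUTE (1u << 2)

static unsigned x86_to_arch(uint64_t pte)	/* the arch_mmu_query direction */
{
	unsigned f = 0;
	if (!(pte & X86_MMU_PG_RW)) f |= ARCH_MMU_FLAG_PERM_RO;
	if (pte & X86_MMU_PG_U)     f |= ARCH_MMU_FLAG_PERM_USER;
	if (pte & X86_MMU_PG_NX)    f |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
	return f;
}

static uint64_t arch_to_x86(unsigned f)		/* the arch_mmu_map direction (P bit omitted) */
{
	uint64_t pte = 0;
	if (!(f & ARCH_MMU_FLAG_PERM_RO))      pte |= X86_MMU_PG_RW;
	if (f & ARCH_MMU_FLAG_PERM_USER)       pte |= X86_MMU_PG_U;
	if (f & ARCH_MMU_FLAG_PERM_NO_EXECUTE) pte |= X86_MMU_PG_NX;
	return pte;
}

int main(void)
{
	/* every combination of the three permission flags round-trips */
	for (unsigned f = 0; f < 8; f++)
		assert(x86_to_arch(arch_to_x86(f)) == f);
	return 0;
}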

View File

@@ -19,7 +19,7 @@ MODULE_SRCS += \
 
 # set the default toolchain to x86 elf and set a #define
 ifndef TOOLCHAIN_PREFIX
-TOOLCHAIN_PREFIX := x86_64-elf-
+TOOLCHAIN_PREFIX :=
 endif
 
 LIBGCC := $(shell $(TOOLCHAIN_PREFIX)gcc $(CFLAGS) -print-libgcc-file-name)

View File

@@ -92,15 +92,6 @@ void platform_init_mmu_mappings(void)
 	x86_mmu_map_range(phy_pml4, &range, access);
 	x86_set_cr3(phy_pml4);
 
-	/*
-	// Testing arch_mmu_query()
-	paddr_t temp_paddr;
-	uint temp_ret_flags;
-
-	arch_mmu_query((addr_t) &__code_start, &temp_paddr, &temp_ret_flags);
-	dprintf(SPEW, "\nVaddr: %lx paddr:%lx flags:%x\n",
-		(uint64_t) &__code_start, temp_paddr, temp_ret_flags);
-	*/
 #endif
 }
@@ -164,7 +155,7 @@ void platform_init(void)
 	/* MMU init for x86_64 done after the heap is setup */
 #ifdef ARCH_X86_64
-	x86_mmu_init();
+	arch_mmu_init();
 	platform_init_mmu_mappings();
 #endif
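The commented-out query test deleted above can now be written against purely arch-neutral entry points, matching the renamed init path used here. A hypothetical bring-up sanity check in that style (not part of this commit):

#include <arch/mmu.h>
#include <debug.h>
#include <err.h>

extern int __code_start;		/* linker-provided symbol, as in the deleted snippet */
void platform_init_mmu_mappings(void);

/* hypothetical: exercise the renamed init path, then query a known mapping */
static void x86_mmu_bringup_check(void)
{
	paddr_t pa;
	uint flags;

	arch_mmu_init();		/* formerly x86_mmu_init() */
	platform_init_mmu_mappings();

	if (arch_mmu_query((vaddr_t)&__code_start, &pa, &flags) == NO_ERROR)
		dprintf(SPEW, "code start: paddr %#lx flags %#x\n", (unsigned long)pa, flags);
}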