[arch][x86_64][mmu] fix virtual addr and physical addr validity check

A canonical-address check is meaningful only for VIRTUAL addresses; for
physical addresses, just check against the maximum supported physical
address reported by the CPUID capability (the boundaries involved are
sketched right after the change summary below).
Bing Zhu
2015-11-05 13:58:28 +08:00
committed by Travis Geiselbrecht
parent 3049c8bedc
commit 60c8eb2e56
3 changed files with 59 additions and 18 deletions
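
For orientation (not part of the commit itself), here is a minimal user-space
sketch of the two boundaries the new checks are built around, assuming a CPU
that reports a 48-bit linear and a 36-bit physical address width; the hole
between the two canonical halves is exactly what the virtual-address check
must reject:

    /* boundary_sketch.c - illustrative only, not part of the commit */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Example widths; the kernel reads the real ones from CPUID at boot. */
    #define EXAMPLE_VADDR_WIDTH 48
    #define EXAMPLE_PADDR_WIDTH 36

    int main(void)
    {
        /* Highest address in the lower canonical half (0x00007FFFFFFFFFFF for 48 bits). */
        uint64_t max_vaddr_lohalf = ((uint64_t)1 << (EXAMPLE_VADDR_WIDTH - 1)) - 1;
        /* Lowest address in the upper canonical half (0xFFFF800000000000 for 48 bits). */
        uint64_t min_vaddr_hihalf = ~max_vaddr_lohalf;
        /* Largest valid physical address (0x0000000FFFFFFFFF for 36 bits). */
        uint64_t max_paddr = ((uint64_t)1 << EXAMPLE_PADDR_WIDTH) - 1;

        printf("vaddr lower-half max: 0x%016" PRIx64 "\n", max_vaddr_lohalf);
        printf("vaddr upper-half min: 0x%016" PRIx64 "\n", min_vaddr_hihalf);
        printf("paddr max:            0x%016" PRIx64 "\n", max_paddr);
        return 0;
    }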


@@ -370,8 +370,12 @@ static inline uint32_t x86_get_address_width(void)
:"=a" (rv)
:"a" (X86_CPUID_ADDR_WIDTH));
/* Extracting bit 15:8 from eax register */
return ((rv >> 8) & 0x0ff);
/*
Extracting bit 15:0 from eax register
Bits 07-00: #Physical Address Bits
Bits 15-08: #Linear Address Bits
*/
return (rv & 0x0000ffff);
}
static inline uint64_t check_smep_avail(void)
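
The 16-bit value now returned by x86_get_address_width() packs both widths as
CPUID reports them in EAX (this assumes X86_CPUID_ADDR_WIDTH selects leaf
0x80000008, where bits 7:0 give physical and bits 15:8 give linear address
bits). A small decoding sketch with a made-up sample value:

    /* width_decode_sketch.c - illustrative only; 0x3024 is a hypothetical
     * sample of what x86_get_address_width() might return. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t addr_width = 0x3024;                    /* assumed sample value */
        uint8_t paddr_width = addr_width & 0xFF;         /* bits 7:0  -> 0x24 = 36 physical bits */
        uint8_t vaddr_width = (addr_width >> 8) & 0xFF;  /* bits 15:8 -> 0x30 = 48 linear bits */

        printf("physical address bits: %u\n", (unsigned)paddr_width);
        printf("linear address bits:   %u\n", (unsigned)vaddr_width);
        return 0;
    }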


@@ -37,28 +37,59 @@
 extern map_addr_t g_CR3;
 
 /* Address width */
-extern uint32_t g_addr_width;
+extern uint8_t g_vaddr_width;
+extern uint8_t g_paddr_width;
 
 /**
- * @brief check if the address is valid
+ * @brief check if the virtual address is aligned and canonical
  *
  */
-static bool x86_mmu_check_map_addr(addr_t address)
+static bool x86_mmu_check_vaddr(vaddr_t vaddr)
 {
-    uint64_t addr = (uint64_t)address;
+    uint64_t addr = (uint64_t)vaddr;
+    uint64_t max_vaddr_lohalf, min_vaddr_hihalf;
 
     /* Check to see if the address is PAGE aligned */
     if(!IS_ALIGNED(addr, PAGE_SIZE))
         return false;
 
+    /* get max address in lower-half canonical addr space */
+    /* e.g. if width is 48, then 0x00007FFF_FFFFFFFF */
+    max_vaddr_lohalf = ((uint64_t)1ull << (g_vaddr_width - 1)) - 1;
+
+    /* get min address in higher-half canonical addr space */
+    /* e.g. if width is 48, then 0xFFFF8000_00000000 */
+    min_vaddr_hihalf = ~ max_vaddr_lohalf;
+
     /* Check to see if the address in a canonical address */
-    if(addr >> (g_addr_width - 1))
-        if((addr >> (g_addr_width - 1)) ^ ((1ul << (64 - (g_addr_width - 1))) - 1))
-            return false;
+    if((addr > max_vaddr_lohalf) && (addr < min_vaddr_hihalf))
+        return false;
 
     return true;
 }
+
+/**
+ * @brief check if the physical address is valid and aligned
+ *
+ */
+static bool x86_mmu_check_paddr(paddr_t paddr)
+{
+    uint64_t addr = (uint64_t)paddr;
+    uint64_t max_paddr;
+
+    /* Check to see if the address is PAGE aligned */
+    if(!IS_ALIGNED(addr, PAGE_SIZE))
+        return false;
+
+    max_paddr = ((uint64_t)1ull << g_paddr_width) - 1;
+
+    return addr <= max_paddr;
+}
 
 static inline uint64_t get_pml4_entry_from_pml4_table(vaddr_t vaddr, addr_t pml4_addr)
 {
     uint32_t pml4_index;
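
Not part of the diff: a standalone approximation of the two new checks, with
the widths hard-coded to assumed values (48-bit linear, 36-bit physical) and
IS_ALIGNED/PAGE_SIZE mimicked locally, so a few sample addresses can be
exercised outside the kernel:

    /* addr_check_sketch.c - standalone approximation of the new checks,
     * assuming g_vaddr_width = 48, g_paddr_width = 36 and 4 KiB pages. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define VADDR_WIDTH 48
    #define PADDR_WIDTH 36

    static bool check_vaddr(uint64_t addr)
    {
        uint64_t max_lo = ((uint64_t)1 << (VADDR_WIDTH - 1)) - 1; /* 0x00007FFFFFFFFFFF */
        uint64_t min_hi = ~max_lo;                                /* 0xFFFF800000000000 */

        if (addr & (PAGE_SIZE - 1))                 /* must be page aligned */
            return false;
        return (addr <= max_lo) || (addr >= min_hi); /* reject the non-canonical hole */
    }

    static bool check_paddr(uint64_t addr)
    {
        uint64_t max_paddr = ((uint64_t)1 << PADDR_WIDTH) - 1;    /* 0x0000000FFFFFFFFF */

        if (addr & (PAGE_SIZE - 1))                 /* must be page aligned */
            return false;
        return addr <= max_paddr;
    }

    int main(void)
    {
        /* Kernel-half virtual address: canonical, should pass (prints 1). */
        printf("vaddr 0xFFFF800000000000 -> %d\n", check_vaddr(0xFFFF800000000000ull));
        /* Sits in the non-canonical hole: should fail (prints 0). */
        printf("vaddr 0x0000800000000000 -> %d\n", check_vaddr(0x0000800000000000ull));
        /* Physical address above the 36-bit limit: should fail (prints 0). */
        printf("paddr 0x0000001000000000 -> %d\n", check_paddr(0x0000001000000000ull));
        return 0;
    }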
@@ -244,8 +275,8 @@ status_t x86_mmu_check_mapping(addr_t pml4, paddr_t paddr,
     DEBUG_ASSERT(pml4);
 
     if((!ret_level) || (!last_valid_entry) || (!ret_flags) ||
-        (!x86_mmu_check_map_addr(vaddr)) ||
-        (!x86_mmu_check_map_addr(paddr))) {
+        (!x86_mmu_check_vaddr(vaddr)) ||
+        (!x86_mmu_check_paddr(paddr))) {
         return ERR_INVALID_ARGS;
     }
@@ -383,7 +414,7 @@ status_t x86_mmu_add_mapping(addr_t pml4, paddr_t paddr,
     status_t ret = NO_ERROR;
 
     DEBUG_ASSERT(pml4);
-    if((!x86_mmu_check_map_addr(vaddr)) || (!x86_mmu_check_map_addr(paddr)) )
+    if((!x86_mmu_check_vaddr(vaddr)) || (!x86_mmu_check_paddr(paddr)) )
         return ERR_INVALID_ARGS;
 
     pml4e = get_pml4_entry_from_pml4_table(vaddr, pml4);
@@ -525,7 +556,7 @@ status_t x86_mmu_unmap(addr_t pml4, vaddr_t vaddr, uint count)
     vaddr_t next_aligned_v_addr;
 
     DEBUG_ASSERT(pml4);
-    if(!(x86_mmu_check_map_addr(vaddr)))
+    if(!(x86_mmu_check_vaddr(vaddr)))
         return ERR_INVALID_ARGS;
 
     if (count == 0)
@@ -544,7 +575,7 @@ int arch_mmu_unmap(vaddr_t vaddr, uint count)
 {
     addr_t current_cr3_val;
 
-    if(!(x86_mmu_check_map_addr(vaddr)))
+    if(!(x86_mmu_check_vaddr(vaddr)))
         return ERR_INVALID_ARGS;
 
     if (count == 0)
@@ -626,7 +657,7 @@ int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags)
     addr_t current_cr3_val;
     struct map_range range;
 
-    if((!x86_mmu_check_map_addr(paddr)) || (!x86_mmu_check_map_addr(vaddr)))
+    if((!x86_mmu_check_paddr(paddr)) || (!x86_mmu_check_vaddr(vaddr)))
         return ERR_INVALID_ARGS;
 
     if (count == 0)


@@ -61,8 +61,9 @@ extern uint64_t __bss_end;
 extern void pci_init(void);
 extern void arch_mmu_init(void);
 
-/* Address width */
-uint32_t g_addr_width;
+/* Address width including virtual/physical address*/
+uint8_t g_vaddr_width = 0;
+uint8_t g_paddr_width = 0;
 
 /* Kernel global CR3 */
 map_addr_t g_CR3 = 0;
@@ -72,9 +73,14 @@ void platform_init_mmu_mappings(void)
     struct map_range range;
     arch_flags_t access;
     map_addr_t *init_table, phy_init_table;
+    uint32_t addr_width;
 
     /* getting the address width from CPUID instr */
-    g_addr_width = x86_get_address_width();
+    /* Bits 07-00: Physical Address width info */
+    /* Bits 15-08: Linear Address width info */
+    addr_width = x86_get_address_width();
+    g_paddr_width = (uint8_t)(addr_width & 0xFF);
+    g_vaddr_width = (uint8_t)((addr_width >> 8) & 0xFF);
 
     /* Creating the First page in the page table hirerachy */
     /* Can be pml4, pdpt or pdt based on x86_64, x86 PAE mode & x86 non-PAE mode respectively */
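
As a concrete illustration (the value is assumed, not taken from the commit): if
x86_get_address_width() returned 0x3024 here, addr_width & 0xFF would leave
g_paddr_width = 0x24 = 36 and (addr_width >> 8) & 0xFF would leave
g_vaddr_width = 0x30 = 48.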