diff --git a/arch/arm/arm-m/arch.c b/arch/arm/arm-m/arch.c index 1804e9e8..fbb6849c 100644 --- a/arch/arm/arm-m/arch.c +++ b/arch/arm/arm-m/arch.c @@ -31,9 +31,6 @@ extern void *vectab; -extern int _end_of_ram; -void *_heap_end = &_end_of_ram; - #if ARM_CM_DYNAMIC_PRIORITY_SIZE unsigned int arm_cm_num_irq_pri_bits; unsigned int arm_cm_irq_pri_mask; diff --git a/arch/arm/arm/arch.c b/arch/arm/arm/arch.c index 3cbd552b..2477d797 100644 --- a/arch/arm/arm/arch.c +++ b/arch/arm/arm/arch.c @@ -35,7 +35,7 @@ void arch_early_init(void) /* set the vector base to our exception vectors so we dont need to double map at 0 */ #if ARM_ISA_ARMV7 - arm_write_vbar(MEMBASE); + arm_write_vbar(KERNEL_BASE); #endif #if ARM_WITH_MMU diff --git a/arch/arm/arm/mmu.c b/arch/arm/arm/mmu.c index 36f09db9..7b84ea47 100644 --- a/arch/arm/arm/mmu.c +++ b/arch/arm/arm/mmu.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Travis Geiselbrecht + * Copyright (c) 2008-2014 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -21,91 +21,421 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include +#include +#include #include +#include +#include #include #include +#include +#include #include #include +#include + +#define LOCAL_TRACE 1 #if ARM_WITH_MMU -#define MB (1024*1024) +#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE) +#define IS_SUPERSECTION_ALIGNED(x) IS_ALIGNED(x, SUPERSECTION_SIZE) + +/* locals */ +static void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags); +static void arm_mmu_unmap_section(addr_t vaddr); -/* the location of the table may be brought in from outside */ -#if WITH_EXTERNAL_TRANSLATION_TABLE -#if !defined(MMU_TRANSLATION_TABLE_ADDR) -#error must set MMU_TRANSLATION_TABLE_ADDR in the make configuration -#endif -uint32_t *tt = (void *)MMU_TRANSLATION_TABLE_ADDR; -#else /* the main translation table */ -uint32_t tt[4096] __ALIGNED(16384) __SECTION(".bss.prebss.translation_table"); -#endif +uint32_t arm_kernel_translation_table[4096] __ALIGNED(16384) __SECTION(".bss.prebss.translation_table"); -#define MMU_FLAG_CACHED 0x1 -#define MMU_FLAG_BUFFERED 0x2 -#define MMU_FLAG_READWRITE 0x4 - -void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags) +/* convert user level mmu flags to flags that go in L1 descriptors */ +static uint32_t mmu_flags_to_l1_arch_flags(uint flags) { - int index; + uint32_t arch_flags = 0; + switch (flags & ARCH_MMU_FLAG_CACHE_MASK) { + case ARCH_MMU_FLAG_CACHED: + arch_flags |= MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE; + break; + case ARCH_MMU_FLAG_UNCACHED: + arch_flags |= MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED; + break; + case ARCH_MMU_FLAG_UNCACHED_DEVICE: + arch_flags |= MMU_MEMORY_L1_TYPE_DEVICE_SHARED; + break; + default: + /* invalid user-supplied flag */ + DEBUG_ASSERT(1); + return ERR_INVALID_ARGS; + } - /* Get the index into the translation table */ - index = vaddr / MB; + switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) { + case 0: + arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_NA; + break; + case ARCH_MMU_FLAG_PERM_RO: + /* this mapping is a lie, we don't support RO kernel mapping */ + arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_NA; + break; + case ARCH_MMU_FLAG_PERM_USER: + arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_RW; + break; + case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO: + arch_flags |= MMU_MEMORY_L1_AP_P_RW_U_RO; + break; + } - /* Set the entry value: - * (2<<0): Section entry - * (0<<5): Domain = 0 - * flags: 
TEX, CB and AP bit settings provided by the caller. - */ - tt[index] = (paddr & ~(MB-1)) | (MMU_MEMORY_DOMAIN_MEM << 5) | (2<<0) | flags; - - arm_invalidate_tlb(); + return arch_flags; } -void arm_mmu_unmap_section(addr_t vaddr) +/* convert user level mmu flags to flags that go in L2 descriptors */ +static uint32_t mmu_flags_to_l2_arch_flags(uint flags) { - uint index = vaddr / MB; - tt[index] = 0; + uint32_t arch_flags = 0; + switch (flags & ARCH_MMU_FLAG_CACHE_MASK) { + case ARCH_MMU_FLAG_CACHED: + arch_flags |= MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE; + break; + case ARCH_MMU_FLAG_UNCACHED: + arch_flags |= MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED; + break; + case ARCH_MMU_FLAG_UNCACHED_DEVICE: + arch_flags |= MMU_MEMORY_L2_TYPE_DEVICE_SHARED; + break; + default: + /* invalid user-supplied flag */ + DEBUG_ASSERT(1); + return ERR_INVALID_ARGS; + } - arm_invalidate_tlb(); + switch (flags & (ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO)) { + case 0: + arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_NA; + break; + case ARCH_MMU_FLAG_PERM_RO: + /* this mapping is a lie, we don't support RO kernel mapping */ + arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_NA; + break; + case ARCH_MMU_FLAG_PERM_USER: + arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_RW; + break; + case ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO: + arch_flags |= MMU_MEMORY_L2_AP_P_RW_U_RO; + break; + } + + return arch_flags; } -#if defined(ARM_ISA_ARMV6) | defined(ARM_ISA_ARMV7) -#define MMU_INIT_MAP_FLAGS (MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED | \ - MMU_MEMORY_L1_AP_P_RW_U_NA) -#else -#define MMU_INIT_MAP_FLAGS MMU_FLAG_READWRITE -#endif +static void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags) +{ + int index; + + LTRACEF("pa 0x%lx va 0x%lx flags 0x%x\n", paddr, vaddr, flags); + + DEBUG_ASSERT(IS_SECTION_ALIGNED(paddr)); + DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr)); + DEBUG_ASSERT((flags & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_SECTION); + + /* Get the index into the translation table */ + index = vaddr / SECTION_SIZE; + + /* Set the entry value: + * (2<<0): Section entry + * (0<<5): Domain = 0 + * flags: TEX, CB and AP bit settings provided by the caller. + */ + arm_kernel_translation_table[index] = (paddr & ~(MB-1)) | (MMU_MEMORY_DOMAIN_MEM << 5) | MMU_MEMORY_L1_DESCRIPTOR_SECTION | flags; +} + +static void arm_mmu_unmap_section(addr_t vaddr) +{ + DEBUG_ASSERT(IS_SECTION_ALIGNED(vaddr)); + + uint index = vaddr / SECTION_SIZE; + arm_kernel_translation_table[index] = 0; + + arm_invalidate_tlb_mva(vaddr); +} void arm_mmu_init(void) { -#if !WITH_MMU_RELOC - /* set some mmu specific control bits */ - arm_write_sctlr(arm_read_sctlr() & ~((1<<29)|(1<<28)|(1<<0))); // access flag disabled, TEX remap disabled, mmu disabled + /* unmap the initial mapings that are marked temporary */ + struct mmu_initial_mapping *map = mmu_initial_mappings; + while (map->size > 0) { + if (map->flags & MMU_INITIAL_MAPPING_TEMPORARY) { + vaddr_t va = map->virt; + size_t size = map->size; - /* set up an identity-mapped translation table with - * strongly ordered memory type and read/write access. 
- */ - for (addr_t i=0; i < 4096; i++) { - arm_mmu_map_section(i * MB, i * MB, MMU_INIT_MAP_FLAGS); - } + DEBUG_ASSERT(IS_SECTION_ALIGNED(size)); - /* set up the translation table base */ - arm_write_ttbr0((uint32_t)tt); - - /* set up the domain access register */ - arm_write_dacr(0x1 << (MMU_MEMORY_DOMAIN_MEM * 2)); - - /* turn on the mmu */ - arm_write_sctlr(arm_read_sctlr() | 0x1); -#endif + while (size > 0) { + arm_mmu_unmap_section(va); + va += MB; + size -= MB; + } + } + map++; + } } void arch_disable_mmu(void) { - arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // access flag disabled, TEX remap disabled, mmu disabled + arm_write_sctlr(arm_read_sctlr() & ~(1<<0)); // mmu disabled } +status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags) +{ + //LTRACEF("vaddr 0x%lx\n", vaddr); + + /* Get the index into the translation table */ + uint index = vaddr / MB; + + /* decode it */ + uint32_t tt_entry = arm_kernel_translation_table[index]; + switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) { + case MMU_MEMORY_L1_DESCRIPTOR_INVALID: + return ERR_NOT_FOUND; + case MMU_MEMORY_L1_DESCRIPTOR_SECTION: + if (tt_entry & (1<<18)) { + /* supersection */ + PANIC_UNIMPLEMENTED; + } + + /* section */ + if (paddr) + *paddr = MMU_MEMORY_L1_SECTION_ADDR(tt_entry) + (vaddr & (SECTION_SIZE - 1)); + + if (flags) { + *flags = 0; + switch (tt_entry & MMU_MEMORY_L1_TYPE_MASK) { + case MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED: + *flags |= ARCH_MMU_FLAG_UNCACHED; + break; + case MMU_MEMORY_L1_TYPE_DEVICE_SHARED: + case MMU_MEMORY_L1_TYPE_DEVICE_NON_SHARED: + *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE; + break; + } + switch (tt_entry & MMU_MEMORY_L1_AP_MASK) { + case MMU_MEMORY_L1_AP_P_NA_U_NA: + // XXX no access, what to return? + break; + case MMU_MEMORY_L1_AP_P_RW_U_NA: + break; + case MMU_MEMORY_L1_AP_P_RW_U_RO: + *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO; // XXX should it be rw anyway since kernel can rw it? + break; + case MMU_MEMORY_L1_AP_P_RW_U_RW: + *flags |= ARCH_MMU_FLAG_PERM_USER; + break; + } + } + break; + case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: { + uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry)); + uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE; + uint32_t l2_entry = l2_table[l2_index]; + + //LTRACEF("l2_table at %p, index %u, entry 0x%x\n", l2_table, l2_index, l2_entry); + + switch (l2_entry & MMU_MEMORY_L2_DESCRIPTOR_MASK) { + default: + case MMU_MEMORY_L2_DESCRIPTOR_INVALID: + return ERR_NOT_FOUND; + case MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE: + PANIC_UNIMPLEMENTED; + break; + case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE: + case MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN: + if (paddr) + *paddr = MMU_MEMORY_L2_SMALL_PAGE_ADDR(l2_entry); + + if (flags) { + *flags = 0; + switch (l2_entry & MMU_MEMORY_L2_TYPE_MASK) { + case MMU_MEMORY_L2_TYPE_STRONGLY_ORDERED: + *flags |= ARCH_MMU_FLAG_UNCACHED; + break; + case MMU_MEMORY_L2_TYPE_DEVICE_SHARED: + case MMU_MEMORY_L2_TYPE_DEVICE_NON_SHARED: + *flags |= ARCH_MMU_FLAG_UNCACHED_DEVICE; + break; + } + switch (l2_entry & MMU_MEMORY_L2_AP_MASK) { + case MMU_MEMORY_L2_AP_P_NA_U_NA: + // XXX no access, what to return? + break; + case MMU_MEMORY_L2_AP_P_RW_U_NA: + break; + case MMU_MEMORY_L2_AP_P_RW_U_RO: + *flags |= ARCH_MMU_FLAG_PERM_USER | ARCH_MMU_FLAG_PERM_RO; // XXX should it be rw anyway since kernel can rw it? 
+ break; + case MMU_MEMORY_L2_AP_P_RW_U_RW: + *flags |= ARCH_MMU_FLAG_PERM_USER; + break; + } + } + break; + } + + break; + } + default: + PANIC_UNIMPLEMENTED; + } + + return NO_ERROR; +} + +int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags) +{ + LTRACEF("vaddr 0x%lx paddr 0x%lx count %u flags 0x%x\n", vaddr, paddr, count, flags); + + /* paddr and vaddr must be aligned */ + DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr)); + if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(paddr)) + return ERR_INVALID_ARGS; + + if (count == 0) + return NO_ERROR; + + /* see what kind of mapping we can use */ + int mapped = 0; + while (count > 0) { + if (IS_SECTION_ALIGNED(vaddr) && IS_SECTION_ALIGNED(paddr) && count >= SECTION_SIZE / PAGE_SIZE) { + /* we can use a section */ + + /* compute the arch flags for L1 sections */ + uint arch_flags = mmu_flags_to_l1_arch_flags(flags) | + MMU_MEMORY_L1_DESCRIPTOR_SECTION; + + /* map it */ + arm_mmu_map_section(paddr, vaddr, arch_flags); + count -= SECTION_SIZE / PAGE_SIZE; + mapped += SECTION_SIZE / PAGE_SIZE; + vaddr += SECTION_SIZE; + paddr += SECTION_SIZE; + } else { + /* will have to use a L2 mapping */ + uint l1_index = vaddr / SECTION_SIZE; + uint32_t tt_entry = arm_kernel_translation_table[l1_index]; + + LTRACEF("tt_entry 0x%x\n", tt_entry); + switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) { + case MMU_MEMORY_L1_DESCRIPTOR_SECTION: + // XXX will have to break L1 mapping into a L2 page table + PANIC_UNIMPLEMENTED; + break; + case MMU_MEMORY_L1_DESCRIPTOR_INVALID: { + /* alloc and put in a L2 page table */ + uint32_t *l2_table = pmm_alloc_kpage(); + if (!l2_table) { + TRACEF("failed to allocate pagetable\n"); + goto done; + } + + /* get physical address */ + paddr_t l2_pa = 0; + arm_vtop((vaddr_t)l2_table, &l2_pa); + + LTRACEF("allocated pagetable at %p, pa 0x%lx\n", l2_table, l2_pa); + + DEBUG_ASSERT(IS_PAGE_ALIGNED((vaddr_t)l2_table)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(l2_pa)); + + /* zero the L2 table and add it to the L1 table */ + memset(l2_table, 0, PAGE_SIZE); + + /* put it in the adjacent 4 entries filling in 1K page tables at once */ + l1_index = ROUNDDOWN(l1_index, 4); + arm_kernel_translation_table[l1_index] = l2_pa | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE; + arm_kernel_translation_table[l1_index + 1] = (l2_pa + 1024) | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE; + arm_kernel_translation_table[l1_index + 2] = (l2_pa + 2048) | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE; + arm_kernel_translation_table[l1_index + 3] = (l2_pa + 3072) | MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE; + tt_entry = arm_kernel_translation_table[l1_index]; + + /* fallthrough */ + } + case MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE: { + uint32_t *l2_table = paddr_to_kvaddr(MMU_MEMORY_L1_PAGE_TABLE_ADDR(tt_entry)); + LTRACEF("l2_table at %p\n", l2_table); + + DEBUG_ASSERT(l2_table); + + // XXX handle 64K pages here + + /* compute the arch flags for L2 4K pages */ + uint arch_flags = mmu_flags_to_l2_arch_flags(flags) | + MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE; + + /* add the entry */ + uint l2_index = (vaddr % SECTION_SIZE) / PAGE_SIZE; + l2_table[l2_index] = paddr | arch_flags; + + count--; + mapped++; + vaddr += PAGE_SIZE; + paddr += PAGE_SIZE; + break; + } + default: + PANIC_UNIMPLEMENTED; + } + } + } + +done: + return mapped; +} + +int arch_mmu_unmap(vaddr_t vaddr, uint count) +{ + DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr)); + if (!IS_PAGE_ALIGNED(vaddr)) + return ERR_INVALID_ARGS; + + int unmapped = 0; + while (count > 0) { + uint l1_index = vaddr / SECTION_SIZE; + 
uint32_t tt_entry = arm_kernel_translation_table[l1_index]; + + switch (tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) { + case MMU_MEMORY_L1_DESCRIPTOR_INVALID: + /* this top level page is not mapped, move on to the next one */ + goto next_page; + case MMU_MEMORY_L1_DESCRIPTOR_SECTION: + if (IS_SECTION_ALIGNED(vaddr) && count >= SECTION_SIZE / PAGE_SIZE) { + /* we're asked to remove at least all of this section, so just zero it out */ + // XXX test for supersection + arm_mmu_unmap_section(vaddr); + + vaddr += SECTION_SIZE; + count -= SECTION_SIZE / PAGE_SIZE; + unmapped += SECTION_SIZE / PAGE_SIZE; + goto next; + } else { + // XXX handle unmapping just part of a section + // will need to convert to a L2 table and then unmap the parts we are asked to + PANIC_UNIMPLEMENTED; + } + break; + default: + // XXX not implemented supersections or L2 tables + PANIC_UNIMPLEMENTED; + } + +next_page: + vaddr += PAGE_SIZE; + count--; +next: + ; + } + + return unmapped; +} + + #endif // ARM_WITH_MMU +/* vim: set ts=4 sw=4 expandtab: */ diff --git a/arch/arm/arm/start.S b/arch/arm/arm/start.S index ce43ddb8..aca3a7e9 100644 --- a/arch/arm/arm/start.S +++ b/arch/arm/arm/start.S @@ -22,6 +22,8 @@ */ #include #include +#include +#include .section ".text.boot" .globl _start @@ -62,6 +64,124 @@ arm_reset: bl __cpu_early_init #endif +#if WITH_KERNEL_VM + /* set up the mmu according to mmu_initial_mappings */ + + /* calculate our physical to virtual offset */ + mov r12, pc + ldr r1, =.Laddr +.Laddr: + sub r12, r1 + + /* r12 now holds the offset from virtual to physical: + * virtual + r12 = physical */ + + /* load the base of the translation table and clear the table */ + ldr r0, =arm_kernel_translation_table + add r0, r12 + + mov r1, #0 + mov r2, #0 + + /* walk through all the entries in the translation table, setting them up */ +0: + str r1, [r0, r2, lsl #2] + add r2, #1 + cmp r2, #4096 + bne 0b + + /* load the address of the mmu_initial_mappings table and start processing */ + ldr r1, =mmu_initial_mappings + add r1, r12 + +.Linitial_mapping_loop: + ldmia r1!, { r2-r6 } + /* r2 = phys, r3 = virt, r4 = size, r5 = flags, r6 = name */ + + /* mask all the addresses and sizes to 1MB boundaries */ + lsr r2, #20 /* r2 = physical address / 1MB */ + lsr r3, #20 /* r3 = virtual address / 1MB */ + lsr r4, #20 /* r4 = size in 1MB chunks */ + + /* if size == 0, end of list */ + cmp r4, #0 + beq .Linitial_mapping_done + + /* set up the flags */ + ldr r6, =MMU_KERNEL_L1_PTE_FLAGS + teq r5, #MMU_INITIAL_MAPPING_FLAG_UNCACHED + ldreq r6, =MMU_INITIAL_MAP_STRONGLY_ORDERED + beq 0f + teq r5, #MMU_INITIAL_MAPPING_FLAG_DEVICE + ldreq r6, =MMU_INITIAL_MAP_DEVICE + /* r6 = mmu entry flags */ + +0: + orr r7, r6, r2, lsl #20 + /* r7 = phys addr | flags */ + + /* store into appropriate translation table entry */ + str r7, [r0, r3, lsl #2] + + /* loop until we're done */ + add r2, #1 + add r3, #1 + subs r4, #1 + bne 0b + + b .Linitial_mapping_loop + +.Linitial_mapping_done: + + /* set up the mmu */ + + /* Invalidate TLB */ + mov r3, #0 + mcr p15, 0, r3, c8, c7, 0 + isb + + /* Write 0 to TTBCR */ + mcr p15, 0, r3, c2, c0, 2 + isb + + /* set cacheable attributes on translation walk */ + /* (SMP extensions) non-shareable, inner write-back write-allocate */ + orr r0, #(1<<6 | 0<<1) + /* outer write-back write-allocate */ + orr r0, #(1<<3) + + /* Write ttbr with phys addr of the translation table */ + mcr p15, 0, r0, c2, c0, 0 + isb + + /* Write DACR */ + mov r3, #0x1 + mcr p15, 0, r3, c3, c0, 0 + isb + + /* Read SCTLR into r3 */ + mrc p15, 0, r3, 
c1, c0, 0 + + /* Disable TRE/AFE */ + bic r3, r3, #(1<<29 | 1<<28) + + /* Turn on the MMU */ + orr r3, r3, #0x1 + + /* Write back SCTLR */ + mcr p15, 0, r3, c1, c0, 0 + isb + + /* Jump to virtual code address */ + ldr pc, =1f +1: + + /* Invalidate TLB */ + mov r3, #0 + mcr p15, 0, r3, c8, c7, 0 + isb + +#else /* see if we need to relocate */ mov r0, pc sub r0, r0, #(.Laddr - _start) @@ -82,9 +202,9 @@ arm_reset: /* we're relocated, jump to the right address */ ldr r0, =.Lstack_setup bx r0 +#endif -.ltorg - + /* at this point we're running at our final location in virtual memory (if enabled) */ .Lstack_setup: /* set up the stack for irq, fiq, abort, undefined, system/user, and lastly supervisor mode */ ldr r2, =abort_stack_top @@ -152,10 +272,4 @@ LOCAL_DATA(abort_stack_top) .data .align 2 -/* define the heap end a variable containing the end defined in the - * linker script. this could be updated during init. - */ -DATA(_heap_end) - .int _end_of_ram - /* vim: set ts=4 sw=4 noexpandtab: */ diff --git a/arch/arm/include/arch/arm.h b/arch/arm/include/arch/arm.h index 575cb9a9..8842b5c5 100644 --- a/arch/arm/include/arch/arm.h +++ b/arch/arm/include/arch/arm.h @@ -149,9 +149,21 @@ GEN_CP15_REG_FUNCS(ats12nsour, 0, c7, c8, 6); GEN_CP15_REG_FUNCS(ats12nsouw, 0, c7, c8, 7); GEN_CP15_REG_FUNCS(par, 0, c7, c4, 0); -void arm_invalidate_tlb(void); - -status_t arm_vtop(addr_t va, addr_t *pa); +/* tlb registers */ +GEN_CP15_REG_FUNCS(tlbiallis, 0, c8, c3, 0); +GEN_CP15_REG_FUNCS(tlbimvais, 0, c8, c3, 1); +GEN_CP15_REG_FUNCS(tlbiasidis, 0, c8, c3, 2); +GEN_CP15_REG_FUNCS(tlbimvaais, 0, c8, c3, 3); +GEN_CP15_REG_FUNCS(itlbiall, 0, c8, c5, 0); +GEN_CP15_REG_FUNCS(itlbimva, 0, c8, c5, 1); +GEN_CP15_REG_FUNCS(itlbiasid, 0, c8, c5, 2); +GEN_CP15_REG_FUNCS(dtlbiall, 0, c8, c6, 0); +GEN_CP15_REG_FUNCS(dtlbimva, 0, c8, c6, 1); +GEN_CP15_REG_FUNCS(dtlbiasid, 0, c8, c6, 2); +GEN_CP15_REG_FUNCS(tlbiall, 0, c8, c7, 0); +GEN_CP15_REG_FUNCS(tlbimva, 0, c8, c7, 1); +GEN_CP15_REG_FUNCS(tlbiasid, 0, c8, c7, 2); +GEN_CP15_REG_FUNCS(tlbimvaa, 0, c8, c7, 3); /* fpu */ void arm_fpu_set_enable(bool enable); diff --git a/arch/arm/include/arch/arm/mmu.h b/arch/arm/include/arch/arm/mmu.h index 9a8c9ccc..2d20e09a 100644 --- a/arch/arm/include/arch/arm/mmu.h +++ b/arch/arm/include/arch/arm/mmu.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008 Travis Geiselbrecht + * Copyright (c) 2008-2014 Travis Geiselbrecht * Copyright (c) 2012, NVIDIA CORPORATION. 
All rights reserved * * Permission is hereby granted, free of charge, to any person obtaining @@ -24,12 +24,23 @@ #ifndef __ARCH_ARM_MMU_H #define __ARCH_ARM_MMU_H +#define MB (1024U*1024U) +#define SECTION_SIZE MB +#define SUPERSECTION_SIZE (16 * MB) + #if defined(ARM_ISA_ARMV6) | defined(ARM_ISA_ARMV7) -#define MMU_MEMORY_L1_DESCRIPTOR_INVALID (0x0 << 0) -#define MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE (0x1 << 0) -#define MMU_MEMORY_L1_DESCRIPTOR_SECTION (0x2 << 0) -#define MMU_MEMORY_L1_DESCRIPTOR_SUPERSECTION ((0x2 << 0) | (0x1 << 18)) +#define MMU_MEMORY_L1_DESCRIPTOR_INVALID (0x0 << 0) +#define MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE (0x1 << 0) +#define MMU_MEMORY_L1_DESCRIPTOR_SECTION (0x2 << 0) +#define MMU_MEMORY_L1_DESCRIPTOR_SUPERSECTION ((0x2 << 0) | (0x1 << 18)) +#define MMU_MEMORY_L1_DESCRIPTOR_MASK (0x3 << 0) + +#define MMU_MEMORY_L2_DESCRIPTOR_INVALID (0x0 << 0) +#define MMU_MEMORY_L2_DESCRIPTOR_LARGE_PAGE (0x1 << 0) +#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE (0x2 << 0) +#define MMU_MEMORY_L2_DESCRIPTOR_SMALL_PAGE_XN (0x3 << 0) +#define MMU_MEMORY_L2_DESCRIPTOR_MASK (0x3 << 0) /* C, B and TEX[2:0] encodings without TEX remap (for first level descriptors) */ /* TEX | CB */ @@ -40,6 +51,7 @@ #define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_THROUGH ((0x0 << 12) | (0x2 << 2)) #define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 12) | (0x3 << 2)) #define MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE ((0x1 << 12) | (0x3 << 2)) +#define MMU_MEMORY_L1_TYPE_MASK ((0x3 << 12) | (0x3 << 2)) /* C, B and TEX[2:0] encodings without TEX remap (for second level descriptors) */ /* TEX | CB */ @@ -50,8 +62,9 @@ #define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_THROUGH ((0x0 << 6) | (0x2 << 2)) #define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_NO_ALLOCATE ((0x0 << 6) | (0x3 << 2)) #define MMU_MEMORY_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE ((0x1 << 6) | (0x3 << 2)) +#define MMU_MEMORY_L2_TYPE_MASK ((0x3 << 6) | (0x3 << 2)) -#define MMU_MEMORY_DOMAIN_MEM (0) +#define MMU_MEMORY_DOMAIN_MEM (0) /* * AP (Access Permissions) @@ -77,43 +90,51 @@ * U = ~P * */ -#define MMU_MEMORY_L1_AP_P_NA_U_NA ((0x0 << 15) | (0x0 << 10)) -#define MMU_MEMORY_L1_AP_P_RW_U_RO ((0x0 << 15) | (0x2 << 10)) -#define MMU_MEMORY_L1_AP_P_RW_U_RW ((0x0 << 15) | (0x3 << 10)) -#define MMU_MEMORY_L1_AP_P_RW_U_NA ((0x0 << 15) | (0x1 << 10)) +#define MMU_MEMORY_L1_AP_P_NA_U_NA ((0x0 << 15) | (0x0 << 10)) +#define MMU_MEMORY_L1_AP_P_RW_U_RO ((0x0 << 15) | (0x2 << 10)) +#define MMU_MEMORY_L1_AP_P_RW_U_RW ((0x0 << 15) | (0x3 << 10)) +#define MMU_MEMORY_L1_AP_P_RW_U_NA ((0x0 << 15) | (0x1 << 10)) +#define MMU_MEMORY_L1_AP_MASK ((0x1 << 15) | (0x3 << 10)) -#define MMU_MEMORY_L2_AP_P_NA_U_NA ((0x0 << 9) | (0x0 << 4)) -#define MMU_MEMORY_L2_AP_P_RW_U_RO ((0x0 << 9) | (0x2 << 4)) -#define MMU_MEMORY_L2_AP_P_RW_U_RW ((0x0 << 9) | (0x3 << 4)) -#define MMU_MEMORY_L2_AP_P_RW_U_NA ((0x0 << 9) | (0x1 << 4)) +#define MMU_MEMORY_L2_AP_P_NA_U_NA ((0x0 << 9) | (0x0 << 4)) +#define MMU_MEMORY_L2_AP_P_RW_U_RO ((0x0 << 9) | (0x2 << 4)) +#define MMU_MEMORY_L2_AP_P_RW_U_RW ((0x0 << 9) | (0x3 << 4)) +#define MMU_MEMORY_L2_AP_P_RW_U_NA ((0x0 << 9) | (0x1 << 4)) +#define MMU_MEMORY_L2_AP_MASK ((0x1 << 9) | (0x3 << 4)) -#define MMU_MEMORY_L1_PAGETABLE_NON_SECURE (1 << 3) +#define MMU_MEMORY_L1_PAGETABLE_NON_SECURE (1 << 3) -#define MMU_MEMORY_L1_SECTION_NON_SECURE (1 << 19) -#define MMU_MEMORY_L1_SECTION_SHAREABLE (1 << 16) -#define MMU_MEMORY_L1_SECTION_NON_GLOBAL (1 << 17) +#define MMU_MEMORY_L1_SECTION_NON_SECURE (1 << 19) +#define MMU_MEMORY_L1_SECTION_SHAREABLE (1 << 16) 
+#define MMU_MEMORY_L1_SECTION_NON_GLOBAL (1 << 17) +#define MMU_MEMORY_L1_SECTION_XN (1 << 4) -#define MMU_MEMORY_L2_SHAREABLE (1 << 10) -#define MMU_MEMORY_L2_NON_GLOBAL (1 << 11) +#define MMU_MEMORY_L2_SHAREABLE (1 << 10) +#define MMU_MEMORY_L2_NON_GLOBAL (1 << 11) -#define MMU_MEMORY_L2_CB_SHIFT 2 -#define MMU_MEMORY_L2_TEX_SHIFT 6 +#define MMU_MEMORY_L2_CB_SHIFT 2 +#define MMU_MEMORY_L2_TEX_SHIFT 6 -#define MMU_MEMORY_NON_CACHEABLE 0 -#define MMU_MEMORY_WRITE_BACK_ALLOCATE 1 -#define MMU_MEMORY_WRITE_THROUGH_NO_ALLOCATE 2 -#define MMU_MEMORY_WRITE_BACK_NO_ALLOCATE 3 +#define MMU_MEMORY_NON_CACHEABLE 0 +#define MMU_MEMORY_WRITE_BACK_ALLOCATE 1 +#define MMU_MEMORY_WRITE_THROUGH_NO_ALLOCATE 2 +#define MMU_MEMORY_WRITE_BACK_NO_ALLOCATE 3 -#define MMU_MEMORY_SET_L2_INNER(val) (((val) & 0x3) << MMU_MEMORY_L2_CB_SHIFT) -#define MMU_MEMORY_SET_L2_OUTER(val) (((val) & 0x3) << MMU_MEMORY_L2_TEX_SHIFT) -#define MMU_MEMORY_SET_L2_CACHEABLE_MEM (0x4 << MMU_MEMORY_L2_TEX_SHIFT) +#define MMU_MEMORY_SET_L2_INNER(val) (((val) & 0x3) << MMU_MEMORY_L2_CB_SHIFT) +#define MMU_MEMORY_SET_L2_OUTER(val) (((val) & 0x3) << MMU_MEMORY_L2_TEX_SHIFT) +#define MMU_MEMORY_SET_L2_CACHEABLE_MEM (0x4 << MMU_MEMORY_L2_TEX_SHIFT) -#define MMU_MEMORY_TTBR_RGN(x) (((x) & 0x3) << 3) +#define MMU_MEMORY_L1_SECTION_ADDR(x) ((x) & ~((1<<20)-1)) +#define MMU_MEMORY_L1_PAGE_TABLE_ADDR(x) ((x) & ~((1<<10)-1)) + +#define MMU_MEMORY_L2_SMALL_PAGE_ADDR(x) ((x) & ~((1<<12)-1)) +#define MMU_MEMORY_L2_LARGE_PAGE_ADDR(x) ((x) & ~((1<<16)-1)) + +#define MMU_MEMORY_TTBR_RGN(x) (((x) & 0x3) << 3) /* IRGN[1:0] is encoded as: IRGN[0] in TTBRx[6], and IRGN[1] in TTBRx[0] */ -#define MMU_MEMORY_TTBR_IRGN(x) ((((x) & 0x1) << 6) | \ - ((((x) >> 1) & 0x1) << 0)) +#define MMU_MEMORY_TTBR_IRGN(x) ((((x) & 0x1) << 6) | \ + ((((x) >> 1) & 0x1) << 0)) -#if WITH_MMU_RELOC /* Default configuration for main kernel page table: * - section mappings for memory * - do cached translation walks @@ -122,72 +143,60 @@ /* Enable cached page table walks: * inner/outer (IRGN/RGN): write-back + write-allocate */ -#define MMU_TTBRx_FLAGS \ - (MMU_MEMORY_TTBR_RGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) |\ - MMU_MEMORY_TTBR_IRGN(MMU_MEMORY_WRITE_BACK_ALLOCATE)) +#define MMU_TTBRx_FLAGS \ + (MMU_MEMORY_TTBR_RGN(MMU_MEMORY_WRITE_BACK_ALLOCATE) |\ + MMU_MEMORY_TTBR_IRGN(MMU_MEMORY_WRITE_BACK_ALLOCATE)) /* Section mapping, TEX[2:0]=001, CB=11, S=1, AP[2:0]=001 */ -#define MMU_KERNEL_L1_PTE_FLAGS \ - (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \ - MMU_MEMORY_L1_SECTION_SHAREABLE | \ - MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \ - MMU_MEMORY_L1_AP_P_RW_U_NA) -#endif +#define MMU_KERNEL_L1_PTE_FLAGS \ + (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \ + MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | \ + MMU_MEMORY_L1_AP_P_RW_U_NA) +/* XXX add with smp to above */ +// MMU_MEMORY_L1_SECTION_SHAREABLE | -#else +#define MMU_INITIAL_MAP_STRONGLY_ORDERED \ + (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \ + MMU_MEMORY_L1_TYPE_STRONGLY_ORDERED | \ + MMU_MEMORY_L1_AP_P_RW_U_NA) -#define MMU_FLAG_CACHED 0x1 -#define MMU_FLAG_BUFFERED 0x2 -#define MMU_FLAG_READWRITE 0x4 +#define MMU_INITIAL_MAP_DEVICE \ + (MMU_MEMORY_L1_DESCRIPTOR_SECTION | \ + MMU_MEMORY_L1_TYPE_DEVICE_SHARED | \ + MMU_MEMORY_L1_AP_P_RW_U_NA) -#define MMU_MEMORY_DOMAIN_MEM (0) -#endif +#endif // armv6 | armv7 #ifndef ASSEMBLY #include #include #include +#include __BEGIN_CDECLS void arm_mmu_init(void); +status_t arm_vtop(addr_t va, addr_t *pa); -void arm_mmu_map_section(addr_t paddr, addr_t vaddr, uint flags); -void arm_mmu_unmap_section(addr_t 
vaddr); - -#if WITH_MMU_RELOC -static inline void validate_kvaddr(void *ptr) -{ - extern void _start(void); - extern unsigned int _heap_end; - ASSERT((void *)_start <= ptr && ptr <= (void *)(_heap_end - 1)); +/* tlb routines */ +static inline void arm_invalidate_tlb_global(void) { + CF; + arm_write_tlbiall(0); + DSB; } -extern uintptr_t phys_offset; -static inline paddr_t kvaddr_to_paddr(void *ptr) -{ - validate_kvaddr(ptr); - return (paddr_t)ptr + phys_offset; +static inline void arm_invalidate_tlb_mva(vaddr_t va) { + CF; + arm_write_tlbimva(va & 0xfffff000); + DSB; } -static inline void *paddr_to_kvaddr(paddr_t paddr) -{ - void *ptr = (void *)(paddr - phys_offset); - validate_kvaddr(ptr); - return ptr; +static inline void arm_invalidate_tlb_asid(uint8_t asid) { + CF; + arm_write_tlbiasid(asid); + DSB; } -#else -static inline __ALWAYS_INLINE paddr_t kvaddr_to_paddr(void *ptr) -{ - return (paddr_t)ptr; -} - -static inline __ALWAYS_INLINE void *paddr_to_kvaddr(paddr_t paddr) -{ - return (void *)paddr; -} -#endif __END_CDECLS diff --git a/arch/arm/rules.mk b/arch/arm/rules.mk index 2fa3b81d..8fb05534 100644 --- a/arch/arm/rules.mk +++ b/arch/arm/rules.mk @@ -158,6 +158,16 @@ GLOBAL_DEFINES += \ ARCH_DEFAULT_STACK_SIZE=4096 ARCH_OPTFLAGS := -O2 + +# we have a mmu and want the vmm/pmm +WITH_KERNEL_VM=1 + +KERNEL_BASE ?= 0x80000000 +KERNEL_LOAD_OFFSET ?= 0 + +GLOBAL_DEFINES += \ + KERNEL_BASE=$(KERNEL_BASE) \ + KERNEL_LOAD_OFFSET=$(KERNEL_LOAD_OFFSET) endif ifeq ($(SUBARCH),arm-m) MODULE_SRCS += \ @@ -242,7 +252,7 @@ GENERATED += \ $(BUILDDIR)/system-onesegment.ld: $(LOCAL_DIR)/system-onesegment.ld $(wildcard arch/*.ld) @echo generating $@ @$(MKDIR) - $(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/" < $< > $@ + $(NOECHO)sed "s/%MEMBASE%/$(MEMBASE)/;s/%MEMSIZE%/$(MEMSIZE)/;s/%KERNEL_BASE%/$(KERNEL_BASE)/;s/%KERNEL_LOAD_OFFSET%/$(KERNEL_LOAD_OFFSET)/" < $< > $@ $(BUILDDIR)/system-twosegment.ld: $(LOCAL_DIR)/system-twosegment.ld $(wildcard arch/*.ld) @echo generating $@ diff --git a/arch/arm/system-onesegment.ld b/arch/arm/system-onesegment.ld index decc00af..380a34a8 100644 --- a/arch/arm/system-onesegment.ld +++ b/arch/arm/system-onesegment.ld @@ -4,10 +4,13 @@ OUTPUT_ARCH(arm) ENTRY(_start) SECTIONS { - . = %MEMBASE%; + . = %KERNEL_BASE% + %KERNEL_LOAD_OFFSET%; + + _start = .; /* text/read-only data */ - .text : { + /* set the load address to physical MEMBASE */ + .text : AT(%MEMBASE% + %KERNEL_LOAD_OFFSET%) { KEEP(*(.text.boot.vectab1)) KEEP(*(.text.boot.vectab2)) KEEP(*(.text.boot)) @@ -93,7 +96,7 @@ INCLUDE "arch/shared_data_sections.ld" _end = .; - . = %MEMBASE% + %MEMSIZE%; + . = %KERNEL_BASE% + %MEMSIZE%; _end_of_ram = .; /* Strip unnecessary stuff */ diff --git a/arch/arm64/arch.c b/arch/arm64/arch.c index cb78dd82..09f9e718 100644 --- a/arch/arm64/arch.c +++ b/arch/arm64/arch.c @@ -26,9 +26,6 @@ #include #include -extern int _end_of_ram; -void *_heap_end = &_end_of_ram; - void arch_early_init(void) { /* set the vector base */ diff --git a/arch/x86-64/crt0.S b/arch/x86-64/crt0.S index 3b71676f..b8f5848e 100644 --- a/arch/x86-64/crt0.S +++ b/arch/x86-64/crt0.S @@ -303,15 +303,6 @@ interrupt_common: .data .align 8 -/* define the heap end as read-write data containing the default end of the - * heap. dynamic memory length discovery can update this value during init. - * other archs can define this statically based on the memory layout of the - * platform. 
- */ -.global _heap_end -_heap_end: - .int 4096*1024 /* default to 4MB total */ - .global _multiboot_info _multiboot_info: .int 0 diff --git a/arch/x86-64/kernel.ld b/arch/x86-64/kernel.ld index 86e5af1f..39d1728b 100644 --- a/arch/x86-64/kernel.ld +++ b/arch/x86-64/kernel.ld @@ -68,5 +68,9 @@ INCLUDE "arch/shared_data_sections.ld" _end = .; + /* put a symbol arbitrarily 4MB past the end of the kernel */ + /* used by the heap and other early boot time allocators */ + _end_of_ram = . + (4*1024*1024); + /DISCARD/ : { *(.comment .note .eh_frame) } } diff --git a/arch/x86/crt0.S b/arch/x86/crt0.S index 8f46760e..4550b8ab 100644 --- a/arch/x86/crt0.S +++ b/arch/x86/crt0.S @@ -198,15 +198,6 @@ interrupt_common: .data .align 4 -/* define the heap end as read-write data containing the default end of the - * heap. dynamic memory length discovery can update this value during init. - * other archs can define this statically based on the memory layout of the - * platform. - */ -.global _heap_end -_heap_end: - .int 4096*1024 /* default to 4MB total */ - .global _multiboot_info _multiboot_info: .int 0 diff --git a/arch/x86/kernel.ld b/arch/x86/kernel.ld index 3be73e64..302567f5 100644 --- a/arch/x86/kernel.ld +++ b/arch/x86/kernel.ld @@ -68,5 +68,9 @@ INCLUDE "arch/shared_data_sections.ld" _end = .; + /* put a symbol arbitrarily 4MB past the end of the kernel */ + /* used by the heap and other early boot time allocators */ + _end_of_ram = . + (4*1024*1024); + /DISCARD/ : { *(.comment .note .eh_frame) } } diff --git a/dev/cache/pl310/pl310.c b/dev/cache/pl310/pl310.c index dac85ac7..70721902 100644 --- a/dev/cache/pl310/pl310.c +++ b/dev/cache/pl310/pl310.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/arch/mmu.h b/include/arch/mmu.h new file mode 100644 index 00000000..a6d96e50 --- /dev/null +++ b/include/arch/mmu.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#pragma once + +#include +#include +#include + +__BEGIN_CDECLS + +#define ARCH_MMU_FLAG_CACHED (0<<0) +#define ARCH_MMU_FLAG_UNCACHED (1<<0) +#define ARCH_MMU_FLAG_UNCACHED_DEVICE (2<<0) /* only exists on some arches, otherwise UNCACHED */ +#define ARCH_MMU_FLAG_CACHE_MASK (3<<0) + +#define ARCH_MMU_FLAG_PERM_USER (1<<2) +#define ARCH_MMU_FLAG_PERM_RO (1<<3) + +int arch_mmu_map(vaddr_t vaddr, paddr_t paddr, uint count, uint flags); +int arch_mmu_unmap(vaddr_t vaddr, uint count); +status_t arch_mmu_query(vaddr_t vaddr, paddr_t *paddr, uint *flags); + +void arch_disable_mmu(void); + +__END_CDECLS + diff --git a/include/arch/ops.h b/include/arch/ops.h index f3b6e64c..a8f77ead 100644 --- a/include/arch/ops.h +++ b/include/arch/ops.h @@ -60,8 +60,6 @@ void arch_sync_cache_range(addr_t start, size_t len); void arch_idle(void); -void arch_disable_mmu(void); - __END_CDECLS #endif // !ASSEMBLY diff --git a/include/kernel/vm.h b/include/kernel/vm.h new file mode 100644 index 00000000..0bd9aa31 --- /dev/null +++ b/include/kernel/vm.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#pragma once + +/* some assembly #defines, need to match the structure below */ +#define __MMU_INITIAL_MAPPING_PHYS_OFFSET 0 +#define __MMU_INITIAL_MAPPING_VIRT_OFFSET 4 +#define __MMU_INITIAL_MAPPING_SIZE_OFFSET 8 +#define __MMU_INITIAL_MAPPING_FLAGS_OFFSET 12 +#define __MMU_INITIAL_MAPPING_SIZE 20 + +/* flags for initial mapping struct */ +#define MMU_INITIAL_MAPPING_TEMPORARY (0x1) +#define MMU_INITIAL_MAPPING_FLAG_UNCACHED (0x2) +#define MMU_INITIAL_MAPPING_FLAG_DEVICE (0x4) + +#ifndef ASSEMBLY + +#include +#include +#include +#include +#include +#include +#include + +__BEGIN_CDECLS + +#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE) +#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE) + +struct mmu_initial_mapping { + paddr_t phys; + vaddr_t virt; + size_t size; + unsigned int flags; + const char *name; +}; + +/* Assert that the assembly macros above match this struct. 
*/ +STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, phys) == __MMU_INITIAL_MAPPING_PHYS_OFFSET); +STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, virt) == __MMU_INITIAL_MAPPING_VIRT_OFFSET); +STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, size) == __MMU_INITIAL_MAPPING_SIZE_OFFSET); +STATIC_ASSERT(__offsetof(struct mmu_initial_mapping, flags) == __MMU_INITIAL_MAPPING_FLAGS_OFFSET); +STATIC_ASSERT(sizeof(struct mmu_initial_mapping) == __MMU_INITIAL_MAPPING_SIZE); + +/* Platform or target must fill out one of these to set up the initial memory map + * for kernel and enough IO space to boot. + */ +extern struct mmu_initial_mapping mmu_initial_mappings[]; + +/* core per page structure */ +typedef struct vm_page { + struct list_node node; + + uint flags : 8; + uint ref : 24; +} vm_page_t; + +#define VM_PAGE_FLAG_NONFREE (0x1) + +/* physical allocator */ +typedef struct pmm_arena { + struct list_node node; + const char *name; + + uint flags; + uint priority; + + paddr_t base; + size_t size; + + size_t free_count; + + struct vm_page *page_array; + struct list_node free_list; +} pmm_arena_t; + +#define PMM_ARENA_FLAG_KMAP (0x1) /* this arena is already mapped and useful for kallocs */ + + /* Add a pre-filled memory arena to the physical allocator. */ +status_t pmm_add_arena(pmm_arena_t *arena) __NONNULL((1)); + + /* Allocate count pages of physical memory, adding to the tail of the passed list. + * The list must be initialized. + * Returns the number of pages allocated. + */ +uint pmm_alloc_pages(uint count, struct list_node *list) __NONNULL((2)); + + /* Allocate a specific range of physical pages, adding to the tail of the passed list. + * The list must be initialized. + * Returns the number of pages allocated. + */ +uint pmm_alloc_range(paddr_t address, uint count, struct list_node *list) __NONNULL((3)); + + /* Free a list of physical pages. + * Returns the number of pages freed. + */ +uint pmm_free(struct list_node *list) __NONNULL((1)); + + /* Helper routine for the above. */ +uint pmm_free_page(vm_page_t *page) __NONNULL((1)); + + /* Allocate a run of pages out of the kernel area and return the pointer in kernel space. + * If the optional list is passed, append the allocate page structures to the tail of the list. + */ +void *pmm_alloc_kpages(uint count, struct list_node *list); + + /* Helper routine for pmm_alloc_kpages. */ +static inline void *pmm_alloc_kpage(void) { return pmm_alloc_kpages(1, NULL); } + +/* physical to virtual */ +void *paddr_to_kvaddr(paddr_t pa); + +/* virtual allocator */ +typedef struct vmm_aspace { + struct list_node node; + char name[32]; + + uint flags; + + vaddr_t base; + size_t size; + + struct list_node region_list; +} vmm_aspace_t; + +typedef struct vmm_region { + struct list_node node; + char name[32]; + + uint flags; + uint arch_mmu_flags; + + vaddr_t base; + size_t size; + + struct list_node page_list; +} vmm_region_t; + +#define VMM_REGION_FLAG_RESERVED 0x1 +#define VMM_REGION_FLAG_PHYSICAL 0x2 + +/* grab a handle to the kernel address space */ +extern vmm_aspace_t _kernel_aspace; +static inline vmm_aspace_t *vmm_get_kernel_aspace(void) { + return &_kernel_aspace; +} + +/* reserve a chunk of address space to prevent allocations from that space */ +status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr) + __NONNULL((1)); + +/* allocate a region of virtual space that maps a physical piece of address space. + the physical pages that back this are not allocated from the pmm. 
*/ +status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) + __NONNULL((1)); + +/* allocate a region of memory backed by newly allocated contiguous physical memory */ +status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint vmm_flags, uint arch_mmu_flags) + __NONNULL((1)); + +/* allocate a region of memory backed by newly allocated physical memory */ +status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint vmm_flags, uint arch_mmu_flags) + __NONNULL((1)); + + /* For the above region creation routines. Allocate virtual space at the passed in pointer. */ +#define VMM_FLAG_VALLOC_SPECIFIC 0x1 + +__END_CDECLS + +#endif // !ASSEMBLY diff --git a/include/lk/init.h b/include/lk/init.h index b5c8b5ec..a0ca318e 100644 --- a/include/lk/init.h +++ b/include/lk/init.h @@ -18,12 +18,13 @@ enum lk_init_level { LK_INIT_LEVEL_PLATFORM_EARLY = 0x20000, LK_INIT_LEVEL_TARGET_EARLY = 0x30000, LK_INIT_LEVEL_HEAP = 0x40000, - LK_INIT_LEVEL_KERNEL = 0x50000, - LK_INIT_LEVEL_THREADING = 0x60000, - LK_INIT_LEVEL_ARCH = 0x70000, - LK_INIT_LEVEL_PLATFORM = 0x80000, - LK_INIT_LEVEL_TARGET = 0x90000, - LK_INIT_LEVEL_APPS = 0xa0000, + LK_INIT_LEVEL_VM = 0x50000, + LK_INIT_LEVEL_KERNEL = 0x60000, + LK_INIT_LEVEL_THREADING = 0x70000, + LK_INIT_LEVEL_ARCH = 0x80000, + LK_INIT_LEVEL_PLATFORM = 0x90000, + LK_INIT_LEVEL_TARGET = 0xa0000, + LK_INIT_LEVEL_APPS = 0xb0000, LK_INIT_LEVEL_LAST = UINT_MAX, }; diff --git a/kernel/rules.mk b/kernel/rules.mk index a6819c63..1dd4ca36 100644 --- a/kernel/rules.mk +++ b/kernel/rules.mk @@ -15,6 +15,9 @@ MODULE_SRCS := \ $(LOCAL_DIR)/thread.c \ $(LOCAL_DIR)/timer.c \ $(LOCAL_DIR)/semaphore.c \ - + +ifeq ($(WITH_KERNEL_VM),1) +MODULE_DEPS += kernel/vm +endif include make/module.mk diff --git a/kernel/vm/bootalloc.c b/kernel/vm/bootalloc.c new file mode 100644 index 00000000..8847bcab --- /dev/null +++ b/kernel/vm/bootalloc.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include "vm_priv.h" + +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +/* cheezy allocator that chews up space just after the end of the kernel mapping */ + +/* track how much memory we've used */ +extern int _end; + +uintptr_t boot_alloc_start = (uintptr_t)&_end; +uintptr_t boot_alloc_end = (uintptr_t)&_end; + +void *boot_alloc_mem(size_t len) +{ + uintptr_t ptr; + + ptr = ALIGN(boot_alloc_end, 8); + boot_alloc_end = (ptr + ALIGN(len, 8)); + + LTRACEF("len %zu, ptr %p\n", len, (void *)ptr); + + return (void *)ptr; +} + diff --git a/kernel/vm/pmm.c b/kernel/vm/pmm.c new file mode 100644 index 00000000..6063639d --- /dev/null +++ b/kernel/vm/pmm.c @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include "vm_priv.h" + +#include +#include +#include +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +static struct list_node arena_list = LIST_INITIAL_VALUE(arena_list); + +#define PAGE_BELONGS_TO_ARENA(page, arena) \ + (((uintptr_t)(page) >= (uintptr_t)(arena)->page_array) && \ + ((uintptr_t)(page) < ((uintptr_t)(arena)->page_array + (arena)->size / PAGE_SIZE * sizeof(vm_page_t)))) + +#define PAGE_ADDRESS_FROM_ARENA(page, arena) \ + (paddr_t)(((uintptr_t)page - (uintptr_t)a->page_array) / sizeof(vm_page_t)) * PAGE_SIZE + a->base; + +#define ADDRESS_IN_ARENA(address, arena) \ + ((address) >= (arena)->base && (address) <= (arena)->base + (arena)->size) + +static inline bool page_is_free(const vm_page_t *page) +{ + return !(page->flags & VM_PAGE_FLAG_NONFREE); +} + +paddr_t page_to_address(const vm_page_t *page) +{ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + if (PAGE_BELONGS_TO_ARENA(page, a)) { + return PAGE_ADDRESS_FROM_ARENA(page, a); + } + } + return -1; +} + +vm_page_t *address_to_page(paddr_t addr) +{ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + if (addr >= a->base && addr <= a->base + a->size - 1) { + size_t index = (addr - a->base) / PAGE_SIZE; + return &a->page_array[index]; + } + } + return NULL; +} + +status_t pmm_add_arena(pmm_arena_t *arena) +{ + LTRACEF("arena %p name '%s' base 0x%lx size 0x%x\n", arena, arena->name, arena->base, arena->size); + + DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->base)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(arena->size)); + DEBUG_ASSERT(arena->size > 0); + + /* walk the arena list and add arena based on priority order */ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + if (a->priority > arena->priority) { + list_add_before(&a->node, &arena->node); + goto done_add; + } + } + + /* walked off the end, add it to the end of the list */ + list_add_tail(&arena_list, &arena->node); + +done_add: + + /* zero out some of the structure */ + arena->free_count = 0; + list_initialize(&arena->free_list); + + /* allocate an array of pages to back this one */ + size_t page_count = arena->size / PAGE_SIZE; + arena->page_array = boot_alloc_mem(page_count * sizeof(vm_page_t)); + + /* initialize all of the pages */ + memset(arena->page_array, 0, page_count * sizeof(vm_page_t)); + + /* add them to the free list */ + for (size_t i = 0; i < page_count; i++) { + vm_page_t *p = &arena->page_array[i]; + + list_add_tail(&arena->free_list, &p->node); + + arena->free_count++; + } + + return NO_ERROR; +} + +uint pmm_alloc_pages(uint count, struct list_node *list) +{ + LTRACEF("count %u\n", count); + + /* list must be initialized prior to calling this */ + DEBUG_ASSERT(list); + + uint allocated = 0; + if (count == 0) + return 0; + + /* walk the arenas in order, allocating as many pages as we can from each */ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + while (allocated < count) { + vm_page_t *page = list_remove_head_type(&a->free_list, vm_page_t, node); + if (!page) + goto done; + + a->free_count--; + + page->flags |= VM_PAGE_FLAG_NONFREE; + list_add_tail(list, &page->node); + + allocated++; + } + } + +done: + return allocated; +} + +uint pmm_alloc_range(paddr_t address, uint count, struct list_node *list) +{ + LTRACEF("address 0x%lx, count %u\n", address, count); + + DEBUG_ASSERT(list); + + uint allocated = 0; + if (count == 0) + return 0; + + address = ROUNDDOWN(address, PAGE_SIZE); + + /* walk through the arenas, looking to 
see if the physical page belongs to it */ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + while (allocated < count && ADDRESS_IN_ARENA(address, a)) { + size_t index = (address - a->base) / PAGE_SIZE; + + DEBUG_ASSERT(index < a->size / PAGE_SIZE); + + vm_page_t *page = &a->page_array[index]; + if (page->flags & VM_PAGE_FLAG_NONFREE) { + /* we hit an allocated page */ + break; + } + + DEBUG_ASSERT(list_in_list(&page->node)); + + list_delete(&page->node); + page->flags |= VM_PAGE_FLAG_NONFREE; + list_add_tail(list, &page->node); + + a->free_count--; + allocated++; + address += PAGE_SIZE; + } + + if (allocated == count) + break; + } + + return allocated; +} + +uint pmm_free(struct list_node *list) +{ + LTRACEF("list %p\n", list); + + DEBUG_ASSERT(list); + + uint count = 0; + while (!list_is_empty(list)) { + vm_page_t *page = list_remove_head_type(list, vm_page_t, node); + + DEBUG_ASSERT(!list_in_list(&page->node)); + DEBUG_ASSERT(page->flags & VM_PAGE_FLAG_NONFREE); + + /* see which arena this page belongs to and add it */ + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + if (PAGE_BELONGS_TO_ARENA(page, a)) { + page->flags &= ~VM_PAGE_FLAG_NONFREE; + + list_add_head(&a->free_list, &page->node); + a->free_count++; + count++; + break; + } + } + } + + return count; +} + +uint pmm_free_page(vm_page_t *page) +{ + struct list_node list; + list_initialize(&list); + + list_add_head(&list, &page->node); + + return pmm_free(&list); +} + +/* physically allocate a run from arenas marked as KMAP */ +void *pmm_alloc_kpages(uint count, struct list_node *list) +{ + LTRACEF("count %u\n", count); + + if (count == 0) + return NULL; + + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + if (a->flags & PMM_ARENA_FLAG_KMAP) { + uint find_start = -1; + for (uint i = 0; i < a->size / PAGE_SIZE; i++) { + vm_page_t *p = &a->page_array[i]; + if (p->flags & VM_PAGE_FLAG_NONFREE) { + /* page was not free, reset the start counter */ + find_start = -1; + continue; + } + + if (find_start == (uint)-1) { + /* start of a new run */ + find_start = i; + } + + if (i - find_start == count - 1) { + /* we found a run */ + LTRACEF("found run from pn %u to %u\n", find_start, i); + + /* remove the pages from the run out of the free list */ + for (uint j = find_start; j <= i; j++) { + p = &a->page_array[j]; + DEBUG_ASSERT(list_in_list(&p->node)); + + list_delete(&p->node); + p->flags |= VM_PAGE_FLAG_NONFREE; + a->free_count--; + + if (list) + list_add_tail(list, &p->node); + } + + return paddr_to_kvaddr(a->base + find_start * PAGE_SIZE); + } + } + } + } + + LTRACEF("couldn't find run\n"); + return NULL; +} + +static void dump_arena(const pmm_arena_t *arena) +{ + printf("arena %p: name '%s' base 0x%lx size 0x%x priority %u flags 0x%x\n", + arena, arena->name, arena->base, arena->size, arena->priority, arena->flags); + printf("\tpage_array %p, free_count %zu\n", + arena->page_array, arena->free_count); + + /* dump the free pages */ + printf("\tfree ranges:\n"); + ssize_t last = -1; + for (size_t i = 0; i < arena->size / PAGE_SIZE; i++) { + if (page_is_free(&arena->page_array[i])) { + if (last == -1) { + last = i; + } + } else { + if (last != -1) { + printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + i * PAGE_SIZE); + } + last = -1; + } + } + + if (last != -1) { + printf("\t\t0x%lx - 0x%lx\n", arena->base + last * PAGE_SIZE, arena->base + arena->size); + } +} + +static void dump_page(const vm_page_t *page) +{ + printf("page %p: 
address 0x%lx flags 0x%x\n", page, page_to_address(page), page->flags); +} + +static int cmd_pmm(int argc, const cmd_args *argv) +{ + if (argc < 2) { +notenoughargs: + printf("not enough arguments\n"); +usage: + printf("usage:\n"); + printf("%s arenas\n", argv[0].str); + printf("%s alloc \n", argv[0].str); + printf("%s alloc_range
\n", argv[0].str); + printf("%s alloc_kpages \n", argv[0].str); + printf("%s dump_alloced\n", argv[0].str); + printf("%s free_alloced\n", argv[0].str); + return ERR_GENERIC; + } + + static struct list_node allocated = LIST_INITIAL_VALUE(allocated); + + if (!strcmp(argv[1].str, "arenas")) { + pmm_arena_t *a; + list_for_every_entry(&arena_list, a, pmm_arena_t, node) { + dump_arena(a); + } + } else if (!strcmp(argv[1].str, "alloc")) { + if (argc < 3) goto notenoughargs; + + struct list_node list; + list_initialize(&list); + + uint count = pmm_alloc_pages(argv[2].u, &list); + printf("alloc returns %u\n", count); + + vm_page_t *p; + list_for_every_entry(&list, p, vm_page_t, node) { + printf("\tpage %p, address 0x%lx\n", p, page_to_address(p)); + } + + /* add the pages to the local allocated list */ + struct list_node *node; + while ((node = list_remove_head(&list))) { + list_add_tail(&allocated, node); + } + } else if (!strcmp(argv[1].str, "dump_alloced")) { + vm_page_t *page; + + list_for_every_entry(&allocated, page, vm_page_t, node) { + dump_page(page); + } + } else if (!strcmp(argv[1].str, "alloc_range")) { + if (argc < 4) goto notenoughargs; + + struct list_node list; + list_initialize(&list); + + uint count = pmm_alloc_range(argv[2].u, argv[3].u, &list); + printf("alloc returns %u\n", count); + + vm_page_t *p; + list_for_every_entry(&list, p, vm_page_t, node) { + printf("\tpage %p, address 0x%lx\n", p, page_to_address(p)); + } + + /* add the pages to the local allocated list */ + struct list_node *node; + while ((node = list_remove_head(&list))) { + list_add_tail(&allocated, node); + } + } else if (!strcmp(argv[1].str, "alloc_kpages")) { + if (argc < 3) goto notenoughargs; + + void *ptr = pmm_alloc_kpages(argv[2].u, NULL); + printf("pmm_alloc_kpages returns %p\n", ptr); + } else if (!strcmp(argv[1].str, "free_alloced")) { + int err = pmm_free(&allocated); + printf("pmm_free returns %d\n", err); + } else { + printf("unknown command\n"); + goto usage; + } + + return NO_ERROR; +} + +STATIC_COMMAND_START +#if LK_DEBUGLEVEL > 0 +{ "pmm", "physical memory manager", &cmd_pmm }, +#endif +STATIC_COMMAND_END(pmm); + + + + diff --git a/kernel/vm/rules.mk b/kernel/vm/rules.mk new file mode 100644 index 00000000..d511c1cc --- /dev/null +++ b/kernel/vm/rules.mk @@ -0,0 +1,11 @@ +LOCAL_DIR := $(GET_LOCAL_DIR) + +MODULE := $(LOCAL_DIR) + +MODULE_SRCS += \ + $(LOCAL_DIR)/bootalloc.c \ + $(LOCAL_DIR)/pmm.c \ + $(LOCAL_DIR)/vm.c \ + $(LOCAL_DIR)/vmm.c \ + +include make/module.mk diff --git a/kernel/vm/vm.c b/kernel/vm/vm.c new file mode 100644 index 00000000..8c18305b --- /dev/null +++ b/kernel/vm/vm.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include "vm_priv.h" + +#include +#include +#include +#include +#include +#include + +#define LOCAL_TRACE 0 + +extern int _start; +extern int _end; + +/* mark the physical pages backing a range of virtual as in use. + * allocate the physical pages and throw them away */ +static void mark_pages_in_use(vaddr_t va, size_t len) +{ + LTRACEF("va 0x%lx, len 0x%zx\n", va, len); + + struct list_node list; + list_initialize(&list); + + for (size_t offset = 0; offset < len; offset += PAGE_SIZE) { + uint flags; + paddr_t pa; + + status_t err = arch_mmu_query(va + offset, &pa, &flags); + if (err >= 0) { + //LTRACEF("va 0x%x, pa 0x%x, flags 0x%x, err %d\n", va + offset, pa, flags, err); + + /* alloate the range, throw the results away */ + pmm_alloc_range(pa, 1, &list); + } + } +} + +static void vm_init_preheap(uint level) +{ + LTRACE_ENTRY; + + /* mark all of the kernel pages in use */ + LTRACEF("marking all kernel pages as used\n"); + mark_pages_in_use((vaddr_t)&_start, ((uintptr_t)&_end - (uintptr_t)&_start)); + + /* mark the physical pages used by the boot time allocator */ + if (boot_alloc_end != boot_alloc_start) { + LTRACEF("marking boot alloc used from 0x%lx to 0x%lx\n", boot_alloc_start, boot_alloc_end); + + // XXX handle last partial page? + mark_pages_in_use(boot_alloc_start, boot_alloc_end - boot_alloc_start); + } +} + +static void vm_init_postheap(uint level) +{ + LTRACE_ENTRY; + + vmm_init(); + + /* create vmm regions to cover what is already there from the initial mapping table */ + struct mmu_initial_mapping *map = mmu_initial_mappings; + while (map->size > 0) { + if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY)) { + vmm_reserve_space(vmm_get_kernel_aspace(), map->name, map->size, map->virt); + } + + map++; + } +} + +void *paddr_to_kvaddr(paddr_t pa) +{ + /* slow path to do reverse lookup */ + struct mmu_initial_mapping *map = mmu_initial_mappings; + while (map->size > 0) { + if (!(map->flags & MMU_INITIAL_MAPPING_TEMPORARY) && + pa >= map->phys && + pa <= map->phys + map->size) { + return (void *)(map->virt + (pa - map->phys)); + } + map++; + } + return NULL; +} + +static int cmd_vm(int argc, const cmd_args *argv) +{ + if (argc < 2) { +notenoughargs: + printf("not enough arguments\n"); +usage: + printf("usage:\n"); + printf("%s phys2virt
+static int cmd_vm(int argc, const cmd_args *argv)
+{
+    if (argc < 2) {
+notenoughargs:
+        printf("not enough arguments\n");
+usage:
+        printf("usage:\n");
+        printf("%s phys2virt <address>\n", argv[0].str);
+        printf("%s virt2phys <address>\n", argv[0].str);
+        printf("%s map <phys> <virt> <count> <flags>\n", argv[0].str);
+        printf("%s unmap <virt> <count>\n", argv[0].str);
+        return ERR_GENERIC;
+    }
+
+    if (!strcmp(argv[1].str, "phys2virt")) {
+        if (argc < 3) goto notenoughargs;
+
+        void *ptr = paddr_to_kvaddr(argv[2].u);
+        printf("paddr_to_kvaddr returns %p\n", ptr);
+    } else if (!strcmp(argv[1].str, "virt2phys")) {
+        if (argc < 3) goto notenoughargs;
+
+        paddr_t pa;
+        uint flags;
+        status_t err = arch_mmu_query(argv[2].u, &pa, &flags);
+        printf("arch_mmu_query returns %d\n", err);
+        if (err >= 0) {
+            printf("\tpa 0x%lx, flags 0x%x\n", pa, flags);
+        }
+    } else if (!strcmp(argv[1].str, "map")) {
+        if (argc < 6) goto notenoughargs;
+
+        int err = arch_mmu_map(argv[3].u, argv[2].u, argv[4].u, argv[5].u);
+        printf("arch_mmu_map returns %d\n", err);
+    } else if (!strcmp(argv[1].str, "unmap")) {
+        if (argc < 4) goto notenoughargs;
+
+        int err = arch_mmu_unmap(argv[2].u, argv[3].u);
+        printf("arch_mmu_unmap returns %d\n", err);
+    } else {
+        printf("unknown command\n");
+        goto usage;
+    }
+
+    return NO_ERROR;
+}
+
+STATIC_COMMAND_START
+#if LK_DEBUGLEVEL > 0
+{ "vm", "vm commands", &cmd_vm },
+#endif
+STATIC_COMMAND_END(vm);
+
+LK_INIT_HOOK(vm_preheap, &vm_init_preheap, LK_INIT_LEVEL_HEAP - 1);
+LK_INIT_HOOK(vm, &vm_init_postheap, LK_INIT_LEVEL_VM);
+
diff --git a/kernel/vm/vm_priv.h b/kernel/vm/vm_priv.h
new file mode 100644
index 00000000..939a5bf4
--- /dev/null
+++ b/kernel/vm/vm_priv.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014 Travis Geiselbrecht
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files
+ * (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#pragma once + +#include +#include +#include + +/* simple boot time allocator */ +void *boot_alloc_mem(size_t len) __MALLOC; +extern uintptr_t boot_alloc_start; +extern uintptr_t boot_alloc_end; + +paddr_t page_to_address(const vm_page_t *page); +vm_page_t *address_to_page(paddr_t addr); + +void vmm_init(void); + diff --git a/kernel/vm/vmm.c b/kernel/vm/vmm.c new file mode 100644 index 00000000..63754b54 --- /dev/null +++ b/kernel/vm/vmm.c @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include "vm_priv.h" + +#define LOCAL_TRACE 0 + +static struct list_node aspace_list = LIST_INITIAL_VALUE(aspace_list); + +vmm_aspace_t _kernel_aspace; + +static void dump_aspace(const vmm_aspace_t *a); +static void dump_region(const vmm_region_t *r); + +void vmm_init(void) +{ + /* initialize the kernel address space */ + strlcpy(_kernel_aspace.name, "kernel", sizeof(_kernel_aspace.name)); + _kernel_aspace.base = KERNEL_BASE; + _kernel_aspace.size = ULONG_MAX - _kernel_aspace.base + 1; // XXX make smarter + list_initialize(&_kernel_aspace.region_list); + + list_add_head(&aspace_list, &_kernel_aspace.node); +} + +static inline bool is_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr) +{ + return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1); +} + +static bool is_region_inside_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) +{ + /* is the starting address within the address space*/ + if (!is_inside_aspace(aspace, vaddr)) + return false; + + if (size == 0) + return true; + + /* see if the size is enough to wrap the integer */ + if (vaddr + size - 1 < vaddr) + return false; + + /* test to see if the end address is within the address space's */ + if (vaddr + size - 1 > aspace->base + aspace->size - 1) + return false; + + return true; +} + +static size_t trim_to_aspace(const vmm_aspace_t *aspace, vaddr_t vaddr, size_t size) +{ + DEBUG_ASSERT(is_inside_aspace(aspace, vaddr)); + + if (size == 0) + return size; + + size_t offset = vaddr - aspace->base; + + //LTRACEF("vaddr 0x%lx size 0x%zx offset 0x%zx aspace base 0x%lx aspace size 0x%zx\n", + // vaddr, size, offset, aspace->base, aspace->size); + + if (offset + size < offset) + size = ULONG_MAX - offset - 1; + + //LTRACEF("size now 0x%zx\n", size); + + if (offset + size >= aspace->size - 1) + size = aspace->size - offset; + + //LTRACEF("size now 0x%zx\n", 
size); + + return size; +} + +static vmm_region_t *alloc_region_struct(const char *name, vaddr_t base, size_t size, uint flags, uint arch_mmu_flags) +{ + DEBUG_ASSERT(name); + + vmm_region_t *r = malloc(sizeof(vmm_region_t)); + if (!r) + return NULL; + + strlcpy(r->name, name, sizeof(r->name)); + r->base = base; + r->size = size; + r->flags = flags; + r->arch_mmu_flags = arch_mmu_flags; + list_initialize(&r->page_list); + + return r; +} + +/* add a region to the appropriate spot in the address space list, + * testing to see if there's a space */ +static status_t add_region_to_aspace(vmm_aspace_t *aspace, vmm_region_t *r) +{ + DEBUG_ASSERT(aspace); + DEBUG_ASSERT(r); + + LTRACEF("aspace %p base 0x%lx size 0x%zx r %p base 0x%lx size 0x%zx\n", + aspace, aspace->base, aspace->size, r, r->base, r->size); + + /* only try if the region will at least fit in the address space */ + if (r->size == 0 || !is_region_inside_aspace(aspace, r->base, r->size)) { + LTRACEF("region was out of range\n"); + return ERR_OUT_OF_RANGE; + } + + vaddr_t r_end = r->base + r->size - 1; + + /* does it fit in front */ + vmm_region_t *last; + last = list_peek_head_type(&aspace->region_list, vmm_region_t, node); + if (!last || r_end < last->base) { + /* empty list or not empty and fits before the first element */ + list_add_head(&aspace->region_list, &r->node); + return NO_ERROR; + } + + /* walk the list, finding the right spot to put it */ + list_for_every_entry(&aspace->region_list, last, vmm_region_t, node) { + /* does it go after last? */ + if (r->base > last->base + last->size - 1) { + /* get the next element in the list */ + vmm_region_t *next = list_next_type(&aspace->region_list, &last->node, vmm_region_t, node); + if (!next || (r_end < next->base)) { + /* end of the list or next exists and it goes between them */ + list_add_after(&last->node, &r->node); + return NO_ERROR; + } + } + } + + LTRACEF("couldn't find spot\n"); + return ERR_NO_MEMORY; +} + +static vaddr_t alloc_spot(vmm_aspace_t *aspace, size_t size, struct list_node **before) +{ + DEBUG_ASSERT(aspace); + DEBUG_ASSERT(size > 0 && IS_PAGE_ALIGNED(size)); + + /* does it fit before the first element? */ + vmm_region_t *r; + r = list_peek_head_type(&aspace->region_list, vmm_region_t, node); + if (r) { + DEBUG_ASSERT(r->base >= aspace->base); + if (r->base - aspace->base >= size) { + if (before) + *before = &aspace->region_list; + return aspace->base; + } + } else { + /* nothing is in the list, does it fit in the aspace? */ + if (aspace->size >= size) { + if (before) + *before = &aspace->region_list; + return aspace->base; + } + } + + /* search the middle of the list */ + list_for_every_entry(&aspace->region_list, r, vmm_region_t, node) { + /* get the next element in the list */ + vmm_region_t *next = list_next_type(&aspace->region_list, &r->node, vmm_region_t, node); + + if (next) { + DEBUG_ASSERT(next->base >= r->base + r->size); + + /* see if it'll fit between the current item and the next */ + if (next->base - (r->base + r->size) >= size) { + /* it'll fit here */ + if (before) + *before = &r->node; + return r->base + r->size; + } + } else { + /* we're at the end of the list, will it fit between us and the end of the aspace? 
*/ + if ((aspace->base + aspace->size) - (r->base + r->size) >= size) { + /* it'll fit here */ + if (before) + *before = &r->node; + return r->base + r->size; + } + } + } + + /* couldn't find anything */ + return -1; +} + +/* allocate a region structure and stick it in the address space */ +static vmm_region_t *alloc_region(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr, uint vmm_flags, uint region_flags, uint arch_mmu_flags) +{ + /* make a region struct for it and stick it in the list */ + vmm_region_t *r = alloc_region_struct(name, vaddr, size, region_flags, arch_mmu_flags); + if (!r) + return NULL; + + /* if they ask us for a specific spot, put it there */ + if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) { + /* stick it in the list, checking to see if it fits */ + if (add_region_to_aspace(aspace, r) < 0) { + /* didn't fit */ + free(r); + return NULL; + } + } else { + /* allocate a virtual slot for it */ + struct list_node *before = NULL; + vaddr = alloc_spot(aspace, size, &before); + LTRACEF("alloc_spot returns 0x%lx, before %p\n", vaddr, before); + + if (vaddr == (vaddr_t)-1) { + LTRACEF("failed to find spot\n"); + free(r); + return NULL; + } + + DEBUG_ASSERT(before != NULL); + + r->base = (vaddr_t)vaddr; + + /* add it to the region list */ + list_add_after(before, &r->node); + } + + return r; +} + +status_t vmm_reserve_space(vmm_aspace_t *aspace, const char *name, size_t size, vaddr_t vaddr) +{ + LTRACEF("aspace %p name '%s' size 0x%zx vaddr 0x%lx\n", aspace, name, size, vaddr); + + DEBUG_ASSERT(aspace); + DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(size)); + + if (!name) + name = ""; + + if (!aspace) + return ERR_INVALID_ARGS; + if (size == 0) + return NO_ERROR; + if (!IS_PAGE_ALIGNED(vaddr) || !IS_PAGE_ALIGNED(size)) + return ERR_INVALID_ARGS; + + if (!is_inside_aspace(aspace, vaddr)) + return ERR_OUT_OF_RANGE; + + /* trim the size */ + size = trim_to_aspace(aspace, vaddr, size); + + /* lookup how it's already mapped */ + uint arch_mmu_flags = 0; + arch_mmu_query(vaddr, NULL, &arch_mmu_flags); + + /* build a new region structure */ + vmm_region_t *r = alloc_region(aspace, name, size, vaddr, VMM_FLAG_VALLOC_SPECIFIC, VMM_REGION_FLAG_RESERVED, arch_mmu_flags); + if (!r) + return ERR_NO_MEMORY; + + return NO_ERROR; +} + +status_t vmm_alloc_physical(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, paddr_t paddr, uint vmm_flags, uint arch_mmu_flags) +{ + LTRACEF("aspace %p name '%s' size 0x%zx ptr %p paddr 0x%lx vmm_flags 0x%x arch_mmu_flags 0x%x\n", + aspace, name, size, ptr ? 
*ptr : 0, paddr, vmm_flags, arch_mmu_flags); + + DEBUG_ASSERT(aspace); + DEBUG_ASSERT(IS_PAGE_ALIGNED(paddr)); + DEBUG_ASSERT(IS_PAGE_ALIGNED(size)); + + if (!name) + name = ""; + + if (!aspace) + return ERR_INVALID_ARGS; + if (size == 0) + return NO_ERROR; + if (!IS_PAGE_ALIGNED(paddr) || !IS_PAGE_ALIGNED(size)) + return ERR_INVALID_ARGS; + + vaddr_t vaddr = 0; + + /* if they're asking for a specific spot, copy the address */ + if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) { + /* can't ask for a specific spot and then not provide one */ + if (!ptr) { + return ERR_INVALID_ARGS; + } + vaddr = (vaddr_t)*ptr; + } + + /* allocate a region and put it in the aspace list */ + vmm_region_t *r = alloc_region(aspace, name, size, vaddr, vmm_flags, VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags); + if (!r) + return ERR_NO_MEMORY; + + /* return the vaddr if requested */ + if (ptr) + *ptr = (void *)r->base; + + /* map all of the pages */ + int err = arch_mmu_map(r->base, paddr, size / PAGE_SIZE, arch_mmu_flags); + LTRACEF("arch_mmu_map returns %d\n", err); + + return NO_ERROR; +} + +status_t vmm_alloc_contiguous(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint vmm_flags, uint arch_mmu_flags) +{ + status_t err = NO_ERROR; + + LTRACEF("aspace %p name '%s' size 0x%zx ptr %p vmm_flags 0x%x arch_mmu_flags 0x%x\n", + aspace, name, size, ptr ? *ptr : 0, vmm_flags, arch_mmu_flags); + + DEBUG_ASSERT(aspace); + + size = ROUNDUP(size, PAGE_SIZE); + if (size == 0) + return ERR_INVALID_ARGS; + + if (!name) + name = ""; + + vaddr_t vaddr = 0; + + /* if they're asking for a specific spot, copy the address */ + if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) { + /* can't ask for a specific spot and then not provide one */ + if (!ptr) { + err = ERR_INVALID_ARGS; + goto err; + } + vaddr = (vaddr_t)*ptr; + } + + /* allocate physical memory up front, in case it cant be satisfied */ + struct list_node page_list; + list_initialize(&page_list); + + paddr_t pa = 0; + /* allocate a run of physical pages */ + void *kvptr = pmm_alloc_kpages(size / PAGE_SIZE, &page_list); + if (!kvptr) { + err = ERR_NO_MEMORY; + goto err; + } + + err = arch_mmu_query((vaddr_t)kvptr, &pa, NULL); + DEBUG_ASSERT(err >= 0); + + /* allocate a region and put it in the aspace list */ + vmm_region_t *r = alloc_region(aspace, name, size, vaddr, vmm_flags, VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags); + if (!r) { + err = ERR_NO_MEMORY; + goto err1; + } + + /* return the vaddr if requested */ + if (ptr) + *ptr = (void *)r->base; + + /* map all of the pages */ + arch_mmu_map(r->base, pa, size / PAGE_SIZE, arch_mmu_flags); + // XXX deal with error mapping here + + vm_page_t *p; + while ((p = list_remove_head_type(&page_list, vm_page_t, node))) { + list_add_tail(&r->page_list, &p->node); + } + + return NO_ERROR; + +err1: + pmm_free(&page_list); +err: + return err; +} + +status_t vmm_alloc(vmm_aspace_t *aspace, const char *name, size_t size, void **ptr, uint vmm_flags, uint arch_mmu_flags) +{ + status_t err = NO_ERROR; + + LTRACEF("aspace %p name '%s' size 0x%zx ptr %p vmm_flags 0x%x arch_mmu_flags 0x%x\n", + aspace, name, size, ptr ? 
*ptr : 0, vmm_flags, arch_mmu_flags);
+
+    DEBUG_ASSERT(aspace);
+
+    size = ROUNDUP(size, PAGE_SIZE);
+    if (size == 0)
+        return ERR_INVALID_ARGS;
+
+    if (!name)
+        name = "";
+
+    vaddr_t vaddr = 0;
+
+    /* if they're asking for a specific spot, copy the address */
+    if (vmm_flags & VMM_FLAG_VALLOC_SPECIFIC) {
+        /* can't ask for a specific spot and then not provide one */
+        if (!ptr) {
+            err = ERR_INVALID_ARGS;
+            goto err;
+        }
+        vaddr = (vaddr_t)*ptr;
+    }
+
+    /* allocate physical memory up front, in case it can't be satisfied */
+
+    /* allocate a random pile of pages */
+    struct list_node page_list;
+    list_initialize(&page_list);
+
+    uint count = pmm_alloc_pages(size / PAGE_SIZE, &page_list);
+    DEBUG_ASSERT(count <= size / PAGE_SIZE);
+    if (count < size / PAGE_SIZE) {
+        LTRACEF("failed to allocate enough pages (asked for %zu, got %u)\n", size / PAGE_SIZE, count);
+        err = ERR_NO_MEMORY;
+        goto err1;
+    }
+
+    /* allocate a region and put it in the aspace list */
+    vmm_region_t *r = alloc_region(aspace, name, size, vaddr, vmm_flags, VMM_REGION_FLAG_PHYSICAL, arch_mmu_flags);
+    if (!r) {
+        err = ERR_NO_MEMORY;
+        goto err1;
+    }
+
+    /* return the vaddr if requested */
+    if (ptr)
+        *ptr = (void *)r->base;
+
+    /* map all of the pages */
+    /* XXX use smarter algorithm that tries to build runs */
+    vm_page_t *p;
+    vaddr_t va = r->base;
+    DEBUG_ASSERT(IS_PAGE_ALIGNED(va));
+    while ((p = list_remove_head_type(&page_list, vm_page_t, node))) {
+        DEBUG_ASSERT(va < r->base + r->size);
+
+        paddr_t pa = page_to_address(p);
+        DEBUG_ASSERT(IS_PAGE_ALIGNED(pa));
+
+        arch_mmu_map(va, pa, 1, arch_mmu_flags);
+        // XXX deal with error mapping here
+
+        list_add_tail(&r->page_list, &p->node);
+
+        va += PAGE_SIZE;
+    }
+
+    return NO_ERROR;
+
+err1:
+    pmm_free(&page_list);
+err:
+    return err;
+}
+
+static void dump_region(const vmm_region_t *r)
+{
+    printf("\tregion %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x mmu_flags 0x%x\n",
+            r, r->name, r->base, r->base + r->size - 1, r->size, r->flags, r->arch_mmu_flags);
+}
+
+static void dump_aspace(const vmm_aspace_t *a)
+{
+    printf("aspace %p: name '%s' range 0x%lx - 0x%lx size 0x%zx flags 0x%x\n",
+            a, a->name, a->base, a->base + a->size - 1, a->size, a->flags);
+
+    printf("regions:\n");
+    vmm_region_t *r;
+    list_for_every_entry(&a->region_list, r, vmm_region_t, node) {
+        dump_region(r);
+    }
+}
+
+static int cmd_vmm(int argc, const cmd_args *argv)
+{
+    if (argc < 2) {
+notenoughargs:
+        printf("not enough arguments\n");
+usage:
+        printf("usage:\n");
+        printf("%s aspaces\n", argv[0].str);
+        printf("%s alloc <size>\n", argv[0].str);
+        printf("%s alloc_physical <paddr> <size>\n", argv[0].str);
+        printf("%s alloc_contig <size>\n", argv[0].str);
+        return ERR_GENERIC;
+    }
+
+    if (!strcmp(argv[1].str, "aspaces")) {
+        vmm_aspace_t *a;
+        list_for_every_entry(&aspace_list, a, vmm_aspace_t, node) {
+            dump_aspace(a);
+        }
+    } else if (!strcmp(argv[1].str, "alloc")) {
+        if (argc < 3) goto notenoughargs;
+
+        void *ptr = (void *)0x99;
+        status_t err = vmm_alloc(vmm_get_kernel_aspace(), "alloc test", argv[2].u, &ptr, 0, 0);
+        printf("vmm_alloc returns %d, ptr %p\n", err, ptr);
+    } else if (!strcmp(argv[1].str, "alloc_physical")) {
+        if (argc < 4) goto notenoughargs;
+
+        void *ptr = (void *)0x99;
+        status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "physical test", argv[3].u, &ptr, argv[2].u, 0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
+        printf("vmm_alloc_physical returns %d, ptr %p\n", err, ptr);
+    } else if (!strcmp(argv[1].str, "alloc_contig")) {
+        if (argc < 3) goto notenoughargs;
+
+        void *ptr = (void *)0x99;
+        status_t 
err = vmm_alloc_contiguous(vmm_get_kernel_aspace(), "contig test", argv[2].u, &ptr, 0, 0); + printf("vmm_alloc_contig returns %d, ptr %p\n", err, ptr); + } else { + printf("unknown command\n"); + goto usage; + } + + return NO_ERROR; +} + +STATIC_COMMAND_START +#if LK_DEBUGLEVEL > 0 +{ "vmm", "virtual memory manager", &cmd_vmm }, +#endif +STATIC_COMMAND_END(vmm); + diff --git a/lib/heap/heap.c b/lib/heap/heap.c index f6ffc5d5..604b4f34 100644 --- a/lib/heap/heap.c +++ b/lib/heap/heap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009,2012 Travis Geiselbrecht + * Copyright (c) 2008-2009,2012,2014 Travis Geiselbrecht * Copyright (c) 2009 Corey Tabaka * * Permission is hereby granted, free of charge, to any person obtaining @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -41,25 +42,36 @@ #define PADDING_FILL 0x55 #define PADDING_SIZE 64 -#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1)) - #define HEAP_MAGIC 'HEAP' -#if WITH_STATIC_HEAP +#if WITH_KERNEL_VM + +#include +/* we will use kalloc routines to back our heap */ +#if !defined(HEAP_GROW_SIZE) +#define HEAP_GROW_SIZE (4 * 1024 * 1024) /* size the heap grows by when it runs out of memory */ +#endif + +STATIC_ASSERT(IS_PAGE_ALIGNED(HEAP_GROW_SIZE)); + +#elif WITH_STATIC_HEAP #if !defined(HEAP_START) || !defined(HEAP_LEN) #error WITH_STATIC_HEAP set but no HEAP_START or HEAP_LEN defined #endif #else -// end of the binary +/* not a static vm, not using the kernel vm */ extern int _end; +extern int _end_of_ram; -// end of memory -extern void *_heap_end; +/* default to using up the rest of memory after the kernel ends */ +/* may be modified by other parts of the system */ +uintptr_t _heap_start = (uintptr_t)&_end; +uintptr_t _heap_end = (uintptr_t)&_end_of_ram; -#define HEAP_START ((uintptr_t)&_end) -#define HEAP_LEN ((uintptr_t)_heap_end - (uintptr_t)&_end) +#define HEAP_START ((uintptr_t)_heap_start) +#define HEAP_LEN ((uintptr_t)_heap_end - HEAP_START) #endif struct free_heap_chunk { @@ -93,6 +105,8 @@ struct alloc_struct_begin { #endif }; +static ssize_t heap_grow(size_t len); + static void dump_free_chunk(struct free_heap_chunk *chunk) { dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len); @@ -178,7 +192,7 @@ static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *ch vaddr_t chunk_end = (vaddr_t)chunk + chunk->len; #endif -// dprintf("%s: chunk ptr %p, size 0x%lx, chunk_end 0x%x\n", __FUNCTION__, chunk, chunk->len, chunk_end); + LTRACEF("chunk ptr %p, size 0x%zx\n", chunk, chunk->len); struct free_heap_chunk *next_chunk; struct free_heap_chunk *last_chunk; @@ -311,6 +325,10 @@ void *heap_alloc(size_t size, unsigned int alignment) size += alignment; } +#if WITH_KERNEL_VM + int retry_count = 0; +retry: +#endif mutex_acquire(&theheap.lock); // walk through the list @@ -382,6 +400,19 @@ void *heap_alloc(size_t size, unsigned int alignment) mutex_release(&theheap.lock); +#if WITH_KERNEL_VM + /* try to grow the heap if we can */ + if (ptr == NULL && retry_count == 0) { + size_t growby = MAX(HEAP_GROW_SIZE, ROUNDUP(size, PAGE_SIZE)); + + ssize_t err = heap_grow(growby); + if (err >= 0) { + retry_count++; + goto retry; + } + } +#endif + LTRACEF("returning ptr %p\n", ptr); return ptr; @@ -468,20 +499,40 @@ void heap_get_stats(struct heap_stats *ptr) ptr->heap_low_watermark = theheap.low_watermark; mutex_release(&theheap.lock); +} +static ssize_t heap_grow(size_t size) +{ +#if WITH_KERNEL_VM + size = ROUNDUP(size, PAGE_SIZE); + + void *ptr = 
pmm_alloc_kpages(size / PAGE_SIZE, NULL);
+    if (!ptr)
+        return ERR_NO_MEMORY;
+
+    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
+
+    heap_insert_free_chunk(heap_create_free_chunk(ptr, size, true));
+
+    /* change the heap start and end variables */
+    if ((uintptr_t)ptr < (uintptr_t)theheap.base)
+        theheap.base = ptr;
+
+    uintptr_t endptr = (uintptr_t)ptr + size;
+    if (endptr > (uintptr_t)theheap.base + theheap.len) {
+        theheap.len = (uintptr_t)endptr - (uintptr_t)theheap.base;
+    }
+
+    return size;
+#else
+    return ERR_NO_MEMORY;
+#endif
 }
 
 void heap_init(void)
 {
     LTRACE_ENTRY;
 
-    // set the heap range
-    theheap.base = (void *)HEAP_START;
-    theheap.len = HEAP_LEN;
-    theheap.remaining =0; // will get set by heap_insert_free_chunk()
-    theheap.low_watermark = theheap.len;
-    LTRACEF("base %p size %zd bytes\n", theheap.base, theheap.len);
-
     // create a mutex
     mutex_init(&theheap.lock);
@@ -491,14 +542,24 @@ void heap_init(void)
     // initialize the delayed free list
     list_initialize(&theheap.delayed_free_list);
 
+    // set the heap range
+#if WITH_KERNEL_VM
+    theheap.base = pmm_alloc_kpages(HEAP_GROW_SIZE / PAGE_SIZE, NULL);
+    theheap.len = HEAP_GROW_SIZE;
+
+    if (theheap.base == 0) {
+        panic("HEAP: error allocating initial heap size\n");
+    }
+#else
+    theheap.base = (void *)HEAP_START;
+    theheap.len = HEAP_LEN;
+#endif
+    theheap.remaining = 0; // will get set by heap_insert_free_chunk()
+    theheap.low_watermark = theheap.len;
+    LTRACEF("base %p size %zd bytes\n", theheap.base, theheap.len);
+
     // create an initial free chunk
     heap_insert_free_chunk(heap_create_free_chunk(theheap.base, theheap.len, false));
-
-    // dump heap info
-//  heap_dump();
-
-//  dprintf(INFO, "running heap tests\n");
-//  heap_test();
 }
 
 /* add a new block of memory to the heap */
@@ -521,15 +582,30 @@ STATIC_COMMAND_END(heap);
 static int cmd_heap(int argc, const cmd_args *argv)
 {
     if (argc < 2) {
+notenoughargs:
         printf("not enough arguments\n");
+usage:
+        printf("usage:\n");
+        printf("\t%s info\n", argv[0].str);
+        printf("\t%s alloc <size> [alignment]\n", argv[0].str);
+        printf("\t%s free <address>
\n", argv[0].str); return -1; } if (strcmp(argv[1].str, "info") == 0) { heap_dump(); + } else if (strcmp(argv[1].str, "alloc") == 0) { + if (argc < 3) goto notenoughargs; + + void *ptr = heap_alloc(argv[2].u, (argc >= 3) ? argv[3].u : 0); + printf("heap_alloc returns %p\n", ptr); + } else if (strcmp(argv[1].str, "free") == 0) { + if (argc < 2) goto notenoughargs; + + heap_free((void *)argv[2].u); } else { printf("unrecognized command\n"); - return -1; + goto usage; } return 0; diff --git a/platform/pc/platform.c b/platform/pc/platform.c index bfbb8f68..aad9717d 100644 --- a/platform/pc/platform.c +++ b/platform/pc/platform.c @@ -33,7 +33,7 @@ #include extern multiboot_info_t *_multiboot_info; -extern unsigned int _heap_end; +extern uintptr_t _heap_end; void platform_init_mmu_mappings(void) { diff --git a/platform/vexpress-a9/include/platform/gic.h b/platform/vexpress-a9/include/platform/gic.h index 16013cfc..4cbf42b8 100644 --- a/platform/vexpress-a9/include/platform/gic.h +++ b/platform/vexpress-a9/include/platform/gic.h @@ -24,7 +24,7 @@ #include -#define GICBASE(n) (CPUPRIV_BASE) +#define GICBASE(n) (CPUPRIV_BASE_VIRT) #define GICC_OFFSET (0x0100) #define GICD_OFFSET (0x1000) diff --git a/platform/vexpress-a9/include/platform/vexpress-a9.h b/platform/vexpress-a9/include/platform/vexpress-a9.h index c3195175..215121f6 100644 --- a/platform/vexpress-a9/include/platform/vexpress-a9.h +++ b/platform/vexpress-a9/include/platform/vexpress-a9.h @@ -23,23 +23,38 @@ #pragma once /* memory map of the motherboard */ -#define MOTHERBOARD_CS0 (0x40000000) -#define MOTHERBOARD_CS1 (0x44000000) -#define MOTHERBOARD_CS2 (0x48000000) -#define MOTHERBOARD_CS3 (0x4c000000) -#define MOTHERBOARD_CS4 (0x50000000) -#define MOTHERBOARD_CS5 (0x54000000) -#define MOTHERBOARD_CS6 (0x58000000) -#define MOTHERBOARD_CS7 (0x10000000) +#define MOTHERBOARD_CS0_PHYS (0x40000000) +#define MOTHERBOARD_CS1_PHYS (0x44000000) +#define MOTHERBOARD_CS2_PHYS (0x48000000) +#define MOTHERBOARD_CS3_PHYS (0x4c000000) +#define MOTHERBOARD_CS4_PHYS (0x50000000) +#define MOTHERBOARD_CS5_PHYS (0x54000000) +#define MOTHERBOARD_CS6_PHYS (0x58000000) +#define MOTHERBOARD_CS7_PHYS (0x10000000) +#define MOTHERBOARD_CS_SIZE (0x04000000) + +#define MOTHERBOARD_CS0_VIRT (0xe0000000) +#define MOTHERBOARD_CS1_VIRT (0xe4000000) +#define MOTHERBOARD_CS2_VIRT (0xe8000000) +#define MOTHERBOARD_CS3_VIRT (0xec000000) +#define MOTHERBOARD_CS4_VIRT (0xf0000000) +#define MOTHERBOARD_CS5_VIRT (0xf4000000) +#define MOTHERBOARD_CS6_VIRT (0xf8000000) +#define MOTHERBOARD_CS7_VIRT (0xfc000000) + +#define SDRAM_BASE (0x60000000) +#define SDRAM_APERTURE_SIZE (0x40000000) /* most of the peripherals live on the motherboard CS7 */ -#define UART0_BASE (MOTHERBOARD_CS7 + 0x9000) -#define UART1_BASE (MOTHERBOARD_CS7 + 0xa000) -#define UART2_BASE (MOTHERBOARD_CS7 + 0xb000) -#define UART3_BASE (MOTHERBOARD_CS7 + 0xc000) -#define VIRTIO_BASE (MOTHERBOARD_CS7 + 0x13000) +#define UART0_BASE (MOTHERBOARD_CS7_VIRT + 0x9000) +#define UART1_BASE (MOTHERBOARD_CS7_VIRT + 0xa000) +#define UART2_BASE (MOTHERBOARD_CS7_VIRT + 0xb000) +#define UART3_BASE (MOTHERBOARD_CS7_VIRT + 0xc000) +#define VIRTIO_BASE (MOTHERBOARD_CS7_VIRT + 0x13000) -#define CPUPRIV_BASE (0x1e000000) +#define CPUPRIV_SIZE (0x00100000) +#define CPUPRIV_BASE_PHYS (0x1e000000) +#define CPUPRIV_BASE_VIRT (MOTHERBOARD_CS0_VIRT - CPUPRIV_SIZE) /* interrupts */ #define ARM_GENERIC_TIMER_INT 29 diff --git a/platform/vexpress-a9/platform.c b/platform/vexpress-a9/platform.c index 88018a0f..72d45bd6 100644 --- 
a/platform/vexpress-a9/platform.c +++ b/platform/vexpress-a9/platform.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Travis Geiselbrecht + * Copyright (c) 2012-2014 Travis Geiselbrecht * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files @@ -30,12 +30,62 @@ #include #include #include +#include #include #include #include #include #include "platform_p.h" +#define SDRAM_SIZE (512*1024*1024) // XXX get this from the emulator somehow + +/* initial memory mappings. parsed by start.S */ +struct mmu_initial_mapping mmu_initial_mappings[] = { + /* 1GB of sdram space */ + { .phys = SDRAM_BASE, + .virt = KERNEL_BASE, + .size = SDRAM_SIZE, + .flags = 0, + .name = "memory" }, + + /* CS0 - CS6 devices */ + { .phys = MOTHERBOARD_CS0_PHYS, + .virt = MOTHERBOARD_CS0_VIRT, + .size = MOTHERBOARD_CS_SIZE * 7, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "cs0-cs6" }, + + /* CS7 devices */ + { .phys = MOTHERBOARD_CS7_PHYS, + .virt = MOTHERBOARD_CS7_VIRT, + .size = MOTHERBOARD_CS_SIZE, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "cs7" }, + + /* cortex-a9 private memory area */ + { .phys = CPUPRIV_BASE_PHYS, + .virt = CPUPRIV_BASE_VIRT, + .size = CPUPRIV_SIZE, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "cpu_priv"}, + + /* identity map to let the boot code run */ + { .phys = SDRAM_BASE, + .virt = SDRAM_BASE, + .size = 16*1024*1024, + .flags = MMU_INITIAL_MAPPING_TEMPORARY }, + + /* null entry to terminate the list */ + { 0 } +}; + +static pmm_arena_t arena = { + .name = "sdram", + .base = SDRAM_BASE, + .size = SDRAM_SIZE, + .flags = PMM_ARENA_FLAG_KMAP, +}; + void platform_init_mmu_mappings(void) { } @@ -46,9 +96,12 @@ void platform_early_init(void) arm_gic_init(); /* initialize the timer block */ - arm_cortex_a9_timer_init(CPUPRIV_BASE, 100000000); + arm_cortex_a9_timer_init(CPUPRIV_BASE_VIRT, 100000000); uart_init_early(); + + /* add the main memory arena */ + pmm_add_arena(&arena); } void platform_init(void) diff --git a/platform/zynq/include/platform/zynq.h b/platform/zynq/include/platform/zynq.h index 2b1516be..b56c5129 100644 --- a/platform/zynq/include/platform/zynq.h +++ b/platform/zynq/include/platform/zynq.h @@ -22,14 +22,15 @@ */ #pragma once +#ifndef ASSEMBLY #include -#include -#include +#endif /* memory addresses */ -#define SDRAM_BASE (0) -#define SDRAM_APERTURE_SIZE (0x40000000) -#define SRAM_BASE (0xfffc0000) +/* assumes sram is mapped at 0 the first MB of sdram is covered by it */ +#define SDRAM_BASE (0x00100000) +#define SDRAM_APERTURE_SIZE (0x3ff00000) +#define SRAM_BASE (0x0) #define SRAM_APERTURE_SIZE (0x00040000) /* hardware base addresses */ @@ -104,7 +105,9 @@ #define SCL 0x00000000 #define SLCR_LOCK 0x00000004 +#define SLCR_LOCK_KEY 0x767b #define SLCR_UNLOCK 0x00000008 +#define SLCR_UNLOCK_KEY 0xdf0d #define SLCR_LOCKSTA 0x0000000c #define ARM_PLL_CTRL 0x00000100 #define DDR_PLL_CTRL 0x00000104 @@ -265,6 +268,11 @@ #define DDRIOB_DCI_CTRL 0x00000B70 #define DDRIOB_DCI_STATU 0x00000B74 +#ifndef ASSEMBLY + +#include +#include + static inline void zynq_slcr_unlock(void) { SLCR_REG(SLCR_UNLOCK) = 0xdf0d; } static inline void zynq_slcr_lock(void) { SLCR_REG(SLCR_LOCK) = 0x767b; } @@ -306,4 +314,5 @@ enum zynq_periph { status_t zynq_set_clock(enum zynq_periph, bool enable, enum zynq_clock_source, uint32_t divisor, uint32_t divisor2); uint32_t zynq_get_clock(enum zynq_periph); +#endif diff --git a/platform/zynq/platform.c b/platform/zynq/platform.c index de51a158..526ecef3 100644 --- 
a/platform/zynq/platform.c +++ b/platform/zynq/platform.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -34,14 +35,75 @@ /* target can specify this as the initial jam table to set up the soc */ __WEAK void ps7_init(void) { } +STATIC_ASSERT(IS_ALIGNED(SDRAM_BASE, MB)); +STATIC_ASSERT(IS_ALIGNED(SDRAM_SIZE, MB)); + +/* initial memory mappings. parsed by start.S */ +struct mmu_initial_mapping mmu_initial_mappings[] = { + /* 1GB of sram + sdram space */ + { .phys = SRAM_BASE, + .virt = KERNEL_BASE, + .size = MB + SDRAM_SIZE - MB, + .flags = 0, + .name = "memory" }, + + /* 0xe0000000 hardware devices */ + { .phys = 0xe0000000, + .virt = 0xe0000000, + .size = 0x00300000, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "hw" }, + + /* 0xe1000000 hardware devices */ + { .phys = 0xe1000000, + .virt = 0xe1000000, + .size = 0x05000000, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "hw" }, + + /* 0xf8000000 hardware devices */ + { .phys = 0xf8000000, + .virt = 0xf8000000, + .size = 0x01000000, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "hw" }, + + /* 0xfc000000 hardware devices */ + { .phys = 0xfc000000, + .virt = 0xfc000000, + .size = 0x02000000, + .flags = MMU_INITIAL_MAPPING_FLAG_DEVICE, + .name = "hw" }, + + /* identity map to let the boot code run */ + { .phys = SRAM_BASE, + .virt = SRAM_BASE, + .size = MB, + .flags = MMU_INITIAL_MAPPING_TEMPORARY }, + + /* null entry to terminate the list */ + { 0 } +}; + +#if SDRAM_SIZE != 0 +static pmm_arena_t sdram_arena = { + .name = "sdram", + .base = SDRAM_BASE, + .size = SDRAM_SIZE - MB, + .flags = PMM_ARENA_FLAG_KMAP +}; +#endif + +static pmm_arena_t sram_arena = { + .name = "sram", + .base = SRAM_BASE, + .size = MEMSIZE, + .priority = 1, + .flags = PMM_ARENA_FLAG_KMAP +}; + void platform_init_mmu_mappings(void) { -#define MB (1024*1024) - - /* map dram as full cacheable */ - for (addr_t a = SDRAM_BASE; a < (SDRAM_BASE + SDRAM_APERTURE_SIZE); a += MB) { - arm_mmu_map_section(a, a, MMU_MEMORY_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE | MMU_MEMORY_L1_AP_P_RW_U_NA); - } } void platform_early_init(void) @@ -59,6 +121,21 @@ void platform_early_init(void) /* initialize the timer block */ arm_cortex_a9_timer_init(CPUPRIV_BASE, zynq_get_arm_timer_freq()); + + /* add the main memory arena */ +#if SDRAM_SIZE != 0 + /* since we have a discontinuity between the end of SRAM (256K) and the start of SDRAM (1MB), + * intentionally bump the boot-time allocator to start in the base of SDRAM. 
+ */ + extern uintptr_t boot_alloc_start; + extern uintptr_t boot_alloc_end; + + boot_alloc_start = KERNEL_BASE + MB; + boot_alloc_end = KERNEL_BASE + MB; + + pmm_add_arena(&sdram_arena); +#endif + pmm_add_arena(&sram_arena); } void platform_init(void) diff --git a/platform/zynq/rules.mk b/platform/zynq/rules.mk index 0cae8c05..31b1514b 100644 --- a/platform/zynq/rules.mk +++ b/platform/zynq/rules.mk @@ -22,19 +22,26 @@ MODULE_SRCS += \ $(LOCAL_DIR)/platform.c \ $(LOCAL_DIR)/qspi.c \ $(LOCAL_DIR)/spiflash.c \ + $(LOCAL_DIR)/start.S \ $(LOCAL_DIR)/uart.c \ +# default to no sdram unless the target calls it out +ZYNQ_SDRAM_SIZE ?= 0 + ifeq ($(ZYNQ_USE_SRAM),1) MEMBASE := 0x0 -MEMSIZE ?= 0x40000 # 256KB +MEMSIZE := 0x30000 # 3 * 64K else -MEMBASE := 0x0 -MEMSIZE ?= 0x10000000 # 256MB +# XXX untested path +MEMBASE := 0x00000000 +MEMSIZE ?= $(ZYNQ_SDRAM_SIZE) # 256MB +#KERNEL_LOAD_OFFSET := 0x00100000 # loaded 1MB into physical space endif GLOBAL_DEFINES += \ MEMBASE=$(MEMBASE) \ - MEMSIZE=$(MEMSIZE) + MEMSIZE=$(MEMSIZE) \ + SDRAM_SIZE=$(ZYNQ_SDRAM_SIZE) LINKER_SCRIPT += \ $(BUILDDIR)/system-onesegment.ld diff --git a/platform/zynq/start.S b/platform/zynq/start.S new file mode 100644 index 00000000..895fe2b0 --- /dev/null +++ b/platform/zynq/start.S @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2014 Travis Geiselbrecht + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files + * (the "Software"), to deal in the Software without restriction, + * including without limitation the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include +#include + +/* this code attempts to remap sram to 0xfffc0000 - 0xffffffff and + branch the cpu into the equivalent spot. Assumes the cpu is running + at the initial 0 based mapping */ + +/* disabled for now */ +#if 0 + +/* a spot of the top bank of OCM memory for us to run our code from + needs to be below where the second cpu is running (0xffffe00-0xfffffff0) */ +#define TARGET_SPOT 0xfffff800 + +/* first piece of code run out of the reset vector. 
use + to relocate sram to the final location at 0xfffc0000 + and switch to there */ +FUNCTION(platform_reset) + /* relocate the below code to TARGET_SPOT */ + ldr r8, =TARGET_SPOT + adr r9, .Lcore_reloc_start + adr r10, .Lcore_reloc_end + +0: + ldr r12, [r9], #4 + str r12, [r8], #4 + cmp r9, r10 + bne 0b + + /* load constants we will need below */ + ldr r8, =SLCR_BASE + ldr r9, =SCU_CONTROL_BASE + + /* calculate the new return address this code will need to branch to */ + adr r12, .Ldone + add r12, #0xfffc0000 + + ldr r10, =TARGET_SPOT + bx r10 + +.Ldone: + b arm_reset + +.Lcore_reloc_start: + # use SCLR to map the sram blocks to the top of their segment + movw r10, #SLCR_UNLOCK_KEY + str r10, [r8, #SLCR_UNLOCK] + + ldr r10, [r8, #OCM_CFG] + orr r10, #0xf + str r10, [r8, #OCM_CFG] + + movw r10, #SLCR_LOCK_KEY + str r10, [r8, #SLCR_LOCK] + + # tell the SCU to not filter first 1MB + mov r10, #0 + str r10, [r9, #0x40] /* SCU filter start address */ + dmb + + bx r12 +.Lcore_reloc_end: + +.ltorg +#endif + diff --git a/target/zybo/rules.mk b/target/zybo/rules.mk index 95a9f82d..44cc24b3 100644 --- a/target/zybo/rules.mk +++ b/target/zybo/rules.mk @@ -7,14 +7,14 @@ PLATFORM := zynq # set the system base to sram ZYNQ_USE_SRAM := 1 +# we have sdram +ZYNQ_SDRAM_SIZE := 0x10000000 + GLOBAL_INCLUDES += \ $(LOCAL_DIR)/include GLOBAL_DEFINES += \ - EXTERNAL_CLOCK_FREQ=50000000 \ - WITH_STATIC_HEAP=1 \ - HEAP_START=0x00100000 \ - HEAP_LEN=0x1ff00000 + EXTERNAL_CLOCK_FREQ=50000000 MODULE_SRCS += \ $(LOCAL_DIR)/target.c \
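
For reference, a minimal sketch of how platform or driver code might use the VMM interface introduced in this patch to map a device aperture at runtime, rather than through a fixed section mapping. The physical base, size, and region name below are placeholders, not values taken from the patch, and the example assumes a 4 KB page size so the arguments satisfy the page-alignment asserts in vmm_alloc_physical().

/* hypothetical device window, for illustration only */
#define EXAMPLE_DEV_BASE_PHYS  (0x10000000UL)
#define EXAMPLE_DEV_SIZE       (0x1000)

static volatile uint32_t *example_map_device(void)
{
    void *vaddr = NULL;

    /* carve a kernel virtual window over the device and map it uncached */
    status_t err = vmm_alloc_physical(vmm_get_kernel_aspace(), "example-dev",
                                      EXAMPLE_DEV_SIZE, &vaddr, EXAMPLE_DEV_BASE_PHYS,
                                      0, ARCH_MMU_FLAG_UNCACHED_DEVICE);
    if (err < 0)
        return NULL;

    return (volatile uint32_t *)vaddr;
}

Once such a mapping exists, register accesses go through the returned virtual pointer; the vexpress-a9 and zynq changes above achieve the same effect statically, via mmu_initial_mappings entries and the new *_VIRT constants.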