[arch] relocate binary to proper physical location at boot, add arch_chain_load

- In arm start.S, calculate the delta from the proper physical load address and move the current binary there before enabling the MMU.
- Add arch_chain_load, which performs the necessary virtual-to-physical translations, tries to gracefully shut the system down, and branches into the loaded binary.
This commit is contained in:
Travis Geiselbrecht
2014-08-12 16:10:16 -07:00
parent 91e3efde16
commit ec69757a59
8 changed files with 149 additions and 7 deletions

View File

@@ -137,3 +137,7 @@ void arm_cm_irq_exit(bool reschedule)
dec_critical_section();
}
/* Chain-load into a new binary at 'entry'.
 * Not implemented for this architecture: panics unconditionally.
 * (Other arches translate 'entry' to physical, quiesce, and branch.)
 */
void arch_chain_load(void *entry)
{
    PANIC_UNIMPLEMENTED;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2013 Travis Geiselbrecht
* Copyright (c) 2008-2014 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
@@ -21,12 +21,19 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <debug.h>
#include <trace.h>
#include <stdlib.h>
#include <err.h>
#include <arch.h>
#include <arch/ops.h>
#include <arch/mmu.h>
#include <arch/arm.h>
#include <arch/arm/mmu.h>
#include <platform.h>
#include <target.h>
#include <kernel/thread.h>
#define LOCAL_TRACE 0
void arch_early_init(void)
{
@@ -118,4 +125,61 @@ status_t arm_vtop(addr_t va, addr_t *pa)
}
#endif
/*
 * arch_chain_load: shut the running system down and transfer control to a
 * new binary already loaded in memory.
 *
 * entry - virtual pointer to the new image's entry point.
 *
 * Sequence: disable interrupts, quiesce target/platform/arch hardware,
 * translate both the entry point and the arm_chain_load trampoline from
 * virtual to physical, identity-map the sections surrounding the
 * trampoline, disable caches, then branch to the physical alias of
 * arm_chain_load, which turns the MMU off and jumps to the entry point.
 * Never returns; panics if either address translation fails.
 */
void arch_chain_load(void *entry)
{
    LTRACEF("entry %p\n", entry);

    /* we are going to shut down the system, start by disabling interrupts */
    enter_critical_section();

    /* give target and platform a chance to put hardware into a suitable
     * state for chain loading.
     */
    target_quiesce();
    platform_quiesce();
    arch_quiesce();

#if WITH_KERNEL_VM
    /* get the physical address of the entry point we're going to branch to */
    paddr_t entry_pa;
    if (arm_vtop((addr_t)entry, &entry_pa) < 0) {
        panic("error translating entry physical address\n");
    }

    /* add the low bits of the virtual address back
     * (NOTE(review): assumes arm_vtop returns a page-granular address with
     * the low 12 bits clear -- confirm against its implementation) */
    entry_pa |= ((addr_t)entry & 0xfff);

    LTRACEF("entry pa 0x%lx\n", entry_pa);

    /* figure out the mapping for the chain load routine */
    paddr_t loader_pa;
    if (arm_vtop((addr_t)&arm_chain_load, &loader_pa) < 0) {
        panic("error translating loader physical address\n");
    }

    /* add the low bits of the virtual address back */
    loader_pa |= ((addr_t)&arm_chain_load & 0xfff);

    /* round down to the enclosing section so large-page mappings can be used */
    paddr_t loader_pa_section = ROUNDDOWN(loader_pa, SECTION_SIZE);

    LTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
        &arm_chain_load, loader_pa, loader_pa_section);

    /* using large pages, map around the target location: an identity
     * mapping (va == pa) of two sections, so the trampoline keeps
     * executing after it switches the MMU off */
    arch_mmu_map(loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);

    LTRACEF("disabling instruction/data cache\n");
    arch_disable_cache(UCACHE);

    LTRACEF("branching to physical address of loader\n");

    /* branch to the physical address version of the chain loader routine */
    void (*loader)(paddr_t entry) __NO_RETURN = (void *)loader_pa;
    loader(entry_pa);
#else
#error handle the non vm path (should be simpler)
#endif
}
/* vim: set ts=4 sw=4 noexpandtab: */

View File

@@ -60,6 +60,14 @@ FUNCTION(arm_context_switch)
.ltorg
#if ARM_ARCH_LEVEL == 6
.data
strex_spot:
.word 0
#endif
.text
FUNCTION(arm_save_mode_regs)
mrs r1, cpsr
@@ -91,8 +99,35 @@ FUNCTION(arm_save_mode_regs)
bx lr
.data
strex_spot:
.word 0
.text
/* void arm_chain_load(paddr_t entry) __NO_RETURN; */
/* shut down the system, branching into the secondary system */
/*
 * In:  r0 = physical address of the next binary's entry point
 * Clobbers: r1, flags. Does not return.
 *
 * Must be entered through an identity mapping (virtual == physical) built
 * by the caller, so that execution continues seamlessly at the same
 * address once the MMU is disabled below.
 */
FUNCTION(arm_chain_load)
#if !WITH_KERNEL_VM
#error implement non VM based chain load
#else
    /* The MMU is initialized and running at this point, so we'll need to
     * make sure we can disable it and continue to run. The caller should
     * have built an identity map for us and branched to our identity mapping,
     * so it will be safe to just disable the mmu and branch to the entry
     * point in physical space.
     */

    /* Read SCTLR (CP15 c1 System Control Register) */
    mrc p15, 0, r1, c1, c0, 0
    /* Turn off the MMU (clear SCTLR.M, bit 0) */
    bic r1, r1, #0x1
    /* Write back SCTLR */
    mcr p15, 0, r1, c1, c0, 0
    /* synchronize the pipeline with the new translation regime */
    isb

    /* call the entry point */
    bx r0
#endif // WITH_KERNEL_VM

/* vim: set ts=4 sw=4 noexpandtab: */

View File

@@ -65,12 +65,37 @@ arm_reset:
#endif
#if WITH_KERNEL_VM
__relocate_start:
/* see if we need to relocate to our proper location in physical memory */
adr r0, _start /* this emits sub r0, pc, #constant */
ldr r1, =(MEMBASE + KERNEL_LOAD_OFFSET) /* calculate the binary's physical load address */
subs r12, r0, r1 /* calculate the delta between where we're loaded and the proper spot */
beq .Lsetup_mmu
/* we need to relocate ourselves to the proper spot */
ldr r2, =__data_end
ldr r3, =(KERNEL_BASE - MEMBASE)
sub r2, r3
add r2, r12
.Lrelocate_loop:
ldr r3, [r0], #4
str r3, [r1], #4
cmp r0, r2
bne .Lrelocate_loop
/* we're relocated, jump to the right address */
sub pc, r12
nop
__mmu_start:
.Lsetup_mmu:
/* set up the mmu according to mmu_initial_mappings */
/* calculate our physical to virtual offset */
mov r12, pc
ldr r1, =.Laddr
.Laddr:
ldr r1, =.Laddr1
.Laddr1:
sub r12, r1
/* r12 now holds the offset from virtual to physical:

View File

@@ -49,6 +49,8 @@ __BEGIN_CDECLS
void arm_context_switch(vaddr_t *old_sp, vaddr_t new_sp);
void arm_chain_load(paddr_t entry) __NO_RETURN;
static inline uint32_t read_cpsr(void)
{
uint32_t cpsr;

View File

@@ -51,3 +51,9 @@ void arch_idle(void)
__asm__ volatile("wfi");
}
/* Chain-load into a new binary at 'entry'.
 * Not implemented for this architecture: panics unconditionally.
 */
void arch_chain_load(void *entry)
{
    PANIC_UNIMPLEMENTED;
}

View File

@@ -60,4 +60,8 @@ void arch_init(void)
{
}
/* Chain-load into a new binary at 'entry'.
 * Not implemented for this architecture: panics unconditionally.
 */
void arch_chain_load(void *entry)
{
    PANIC_UNIMPLEMENTED;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008-2009 Travis Geiselbrecht
* Copyright (c) 2008-2014 Travis Geiselbrecht
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
@@ -24,12 +24,14 @@
#define __ARCH_H
#include <compiler.h>
#include <sys/types.h>
__BEGIN_CDECLS
void arch_early_init(void);
void arch_init(void);
void arch_quiesce(void);
void arch_chain_load(void *entry) __NO_RETURN;
__END_CDECLS