[arch][arm64] start to clean up cpu initialization
More definitively set up each cpu's SCTLR_EL1 instead of relying on any default values being present. Also set all RES1 values to 1 according to what is useful at the moment, generally giving the maximum amount of privileges to EL1 and EL0.
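For illustration only (not code from this tree; the SCTLR_* macro names below are invented for the sketch), the value that the new arm64_early_init_percpu() builds up with a series of |= lines can be recomposed from named bits and sanity-checked in a small host program:

/*
 * Host-buildable sketch: recompose the SCTLR_EL1 value that the diff below
 * assembles and check the result. Macro names are invented for this example;
 * the bit positions are the ones used in the commit.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SCTLR_M       (1ULL << 0)   /* enable mmu */
#define SCTLR_C       (1ULL << 2)   /* enable data cache */
#define SCTLR_SA      (1ULL << 3)   /* stack alignment check, EL1 */
#define SCTLR_SA0     (1ULL << 4)   /* stack alignment check, EL0 */
#define SCTLR_SED     (1ULL << 8)   /* disable SETEND in EL0 (or RES1) */
#define SCTLR_EOS     (1ULL << 11)  /* exception exit is context synchronizing (or RES1) */
#define SCTLR_I       (1ULL << 12)  /* enable instruction cache */
#define SCTLR_DZE     (1ULL << 14)  /* allow DC ZVA in EL0 */
#define SCTLR_UCT     (1ULL << 15)  /* allow CTR_EL0 reads in EL0 */
#define SCTLR_NTWE    (1ULL << 18)  /* do not trap WFE in EL0 */
#define SCTLR_TSCXT   (1ULL << 20)  /* trap SCXTNUM_EL0 access in EL0 (or RES1) */
#define SCTLR_EIS     (1ULL << 22)  /* exception entry is context synchronizing (or RES1) */
#define SCTLR_SPAN    (1ULL << 23)  /* leave PSTATE.PAN alone on exception entry (or RES1) */
#define SCTLR_UCI     (1ULL << 26)  /* allow EL0 cache maintenance instructions */
#define SCTLR_NTLSMD  (1ULL << 28)  /* do not trap load/store multiple to uncached memory in EL0 (or RES1) */
#define SCTLR_LSMAOE  (1ULL << 29)  /* armv8.0 load/store multiple ordering (or RES1) */

int main(void) {
    uint64_t sctlr = SCTLR_M | SCTLR_C | SCTLR_SA | SCTLR_SA0 | SCTLR_SED |
                     SCTLR_EOS | SCTLR_I | SCTLR_DZE | SCTLR_UCT | SCTLR_NTWE |
                     SCTLR_TSCXT | SCTLR_EIS | SCTLR_SPAN | SCTLR_UCI |
                     SCTLR_NTLSMD | SCTLR_LSMAOE;

    /* the sequence of |= lines in the diff below arrives at the same constant */
    assert(sctlr == 0x34d4d91dULL);
    printf("SCTLR_EL1 = %#llx\n", (unsigned long long)sctlr);
    return 0;
}

Building the register value from explicit bit writes, rather than read-modify-writing whatever was already there, is what makes the configuration independent of anything a bootloader or previous exception level left behind.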
@@ -28,43 +28,51 @@ static spin_lock_t arm_boot_cpu_lock = 1;
static volatile int secondaries_to_init = 0;
#endif

static void arm64_cpu_early_init(void) {
    /* set the vector base */
// initial setup per cpu immediately after entering C code
static void arm64_early_init_percpu(void) {
    // set the vector base
    ARM64_WRITE_SYSREG(VBAR_EL1, (uint64_t)&arm64_exception_base);

    // hard set up the SCTLR ignoring what was there before
    uint64_t sctlr = 0;
    sctlr |= (1 << 0); // M: enable mmu
    sctlr |= (1 << 2); // C: enable data cache
    sctlr |= (1 << 3); // SA: enable stack alignment check for EL1
    sctlr |= (1 << 4); // SA0: enable stack alignment check for EL0
    sctlr |= (1 << 8); // SED: disable access to SETEND instructions in EL0 (or RES1)
    sctlr |= (1 << 11); // EOS: exception exit is context synchronizing (or RES1)
    sctlr |= (1 << 12); // I: enable instruction cache
    sctlr |= (1 << 14); // DZE: enable access to DC ZVA instruction in EL0
    sctlr |= (1 << 15); // UCT: enable user access to CTR_EL0
    sctlr |= (1 << 18); // nTWE: do not trap WFE instructions in EL0
    sctlr |= (1 << 20); // TSCXT: trap access to SCXTNUM_EL0 in EL0 (or RES1)
    sctlr |= (1 << 22); // EIS: exception entry is context synchronizing (or RES1)
    sctlr |= (1 << 23); // SPAN: PSTATE.PAN is left alone on exception entry (or RES1)
    sctlr |= (1 << 26); // UCI: allow EL0 access to cache maintenance instructions
    sctlr |= (1 << 28); // nTLSMD: do not trap load/store multiple instructions to uncached memory in EL0 (or RES1)
    sctlr |= (1 << 29); // LSMAOE: load/store multiple ordering according to armv8.0 (or RES1)
    // all other bits are RES0 and we can ignore for now
    ARM64_WRITE_SYSREG(SCTLR_EL1, sctlr);

    ARM64_WRITE_SYSREG(CPACR_EL1, 0UL); // disable coprocessors

    ARM64_WRITE_SYSREG(MDSCR_EL1, 0UL); // disable debug

    // TODO: read feature bits on cpu 0
    // TODO: enable cycle counter if present

    arch_enable_fiqs();
}

// called very early in the main boot sequence on the boot cpu
void arch_early_init(void) {
    arm64_cpu_early_init();
    arm64_early_init_percpu();

    // allow the platform a chance to inject some mappings
    platform_init_mmu_mappings();
}

void arch_stacktrace(uint64_t fp, uint64_t pc) {
    struct arm64_stackframe frame;

    if (!fp) {
        frame.fp = (uint64_t)__builtin_frame_address(0);
        frame.pc = (uint64_t)arch_stacktrace;
    } else {
        frame.fp = fp;
        frame.pc = pc;
    }

    printf("stack trace:\n");
    while (frame.fp) {
        printf("0x%llx\n", frame.pc);

        /* Stack frame pointer should be 16 bytes aligned */
        if (frame.fp & 0xF) {
            break;
        }

        frame.pc = *((uint64_t *)(frame.fp + 8));
        frame.fp = *((uint64_t *)frame.fp);
    }
}

// called after the kernel has been initialized and threading is enabled on the boot cpu
void arch_init(void) {
#if WITH_SMP
    arch_mp_init_percpu();
@@ -130,6 +138,31 @@ void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    __UNREACHABLE;
}

void arch_stacktrace(uint64_t fp, uint64_t pc) {
    struct arm64_stackframe frame;

    if (!fp) {
        frame.fp = (uint64_t)__builtin_frame_address(0);
        frame.pc = (uint64_t)arch_stacktrace;
    } else {
        frame.fp = fp;
        frame.pc = pc;
    }

    printf("stack trace:\n");
    while (frame.fp) {
        printf("0x%llx\n", frame.pc);

        /* Stack frame pointer should be 16 bytes aligned */
        if (frame.fp & 0xF) {
            break;
        }

        frame.pc = *((uint64_t *)(frame.fp + 8));
        frame.fp = *((uint64_t *)frame.fp);
    }
}

#if WITH_SMP
/* called from assembly */
void arm64_secondary_entry(ulong);
@@ -139,7 +172,7 @@ void arm64_secondary_entry(ulong asm_cpu_num) {
        return;
    }

    arm64_cpu_early_init();
    arm64_early_init_percpu();

    spin_lock(&arm_boot_cpu_lock);
    spin_unlock(&arm_boot_cpu_lock);

@@ -38,16 +38,12 @@ arm_reset:
    /* if we came in at higher than EL1, drop down to EL1 */
    bl      arm64_elX_to_el1

    /* disable EL1 FPU traps */
    mov     tmp, #(0b11<<20)
    msr     cpacr_el1, tmp

    /* enable caches so atomics and spinlocks work */
    mrs     tmp, sctlr_el1
    bic     tmp, tmp, #(1<<19) /* Disable WXN */
    orr     tmp, tmp, #(1<<12) /* Enable icache */
    orr     tmp, tmp, #(1<<2)  /* Enable dcache/ucache */
    orr     tmp, tmp, #(1<<3)  /* Enable Stack Alignment Check EL1 */
    orr     tmp, tmp, #(1<<4)  /* Enable Stack Alignment Check EL0 */
    bic     tmp, tmp, #(1<<1)  /* Disable Alignment Checking for EL1 EL0 */
    msr     sctlr_el1, tmp
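A note on the CPACR_EL1 constant in the assembly above: FPEN is the two-bit field at bits [21:20] of CPACR_EL1, and 0b11 means floating-point/Advanced SIMD accesses are not trapped at EL1 or EL0, which is why #(0b11<<20) disables the EL1 FPU traps. A minimal host-side sketch of that constant (the macro names are illustrative, not from this tree):

#include <assert.h>
#include <stdint.h>

#define CPACR_FPEN_SHIFT   20      /* CPACR_EL1.FPEN occupies bits [21:20] */
#define CPACR_FPEN_NO_TRAP 0x3ULL  /* 0b11: do not trap FP/SIMD at EL1 or EL0 */

int main(void) {
    uint64_t cpacr = CPACR_FPEN_NO_TRAP << CPACR_FPEN_SHIFT;
    /* matches the immediate built by "mov tmp, #(0b11<<20)" above */
    assert(cpacr == 0x300000ULL);
    return 0;
}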