[arch][arm64] tweak the arm64_elX_to_el1 routine to avoid using x0-x3

It's called immediately upon entering the kernel entry vector, before
it is known whether this is the boot cpu or whether any boot arguments
need to be saved, so avoid using those registers (x0-x3).
This commit is contained in:
Travis Geiselbrecht
2022-02-10 22:19:00 -08:00
parent fbb838d1f8
commit e2cda72095

View File

@@ -74,6 +74,10 @@ FUNCTION(arm64_el3_to_el1)
eret
/* drop from whatever EL we may already be in to EL1.
* carefully avoids using x0-x3 since this is called from start.S
* which is trying to preserve them.
*/
FUNCTION(arm64_elX_to_el1)
mrs x4, CurrentEL
@@ -86,13 +90,11 @@ FUNCTION(arm64_elX_to_el1)
cmp x4, #(0b10 << 2)
beq .inEL2
/* set EL2 to 64bit */
mrs x4, scr_el3
orr x4, x4, #(1<<10)
msr scr_el3, x4
adr x4, .Ltarget
msr elr_el3, x4
@@ -106,28 +108,25 @@ FUNCTION(arm64_elX_to_el1)
mov x4, #((0b1111 << 6) | (0b0101)) /* EL1h runlevel */
msr spsr_el2, x4
.confEL1:
/* disable EL2 coprocessor traps */
mov x0, #0x33ff
msr cptr_el2, x0
mov x4, #0x33ff
msr cptr_el2, x4
/* set EL1 to 64bit */
mov x0, #(1<<31)
msr hcr_el2, x0
mov x4, #(1<<31)
msr hcr_el2, x4
/* disable EL1 FPU traps */
mov x0, #(0b11<<20)
msr cpacr_el1, x0
mov x4, #(0b11<<20)
msr cpacr_el1, x4
/* set up the EL1 bounce interrupt */
mov x0, sp
msr sp_el1, x0
mov x4, sp
msr sp_el1, x4
isb
eret
.Ltarget:
ret