author     David Milosevic <David.Milosevic@9elements.com>  2023-04-27 02:12:31 +0200
committer  Werner Zeh <werner.zeh@siemens.com>  2024-04-22 07:35:36 +0000
commit     41ba11229a80eb19d97c8052aff1861478ee2486 (patch)
tree       9b6f1803e5854e577a68959e679187e8e4c88c3c /src/arch/arm64/armv8/cpu.S
parent     93cbbbfc7f32f62b1d20027541122c17e575ced6 (diff)
arch/arm64: Add EL1/EL2/EL3 support for arm64
Currently, arch/arm64 requires coreboot to run on EL3 due to EL3 register access. This might be an issue when, for example, one boots into TF-A first and drops into EL2 for coreboot afterwards. This patch aims at making arch/arm64 more versatile by removing the current EL3 constraint and allowing arm64 coreboot to run on EL1, EL2 and EL3.

The strategy here is to add a Kconfig option (ARM64_CURRENT_EL) which lets us specify coreboot's EL upon entry. Based on that, we access the appropriate ELx registers. So, for example, when running coreboot on EL1, we would not access vbar_el3 or vbar_el2 but instead vbar_el1. This way, we don't generate faults when accessing higher-EL registers.

Currently only tested on the qemu-aarch64 target. Exceptions were tested by enabling FATAL_ASSERTS.

Signed-off-by: David Milosevic <David.Milosevic@9elements.com>
Change-Id: Iae1c57f0846c8d0585384f7e54102a837e701e7e
Reviewed-on: https://review.coreboot.org/c/coreboot/+/74798
Reviewed-by: Werner Zeh <werner.zeh@siemens.com>
Reviewed-by: ron minnich <rminnich@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
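The CURRENT_EL() helper used throughout the diff below boils down to preprocessor token pasting: it appends the configured exception level to a system-register name. A minimal sketch of how such a macro could be written, assuming CONFIG_ARM64_CURRENT_EL expands to 1, 2 or 3 (the real definition lives in coreboot's arm64 headers and may be spelled differently):

/* Hypothetical sketch of CURRENT_EL(); not the verbatim coreboot macro. */
#define _PASTE_EL(reg, el)	reg##_el##el
#define _EXPAND_EL(reg, el)	_PASTE_EL(reg, el)	/* expand el before pasting */
#define CURRENT_EL(reg)		_EXPAND_EL(reg, CONFIG_ARM64_CURRENT_EL)

/* With CONFIG_ARM64_CURRENT_EL=2, "mrs x0, CURRENT_EL(sctlr)"
 * assembles as "mrs x0, sctlr_el2". */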
Diffstat (limited to 'src/arch/arm64/armv8/cpu.S')
-rw-r--r--  src/arch/arm64/armv8/cpu.S | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 04bf6a7021..a40ee64536 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -77,10 +77,10 @@ ENDPROC(dcache_clean_invalidate_all)
    memory (e.g. the stack) in between disabling and flushing the cache. */
 ENTRY(mmu_disable)
 	str	x30, [sp, #-0x8]
-	mrs	x0, sctlr_el3
+	mrs	x0, CURRENT_EL(sctlr)
 	mov	x1, #~(SCTLR_C | SCTLR_M)
 	and	x0, x0, x1
-	msr	sctlr_el3, x0
+	msr	CURRENT_EL(sctlr), x0
 	isb
 	bl	dcache_clean_invalidate_all
 	ldr	x30, [sp, #-0x8]
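To make the effect concrete: with CONFIG_ARM64_CURRENT_EL set to 2, the preprocessor rewrites the patched mmu_disable body as if it had been written against the EL2 system registers. An expansion sketch, assuming the paste-style macro outlined above (not part of the patch):

	/* mmu_disable body after preprocessing at EL2 (sketch) */
	str	x30, [sp, #-0x8]		/* stash return address */
	mrs	x0, sctlr_el2			/* was hardcoded sctlr_el3 before */
	mov	x1, #~(SCTLR_C | SCTLR_M)	/* clear dcache and MMU enable bits */
	and	x0, x0, x1
	msr	sctlr_el2, x0
	isb					/* make the sysreg write take effect */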
@@ -102,12 +102,11 @@ ENTRY(arm64_init_cpu)
 	/* x22: SCTLR, return address: x23 (callee-saved by subroutine) */
 	mov	x23, x30
 
-	/* TODO: Assert that we always start running at EL3 */
-	mrs	x22, sctlr_el3
+	mrs	x22, CURRENT_EL(sctlr)
 
 	/* Activate ICache already for speed during cache flush below. */
 	orr	x22, x22, #SCTLR_I
-	msr	sctlr_el3, x22
+	msr	CURRENT_EL(sctlr), x22
 	isb
 
 	/* Invalidate dcache */
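The removed TODO is obsolete because entering coreboot at EL1 or EL2 is now legitimate. If a runtime sanity check were still desired, the architectural CurrentEL register could be compared against the configured level; a hypothetical sketch, not part of this patch (the .Lel_mismatch label is made up for illustration):

	mrs	x0, CurrentEL			/* EL encoded in bits [3:2] */
	lsr	x0, x0, #2			/* x0 = 1, 2 or 3 */
	cmp	x0, #CONFIG_ARM64_CURRENT_EL
	b.ne	.Lel_mismatch			/* hypothetical error path */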
@@ -116,13 +115,15 @@
 	/* Reinitialize SCTLR from scratch to known-good state.
 	   This may disable MMU or DCache. */
 	ldr	w22, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
-	msr	sctlr_el3, x22
+	msr	CURRENT_EL(sctlr), x22
 
+#if CONFIG_ARM64_CURRENT_EL == EL3
 	/* Initialize SCR to unmask all interrupts (so that if we get a spurious
 	   IRQ/SError we'll see it when it happens, not hang in BL31). This will
 	   only have an effect after we DAIFClr in exception_init(). */
 	mov	x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
 	msr	scr_el3, x22
+#endif
 
 	/* Invalidate icache and TLB for good measure */
 	ic	iallu
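Note that scr_el3 exists only at EL3; an mrs/msr on it from EL1 or EL2 raises an UNDEFINED exception, which is why the block above is compiled out via the preprocessor guard rather than branched around at runtime. For the guard to work, the EL1/EL2/EL3 tokens must be plain numeric constants the preprocessor can compare. A hypothetical sketch of those definitions (the actual ones live in coreboot's arm64 headers):

/* Hypothetical EL constants for #if comparisons; spelling may differ
 * from coreboot's actual headers. */
#define EL1	1
#define EL2	2
#define EL3	3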