author | Julius Werner <jwerner@chromium.org> | 2019-12-03 22:47:01 -0800 |
---|---|---|
committer | Patrick Georgi <pgeorgi@google.com> | 2019-12-05 17:57:58 +0000 |
commit | bb345abbfc999f70e3f0f9739f13e1f45d5a0fe9 (patch) | |
tree | f8b652805a320d188ea995bc88bca8837118cfe9 /src/arch/arm64/armv8 | |
parent | 31a5ff5e36ea499f87a8947875a067c843a45532 (diff) | |
arm64: Correctly unmask asynchronous SError interrupts
Arm CPUs have always had an odd feature that allows you to mask not only
true interrupts, but also "external aborts" (memory bus errors from
outside the CPU). CPUs usually have all of these masked after reset,
which we quickly learned was a bad idea back when bringing up the first
arm32 systems in coreboot. Masking external aborts means that if any of
your firmware code does an illegal memory access, you will only see it
once the kernel comes up and unmasks the abort (not when it happens).
Therefore, we always unmask everything in early bootblock assembly code.
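(As a quick illustration of the masking discussed above: on AArch64 this mask lives in the four PSTATE.DAIF bits, which are set and cleared through dedicated MSR forms. The sketch below is illustrative only, not a quote from the coreboot tree.)

```
	/* Mask all four PSTATE.DAIF bits: D (debug), A (SError/external
	   abort), I (IRQ), F (FIQ); the #0xf immediate selects all four. */
	msr	DAIFSet, #0xf

	/* ... bring up console and exception vectors ... */

	/* Unmask again so a pending abort is reported where it happened. */
	msr	DAIFClr, #0xf
```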
When arm64 came around, it had very similar masking bits and we did the
same there, thinking the issue resolved. Unfortunately Arm, in their
ceaseless struggle for more complexity, decided that having a single bit
to control this masking behavior is no longer enough: on AArch64, in
addition to the PSTATE.DAIF bits that are analogous to arm32's CPSR,
there are additional bits in SCR_EL3 that can override the PSTATE
setting for some but not all cases (makes perfect sense, I know...).
When aborts are unmasked in PSTATE, but SCR.EA is not set, then
synchronous external aborts will cause an exception while asynchronous
external aborts will not. It turns out we never initialize SCR in
coreboot, and on RK3399 it comes up with all zeroes (even the reserved-1
bits, which is super weird). If you get an asynchronous external abort
in coreboot, it will silently hide in the CPU until BL31 enables SCR.EA
before it has registered its own console handlers, and then the system
silently hangs.
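(To make the PSTATE/SCR interplay concrete: from EL3 code one can read SCR_EL3 and test the EA bit, bit 3 in the ARMv8-A definition, to see whether an asynchronous external abort would be delivered or stay latched. Illustrative sketch only.)

```
	mrs	x0, scr_el3		/* on RK3399 this read back as all zeroes */
	tbz	x0, #3, 1f		/* EA is bit 3; branch if it is clear */
	/* EA set: a pending asynchronous external abort is taken as an
	   SError exception, provided PSTATE.A is also unmasked. */
	b	2f
1:	/* EA clear: the abort stays latched in the CPU until someone, e.g.
	   BL31, sets SCR_EL3.EA later - the silent-hang case described above. */
2:	nop
```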
This patch resolves the issue by also initializing SCR to a known good
state early in the bootblock. It also cleans up some bit definitions and
slightly reworks the DAIF unmasking... it doesn't actually make that
much sense to unmask anything before our console and exception handlers
are up. The new code will mask everything until the exception handler is
installed and then unmask it, so that if there was a super early
external abort we could still see it. (Of course there are still dozens
of other processor exceptions that could happen which we have no way to
mask.)
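(Condensed sketch of the resulting order of operations; the bit values are the architectural SCR_EL3 RES1/EA/FIQ/IRQ positions, while the real code uses coreboot's SCR_* and DAIF macros as shown in the diff below.)

```
	/* 1. Mask everything at the PSTATE level while CPU state is unknown. */
	msr	DAIFSet, #0xf

	/* 2. Unmask at the SCR_EL3 level so nothing stays latched for BL31:
	   RES1 (bits 5:4) | EA (bit 3) | FIQ (bit 2) | IRQ (bit 1). */
	mov	x0, #(0x3 << 4) | (1 << 3) | (1 << 2) | (1 << 1)
	msr	scr_el3, x0

	/* 3. Once exception vectors and console exist (exception_init() in
	   coreboot), drop the PSTATE mask so a latched SError fires at once. */
	msr	DAIFClr, #0xf
```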
Change-Id: I5266481a7aaf0b72aca8988accb671d92739af6f
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37463
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
Diffstat (limited to 'src/arch/arm64/armv8')
-rw-r--r-- | src/arch/arm64/armv8/cpu.S | 30 |
1 file changed, 15 insertions, 15 deletions
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 2bc4defde8..5f06c7e677 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -99,15 +99,14 @@ ENDPROC(mmu_disable)
 
 /*
  * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
- * known state regarding caches/SCTLR/PSTATE. Completely invalidates
+ * known state regarding caches/SCTLR/SCR/PSTATE. Completely invalidates
  * icache/dcache, disables MMU and dcache (if active), and enables unaligned
- * accesses, icache and branch prediction (if inactive). Seeds the stack and
- * initializes SP_EL0. Clobbers R22 and R23.
+ * accesses, icache. Seeds stack and initializes SP_EL0. Clobbers R22 and R23.
  */
 ENTRY(arm64_init_cpu)
-	/* Initialize PSTATE (unmask all exceptions, select SP_EL0). */
+	/* Initialize PSTATE (mask all exceptions, select SP_EL0). */
 	msr	SPSel, #0
-	msr	DAIFClr, #0xf
+	msr	DAIFSet, #0xf
 
 	/* TODO: This is where we'd put non-boot CPUs into WFI if needed. */
 
@@ -116,24 +115,25 @@ ENTRY(arm64_init_cpu)
 	/* TODO: Assert that we always start running at EL3 */
 
 	mrs	x22, sctlr_el3
-	/* Activate ICache (12) already for speed during cache flush below. */
-	orr	x22, x22, #(1 << 12)
+	/* Activate ICache already for speed during cache flush below. */
+	orr	x22, x22, #SCTLR_I
 	msr	sctlr_el3, x22
 	isb
 
 	/* Invalidate dcache */
 	bl	dcache_invalidate_all
 
-	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
-	and	x22, x22, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
-	/* Activate Stack Alignment (3) because why not */
-	orr	x22, x22, #(1 << 3)
-	/* Set to little-endian (25) */
-	and	x22, x22, # ~(1 << 25)
-	/* Deactivate write-xor-execute enforcement (19) */
-	and	x22, x22, # ~(1 << 19)
+	/* Reinitialize SCTLR from scratch to known-good state.
+	   This may disable MMU or DCache. */
+	ldr	w22, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
 	msr	sctlr_el3, x22
 
+	/* Initialize SCR to unmask all interrupts (so that if we get a spurious
+	   IRQ/SError we'll see it when it happens, not hang in BL31). This will
+	   only have an effect after we DAIFClr in exception_init(). */
+	mov	x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
+	msr	scr_el3, x22
+
 	/* Invalidate icache and TLB for good measure */
 	ic	iallu
 	tlbi	alle3
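(For completeness, a hedged sketch of the DAIFClr counterpart that the new comment refers to, as it would appear around coreboot's exception_init(); the exception_vectors symbol is illustrative, not taken from the tree.)

```
	/* Install the EL3 exception vector table, then drop the PSTATE mask
	   that arm64_init_cpu set, so the SCR_EL3 settings take effect. */
	adr	x0, exception_vectors	/* hypothetical vector table label */
	msr	vbar_el3, x0
	isb
	msr	DAIFClr, #0xf		/* unmask D/A/I/F now that handlers exist */
```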