diff options
author | Julius Werner <jwerner@chromium.org> | 2020-02-14 12:42:01 -0800 |
---|---|---|
committer | Patrick Georgi <pgeorgi@google.com> | 2020-02-17 15:42:34 +0000 |
commit | bf33b03acf27d79df9bf1bd8d5075b70196b1844 (patch) | |
tree | 403c22abdb570c0956ce6655ab49dc8b7ce5f9a1 /payloads/libpayload/arch | |
parent | 6cf33858b64449ad6e22cd27ec5734a972b8f39e (diff) |
libpayload: arm64: Keep instruction cache enabled at all times
This patch makes libpayload enable the instruction cache as the very
first thing, which is similar to how we treat it in coreboot. It also
prevents the icache from being disabled again during mmu_disable() as
part of the two-stage page table setup in post_sysinfo_scan_mmu_setup().
It replaces the existing mmu_disable() implementation with the assembly
version from coreboot which handles certain edge cases better (see
CB:27238 for details).
The SCTLR flag definitions in libpayload seem to have still been
copy&pasted from arm32, so replace with the actual arm64 definitions from
coreboot.
Change-Id: Ifdbec34f0875ecc69fedcbea5c20e943379a3d2d
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/38908
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
Diffstat (limited to 'payloads/libpayload/arch')
-rw-r--r-- | payloads/libpayload/arch/arm64/cpu.S | 15 | ||||
-rw-r--r-- | payloads/libpayload/arch/arm64/head.S | 4 | ||||
-rw-r--r-- | payloads/libpayload/arch/arm64/mmu.c | 24 |
3 files changed, 19 insertions, 24 deletions
diff --git a/payloads/libpayload/arch/arm64/cpu.S b/payloads/libpayload/arch/arm64/cpu.S index d80f73c112..70a1044b02 100644 --- a/payloads/libpayload/arch/arm64/cpu.S +++ b/payloads/libpayload/arch/arm64/cpu.S @@ -29,6 +29,7 @@ */ #include <arch/asm.h> +#include <arch/lib_helpers.h> .macro dcache_apply_all crm dsb sy @@ -96,3 +97,17 @@ ENDPROC(dcache_clean_all) ENTRY(dcache_clean_invalidate_all) dcache_apply_all crm=cisw ENDPROC(dcache_clean_invalidate_all) + +/* This must be implemented in assembly to ensure there are no accesses to + memory (e.g. the stack) in between disabling and flushing the cache. */ +ENTRY(mmu_disable) + str x30, [sp, #-0x8] + mrs x0, sctlr_el2 + mov x1, #~(SCTLR_C | SCTLR_M) + and x0, x0, x1 + msr sctlr_el2, x0 + isb + bl dcache_clean_invalidate_all + ldr x30, [sp, #-0x8] + ret +ENDPROC(mmu_disable) diff --git a/payloads/libpayload/arch/arm64/head.S b/payloads/libpayload/arch/arm64/head.S index 8bac70fee5..c44169b82a 100644 --- a/payloads/libpayload/arch/arm64/head.S +++ b/payloads/libpayload/arch/arm64/head.S @@ -28,11 +28,15 @@ */ #include <arch/asm.h> +#include <arch/lib_helpers.h> /* * Our entry point */ ENTRY(_entry) + /* Initialize SCTLR to intended state (icache and stack-alignment on) */ + ldr w1, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA) + msr sctlr_el2, x1 /* Save off the location of the coreboot tables */ ldr x1, 1f diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c index d1dd5b0147..3a5e04db6c 100644 --- a/payloads/libpayload/arch/arm64/mmu.c +++ b/payloads/libpayload/arch/arm64/mmu.c @@ -304,30 +304,6 @@ static uint32_t is_mmu_enabled(void) } /* - * Func: mmu_disable - * Desc: Invalidate caches and disable mmu - */ -void mmu_disable(void) -{ - uint32_t sctlr; - - sctlr = raw_read_sctlr_el2(); - sctlr &= ~(SCTLR_C | SCTLR_M | SCTLR_I); - - tlbiall_el2(); - dcache_clean_invalidate_all(); - - dsb(); - isb(); - - raw_write_sctlr_el2(sctlr); - - dcache_clean_invalidate_all(); - dsb(); - isb(); -} - 
-/* * Func: mmu_enable * Desc: Initialize MAIR, TCR, TTBR and enable MMU by setting appropriate bits * in SCTLR |