author     Julius Werner <jwerner@chromium.org>      2018-08-10 13:06:00 -0700
committer  Patrick Georgi <pgeorgi@google.com>       2018-08-13 12:24:56 +0000
commit     487f7f24a5c71e29f28b355819b9cab7d5b90a4a (patch)
tree       1d7587194f5d4390e4f698c6b73713cfb30a45d1 /src/arch
parent     72b2022b31e6ee8d6d81a3761b90df2df88ba8a8 (diff)
arm64: mmu: Spot check security state for TTB mapping
Since commit 372d0ff1d1 (arch/arm64: mmu: Spot check TTB memory attributes), we already check the memory attributes that the TTB region is mapped with, to avoid configuration mistakes that cause weird issues (because the MMU walks the page tables with different memory attributes than the ones they were written with).

Unfortunately, we only checked cacheability, but the security state attribute is just as important: it is part of the cache tag, so a cache entry created by accessing the non-secure mapping won't be used when trying to read the same address through a secure mapping. Since AArch64 page table walks snoop the cache and we rely on that behavior, this can lead to the MMU not seeing the new page table entries we just wrote.

This patch adds the check for security state and cleans up that code a little.

Change-Id: I70cda4f76f201b03d69a9ece063a3830b15ac04b
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://review.coreboot.org/28017
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Hung-Te Lin <hungte@chromium.org>
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
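For background (this note is not part of the commit): the check the message describes boils down to reading two fields of an AArch64 stage-1 block/page descriptor: AttrIndx (bits [4:2]), which indexes the memory attributes configured in MAIR_ELx, and NS (bit 5), which selects the security state. Below is a minimal standalone sketch; the constant names mirror coreboot's arch/mmu.h but the snippet is written here purely for illustration.

#include <assert.h>
#include <stdint.h>

#define BLOCK_INDEX_SHIFT	2		/* AttrIndx field at descriptor bits [4:2] */
#define BLOCK_INDEX_MASK	0x7ULL
#define BLOCK_INDEX_MEM_NORMAL	4		/* MAIR slot used for normal, cacheable memory */
#define BLOCK_NS		(1ULL << 5)	/* non-secure bit of the descriptor */

/* A TTB descriptor must be normal/cacheable AND secure: the hardware
 * walker's accesses carry both attributes in the cache tag, so any
 * mismatch makes the walker miss the cache lines the firmware just
 * wrote through its own mapping. */
static void check_ttb_descriptor(uint64_t pte)
{
	assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
	       == BLOCK_INDEX_MEM_NORMAL);
	assert(!(pte & BLOCK_NS));
}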
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/arm64/armv8/mmu.c  17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 742f097d0d..67dca48dbc 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -219,6 +219,16 @@ static uint64_t get_pte(void *addr)
}
}
+/* Func : assert_correct_ttb_mapping
+ * Desc : Asserts that mapping for addr matches the access type used by the
+ * page table walk (i.e. addr is correctly mapped to be part of the TTB). */
+static void assert_correct_ttb_mapping(void *addr)
+{
+ uint64_t pte = get_pte(addr);
+ assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
+ == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS));
+}
+
/* Func : mmu_config_range
* Desc : This function repeatedly calls init_xlat_table with the base
* address. Based on size returned from init_xlat_table, base_addr is updated
@@ -310,11 +320,8 @@ void mmu_restore_context(const struct mmu_context *mmu_context)
void mmu_enable(void)
{
- if (((get_pte(_ttb) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
- != BLOCK_INDEX_MEM_NORMAL ||
- ((get_pte(_ettb - 1) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK)
- != BLOCK_INDEX_MEM_NORMAL)
- die("TTB memory type must match TCR (normal, cacheable)!");
+ assert_correct_ttb_mapping(_ttb);
+ assert_correct_ttb_mapping(_ettb - 1);
uint32_t sctlr = raw_read_sctlr_el3();
sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
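For context, the hunk is cut off at the section boundary. A hedged sketch of how mmu_enable() plausibly continues, assuming coreboot's raw_write_sctlr_el3() and isb() helpers (SCTLR_M enables the MMU, SCTLR_C the data cache, SCTLR_I the instruction cache):

	raw_write_sctlr_el3(sctlr);	/* enable MMU and caches at EL3 */
	isb();				/* ensure the new translation regime takes
					 * effect before further instructions run */
}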