From 487f7f24a5c71e29f28b355819b9cab7d5b90a4a Mon Sep 17 00:00:00 2001 From: Julius Werner Date: Fri, 10 Aug 2018 13:06:00 -0700 Subject: arm64: mmu: Spot check security state for TTB mapping Since commit 372d0ff1d1 (arch/arm64: mmu: Spot check TTB memory attributes), we already check the memory attributes that the TTB region is mapped with to avoid configuration mistakes that cause weird issues (because the MMU walks the page tables with different memory attributes than they were written with). Unfortunately, we only checked cacheability, but the security state attribute is just as important for this (because it is part of the cache tag, meaning that a cache entry created by accessing the non-secure mapping won't be used when trying to read the same address through a secure mapping... and since AArch64 page table walks are cache snooping and we rely on that behavior, this can lead to the MMU not seeing the new page table entries we just wrote). This patch adds the check for security state and cleans up that code a little. Change-Id: I70cda4f76f201b03d69a9ece063a3830b15ac04b Signed-off-by: Julius Werner Reviewed-on: https://review.coreboot.org/28017 Tested-by: build bot (Jenkins) Reviewed-by: Hung-Te Lin Reviewed-by: Paul Menzel --- src/arch/arm64/armv8/mmu.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'src/arch/arm64') diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c index 742f097d0d..67dca48dbc 100644 --- a/src/arch/arm64/armv8/mmu.c +++ b/src/arch/arm64/armv8/mmu.c @@ -219,6 +219,16 @@ static uint64_t get_pte(void *addr) } } +/* Func : assert_correct_ttb_mapping + * Desc : Asserts that mapping for addr matches the access type used by the + * page table walk (i.e. addr is correctly mapped to be part of the TTB). 
*/ +static void assert_correct_ttb_mapping(void *addr) +{ + uint64_t pte = get_pte(addr); + assert(((pte >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK) + == BLOCK_INDEX_MEM_NORMAL && !(pte & BLOCK_NS)); +} + /* Func : mmu_config_range * Desc : This function repeatedly calls init_xlat_table with the base * address. Based on size returned from init_xlat_table, base_addr is updated @@ -310,11 +320,8 @@ void mmu_restore_context(const struct mmu_context *mmu_context) void mmu_enable(void) { - if (((get_pte(_ttb) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK) - != BLOCK_INDEX_MEM_NORMAL || - ((get_pte(_ettb - 1) >> BLOCK_INDEX_SHIFT) & BLOCK_INDEX_MASK) - != BLOCK_INDEX_MEM_NORMAL) - die("TTB memory type must match TCR (normal, cacheable)!"); + assert_correct_ttb_mapping(_ttb); + assert_correct_ttb_mapping(_ettb - 1); uint32_t sctlr = raw_read_sctlr_el3(); sctlr |= SCTLR_C | SCTLR_M | SCTLR_I; -- cgit v1.2.3