From 0fd3e79d0d9d3e5607a57a899333ddb1ef6a927e Mon Sep 17 00:00:00 2001
From: Jimmy Huang
Date: Mon, 13 Apr 2015 20:28:38 +0800
Subject: libpayload arm64: update mmu translation table granule size, logic and macros

1. change mmu granule size from 64KB to 4KB
2. correct level 1 translation table creation logic
3. automatically calculate granule size related macros

BRANCH=none
BUG=none
TEST=boot to kernel on oak board

Change-Id: Ic62c7863dff53f566b82b68ff1d1ad9ec5d0698d
Signed-off-by: Patrick Georgi
Original-Commit-Id: e5de7d942e42a8202fb879ce64b871864b1b9d38
Original-Change-Id: I78d7838921fa82a670e18ddc2de6d766dc7a2146
Original-Signed-off-by: Jimmy Huang
Original-Reviewed-on: https://chromium-review.googlesource.com/266010
Original-Reviewed-by: Aaron Durbin
Original-Tested-by: Yidi Lin
Original-Commit-Queue: Yidi Lin
Reviewed-on: http://review.coreboot.org/10010
Reviewed-by: Stefan Reinauer
Tested-by: build bot (Jenkins)
---
 payloads/libpayload/arch/arm64/mmu.c | 29 +++++++++++++++++++++--------
 1 file changed, 21 insertions(+), 8 deletions(-)

(limited to 'payloads/libpayload/arch')

diff --git a/payloads/libpayload/arch/arm64/mmu.c b/payloads/libpayload/arch/arm64/mmu.c
index 204412efd5..f0781f5ec1 100644
--- a/payloads/libpayload/arch/arm64/mmu.c
+++ b/payloads/libpayload/arch/arm64/mmu.c
@@ -200,12 +200,24 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 
 	/*
 	 * L1 table lookup
-	 * If VA has bits more than 41, lookup starts at L1
+	 * If VA has bits more than L2 can resolve, lookup starts at L1
+	 * Assumption: we don't need L0 table in coreboot
 	 */
-	if (l1_index) {
-		table = get_next_level_table(&table[l1_index]);
-		if (!table)
-			return 0;
+	if (BITS_PER_VA > L1_ADDR_SHIFT) {
+		if ((size >= L1_XLAT_SIZE) &&
+		    IS_ALIGNED(base_addr, (1UL << L1_ADDR_SHIFT))) {
+			/* If block address is aligned and size is greater than
+			 * or equal to size addressed by each L1 entry, we can
+			 * directly store a block desc */
+			desc = base_addr | BLOCK_DESC | attr;
+			table[l1_index] = desc;
+			/* L2 lookup is not required */
+			return L1_XLAT_SIZE;
+		} else {
+			table = get_next_level_table(&table[l1_index]);
+			if (!table)
+				return 0;
+		}
 	}
 
 	/*
@@ -213,10 +225,11 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 	 * If lookup was performed at L1, L2 table addr is obtained from L1 desc
 	 * else, lookup starts at ttbr address
 	 */
-	if (!l3_index && (size >= L2_XLAT_SIZE)) {
+	if ((size >= L2_XLAT_SIZE) &&
+	    IS_ALIGNED(base_addr, (1UL << L2_ADDR_SHIFT))) {
 		/*
 		 * If block address is aligned and size is greater than or equal
-		 * to 512MiB i.e. size addressed by each L2 entry, we can
+		 * to size addressed by each L2 entry, we can
 		 * directly store a block desc */
 		desc = base_addr | BLOCK_DESC | attr;
 		table[l2_index] = desc;
@@ -369,7 +382,7 @@ void mmu_enable(void)
 
 	/* Initialize TCR flags */
 	raw_write_tcr_current(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
-			      TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
+			      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
			      TCR_TBI_USED);
 
	/* Initialize TTBR */
--
cgit v1.2.3
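
Note on item (3) of the commit message: the macro changes themselves presumably live in the arm64 MMU header, which is outside the 'payloads/libpayload/arch' path shown in this diff. Below is a minimal sketch of how such granule-size-related macros can be derived from a single granule-size definition, assuming the standard AArch64 4KB-granule translation layout; only the macro names referenced by the diff (BITS_PER_VA, L1_ADDR_SHIFT, L2_ADDR_SHIFT, L1_XLAT_SIZE, L2_XLAT_SIZE) are taken from it, and the exact definitions in the real header may differ.

/*
 * Illustrative sketch only -- not the actual libpayload header.
 * Derive the per-level shifts and block sizes from the granule size
 * instead of hard-coding them, so switching granules only touches
 * GRANULE_SIZE_SHIFT.
 */
#define GRANULE_SIZE_SHIFT	12	/* 4KB granule */
#define GRANULE_SIZE		(1UL << GRANULE_SIZE_SHIFT)

/* A 4KB table holds 512 8-byte descriptors, so each level resolves 9 VA bits. */
#define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)

/* Lowest VA bit indexed at each level. */
#define L3_ADDR_SHIFT		(GRANULE_SIZE_SHIFT)				/* 12 */
#define L2_ADDR_SHIFT		(L3_ADDR_SHIFT + BITS_RESOLVED_PER_LVL)		/* 21 */
#define L1_ADDR_SHIFT		(L2_ADDR_SHIFT + BITS_RESOLVED_PER_LVL)		/* 30 */

/* Size mapped by one descriptor at each level:
 * L1 block = 1GB, L2 block = 2MB, L3 page = 4KB. */
#define L1_XLAT_SIZE		(1UL << L1_ADDR_SHIFT)
#define L2_XLAT_SIZE		(1UL << L2_ADDR_SHIFT)
#define L3_XLAT_SIZE		(1UL << L3_ADDR_SHIFT)

With these derived values, the "BITS_PER_VA > L1_ADDR_SHIFT" test in the patch decides whether an L1 table is needed at all, and the L1/L2 block fast paths fire only when the region is aligned to and at least as large as the corresponding block size.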