author     Julius Werner <jwerner@chromium.org>    2015-10-07 18:38:24 -0700
committer  Julius Werner <jwerner@chromium.org>    2015-11-11 05:07:58 +0100
commit     fe4cbf1167fcb27ec332a2efe16297705ca07359 (patch)
tree       31ba9883a29574842f33d9fb073c979437d48cb4 /src/arch/arm64/include
parent     03a0a6517210b4f53082a499df2a7e743ae7452e (diff)
arm64: mmu: Make page table manipulation work across stages
In order to have a proper runtime-modifiable page table API (e.g. to remap DRAM after it was initialized), we need to remove any external bookkeeping kept in global variables (which do not persist across stages) from the MMU code. This patch implements this in a similar way as it has recently been done for ARM32 (marking free table slots with a special sentinel value in the first PTE that cannot occur as part of a normal page table).

Since this requires the page table buffer to be known at compile time, we have to remove the option of passing it to mmu_init() at runtime (which I already kinda deprecated before). The existing Tegra chipsets that still used it are switched to instead define it in memlayout in a minimally invasive change. This might not be the best way to design this overall (I think we should probably just throw the tables into SRAM like on all other platforms), but I don't have a Tegra system to test, so I'd rather keep this change low impact and leave the major redesign for later.

Also inlined some single-use one-liner functions in mmu.c that I felt confused things more than they cleared up, and fixed an (apparently harmless?) issue with forgetting to mask out the XN page attribute bit when casting a table descriptor to a pointer.

BRANCH=None
BUG=None
TEST=Compiled Ryu and Smaug. Booted Oak.

Change-Id: Iad71f97f5ec4b1fc981dbc8ff1dc88d96c8ee55a
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: http://review.coreboot.org/12075
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
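The sentinel scheme described above can be sketched in C as follows. This is a minimal illustration, not the patch's actual mmu.c code; the helper name is hypothetical, and _ttb/_ettb are assumed to be the symbols produced by the new TTB() memlayout region:

#include <stdint.h>
#include <string.h>

#define GRANULE_SIZE	4096
#define UNUSED_DESC	0x6EbAAD0BBADbA6E0ULL	/* sentinel value from mmu.h below */

extern uint8_t _ttb[], _ettb[];	/* TTB region bounds, assumed from memlayout */

/* Scan the TTB region for a 4K slot whose first PTE still holds the sentinel,
 * then claim it. Because the free/used state lives in the tables themselves,
 * no global bookkeeping has to survive across stages. */
static uint64_t *hypothetical_alloc_table(void)
{
	uint8_t *slot;

	for (slot = _ttb; slot < _ettb; slot += GRANULE_SIZE) {
		uint64_t *table = (uint64_t *)slot;
		if (table[0] == UNUSED_DESC) {
			memset(table, 0, GRANULE_SIZE);	/* all entries invalid */
			return table;
		}
	}
	return NULL;	/* TTB region exhausted */
}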
Diffstat (limited to 'src/arch/arm64/include')
-rw-r--r--  src/arch/arm64/include/arch/memlayout.h    |  4
-rw-r--r--  src/arch/arm64/include/armv8/arch/cache.h  |  6
-rw-r--r--  src/arch/arm64/include/armv8/arch/mmu.h    | 20
3 files changed, 17 insertions(+), 13 deletions(-)
diff --git a/src/arch/arm64/include/arch/memlayout.h b/src/arch/arm64/include/arch/memlayout.h
index 6f2dae3bb3..ea4a1ba80c 100644
--- a/src/arch/arm64/include/arch/memlayout.h
+++ b/src/arch/arm64/include/arch/memlayout.h
@@ -18,7 +18,9 @@
#ifndef __ARCH_MEMLAYOUT_H
#define __ARCH_MEMLAYOUT_H
-/* TODO: add SRAM TTB region and figure out the correct size/alignment for it */
+#define TTB(addr, size) \
+ REGION(ttb, addr, size, 4K) \
+ _ = ASSERT(size % 4K == 0, "TTB size must be divisible by 4K!");
/* ARM64 stacks need 16-byte alignment. The ramstage will set up its own stacks
* in BSS, so this is only used for the SRAM stages. */
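For illustration, a hypothetical SoC memlayout.ld fragment using the new macro (addresses and sizes are made up; SRAM_START/SRAM_END/STACK are assumed to be the usual coreboot memlayout macros):

SRAM_START(0x40000000)
TTB(0x40000000, 32K)	/* 4K-aligned, multiple of 4K, holds the page tables */
STACK(0x40008000, 8K)
SRAM_END(0x40040000)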
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 5e2a4a186b..64afd62fb6 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -77,12 +77,6 @@ void flush_dcache_louis(int op_type);
/* returns number of bytes per cache line */
unsigned int dcache_line_bytes(void);
-/* dcache and MMU disable */
-void dcache_mmu_disable(void);
-
-/* dcache and MMU enable */
-void dcache_mmu_enable(void);
-
/* perform all icache/dcache maintenance needed after loading new code */
void cache_sync_instructions(void);
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index 89b80733f2..3edb76c5a4 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -16,7 +16,7 @@
#ifndef __ARCH_ARM64_MMU_H__
#define __ARCH_ARM64_MMU_H__
-#include <memrange.h>
+#include <types.h>
/* Memory attributes for mmap regions
* These attributes act as tag values for memrange regions
@@ -43,6 +43,7 @@
#define BLOCK_DESC 0x1
#define TABLE_DESC 0x3
#define PAGE_DESC 0x3
+#define DESC_MASK 0x3
/* Block descriptor */
#define BLOCK_NS (1 << 5)
@@ -60,6 +61,11 @@
#define BLOCK_SH_OUTER_SHAREABLE (2 << BLOCK_SH_SHIFT)
#define BLOCK_SH_INNER_SHAREABLE (3 << BLOCK_SH_SHIFT)
+/* Sentinel descriptor to mark first PTE of an unused table. It must be a value
+ * that cannot occur naturally as part of a page table. (Bits [1:0] = 0b00 makes
+ * this an unmapped page, but some page attribute bits are still set.) */
+#define UNUSED_DESC 0x6EbAAD0BBADbA6E0
+
/* XLAT Table Init Attributes */
#define VA_START 0x0
@@ -67,7 +73,7 @@
/* Granule size of 4KB is being used */
#define GRANULE_SIZE_SHIFT 12
#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
-#define XLAT_TABLE_MASK (~(0UL) << GRANULE_SIZE_SHIFT)
+#define XLAT_ADDR_MASK ((1UL << BITS_PER_VA) - GRANULE_SIZE)
#define GRANULE_SIZE_MASK ((1 << GRANULE_SIZE_SHIFT) - 1)
#define BITS_RESOLVED_PER_LVL (GRANULE_SIZE_SHIFT - 3)
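The renamed XLAT_ADDR_MASK is what makes it safe to turn a table descriptor back into a pointer: it strips the low descriptor-type bits and any attribute bits above the VA range (such as XN), which the old code forgot to do. A short sketch of that use, with a hypothetical helper name:

/* Extract the next-level table address from a table descriptor, dropping
 * type and attribute bits so the result can be dereferenced directly. */
static uint64_t *hypothetical_table_ptr(uint64_t desc)
{
	return (uint64_t *)(desc & XLAT_ADDR_MASK);
}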
@@ -142,11 +148,13 @@
#define TCR_TBI_USED (0x0 << TCR_TBI_SHIFT)
#define TCR_TBI_IGNORED (0x1 << TCR_TBI_SHIFT)
-/* Initialize the MMU TTB tables provide the range sequence and ttb buffer. */
-void mmu_init(struct memranges *ranges, uint64_t *ttb, uint64_t ttb_size);
-/* Enable the mmu based on previous mmu_init(). */
-void mmu_enable(void);
+/* Initialize MMU registers and page table memory region. */
+void mmu_init(void);
/* Change a memory type for a range of bytes at runtime. */
void mmu_config_range(void *start, size_t size, uint64_t tag);
+/* Enable the MMU (need previous mmu_init() and configured ranges!). */
+void mmu_enable(void);
+/* Disable the MMU (which also disables dcache but not icache). */
+void mmu_disable(void);
#endif // __ARCH_ARM64_MMU_H__
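Putting the reworked API together, a minimal usage sketch (the MA_* attribute tags are assumed to be the ones defined earlier in this header; the _dram symbol, dram_size parameter, and address constants are illustrative only):

#include <arch/mmu.h>
#include <stddef.h>
#include <stdint.h>

extern uint8_t _dram[];	/* assumed DRAM base symbol, for illustration */

static void hypothetical_mmu_setup(size_t dram_size)
{
	mmu_init();	/* program MMU registers, reset the TTB region */
	/* Tag MMIO as device memory and DRAM as cacheable normal memory;
	 * the MA_* tags are an assumption, not part of this patch. */
	mmu_config_range((void *)0, 0x80000000UL, MA_DEV | MA_S | MA_RW);
	mmu_config_range(_dram, dram_size, MA_MEM | MA_NS | MA_RW);
	mmu_enable();	/* only valid after mmu_init() and configured ranges */
}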