Diffstat (limited to 'src/arch/arm64')
-rw-r--r--   src/arch/arm64/arm_tf.c                     |   3
-rw-r--r--   src/arch/arm64/armv8/bootblock_simple.c     |   2
-rw-r--r--   src/arch/arm64/armv8/cache.c                |  25
-rw-r--r--   src/arch/arm64/armv8/mmu.c                  | 133
-rw-r--r--   src/arch/arm64/include/arch/memlayout.h     |   4
-rw-r--r--   src/arch/arm64/include/armv8/arch/cache.h   |   6
-rw-r--r--   src/arch/arm64/include/armv8/arch/mmu.h     |  20
7 files changed, 77 insertions, 116 deletions
diff --git a/src/arch/arm64/arm_tf.c b/src/arch/arm64/arm_tf.c
index b8f0f5f5b2..6435fc74d5 100644
--- a/src/arch/arm64/arm_tf.c
+++ b/src/arch/arm64/arm_tf.c
@@ -15,6 +15,7 @@
 
 #include <arch/cache.h>
 #include <arch/lib_helpers.h>
+#include <arch/mmu.h>
 #include <arch/transition.h>
 #include <arm_tf.h>
 #include <assert.h>
@@ -83,7 +84,7 @@ void arm_tf_run_bl31(u64 payload_entry, u64 payload_arg0, u64 payload_spsr)
 	dcache_clean_by_mva(&bl31_params, sizeof(bl31_params));
 	dcache_clean_by_mva(&bl33_ep_info, sizeof(bl33_ep_info));
 
-	dcache_mmu_disable();
+	mmu_disable();
 	bl31_entry(&bl31_params, bl31_plat_params);
 	die("BL31 returned!");
 }
diff --git a/src/arch/arm64/armv8/bootblock_simple.c b/src/arch/arm64/armv8/bootblock_simple.c
index 2a8a3fa2cd..e2002f2a3a 100644
--- a/src/arch/arm64/armv8/bootblock_simple.c
+++ b/src/arch/arm64/armv8/bootblock_simple.c
@@ -36,7 +36,7 @@ void main(void)
 {
 	/* Globally disable MMU, caches, and branch prediction (these should
 	 * be disabled by default on reset) */
-	dcache_mmu_disable();
+	mmu_disable();
 
 	/*
 	 * Re-enable icache and branch prediction. MMU and dcache will be
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
index 95f2890ff0..4f91de02f2 100644
--- a/src/arch/arm64/armv8/cache.c
+++ b/src/arch/arm64/armv8/cache.c
@@ -119,37 +119,12 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
 	dcache_op_va(addr, len, OP_DCIVAC);
 }
 
-/*
- * CAUTION: This implementation assumes that coreboot never uses non-identity
- * page tables for pages containing executed code. If you ever want to violate
- * this assumption, have fun figuring out the associated problems on your own.
- */
-void dcache_mmu_disable(void)
-{
-	uint32_t sctlr;
-
-	flush_dcache_all(DCCISW);
-	sctlr = raw_read_sctlr_current();
-	sctlr &= ~(SCTLR_C | SCTLR_M);
-	raw_write_sctlr_current(sctlr);
-}
-
-void dcache_mmu_enable(void)
-{
-	uint32_t sctlr;
-
-	sctlr = raw_read_sctlr_current();
-	sctlr |= SCTLR_C | SCTLR_M;
-	raw_write_sctlr_current(sctlr);
-}
-
 void cache_sync_instructions(void)
 {
 	flush_dcache_all(DCCISW);	/* includes trailing DSB (in assembly) */
 	icache_invalidate_all();	/* includdes leading DSB and trailing ISB. */
 }
 
-
 /*
  * For each segment of a program loaded this function is called
  * to invalidate caches for the addresses of the loaded segment
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 5c95e96581..5d957d454f 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -31,18 +31,18 @@
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
+#include <symbols.h>
 
 #include <console/console.h>
-#include <memrange.h>
 #include <arch/mmu.h>
 #include <arch/lib_helpers.h>
 #include <arch/cache.h>
+#include <arch/cache_helpers.h>
 
-/* Maximum number of XLAT Tables available based on ttb buffer size */
-static unsigned int max_tables;
-/* Address of ttb buffer */
-static uint64_t *xlat_addr;
-static int free_idx;
+/* This just caches the next free table slot (okay to do since they fill up from
+ * bottom to top and can never be freed up again). It will reset to its initial
+ * value on stage transition, so we still need to check it for UNUSED_DESC. */
+static uint64_t *next_free_table = (void *)_ttb;
 
 static void print_tag(int level, uint64_t tag)
 {
@@ -82,48 +82,37 @@ static uint64_t get_block_attr(unsigned long tag)
 	return attr;
 }
 
-/* Func : table_desc_valid
- * Desc : Check if a table entry contains valid desc
- */
-static uint64_t table_desc_valid(uint64_t desc)
-{
-	return((desc & TABLE_DESC) == TABLE_DESC);
-}
-
 /* Func : setup_new_table
  * Desc : Get next free table from TTB and set it up to match old parent entry.
  */
 static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
 {
-	uint64_t *new, *entry;
-
-	assert(free_idx < max_tables);
+	while (next_free_table[0] != UNUSED_DESC) {
+		next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
+		if (_ettb - (u8 *)next_free_table <= 0)
+			die("Ran out of page table space!");
+	}
 
-	new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
-	free_idx++;
+	void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
+	printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
+	       " table @%p\n", frame_base, frame_base +
+	       (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);
 
 	if (!desc) {
-		memset(new, 0, GRANULE_SIZE);
+		memset(next_free_table, 0, GRANULE_SIZE);
 	} else {
 		/* Can reuse old parent entry, but may need to adjust type. */
 		if (xlat_size == L3_XLAT_SIZE)
 			desc |= PAGE_DESC;
 
-		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
-		     entry++, desc += xlat_size)
-			*entry = desc;
+		int i = 0;
+		for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
+			next_free_table[i] = desc;
+			desc += xlat_size;
+		}
 	}
 
-	return new;
-}
-
-/* Func : get_table_from_desc
- * Desc : Get next level table address from table descriptor
- */
-static uint64_t *get_table_from_desc(uint64_t desc)
-{
-	uint64_t *ptr = (uint64_t*)(desc & XLAT_TABLE_MASK);
-	return ptr;
+	return next_free_table;
 }
 
 /* Func: get_next_level_table
@@ -134,12 +123,12 @@ static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
 {
 	uint64_t desc = *ptr;
 
-	if (!table_desc_valid(desc)) {
+	if ((desc & DESC_MASK) != TABLE_DESC) {
 		uint64_t *new_table = setup_new_table(desc, xlat_size);
 		desc = ((uint64_t)new_table) | TABLE_DESC;
 		*ptr = desc;
 	}
-	return get_table_from_desc(desc);
+	return (uint64_t *)(desc & XLAT_ADDR_MASK);
 }
 
 /* Func : init_xlat_table
@@ -156,7 +145,7 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
 	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
 	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
-	uint64_t *table = xlat_addr;
+	uint64_t *table = (uint64_t *)_ttb;
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
 
@@ -221,11 +210,9 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)
 	uint64_t base_addr = (uintptr_t)start;
 	uint64_t temp_size = size;
 
-	if (!IS_ENABLED(CONFIG_SMP)) {
-		printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
-		       start, start + size);
-		print_tag(BIOS_INFO, tag);
-	}
+	printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
+	       start, start + size);
+	print_tag(BIOS_INFO, tag);
 
 	sanity_check(base_addr, temp_size);
 
@@ -241,56 +228,50 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)
 }
 
 /* Func : mmu_init
- * Desc : Initialize mmu based on the mmap_ranges passed. ttb_buffer is used as
- * the base address for xlat tables. ttb_size defines the max number of tables
- * that can be used
+ * Desc : Initialize MMU registers and page table memory region. This must be
+ * called exactly ONCE PER BOOT before trying to configure any mappings.
  */
-void mmu_init(struct memranges *mmap_ranges,
-	      uint64_t *ttb_buffer,
-	      uint64_t ttb_size)
+void mmu_init(void)
 {
-	struct range_entry *mmap_entry;
-
-	sanity_check((uint64_t)ttb_buffer, ttb_size);
+	/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
+	uint64_t *table = (uint64_t *)_ttb;
+	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
+		table[0] = UNUSED_DESC;
 
-	memset((void*)ttb_buffer, 0, GRANULE_SIZE);
-	max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
-	xlat_addr = ttb_buffer;
-	free_idx = 1;
+	/* Initialize the root table (L1) to be completely unmapped. */
+	uint64_t *root = setup_new_table(INVALID_DESC, L1_XLAT_SIZE);
+	assert((u8 *)root == _ttb);
 
-	if (mmap_ranges)
-		memranges_each_entry(mmap_entry, mmap_ranges) {
-			mmu_config_range((void *)range_entry_base(mmap_entry),
-					 range_entry_size(mmap_entry),
-					 range_entry_tag(mmap_entry));
-		}
-}
-
-void mmu_enable(void)
-{
-	uint32_t sctlr;
+	/* Initialize TTBR */
+	raw_write_ttbr0_el3((uintptr_t)root);
 
 	/* Initialize MAIR indices */
 	raw_write_mair_el3(MAIR_ATTRIBUTES);
 
-	/* Invalidate TLBs */
-	tlbiall_el3();
-
 	/* Initialize TCR flags */
 	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
 			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB |
 			  TCR_TBI_USED);
+}
 
-	/* Initialize TTBR */
-	raw_write_ttbr0_el3((uintptr_t)xlat_addr);
-
-	/* Ensure system register writes are committed before enabling MMU */
-	isb();
-
-	/* Enable MMU */
-	sctlr = raw_read_sctlr_el3();
+void mmu_enable(void)
+{
+	uint32_t sctlr = raw_read_sctlr_el3();
 	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
 	raw_write_sctlr_el3(sctlr);
+	isb();
+}
+
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
+void mmu_disable(void)
+{
+	flush_dcache_all(DCCISW);
+	uint32_t sctlr = raw_read_sctlr_el3();
+	sctlr &= ~(SCTLR_C | SCTLR_M);
+	raw_write_sctlr_el3(sctlr);
 
 	isb();
 }
diff --git a/src/arch/arm64/include/arch/memlayout.h b/src/arch/arm64/include/arch/memlayout.h
index 6f2dae3bb3..ea4a1ba80c 100644
--- a/src/arch/arm64/include/arch/memlayout.h
+++ b/src/arch/arm64/include/arch/memlayout.h
@@ -18,7 +18,9 @@
 #ifndef __ARCH_MEMLAYOUT_H
 #define __ARCH_MEMLAYOUT_H
 
-/* TODO: add SRAM TTB region and figure out the correct size/alignment for it */
+#define TTB(addr, size) \
+	REGION(ttb, addr, size, 4K) \
+	_ = ASSERT(size % 4K == 0, "TTB size must be divisible by 4K!");
 
 /* ARM64 stacks need 16-byte alignment. The ramstage will set up its own stacks
  * in BSS, so this is only used for the SRAM stages. */
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 5e2a4a186b..64afd62fb6 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -77,12 +77,6 @@ void flush_dcache_louis(int op_type);
 /* returns number of bytes per cache line */
 unsigned int dcache_line_bytes(void);
 
-/* dcache and MMU disable */
-void dcache_mmu_disable(void);
-
-/* dcache and MMU enable */
-void dcache_mmu_enable(void);
-
 /* perform all icache/dcache maintenance needed after loading new code */
 void cache_sync_instructions(void);
 
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index 89b80733f2..3edb76c5a4 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -16,7 +16,7 @@
 #ifndef __ARCH_ARM64_MMU_H__
 #define __ARCH_ARM64_MMU_H__
 
-#include <memrange.h>
+#include <types.h>
 
 /* Memory attributes for mmap regions
  * These attributes act as tag values for memrange regions
@@ -43,6 +43,7 @@
 #define BLOCK_DESC		0x1
 #define TABLE_DESC		0x3
 #define PAGE_DESC		0x3
+#define DESC_MASK		0x3
 
 /* Block descriptor */
 #define BLOCK_NS		(1 << 5)
@@ -60,6 +61,11 @@
 #define BLOCK_SH_OUTER_SHAREABLE	(2 << BLOCK_SH_SHIFT)
 #define BLOCK_SH_INNER_SHAREABLE	(3 << BLOCK_SH_SHIFT)
 
+/* Sentinel descriptor to mark first PTE of an unused table. It must be a value
+ * that cannot occur naturally as part of a page table. (Bits [1:0] = 0b00 makes
+ * this an unmapped page, but some page attribute bits are still set.) */
+#define UNUSED_DESC		0x6EbAAD0BBADbA6E0
+
 /* XLAT Table Init Attributes */
 
 #define VA_START		0x0
@@ -67,7 +73,7 @@
 /* Granule size of 4KB is being used */
 #define GRANULE_SIZE_SHIFT	12
 #define GRANULE_SIZE		(1 << GRANULE_SIZE_SHIFT)
-#define XLAT_TABLE_MASK		(~(0UL) << GRANULE_SIZE_SHIFT)
+#define XLAT_ADDR_MASK		((1UL << BITS_PER_VA) - GRANULE_SIZE)
 #define GRANULE_SIZE_MASK	((1 << GRANULE_SIZE_SHIFT) - 1)
 
 #define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)
@@ -142,11 +148,13 @@
 #define TCR_TBI_USED		(0x0 << TCR_TBI_SHIFT)
 #define TCR_TBI_IGNORED		(0x1 << TCR_TBI_SHIFT)
 
-/* Initialize the MMU TTB tables provide the range sequence and ttb buffer. */
-void mmu_init(struct memranges *ranges, uint64_t *ttb, uint64_t ttb_size);
-/* Enable the mmu based on previous mmu_init(). */
-void mmu_enable(void);
+/* Initialize MMU registers and page table memory region. */
+void mmu_init(void);
 /* Change a memory type for a range of bytes at runtime. */
 void mmu_config_range(void *start, size_t size, uint64_t tag);
+/* Enable the MMU (need previous mmu_init() and configured ranges!). */
+void mmu_enable(void);
+/* Disable the MMU (which also disables dcache but not icache). */
+void mmu_disable(void);
 
 #endif // __ARCH_ARM64_MMU_H__
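The net effect on callers: instead of building a memranges list and handing over a ttb buffer, each stage now calls mmu_init() once, describes its address space with mmu_config_range(), and finally calls mmu_enable(). Below is a minimal sketch of such a caller — the soc_mmu_setup() name and the MMIO/DRAM base addresses and sizes are made up for illustration; the MA_* attribute tags and the _ttb/_ettb symbols are the existing ones from mmu.h and symbols.h:

#include <arch/mmu.h>
#include <symbols.h>

/* Hypothetical SoC hook; all base addresses and sizes are illustrative only. */
static void soc_mmu_setup(void)
{
	/* Set up TTBR/MAIR/TCR and mark every table slot in the ttb region unused. */
	mmu_init();

	/* Peripheral MMIO window: device memory. */
	mmu_config_range((void *)0x10000000, 0x10000000, MA_DEV | MA_S | MA_RW);

	/* DRAM: normal cacheable memory. */
	mmu_config_range((void *)0x80000000, 0x40000000, MA_MEM | MA_NS | MA_RW);

	/* Map the page tables themselves so later mmu_config_range() calls
	 * can still update them after the MMU has been turned on. */
	mmu_config_range(_ttb, _ettb - _ttb, MA_MEM | MA_S | MA_RW);

	/* Enable only after mmu_init() and all mmu_config_range() calls. */
	mmu_enable();
}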