Diffstat (limited to 'src')
-rw-r--r--  src/arch/arm64/arm_tf.c                                   |   3
-rw-r--r--  src/arch/arm64/armv8/bootblock_simple.c                   |   2
-rw-r--r--  src/arch/arm64/armv8/cache.c                              |  25
-rw-r--r--  src/arch/arm64/armv8/mmu.c                                | 133
-rw-r--r--  src/arch/arm64/include/arch/memlayout.h                   |   4
-rw-r--r--  src/arch/arm64/include/armv8/arch/cache.h                 |   6
-rw-r--r--  src/arch/arm64/include/armv8/arch/mmu.h                   |  20
-rw-r--r--  src/soc/nvidia/tegra132/include/soc/addressmap.h          |   8
-rw-r--r--  src/soc/nvidia/tegra132/include/soc/memlayout.ld          |   1
-rw-r--r--  src/soc/nvidia/tegra132/include/soc/memlayout_vboot2.ld   |   1
-rw-r--r--  src/soc/nvidia/tegra132/include/soc/mmu_operations.h      |   3
-rw-r--r--  src/soc/nvidia/tegra132/mmu_operations.c                  |  43
-rw-r--r--  src/soc/nvidia/tegra210/include/soc/addressmap.h          |   7
-rw-r--r--  src/soc/nvidia/tegra210/include/soc/memlayout.ld          |   1
-rw-r--r--  src/soc/nvidia/tegra210/include/soc/memlayout_vboot2.ld   |   1
-rw-r--r--  src/soc/nvidia/tegra210/mmu_operations.c                  |  42
16 files changed, 111 insertions, 189 deletions
diff --git a/src/arch/arm64/arm_tf.c b/src/arch/arm64/arm_tf.c
index b8f0f5f5b2..6435fc74d5 100644
--- a/src/arch/arm64/arm_tf.c
+++ b/src/arch/arm64/arm_tf.c
@@ -15,6 +15,7 @@
 #include <arch/cache.h>
 #include <arch/lib_helpers.h>
+#include <arch/mmu.h>
 #include <arch/transition.h>
 #include <arm_tf.h>
 #include <assert.h>
@@ -83,7 +84,7 @@ void arm_tf_run_bl31(u64 payload_entry, u64 payload_arg0, u64 payload_spsr)
 	dcache_clean_by_mva(&bl31_params, sizeof(bl31_params));
 	dcache_clean_by_mva(&bl33_ep_info, sizeof(bl33_ep_info));
 
-	dcache_mmu_disable();
+	mmu_disable();
 	bl31_entry(&bl31_params, bl31_plat_params);
 	die("BL31 returned!");
 }
diff --git a/src/arch/arm64/armv8/bootblock_simple.c b/src/arch/arm64/armv8/bootblock_simple.c
index 2a8a3fa2cd..e2002f2a3a 100644
--- a/src/arch/arm64/armv8/bootblock_simple.c
+++ b/src/arch/arm64/armv8/bootblock_simple.c
@@ -36,7 +36,7 @@ void main(void)
 {
 	/* Globally disable MMU, caches, and branch prediction (these should
 	 * be disabled by default on reset) */
-	dcache_mmu_disable();
+	mmu_disable();
 
 	/*
 	 * Re-enable icache and branch prediction. MMU and dcache will be
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
index 95f2890ff0..4f91de02f2 100644
--- a/src/arch/arm64/armv8/cache.c
+++ b/src/arch/arm64/armv8/cache.c
@@ -119,37 +119,12 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
 	dcache_op_va(addr, len, OP_DCIVAC);
 }
 
-/*
- * CAUTION: This implementation assumes that coreboot never uses non-identity
- * page tables for pages containing executed code. If you ever want to violate
- * this assumption, have fun figuring out the associated problems on your own.
- */
-void dcache_mmu_disable(void)
-{
-	uint32_t sctlr;
-
-	flush_dcache_all(DCCISW);
-	sctlr = raw_read_sctlr_current();
-	sctlr &= ~(SCTLR_C | SCTLR_M);
-	raw_write_sctlr_current(sctlr);
-}
-
-void dcache_mmu_enable(void)
-{
-	uint32_t sctlr;
-
-	sctlr = raw_read_sctlr_current();
-	sctlr |= SCTLR_C | SCTLR_M;
-	raw_write_sctlr_current(sctlr);
-}
-
 void cache_sync_instructions(void)
 {
 	flush_dcache_all(DCCISW);	/* includes trailing DSB (in assembly) */
 	icache_invalidate_all();	/* includes leading DSB and trailing ISB */
 }
 
 /*
  * For each segment of a program loaded this function is called
  * to invalidate caches for the addresses of the loaded segment
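
The dcache_*_by_mva() helpers used above loop over cache lines and issue one maintenance instruction per line. As a rough orientation, here is a stand-alone, AArch64-only sketch of the clean+invalidate pattern; it hard-codes a 64-byte line size instead of deriving it from CTR_EL0 the way coreboot's dcache_line_bytes() does, so treat it as illustrative rather than as the real implementation:

    /* Illustrative sketch, not the coreboot code: clean+invalidate every
     * data cache line covering [addr, addr + len). LINE_SIZE is an
     * assumption; production code reads the line size from CTR_EL0. */
    #include <stddef.h>
    #include <stdint.h>

    #define LINE_SIZE 64

    void dc_civac_range(const void *addr, size_t len)
    {
            uintptr_t line = (uintptr_t)addr & ~(uintptr_t)(LINE_SIZE - 1);
            uintptr_t end = (uintptr_t)addr + len;

            for (; line < end; line += LINE_SIZE)
                    asm volatile("dc civac, %0" : : "r" (line) : "memory");
            asm volatile("dsb sy" : : : "memory");  /* wait for completion */
    }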
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
index 5c95e96581..5d957d454f 100644
--- a/src/arch/arm64/armv8/mmu.c
+++ b/src/arch/arm64/armv8/mmu.c
@@ -31,18 +31,18 @@
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
+#include <symbols.h>
 
 #include <console/console.h>
-#include <memrange.h>
 #include <arch/mmu.h>
 #include <arch/lib_helpers.h>
 #include <arch/cache.h>
+#include <arch/cache_helpers.h>
 
-/* Maximum number of XLAT Tables available based on ttb buffer size */
-static unsigned int max_tables;
-/* Address of ttb buffer */
-static uint64_t *xlat_addr;
-static int free_idx;
+/* This just caches the next free table slot (okay to do since they fill up from
+ * bottom to top and can never be freed up again). It will reset to its initial
+ * value on stage transition, so we still need to check it for UNUSED_DESC.
+ */
+static uint64_t *next_free_table = (void *)_ttb;
 
 static void print_tag(int level, uint64_t tag)
 {
@@ -82,48 +82,37 @@ static uint64_t get_block_attr(unsigned long tag)
 	return attr;
 }
 
-/* Func : table_desc_valid
- * Desc : Check if a table entry contains valid desc
- */
-static uint64_t table_desc_valid(uint64_t desc)
-{
-	return((desc & TABLE_DESC) == TABLE_DESC);
-}
-
 /* Func : setup_new_table
  * Desc : Get next free table from TTB and set it up to match old parent entry.
  */
 static uint64_t *setup_new_table(uint64_t desc, size_t xlat_size)
 {
-	uint64_t *new, *entry;
-
-	assert(free_idx < max_tables);
+	while (next_free_table[0] != UNUSED_DESC) {
+		next_free_table += GRANULE_SIZE/sizeof(*next_free_table);
+		if (_ettb - (u8 *)next_free_table <= 0)
+			die("Ran out of page table space!");
+	}
 
-	new = (uint64_t*)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
-	free_idx++;
+	void *frame_base = (void *)(desc & XLAT_ADDR_MASK);
+	printk(BIOS_DEBUG, "Backing address range [%p:%p) with new page"
+	       " table @%p\n", frame_base, frame_base +
+	       (xlat_size << BITS_RESOLVED_PER_LVL), next_free_table);
 
 	if (!desc) {
-		memset(new, 0, GRANULE_SIZE);
+		memset(next_free_table, 0, GRANULE_SIZE);
 	} else {
 		/* Can reuse old parent entry, but may need to adjust type. */
 		if (xlat_size == L3_XLAT_SIZE)
 			desc |= PAGE_DESC;
 
-		for (entry = new; (u8 *)entry < (u8 *)new + GRANULE_SIZE;
-		     entry++, desc += xlat_size)
-			*entry = desc;
+		int i = 0;
+		for (; i < GRANULE_SIZE/sizeof(*next_free_table); i++) {
+			next_free_table[i] = desc;
+			desc += xlat_size;
+		}
 	}
 
-	return new;
-}
-
-/* Func : get_table_from_desc
- * Desc : Get next level table address from table descriptor
- */
-static uint64_t *get_table_from_desc(uint64_t desc)
-{
-	uint64_t *ptr = (uint64_t*)(desc & XLAT_TABLE_MASK);
-	return ptr;
+	return next_free_table;
 }
 
 /* Func: get_next_level_table
@@ -134,12 +123,12 @@ static uint64_t *get_next_level_table(uint64_t *ptr, size_t xlat_size)
 {
 	uint64_t desc = *ptr;
 
-	if (!table_desc_valid(desc)) {
+	if ((desc & DESC_MASK) != TABLE_DESC) {
 		uint64_t *new_table = setup_new_table(desc, xlat_size);
 		desc = ((uint64_t)new_table) | TABLE_DESC;
 		*ptr = desc;
 	}
-	return get_table_from_desc(desc);
+	return (uint64_t *)(desc & XLAT_ADDR_MASK);
 }
 
 /* Func : init_xlat_table
@@ -156,7 +145,7 @@ static uint64_t init_xlat_table(uint64_t base_addr,
 	uint64_t l1_index = (base_addr & L1_ADDR_MASK) >> L1_ADDR_SHIFT;
 	uint64_t l2_index = (base_addr & L2_ADDR_MASK) >> L2_ADDR_SHIFT;
 	uint64_t l3_index = (base_addr & L3_ADDR_MASK) >> L3_ADDR_SHIFT;
-	uint64_t *table = xlat_addr;
+	uint64_t *table = (uint64_t *)_ttb;
 	uint64_t desc;
 	uint64_t attr = get_block_attr(tag);
@@ -221,11 +210,9 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)
 	uint64_t base_addr = (uintptr_t)start;
 	uint64_t temp_size = size;
 
-	if (!IS_ENABLED(CONFIG_SMP)) {
-		printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
-		       start, start + size);
-		print_tag(BIOS_INFO, tag);
-	}
+	printk(BIOS_INFO, "Mapping address range [%p:%p) as ",
+	       start, start + size);
+	print_tag(BIOS_INFO, tag);
 
 	sanity_check(base_addr, temp_size);
@@ -241,56 +228,50 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)
 }
 
 /* Func : mmu_init
- * Desc : Initialize mmu based on the mmap_ranges passed. ttb_buffer is used as
- * the base address for xlat tables. ttb_size defines the max number of tables
- * that can be used
+ * Desc : Initialize MMU registers and page table memory region. This must be
+ * called exactly ONCE PER BOOT before trying to configure any mappings.
  */
-void mmu_init(struct memranges *mmap_ranges,
-	      uint64_t *ttb_buffer,
-	      uint64_t ttb_size)
+void mmu_init(void)
 {
-	struct range_entry *mmap_entry;
-
-	sanity_check((uint64_t)ttb_buffer, ttb_size);
+	/* Initially mark all table slots unused (first PTE == UNUSED_DESC). */
+	uint64_t *table = (uint64_t *)_ttb;
+	for (; _ettb - (u8 *)table > 0; table += GRANULE_SIZE/sizeof(*table))
+		table[0] = UNUSED_DESC;
 
-	memset((void*)ttb_buffer, 0, GRANULE_SIZE);
-	max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
-	xlat_addr = ttb_buffer;
-	free_idx = 1;
+	/* Initialize the root table (L1) to be completely unmapped. */
+	uint64_t *root = setup_new_table(INVALID_DESC, L1_XLAT_SIZE);
+	assert((u8 *)root == _ttb);
 
-	if (mmap_ranges)
-		memranges_each_entry(mmap_entry, mmap_ranges) {
-			mmu_config_range((void *)range_entry_base(mmap_entry),
-					 range_entry_size(mmap_entry),
-					 range_entry_tag(mmap_entry));
-		}
-}
-
-void mmu_enable(void)
-{
-	uint32_t sctlr;
+	/* Initialize TTBR */
+	raw_write_ttbr0_el3((uintptr_t)root);
 
 	/* Initialize MAIR indices */
 	raw_write_mair_el3(MAIR_ATTRIBUTES);
 
-	/* Invalidate TLBs */
-	tlbiall_el3();
-
 	/* Initialize TCR flags */
 	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
 			  TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_64GB | TCR_TBI_USED);
+}
 
-	/* Initialize TTBR */
-	raw_write_ttbr0_el3((uintptr_t)xlat_addr);
-
-	/* Ensure system register writes are committed before enabling MMU */
-	isb();
-
-	/* Enable MMU */
-	sctlr = raw_read_sctlr_el3();
+void mmu_enable(void)
+{
+	uint32_t sctlr = raw_read_sctlr_el3();
 	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
 	raw_write_sctlr_el3(sctlr);
+	isb();
+}
 
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
+void mmu_disable(void)
+{
+	flush_dcache_all(DCCISW);
+	uint32_t sctlr = raw_read_sctlr_el3();
+	sctlr &= ~(SCTLR_C | SCTLR_M);
+	raw_write_sctlr_el3(sctlr);
 	isb();
 }
diff --git a/src/arch/arm64/include/arch/memlayout.h b/src/arch/arm64/include/arch/memlayout.h
index 6f2dae3bb3..ea4a1ba80c 100644
--- a/src/arch/arm64/include/arch/memlayout.h
+++ b/src/arch/arm64/include/arch/memlayout.h
@@ -18,7 +18,9 @@
 #ifndef __ARCH_MEMLAYOUT_H
 #define __ARCH_MEMLAYOUT_H
 
-/* TODO: add SRAM TTB region and figure out the correct size/alignment for it */
+#define TTB(addr, size) \
+	REGION(ttb, addr, size, 4K) \
+	_ = ASSERT(size % 4K == 0, "TTB size must be divisible by 4K!");
 
 /* ARM64 stacks need 16-byte alignment. The ramstage will set up its own stacks
  * in BSS, so this is only used for the SRAM stages.
  */
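
For orientation, the table-walk code above splits a virtual address into one 9-bit index per level when using the 4KiB granule (BITS_RESOLVED_PER_LVL = 12 - 3 = 9, since each table holds 512 eight-byte descriptors). A minimal, self-contained sketch of that index math (constant names here are illustrative, not coreboot API; only the shift values mirror the real L1/L2/L3_ADDR_SHIFT definitions):

    /* Sketch of the VA-to-index split performed by init_xlat_table(). */
    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE_SHIFT   12                      /* 4KiB pages */
    #define LVL_BITS        (GRANULE_SHIFT - 3)     /* 9 bits per level */
    #define L3_SHIFT        GRANULE_SHIFT           /* VA bits [20:12] */
    #define L2_SHIFT        (L3_SHIFT + LVL_BITS)   /* VA bits [29:21] */
    #define L1_SHIFT        (L2_SHIFT + LVL_BITS)   /* VA bits [32:30] */
    #define IDX(va, shift)  (((va) >> (shift)) & ((1u << LVL_BITS) - 1))

    int main(void)
    {
            uint64_t va = 0x80200000;       /* e.g. the ramstage base above */

            /* Prints "L1=2 L2=1 L3=0" for this address. */
            printf("L1=%llu L2=%llu L3=%llu\n",
                   (unsigned long long)IDX(va, L1_SHIFT),
                   (unsigned long long)IDX(va, L2_SHIFT),
                   (unsigned long long)IDX(va, L3_SHIFT));
            return 0;
    }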
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
index 5e2a4a186b..64afd62fb6 100644
--- a/src/arch/arm64/include/armv8/arch/cache.h
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -77,12 +77,6 @@ void flush_dcache_louis(int op_type);
 /* returns number of bytes per cache line */
 unsigned int dcache_line_bytes(void);
 
-/* dcache and MMU disable */
-void dcache_mmu_disable(void);
-
-/* dcache and MMU enable */
-void dcache_mmu_enable(void);
-
 /* perform all icache/dcache maintenance needed after loading new code */
 void cache_sync_instructions(void);
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
index 89b80733f2..3edb76c5a4 100644
--- a/src/arch/arm64/include/armv8/arch/mmu.h
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -16,7 +16,7 @@
 #ifndef __ARCH_ARM64_MMU_H__
 #define __ARCH_ARM64_MMU_H__
 
-#include <memrange.h>
+#include <types.h>
 
 /* Memory attributes for mmap regions
  * These attributes act as tag values for memrange regions
@@ -43,6 +43,7 @@
 #define BLOCK_DESC		0x1
 #define TABLE_DESC		0x3
 #define PAGE_DESC		0x3
+#define DESC_MASK		0x3
 
 /* Block descriptor */
 #define BLOCK_NS		(1 << 5)
@@ -60,6 +61,11 @@
 #define BLOCK_SH_OUTER_SHAREABLE	(2 << BLOCK_SH_SHIFT)
 #define BLOCK_SH_INNER_SHAREABLE	(3 << BLOCK_SH_SHIFT)
 
+/* Sentinel descriptor to mark first PTE of an unused table. It must be a value
+ * that cannot occur naturally as part of a page table. (Bits [1:0] = 0b00 make
+ * this an unmapped page, but some page attribute bits are still set.) */
+#define UNUSED_DESC		0x6EbAAD0BBADbA6E0
+
 /* XLAT Table Init Attributes */
 
 #define VA_START		0x0
@@ -67,7 +73,7 @@
 /* Granule size of 4KB is being used */
 #define GRANULE_SIZE_SHIFT	12
 #define GRANULE_SIZE		(1 << GRANULE_SIZE_SHIFT)
-#define XLAT_TABLE_MASK		(~(0UL) << GRANULE_SIZE_SHIFT)
+#define XLAT_ADDR_MASK		((1UL << BITS_PER_VA) - GRANULE_SIZE)
 #define GRANULE_SIZE_MASK	((1 << GRANULE_SIZE_SHIFT) - 1)
 
 #define BITS_RESOLVED_PER_LVL	(GRANULE_SIZE_SHIFT - 3)
@@ -142,11 +148,13 @@
 #define TCR_TBI_USED		(0x0 << TCR_TBI_SHIFT)
 #define TCR_TBI_IGNORED		(0x1 << TCR_TBI_SHIFT)
 
-/* Initialize the MMU TTB tables provide the range sequence and ttb buffer. */
-void mmu_init(struct memranges *ranges, uint64_t *ttb, uint64_t ttb_size);
-/* Enable the mmu based on previous mmu_init(). */
-void mmu_enable(void);
+/* Initialize MMU registers and page table memory region. */
+void mmu_init(void);
 /* Change a memory type for a range of bytes at runtime. */
 void mmu_config_range(void *start, size_t size, uint64_t tag);
+/* Enable the MMU (need previous mmu_init() and configured ranges!). */
+void mmu_enable(void);
+/* Disable the MMU (which also disables dcache but not icache). */
+void mmu_disable(void);
 
 #endif // __ARCH_ARM64_MMU_H__
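
One subtle point in the mmu.h hunk: the old XLAT_TABLE_MASK kept every bit above the granule, while the new XLAT_ADDR_MASK also clears the high attribute bits of a descriptor, leaving only the output-address field. A hypothetical compile-time check, not part of the patch (BITS_PER_VA = 33 is an assumption matching the TCR_TOSZ/TCR_PS_64GB setup in mmu.c; the patch's 1UL is 64 bits wide on the LP64 arm64 target, mirrored here as ULL):

    #define SKETCH_GRANULE_SIZE     (1ULL << 12)
    #define SKETCH_BITS_PER_VA      33
    #define SKETCH_XLAT_ADDR_MASK \
            ((1ULL << SKETCH_BITS_PER_VA) - SKETCH_GRANULE_SIZE)

    /* The mask keeps exactly the 4KiB-aligned address bits [32:12]. */
    _Static_assert(SKETCH_XLAT_ADDR_MASK == 0x1FFFFF000ULL,
                   "XLAT_ADDR_MASK keeps only output-address bits [32:12]");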
diff --git a/src/soc/nvidia/tegra132/include/soc/addressmap.h b/src/soc/nvidia/tegra132/include/soc/addressmap.h
index f51920f24d..fee67fe692 100644
--- a/src/soc/nvidia/tegra132/include/soc/addressmap.h
+++ b/src/soc/nvidia/tegra132/include/soc/addressmap.h
@@ -114,14 +114,6 @@ enum {
 /* Provided the carveout id, obtain the base and size in 1MiB units. */
 void carveout_range(int id, uintptr_t *base_mib, size_t *size_mib);
-
-/*
- * Add any board-specific memory ranges to the address map when executing
- * on aarchv8 core.
- */
-struct memranges;
-void mainboard_add_memory_ranges(struct memranges *map);
-
 /*
  * There are complications accessing the Trust Zone carveout region. The
  * AVP cannot access these registers and the CPU can't access this register
diff --git a/src/soc/nvidia/tegra132/include/soc/memlayout.ld b/src/soc/nvidia/tegra132/include/soc/memlayout.ld
index 0c610422ef..d403c17127 100644
--- a/src/soc/nvidia/tegra132/include/soc/memlayout.ld
+++ b/src/soc/nvidia/tegra132/include/soc/memlayout.ld
@@ -38,4 +38,5 @@ SECTIONS
 	DRAM_START(0x80000000)
 	POSTRAM_CBFS_CACHE(0x80100000, 1M)
 	RAMSTAGE(0x80200000, 256K)
+	TTB(0x100000000 - CONFIG_TRUSTZONE_CARVEOUT_SIZE_MB * 1M, 1M)
 }
diff --git a/src/soc/nvidia/tegra132/include/soc/memlayout_vboot2.ld b/src/soc/nvidia/tegra132/include/soc/memlayout_vboot2.ld
index a024435a6f..7a6a1688fb 100644
--- a/src/soc/nvidia/tegra132/include/soc/memlayout_vboot2.ld
+++ b/src/soc/nvidia/tegra132/include/soc/memlayout_vboot2.ld
@@ -40,4 +40,5 @@ SECTIONS
 	DRAM_START(0x80000000)
 	POSTRAM_CBFS_CACHE(0x80100000, 1M)
 	RAMSTAGE(0x80200000, 256K)
+	TTB(0x100000000 - CONFIG_TRUSTZONE_CARVEOUT_SIZE_MB * 1M, 1M)
 }
diff --git a/src/soc/nvidia/tegra132/include/soc/mmu_operations.h b/src/soc/nvidia/tegra132/include/soc/mmu_operations.h
index df5472e86c..f604c40e25 100644
--- a/src/soc/nvidia/tegra132/include/soc/mmu_operations.h
+++ b/src/soc/nvidia/tegra132/include/soc/mmu_operations.h
@@ -18,7 +18,4 @@
 
 void tegra132_mmu_init(void);
 
-/* Default ttb size of 1MiB */
-#define TTB_SIZE 0x1
-
 #endif //__SOC_NVIDIA_TEGRA132_MMU_OPERATIONS_H__
diff --git a/src/soc/nvidia/tegra132/mmu_operations.c b/src/soc/nvidia/tegra132/mmu_operations.c
index 123f89842e..23531bee79 100644
--- a/src/soc/nvidia/tegra132/mmu_operations.c
+++ b/src/soc/nvidia/tegra132/mmu_operations.c
@@ -14,16 +14,14 @@
  */
 
 #include <arch/mmu.h>
-#include <memrange.h>
+#include <assert.h>
 #include <soc/addressmap.h>
 #include <soc/mmu_operations.h>
 #include <stdlib.h>
 #include <stdint.h>
+#include <symbols.h>
 
-/* This structure keeps track of all the mmap memory ranges for t132 */
-static struct memranges t132_mmap_ranges;
-
-static void tegra132_memrange_init(struct memranges *map)
+static void tegra132_mmu_config(void)
 {
 	uint64_t start,end;
 	const unsigned long devmem = MA_DEV | MA_S | MA_RW;
@@ -32,46 +30,35 @@ static void tegra132_memrange_init(struct memranges *map)
 	uintptr_t tz_base_mib;
 	size_t tz_size_mib;
 
-	memranges_init_empty(map);
-
 	memory_in_range_below_4gb(&start,&end);
 
 	/* Device memory below DRAM */
-	memranges_insert(map, 0, start * MiB, devmem);
+	mmu_config_range((void *)0, start * MiB, devmem);
 
 	/* DRAM */
-	memranges_insert(map, start * MiB, (end-start) * MiB, cachedmem);
+	mmu_config_range((void *)(start * MiB), (end-start) * MiB, cachedmem);
 
 	memory_in_range_above_4gb(&start,&end);
-	memranges_insert(map, start * MiB, (end-start) * MiB, cachedmem);
+	mmu_config_range((void *)(start * MiB), (end-start) * MiB, cachedmem);
 
 	/* SRAM */
-	memranges_insert(map, TEGRA_SRAM_BASE, TEGRA_SRAM_SIZE, cachedmem);
+	mmu_config_range(_sram, _sram_size, cachedmem);
 
 	/* Add TZ carveout. */
 	carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
-	memranges_insert(map, tz_base_mib * MiB, tz_size_mib * MiB, secure_mem);
-}
-
-void __attribute__((weak)) mainboard_add_memory_ranges(struct memranges *map)
-{
-	/* Don't add any ranges by default. */
+	mmu_config_range((void *)(tz_base_mib * MiB),
+			 tz_size_mib * MiB, secure_mem);
+
+	/* Ensure page tables are at the base of the trust zone region. */
+	assert((uintptr_t)_ttb == tz_base_mib * MiB &&
+	       _ttb_size <= tz_size_mib * MiB);
 }
 
 void tegra132_mmu_init(void)
 {
-	uintptr_t tz_base_mib;
-	size_t tz_size_mib;
-	size_t ttb_size_mib;
-	struct memranges *map = &t132_mmap_ranges;
-
-	tegra132_memrange_init(map);
-	mainboard_add_memory_ranges(map);
-	/* Place page tables at the base of the trust zone region. */
-	carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
-	tz_base_mib *= MiB;
-	ttb_size_mib = TTB_SIZE * MiB;
-	mmu_init(map, (void *)tz_base_mib, ttb_size_mib);
+	mmu_init();
+	tegra132_mmu_config();
 	mmu_enable();
 }
diff --git a/src/soc/nvidia/tegra210/include/soc/addressmap.h b/src/soc/nvidia/tegra210/include/soc/addressmap.h
index 2151be5f0b..2f6bd8a5f9 100644
--- a/src/soc/nvidia/tegra210/include/soc/addressmap.h
+++ b/src/soc/nvidia/tegra210/include/soc/addressmap.h
@@ -129,13 +129,6 @@ void carveout_range(int id, uintptr_t *base_mib, size_t *size_mib);
 void print_carveouts(void);
 
 /*
- * Add any board-specific memory ranges to the address map when executing
- * on aarchv8 core.
- */
-struct memranges;
-void mainboard_add_memory_ranges(struct memranges *map);
-
-/*
  * There are complications accessing the Trust Zone carveout region. The
  * AVP cannot access these registers and the CPU can't access this register
  * as a non-secure access. When the page tables live in non-secure memory
diff --git a/src/soc/nvidia/tegra210/include/soc/memlayout.ld b/src/soc/nvidia/tegra210/include/soc/memlayout.ld
index d24f980207..0338cd9604 100644
--- a/src/soc/nvidia/tegra210/include/soc/memlayout.ld
+++ b/src/soc/nvidia/tegra210/include/soc/memlayout.ld
@@ -38,4 +38,5 @@ SECTIONS
 	DRAM_START(0x80000000)
 	POSTRAM_CBFS_CACHE(0x80100000, 1M)
 	RAMSTAGE(0x80200000, 256K)
+	TTB(0x100000000 - CONFIG_TTB_SIZE_MB * 1M, CONFIG_TTB_SIZE_MB * 1M)
 }
diff --git a/src/soc/nvidia/tegra210/include/soc/memlayout_vboot2.ld b/src/soc/nvidia/tegra210/include/soc/memlayout_vboot2.ld
index e2e4dd8d24..b4b3dc2822 100644
--- a/src/soc/nvidia/tegra210/include/soc/memlayout_vboot2.ld
+++ b/src/soc/nvidia/tegra210/include/soc/memlayout_vboot2.ld
@@ -40,4 +40,5 @@ SECTIONS
 	DRAM_START(0x80000000)
 	POSTRAM_CBFS_CACHE(0x80100000, 1M)
 	RAMSTAGE(0x80200000, 256K)
+	TTB(0x100000000 - CONFIG_TTB_SIZE_MB * 1M, CONFIG_TTB_SIZE_MB * 1M)
 }
diff --git a/src/soc/nvidia/tegra210/mmu_operations.c b/src/soc/nvidia/tegra210/mmu_operations.c
index b9cae1419e..de7ae2f487 100644
--- a/src/soc/nvidia/tegra210/mmu_operations.c
+++ b/src/soc/nvidia/tegra210/mmu_operations.c
@@ -15,16 +15,13 @@
 
 #include <arch/mmu.h>
 #include <assert.h>
-#include <memrange.h>
 #include <soc/addressmap.h>
 #include <soc/mmu_operations.h>
 #include <stdlib.h>
 #include <stdint.h>
+#include <symbols.h>
 
-/* This structure keeps track of all the mmap memory ranges for t210 */
-static struct memranges t210_mmap_ranges;
-
-static void tegra210_memrange_init(struct memranges *map)
+static void tegra210_mmu_config(void)
 {
 	uint64_t start,end;
 	const unsigned long devmem = MA_DEV | MA_S | MA_RW;
@@ -35,45 +32,39 @@ static void tegra210_memrange_init(struct memranges *map)
 
 	print_carveouts();
 
-	memranges_init_empty(map);
-
 	memory_in_range_below_4gb(&start,&end);
 
 	/* Device memory below DRAM */
-	memranges_insert(map, TEGRA_ARM_LOWEST_PERIPH, start * MiB, devmem);
+	mmu_config_range((void *)TEGRA_ARM_LOWEST_PERIPH, start * MiB, devmem);
 
 	/* DRAM */
-	memranges_insert(map, start * MiB, (end-start) * MiB, cachedmem);
+	mmu_config_range((void *)(start * MiB), (end-start) * MiB, cachedmem);
 
 	memory_in_range_above_4gb(&start,&end);
-	memranges_insert(map, start * MiB, (end-start) * MiB, cachedmem);
+	mmu_config_range((void *)(start * MiB), (end-start) * MiB, cachedmem);
 
 	/* SRAM */
-	memranges_insert(map, TEGRA_SRAM_BASE, TEGRA_SRAM_SIZE, cachedmem);
+	mmu_config_range(_sram, _sram_size, cachedmem);
 
 	/* Add TZ carveout. */
 	carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
-	memranges_insert(map, tz_base_mib * MiB, tz_size_mib * MiB, secure_mem);
-}
-
-void __attribute__((weak)) mainboard_add_memory_ranges(struct memranges *map)
-{
-	/* Don't add any ranges by default. */
+	mmu_config_range((void *)(tz_base_mib * MiB),
+			 tz_size_mib * MiB, secure_mem);
 }
 
 void tegra210_mmu_init(void)
 {
 	uintptr_t tz_base_mib;
 	size_t tz_size_mib;
-	uintptr_t ttb_base_mib;
-	size_t ttb_size_mib;
-	struct memranges *map = &t210_mmap_ranges;
 
-	tegra210_memrange_init(map);
-	mainboard_add_memory_ranges(map);
+	mmu_init();
+	tegra210_mmu_config();
 
 	/*
-	 * Place page tables at the end of the trust zone region.
+	 * Page tables are at the end of the trust zone region, but we should
+	 * double-check that memlayout and addressmap.c are in sync.
+	 *
 	 * TZDRAM layout is as follows:
 	 *
 	 * +--------------------------+ <----+DRAM_END
 	 *
@@ -98,11 +89,8 @@ void tegra210_mmu_init(void)
 	 *
 	 */
 	carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
+	assert((uintptr_t)_ttb + _ttb_size == (tz_base_mib + tz_size_mib) * MiB
+	       && _ttb_size <= tz_size_mib * MiB);
 
-	assert(tz_size_mib > CONFIG_TTB_SIZE_MB);
-	ttb_base_mib = (tz_base_mib + tz_size_mib - CONFIG_TTB_SIZE_MB) * MiB;
-
-	ttb_size_mib = CONFIG_TTB_SIZE_MB * MiB;
-	mmu_init(map, (void *)ttb_base_mib, ttb_size_mib);
 	mmu_enable();
 }
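
Taken together, the new SoC-side contract is simple: mmu_init() marks every table slot in the TTB region unused and programs TTBR0/MAIR/TCR, mmu_config_range() builds mappings on demand, and mmu_enable() finally sets SCTLR.M/C/I. A hedged sketch of what a minimal new-style port looks like (the function name and address window below are invented; the mmu_*() calls, MA_* tags, and the _ttb/_dram symbols are the real API from this patch, and a TTB(...) region in the SoC's memlayout.ld is assumed):

    /* Hypothetical SoC port sketch, not code from this change. */
    #include <arch/mmu.h>
    #include <symbols.h>

    void example_soc_mmu_init(void)
    {
            mmu_init();     /* mark all table slots UNUSED_DESC, set up root */

            /* Devices below DRAM, then 2GiB of cached DRAM (invented window;
             * the tag combinations mirror the tegra code above). */
            mmu_config_range((void *)0, (uintptr_t)_dram,
                             MA_DEV | MA_S | MA_RW);
            mmu_config_range(_dram, 0x80000000, MA_MEM | MA_NS | MA_RW);

            /* The page tables themselves must be mapped, here as secure. */
            mmu_config_range(_ttb, _ttb_size, MA_MEM | MA_S | MA_RW);

            mmu_enable();   /* SCTLR.M/C/I on, followed by an ISB */
    }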