Diffstat (limited to 'src/arch/arm64')
-rw-r--r--  src/arch/arm64/armv8/Makefile.inc        |   1
-rw-r--r--  src/arch/arm64/armv8/mmu.c               | 313
-rw-r--r--  src/arch/arm64/include/armv8/arch/mmu.h  | 165
3 files changed, 479 insertions(+), 0 deletions(-)
diff --git a/src/arch/arm64/armv8/Makefile.inc b/src/arch/arm64/armv8/Makefile.inc
index ca4562b1a8..4cbb6e90c0 100644
--- a/src/arch/arm64/armv8/Makefile.inc
+++ b/src/arch/arm64/armv8/Makefile.inc
@@ -69,6 +69,7 @@ ramstage-y += cache.c
ramstage-y += cpu.S
ramstage-y += exception.c
ramstage-y += exception_asm.S
+ramstage-y += mmu.c
ramstage-c-ccopts += $(armv8_flags)
ramstage-S-ccopts += $(armv8_asm_flags)
diff --git a/src/arch/arm64/armv8/mmu.c b/src/arch/arm64/armv8/mmu.c
new file mode 100644
index 0000000000..e1f088e102
--- /dev/null
+++ b/src/arch/arm64/armv8/mmu.c
@@ -0,0 +1,313 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <cbmem.h>
+#include <console/console.h>
+
+#include <memrange.h>
+#include <arch/mmu.h>
+#include <arch/lib_helpers.h>
+#include <arch/cache.h>
+
+/* Maximum number of XLAT Tables available based on ttb buffer size */
+static unsigned int max_tables;
+/* Address of ttb buffer */
+static uint64_t *xlat_addr;
+
+static const uint64_t level_to_addr_mask[] = {
+ L1_ADDR_MASK,
+ L2_ADDR_MASK,
+ L3_ADDR_MASK,
+};
+
+static const uint64_t level_to_addr_shift[] = {
+ L1_ADDR_SHIFT,
+ L2_ADDR_SHIFT,
+ L3_ADDR_SHIFT,
+};
+
+/* Func : get_block_attr
+ * Desc : Get block descriptor attributes based on the value of tag in memrange
+ * region
+ */
+static uint64_t get_block_attr(unsigned long tag)
+{
+ uint64_t attr;
+
+ attr = (tag & MA_NS) ? BLOCK_NS : 0;
+ attr |= (tag & MA_RO) ? BLOCK_AP_RO : BLOCK_AP_RW;
+ attr |= BLOCK_ACCESS;
+ attr |= (tag & MA_MEM) ? (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT) :
+ (BLOCK_INDEX_MEM_DEV_NGNRNE << BLOCK_INDEX_SHIFT);
+
+ return attr;
+}
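A worked example of the attribute math (not part of the commit): for a normal, secure, read-write region the tag is MA_MEM | MA_S | MA_RW, so MA_NS and MA_RO are both clear and the function returns BLOCK_AP_RW | BLOCK_ACCESS | (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT), i.e. 0x400 | (4 << 2) = 0x410.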
+
+/* Func : get_index_from_addr
+ * Desc : Get index into table at a given level using appropriate bits from the
+ * base address
+ */
+static uint64_t get_index_from_addr(uint64_t addr, uint8_t level)
+{
+ uint64_t mask = level_to_addr_mask[level-1];
+ uint8_t shift = level_to_addr_shift[level-1];
+
+ return ((addr & mask) >> shift);
+}
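As a concrete illustration of the index arithmetic under the 33-bit VA / 64KiB granule assumptions (the function is file-static, so this sketch only shows the math):

	/* addr = 0x80000000: VA[32:29] = 4, VA[28:16] = 0 */
	get_index_from_addr(0x80000000, 2);	/* L2 index: 4 */
	get_index_from_addr(0x80000000, 3);	/* L3 index: 0 */
	/* L1 index is always 0: a 33-bit VA has no bits above bit 41 */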
+
+/* Func : table_desc_valid
+ * Desc : Check if a table entry contains valid desc
+ */
+static uint64_t table_desc_valid(uint64_t desc)
+{
+ return (desc & TABLE_DESC) == TABLE_DESC;
+}
+
+/* Func : get_new_table
+ * Desc : Return the next free XLAT table from ttb buffer
+ */
+static uint64_t *get_new_table(void)
+{
+ static unsigned int free_idx = 1;
+ uint64_t *new;
+
+ if (free_idx >= max_tables) {
+ printk(BIOS_ERR, "ARM64 MMU: No free table\n");
+ return NULL;
+ }
+
+ new = (uint64_t *)((unsigned char *)xlat_addr + free_idx * GRANULE_SIZE);
+ free_idx++;
+
+ memset(new, 0, GRANULE_SIZE);
+
+ return new;
+}
+
+/* Func : get_table_from_desc
+ * Desc : Get next level table address from table descriptor
+ */
+static uint64_t *get_table_from_desc(uint64_t desc)
+{
+ uint64_t *ptr = (uint64_t *)(desc & XLAT_TABLE_MASK);
+ return ptr;
+}
+
+/* Func: get_next_level_table
+ * Desc: Check if the table entry holds a valid descriptor. If not, allocate a
+ * new table, update the entry and return the new table's address. If it is
+ * valid, return the address the descriptor points to
+ */
+static uint64_t *get_next_level_table(uint64_t *ptr)
+{
+ uint64_t desc = *ptr;
+
+ if (!table_desc_valid(desc)) {
+ uint64_t *new_table = get_new_table();
+ if (new_table == NULL)
+ return NULL;
+ desc = ((uint64_t)new_table) | TABLE_DESC;
+ *ptr = desc;
+ }
+ return get_table_from_desc(desc);
+}
+
+/* Func : init_xlat_table
+ * Desc : Given a base address and size, identify the indices within the
+ * different XLAT table levels which map the given base addr. Similar to a
+ * hardware table walk, except that invalid entries encountered along the way
+ * are allocated and filled in. On success, returns the size of the block/page
+ * addressed by the final table
+ */
+static uint64_t init_xlat_table(uint64_t base_addr,
+ uint64_t size,
+ uint64_t tag)
+{
+ uint64_t l1_index = get_index_from_addr(base_addr, 1);
+ uint64_t l2_index = get_index_from_addr(base_addr, 2);
+ uint64_t l3_index = get_index_from_addr(base_addr, 3);
+ uint64_t *table = xlat_addr;
+ uint64_t desc;
+ uint64_t attr = get_block_attr(tag);
+
+ /* L1 table lookup */
+ /* Only needed if the VA uses bits above bit 41; with a 33-bit VA,
+ l1_index is always 0 and this is skipped */
+ if (l1_index) {
+ table = get_next_level_table(&table[l1_index]);
+ if (!table)
+ return 0;
+ }
+
+ /* L2 table lookup */
+ /* If a lookup was performed at L1, the L2 table address comes from the
+ L1 descriptor; otherwise the lookup starts at the TTBR address */
+ if (!l3_index && (size >= L2_XLAT_SIZE)) {
+ /* If the block address is 512MiB-aligned (l3_index == 0) and
+ the remaining size covers at least 512MiB, i.e. the size
+ addressed by each L2 entry, we can store a block desc
+ directly */
+ desc = base_addr | BLOCK_DESC | attr;
+ table[l2_index] = desc;
+ /* L3 lookup is not required */
+ return L2_XLAT_SIZE;
+ } else {
+ /* L2 entry stores a table descriptor */
+ table = get_next_level_table(&table[l2_index]);
+ if (!table)
+ return 0;
+ }
+
+ /* L3 table lookup */
+ desc = base_addr | PAGE_DESC | attr;
+ table[l3_index] = desc;
+ return L3_XLAT_SIZE;
+}
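For example, mapping 1GiB at address 0 takes exactly two calls to this function: the first finds l3_index == 0 with size >= 512MiB, writes a block descriptor at L2 index 0 and returns L2_XLAT_SIZE; the caller then advances to 0x20000000 and the second call fills L2 index 1 the same way. No L3 table is ever allocated for such a region.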
+
+/* Func : sanity_check
+ * Desc : Check that the address is granule-aligned and that the size is at
+ * least the granule size
+ */
+static uint64_t sanity_check(uint64_t addr,
+ uint64_t size)
+{
+ /* Address should be at least 64 KiB (granule) aligned */
+ if (addr & GRANULE_SIZE_MASK)
+ return 1;
+
+ /* Size should be at least the granule size */
+ if (size < GRANULE_SIZE)
+ return 1;
+
+ return 0;
+}
+
+/* Func : init_mmap_entry
+ * Desc : For each mmap entry, this function calls init_xlat_table with the
+ * base address. Based on the size returned from init_xlat_table, base_addr
+ * is advanced and further calls are made until the whole region has been
+ * initialized.
+ */
+static void init_mmap_entry(struct range_entry *r)
+{
+ uint64_t base_addr = range_entry_base(r);
+ uint64_t size = range_entry_size(r);
+ uint64_t tag = range_entry_tag(r);
+ uint64_t temp_size = size;
+
+ while (temp_size) {
+ uint64_t ret;
+
+ if (sanity_check(base_addr, temp_size)) {
+ printk(BIOS_ERR, "ARM64 MMU: sanity check failed\n");
+ return;
+ }
+
+ ret = init_xlat_table(base_addr + (size - temp_size),
+ temp_size, tag);
+
+ if (ret == 0)
+ return;
+
+ temp_size -= ret;
+ }
+}
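By contrast, a 640MiB region at address 0 costs one 512MiB L2 block plus an L3 fallback for the tail: each remaining call maps a single 64KiB page and returns L3_XLAT_SIZE, so the 128MiB tail takes 2048 iterations of this loop and allocates one L3 table (an L3 table spans 512MiB, so all 2048 pages share it).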
+
+/* Func : mmu_init
+ * Desc : Initialize the MMU based on the mmap_ranges passed in. ttb_buffer is
+ * used as the base address for the XLAT tables; ttb_size is the size of that
+ * buffer in bytes and thus bounds the number of tables that can be allocated
+ */
+void mmu_init(struct memranges *mmap_ranges,
+ uint64_t *ttb_buffer,
+ uint64_t ttb_size)
+{
+ struct range_entry *mmap_entry;
+
+ if (sanity_check((uint64_t)ttb_buffer, ttb_size)) {
+ printk(BIOS_ERR, "ARM64: Sanity failed for ttb\n");
+ return;
+ }
+
+ memset(ttb_buffer, 0, GRANULE_SIZE);
+ max_tables = (ttb_size >> GRANULE_SIZE_SHIFT);
+ xlat_addr = ttb_buffer;
+
+ printk(BIOS_DEBUG, "ARM64: TTB_BUFFER: 0x%p Max Tables: %u\n",
+ (void *)xlat_addr, max_tables);
+
+ memranges_each_entry(mmap_entry, mmap_ranges) {
+ init_mmap_entry(mmap_entry);
+ }
+ printk(BIOS_DEBUG, "ARM64: MMU init done\n");
+}
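A board or SoC would typically drive this pair of entry points as in the sketch below. This is illustrative only: the buffer symbol, TTB_SIZE and the two regions are made-up placeholders, and memranges_init_empty()/memranges_insert() are assumed to have their usual <memrange.h> signatures.

	#include <memrange.h>
	#include <arch/mmu.h>

	#define TTB_SIZE (4 * GRANULE_SIZE)	/* room for 4 XLAT tables */

	/* Must be granule-aligned: table addresses are stored in descriptors
	 * with the low 16 bits masked off, and mmu_init() sanity-checks it. */
	static uint64_t ttb_buffer[TTB_SIZE / sizeof(uint64_t)]
		__attribute__((aligned(GRANULE_SIZE)));

	static void soc_mmu_setup(void)	/* hypothetical helper */
	{
		struct memranges mmap;

		memranges_init_empty(&mmap);
		/* 2GiB of DRAM: normal, secure, read-write memory */
		memranges_insert(&mmap, 0x80000000, 0x80000000,
				 MA_MEM | MA_S | MA_RW);
		/* 256MiB of MMIO: device memory, non-secure */
		memranges_insert(&mmap, 0x70000000, 0x10000000,
				 MA_DEV | MA_NS | MA_RW);

		mmu_init(&mmap, ttb_buffer, TTB_SIZE);
		mmu_enable((uint64_t)ttb_buffer);
	}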
+
+static uint32_t is_mmu_enabled(void)
+{
+ uint32_t sctlr;
+
+ sctlr = raw_read_sctlr_el3();
+
+ return (sctlr & SCTLR_M);
+}
+
+void mmu_enable(uint64_t ttbr)
+{
+ uint32_t sctlr;
+
+ /* Initialize MAIR indices */
+ raw_write_mair_el3(MAIR_ATTRIBUTES);
+
+ /* Invalidate TLBs */
+ tlbiall_el3();
+
+ /* Initialize TCR flags */
+ raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
+ TCR_SH0_IS | TCR_TG0_64KB | TCR_PS_64GB |
+ TCR_TBI_USED);
+
+ /* Initialize TTBR */
+ raw_write_ttbr0_el3(ttbr);
+
+ /* Ensure all translation table writes are committed before enabling MMU */
+ dsb();
+ isb();
+
+ /* Enable MMU */
+ sctlr = raw_read_sctlr_el3();
+ sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
+ raw_write_sctlr_el3(sctlr);
+
+ isb();
+
+ if (is_mmu_enabled())
+ printk(BIOS_DEBUG, "ARM64: MMU enable done\n");
+ else
+ printk(BIOS_DEBUG, "ARM64: MMU enable failed\n");
+}
diff --git a/src/arch/arm64/include/armv8/arch/mmu.h b/src/arch/arm64/include/armv8/arch/mmu.h
new file mode 100644
index 0000000000..f5f60a100c
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/mmu.h
@@ -0,0 +1,165 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ARCH_ARM64_MMU_H__
+#define __ARCH_ARM64_MMU_H__
+
+#include <memrange.h>
+
+/* IMPORTANT!
+ * Assumptions made:
+ * - Granule size is 64KiB
+ * - Virtual addresses are 33 bits wide
+ * All the calculations for the L1, L2 and L3 tables are based on these
+ * assumptions. If these values are changed, recalculate the other macros as
+ * well.
+ */
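The index masks below follow directly from those assumptions: a 64KiB granule consumes VA[15:0] as the page offset; each 64KiB table holds 2^13 eight-byte entries, so L3 indexes VA[28:16]; that leaves only VA[32:29] (four bits) for L2 with a 33-bit VA, and no bits at all for L1, which is why L1_ADDR_MASK is zero.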
+
+
+/* Memory attributes for mmap regions
+ * These attributes act as tag values for memrange regions
+ */
+
+/* Normal memory / device */
+#define MA_MEM (1 << 0)
+#define MA_DEV (0 << 0)
+
+/* Secure / non-secure */
+#define MA_NS (1 << 1)
+#define MA_S (0 << 1)
+
+/* Read only / Read-write */
+#define MA_RO (1 << 2)
+#define MA_RW (0 << 2)
+
+/* Descriptor attributes */
+
+#define INVALID_DESC 0x0
+#define BLOCK_DESC 0x1
+#define TABLE_DESC 0x3
+#define PAGE_DESC 0x3
+
+/* Block descriptor */
+#define BLOCK_NS (1 << 5)
+
+#define BLOCK_AP_RW (0 << 7)
+#define BLOCK_AP_RO (1 << 7)
+
+#define BLOCK_ACCESS (1 << 10)
+
+/* XLAT Table Init Attributes */
+
+#define VA_START 0x0
+/* If BITS_PER_VA or GRANULE_SIZE are changed, recalculate and change the
+ macros following them */
+#define BITS_PER_VA 33
+/* Granule size of 64KB is being used */
+#define GRANULE_SIZE_SHIFT 16
+#define GRANULE_SIZE (1 << GRANULE_SIZE_SHIFT)
+#define XLAT_TABLE_MASK (~(0xffffUL))
+#define GRANULE_SIZE_MASK ((1UL << GRANULE_SIZE_SHIFT) - 1)
+
+#define L1_ADDR_SHIFT 42
+#define L2_ADDR_SHIFT 29
+#define L3_ADDR_SHIFT 16
+
+#define L1_ADDR_MASK (0UL << L1_ADDR_SHIFT)
+#define L2_ADDR_MASK (0xfUL << L2_ADDR_SHIFT)
+#define L3_ADDR_MASK (0x1fffUL << L3_ADDR_SHIFT)
+
+/* Dependent on BITS_PER_VA and GRANULE_SIZE */
+#define INIT_LEVEL 2
+#define XLAT_MAX_LEVEL 3
+
+/* Each entry in XLAT table is 8 bytes */
+#define XLAT_ENTRY_SHIFT 3
+#define XLAT_ENTRY_SIZE (1 << XLAT_ENTRY_SHIFT)
+
+#define XLAT_TABLE_SHIFT GRANULE_SIZE_SHIFT
+#define XLAT_TABLE_SIZE (1 << XLAT_TABLE_SHIFT)
+
+#define XLAT_NUM_ENTRIES_SHIFT (XLAT_TABLE_SHIFT - XLAT_ENTRY_SHIFT)
+#define XLAT_NUM_ENTRIES (1 << XLAT_NUM_ENTRIES_SHIFT)
+
+#define L3_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT)
+#define L2_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + XLAT_NUM_ENTRIES_SHIFT)
+#define L1_XLAT_SIZE_SHIFT (GRANULE_SIZE_SHIFT + 2 * XLAT_NUM_ENTRIES_SHIFT)
+
+/* These macros give the size of the region addressed by each entry of an
+ XLAT table at any given level */
+#define L3_XLAT_SIZE (1UL << L3_XLAT_SIZE_SHIFT)
+#define L2_XLAT_SIZE (1UL << L2_XLAT_SIZE_SHIFT)
+#define L1_XLAT_SIZE (1UL << L1_XLAT_SIZE_SHIFT)
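With the 64KiB granule these work out to 64KiB per L3 entry, 512MiB (2^29) per L2 entry and 4TiB (2^42) per L1 entry, matching the L*_ADDR_SHIFT values above.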
+
+/* Block indices required for MAIR */
+#define BLOCK_INDEX_MEM_DEV_NGNRNE 0
+#define BLOCK_INDEX_MEM_DEV_NGNRE 1
+#define BLOCK_INDEX_MEM_DEV_GRE 2
+#define BLOCK_INDEX_MEM_NORMAL_NC 3
+#define BLOCK_INDEX_MEM_NORMAL 4
+
+#define BLOCK_INDEX_SHIFT 2
+
+/* MAIR attributes */
+#define MAIR_ATTRIBUTES ((0x00 << (BLOCK_INDEX_MEM_DEV_NGNRNE*8)) | \
+ (0x04 << (BLOCK_INDEX_MEM_DEV_NGNRE*8)) | \
+ (0x0c << (BLOCK_INDEX_MEM_DEV_GRE*8)) | \
+ (0x44 << (BLOCK_INDEX_MEM_NORMAL_NC*8)) | \
+ (0xffUL << (BLOCK_INDEX_MEM_NORMAL*8)))
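Each index occupies one byte of the MAIR register, so an attribute value sits at bits [index*8+7:index*8]. The byte values are the architectural encodings: 0x00 is Device-nGnRnE, 0x04 Device-nGnRE, 0x0c Device-GRE, 0x44 Normal non-cacheable, and 0xff Normal write-back read/write-allocate cacheable.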
+
+/* TCR attributes */
+#define TCR_TOSZ (64 - BITS_PER_VA)
+
+#define TCR_IRGN0_SHIFT 8
+#define TCR_IRGN0_NM_NC (0x00 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WBWAC (0x01 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WTC (0x02 << TCR_IRGN0_SHIFT)
+#define TCR_IRGN0_NM_WBNWAC (0x03 << TCR_IRGN0_SHIFT)
+
+#define TCR_ORGN0_SHIFT 10
+#define TCR_ORGN0_NM_NC (0x00 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WBWAC (0x01 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WTC (0x02 << TCR_ORGN0_SHIFT)
+#define TCR_ORGN0_NM_WBNWAC (0x03 << TCR_ORGN0_SHIFT)
+
+#define TCR_SH0_SHIFT 12
+#define TCR_SH0_NC (0x0 << TCR_SH0_SHIFT)
+#define TCR_SH0_OS (0x2 << TCR_SH0_SHIFT)
+#define TCR_SH0_IS (0x3 << TCR_SH0_SHIFT)
+
+#define TCR_TG0_SHIFT 14
+#define TCR_TG0_4KB (0x0 << TCR_TG0_SHIFT)
+#define TCR_TG0_64KB (0x1 << TCR_TG0_SHIFT)
+#define TCR_TG0_16KB (0x2 << TCR_TG0_SHIFT)
+
+#define TCR_PS_SHIFT 16
+#define TCR_PS_4GB (0x0 << TCR_PS_SHIFT)
+#define TCR_PS_64GB (0x1 << TCR_PS_SHIFT)
+#define TCR_PS_1TB (0x2 << TCR_PS_SHIFT)
+#define TCR_PS_4TB (0x3 << TCR_PS_SHIFT)
+#define TCR_PS_16TB (0x4 << TCR_PS_SHIFT)
+#define TCR_PS_256TB (0x5 << TCR_PS_SHIFT)
+
+#define TCR_TBI_SHIFT 20
+#define TCR_TBI_USED (0x0 << TCR_TBI_SHIFT)
+#define TCR_TBI_IGNORED (0x1 << TCR_TBI_SHIFT)
+
+void mmu_init(struct memranges *mmap_ranges, uint64_t *ttb_buffer,
+ uint64_t ttb_size);
+void mmu_enable(uint64_t ttbr);
+
+#endif /* __ARCH_ARM64_MMU_H__ */