Diffstat (limited to 'src/arch/arm64/armv8')
-rw-r--r--  src/arch/arm64/armv8/Kconfig             |   9
-rw-r--r--  src/arch/arm64/armv8/Makefile.inc        |  74
-rw-r--r--  src/arch/arm64/armv8/bootblock.S         | 101
-rw-r--r--  src/arch/arm64/armv8/bootblock_simple.c  |  73
-rw-r--r--  src/arch/arm64/armv8/cache.c             | 148
-rw-r--r--  src/arch/arm64/armv8/cpu.S               | 131
-rw-r--r--  src/arch/arm64/armv8/exception.c         | 129
-rw-r--r--  src/arch/arm64/armv8/exception_asm.S     | 114
8 files changed, 779 insertions, 0 deletions
diff --git a/src/arch/arm64/armv8/Kconfig b/src/arch/arm64/armv8/Kconfig
new file mode 100644
index 0000000000..fc2e6680b1
--- /dev/null
+++ b/src/arch/arm64/armv8/Kconfig
@@ -0,0 +1,9 @@
+config ARCH_BOOTBLOCK_ARM_V8_64
+ def_bool n
+ select ARCH_BOOTBLOCK_ARM64
+config ARCH_ROMSTAGE_ARM_V8_64
+ def_bool n
+ select ARCH_ROMSTAGE_ARM64
+config ARCH_RAMSTAGE_ARM_V8_64
+ def_bool n
+ select ARCH_RAMSTAGE_ARM64
diff --git a/src/arch/arm64/armv8/Makefile.inc b/src/arch/arm64/armv8/Makefile.inc
new file mode 100644
index 0000000000..973b391dd4
--- /dev/null
+++ b/src/arch/arm64/armv8/Makefile.inc
@@ -0,0 +1,74 @@
+################################################################################
+##
+## This file is part of the coreboot project.
+##
+## Copyright (C) 2014 The ChromiumOS Authors
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+################################################################################
+
+armv8_flags = -march=armv8-a -I$(src)/arch/arm64/include/armv8/ -D__COREBOOT_ARM_ARCH__=8
+
+armv8_asm_flags = $(armv8_flags)
+
+################################################################################
+## bootblock
+################################################################################
+ifeq ($(CONFIG_ARCH_BOOTBLOCK_ARM_V8_64),y)
+
+ifneq ($(CONFIG_ARM_BOOTBLOCK_CUSTOM),y)
+bootblock-y += bootblock.S
+endif
+bootblock-$(CONFIG_ARM_BOOTBLOCK_SIMPLE) += bootblock_simple.c
+bootblock-$(CONFIG_ARM_BOOTBLOCK_NORMAL) += bootblock_normal.c
+bootblock-y += cache.c
+bootblock-y += cpu.S
+bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += exception.c
+bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += exception_asm.S
+
+bootblock-c-ccopts += $(armv8_flags)
+bootblock-S-ccopts += $(armv8_asm_flags)
+
+endif
+
+################################################################################
+## romstage
+################################################################################
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM_V8_64),y)
+
+romstage-y += cache.c
+romstage-y += cpu.S
+romstage-y += exception.c
+romstage-y += exception_asm.S
+
+romstage-c-ccopts += $(armv8_flags)
+romstage-S-ccopts += $(armv8_asm_flags)
+
+endif
+
+################################################################################
+## ramstage
+################################################################################
+ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM_V8_64),y)
+
+ramstage-y += cache.c
+ramstage-y += cpu.S
+ramstage-y += exception.c
+ramstage-y += exception_asm.S
+
+ramstage-c-ccopts += $(armv8_flags)
+ramstage-S-ccopts += $(armv8_asm_flags)
+
+endif
diff --git a/src/arch/arm64/armv8/bootblock.S b/src/arch/arm64/armv8/bootblock.S
new file mode 100644
index 0000000000..e65515f20b
--- /dev/null
+++ b/src/arch/arm64/armv8/bootblock.S
@@ -0,0 +1,101 @@
+/*
+ * Early initialization code for AArch64 (the 64-bit state of ARMv8)
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+.section ".start", "a", %progbits
+.globl _start
+_start: b reset
+ .balignl 16,0xdeadbeef
+
+_cbfs_master_header:
+ /* The CBFS master header is inserted by cbfstool at the first
+ * aligned offset after the 0xdeadbeef anchor pattern above.
+ * Hence, we leave some space for it.
+ * Assumes 64-byte alignment.
+ */
+ .skip 128
+
+reset:
+ /*
+ * Unmask SError (asynchronous abort) and debug exceptions, but keep
+ * IRQ and FIQ masked. Aborts may crash the machine before logging is
+ * turned on, but at least the problem shows up near the causing code.
+ */
+ /* FIXME: AArch64 has no SVC32 mode; does an EL choice belong here? */
+
+ msr daifclr, #0xc /* Unmask Debug (D) and SError/abort (A) */
+ msr daifset, #0x3 /* Mask IRQ (I) and FIQ (F) */
+
+ bl arm_init_caches
+
+ /*
+ * Initialize the stack to a known value. This is used to check for
+ * stack overflow later in the boot process.
+ */
+ ldr x0, .Stack
+ ldr x1, .Stack_size
+ sub x0, x0, x1
+ ldr x1, .Stack
+ ldr x2, =0xdeadbeefdeadbeef
+init_stack_loop:
+ str x2, [x0]
+ add x0, x0, #8
+ cmp x0, x1
+ bne init_stack_loop
+
+/* Set the stack pointer in internal RAM and call bootblock main() */
+call_bootblock:
+ ldr x0, .Stack /* Set up stack pointer */
+ mov sp, x0
+ ldr x0, =0x00000000
+
+ sub sp, sp, #16
+
+ /*
+ * Drop to EL2 now, because Linux must be entered in EL2 or
+ * non-secure EL1 (see its "Booting AArch64 Linux" document).
+ */
+ bl switch_el3_to_el2
+ bl main
+
+.align 3
+.Stack:
+ .quad CONFIG_STACK_TOP /* the ldr above reads 64 bits, so use .quad */
+.align 3
+.Stack_size:
+ .quad CONFIG_STACK_SIZE
+ .section ".id", "a", %progbits
+
+ .globl __id_start
+__id_start:
+ver:
+ .asciz COREBOOT_VERSION
+vendor:
+ .asciz CONFIG_MAINBOARD_VENDOR
+part:
+ .asciz CONFIG_MAINBOARD_PART_NUMBER
+.long __id_end - ver /* Reverse offset to the version string */
+.long __id_end - vendor /* Reverse offset to the vendor id */
+.long __id_end - part /* Reverse offset to the part number */
+.long CONFIG_ROM_SIZE /* Size of this romimage */
+ .globl __id_end
+
+__id_end:
+.previous
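
The 0xdeadbeefdeadbeef fill above lets a later stage measure how much of the
bootblock stack was actually used. A minimal sketch of such a check in C (the
function name and the stack bounds as plain macros are illustrative
assumptions, not part of this patch):

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-ins for the CONFIG_STACK_TOP/CONFIG_STACK_SIZE values the
     * assembly above reads; the real values are SoC-specific. */
    #define STACK_TOP  0x02078000UL
    #define STACK_SIZE 0x2000UL

    /* Scan upward from the bottom of the stack and count the 8-byte slots
     * that still hold the fill pattern; everything above the first
     * overwritten slot has been used at some point (high-water mark). */
    static size_t stack_bytes_untouched(void)
    {
        const uint64_t *slot = (const uint64_t *)(STACK_TOP - STACK_SIZE);
        const uint64_t *top = (const uint64_t *)STACK_TOP;
        size_t untouched = 0;

        while (slot < top && *slot == 0xdeadbeefdeadbeefULL) {
            untouched += sizeof(*slot);
            slot++;
        }
        return untouched;
    }

Peak stack usage is then STACK_SIZE minus the returned value.
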
diff --git a/src/arch/arm64/armv8/bootblock_simple.c b/src/arch/arm64/armv8/bootblock_simple.c
new file mode 100644
index 0000000000..d8339d1f76
--- /dev/null
+++ b/src/arch/arm64/armv8/bootblock_simple.c
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <bootblock_common.h>
+#include <arch/cache.h>
+#include <arch/hlt.h>
+#include <arch/stages.h>
+#include <arch/exception.h>
+#include <cbfs.h>
+#include <console/console.h>
+
+static int boot_cpu(void)
+{
+ /*
+ * FIXME: This is a stub for now. All non-boot CPUs should be
+ * waiting for an interrupt. We could move the chunk of assembly
+ * which puts them to sleep in here...
+ */
+ return 1;
+}
+
+void main(void)
+{
+ const char *stage_name = CONFIG_CBFS_PREFIX "/romstage";
+ void *entry = NULL;
+
+ /* Globally disable MMU, caches, and branch prediction (these should
+ * be disabled by default on reset) */
+ dcache_mmu_disable();
+
+ /*
+ * The icache was already re-enabled in arm_init_caches(); the MMU
+ * and dcache will be set up later.
+ *
+ * Note: If booting from USB, we need to disable branch prediction
+ * before copying from USB into RAM (FIXME: why?)
+ */
+
+ if (boot_cpu()) {
+ //bootblock_cpu_init();
+ //bootblock_mainboard_init();
+ }
+
+#if CONFIG_BOOTBLOCK_CONSOLE
+ console_init();
+ exception_init();
+#endif
+
+ entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);
+
+ printk(BIOS_SPEW, "stage_name %s, entry %p\n", stage_name, entry);
+
+ if (entry) stage_exit(entry);
+ hlt();
+}
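
boot_cpu() above is stubbed to always return 1. On a multi-core ARMv8 part
the usual approach is to read MPIDR_EL1 and treat one core as the boot CPU;
a sketch under the assumption that the boot CPU has all-zero affinity (an
SoC-specific convention, not guaranteed by the architecture):

    #include <stdint.h>

    static int boot_cpu(void)
    {
        uint64_t mpidr;

        /* MPIDR_EL1 affinity fields: Aff0 [7:0], Aff1 [15:8],
         * Aff2 [23:16], Aff3 [39:32]. */
        asm volatile ("mrs %0, mpidr_el1" : "=r" (mpidr));

        return (mpidr & 0xff00ffffffULL) == 0;
    }
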
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
new file mode 100644
index 0000000000..a0eff46e20
--- /dev/null
+++ b/src/arch/arm64/armv8/cache.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.c: Cache maintenance routines for ARMv8 (aarch64)
+ *
+ * Reference: ARM Architecture Reference Manual, ARMv8-A edition
+ */
+
+#include <stdint.h>
+
+#include <arch/cache.h>
+
+void tlb_invalidate_all(void)
+{
+ /* tlbiall() issues TLBI ALLEx for the current EL; that covers both
+ instruction and data TLBs. */
+ tlbiall(current_el());
+ dsb();
+ isb();
+}
+
+unsigned int dcache_line_bytes(void)
+{
+ uint32_t ccsidr;
+ static unsigned int line_bytes = 0;
+
+ if (line_bytes)
+ return line_bytes;
+
+ ccsidr = read_ccsidr();
+ /* [2:0] - Indicates (Log2(number of words in cache line)) - 4 */
+ line_bytes = 1 << ((ccsidr & 0x7) + 4); /* words per line */
+ line_bytes *= sizeof(uint32_t); /* bytes per word */
+
+ return line_bytes;
+}
+
+enum dcache_op {
+ OP_DCCSW,
+ OP_DCCISW,
+ OP_DCISW,
+ OP_DCCIVAC,
+ OP_DCCVAC,
+ OP_DCIVAC,
+};
+
+/*
+ * Do a dcache operation by virtual address. This is useful for maintaining
+ * coherency in drivers which do DMA transfers and only need to perform
+ * cache maintenance on a particular memory range rather than the entire cache.
+ */
+static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
+{
+ uint64_t line, linesize;
+
+ linesize = dcache_line_bytes();
+ line = (uint64_t)addr & ~(linesize - 1);
+
+ dsb();
+ while ((void *)line < addr + len) {
+ switch(op) {
+ case OP_DCCIVAC:
+ dccivac(line);
+ break;
+ case OP_DCCVAC:
+ dccvac(line);
+ break;
+ case OP_DCIVAC:
+ dcivac(line);
+ break;
+ default:
+ break;
+ }
+ line += linesize;
+ }
+ isb();
+}
+
+void dcache_clean_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCCVAC);
+}
+
+void dcache_clean_invalidate_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCCIVAC);
+}
+
+void dcache_invalidate_by_va(void const *addr, size_t len)
+{
+ dcache_op_va(addr, len, OP_DCIVAC);
+}
+
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
+void dcache_mmu_disable(void)
+{
+ uint32_t sctlr;
+
+ flush_dcache_all();
+ sctlr = read_sctlr(current_el());
+ sctlr &= ~(SCTLR_C | SCTLR_M);
+ write_sctlr(sctlr, current_el());
+}
+
+void dcache_mmu_enable(void)
+{
+ uint32_t sctlr;
+
+ sctlr = read_sctlr(current_el());
+ sctlr |= SCTLR_C | SCTLR_M;
+ write_sctlr(sctlr, current_el());
+}
+
+void cache_sync_instructions(void)
+{
+ flush_dcache_all(); /* includes trailing DSB (in assembly) */
+ iciallu(); /* includes BPIALLU (architecturally) */
+ dsb();
+ isb();
+}
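
The by-VA helpers exist for exactly the DMA case described above
dcache_op_va(). A sketch of typical use around a transfer;
dma_to_device()/dma_from_device() are hypothetical driver hooks, only the
cache calls come from this file:

    #include <stddef.h>
    #include <arch/cache.h>

    /* Hypothetical driver hooks, not part of this patch. */
    void dma_to_device(const void *buf, size_t len);
    void dma_from_device(void *buf, size_t len);

    static void dma_send(const void *buf, size_t len)
    {
        /* Write dirty CPU lines back so the device reads current data. */
        dcache_clean_by_va(buf, len);
        dma_to_device(buf, len);
    }

    static void dma_receive(void *buf, size_t len)
    {
        /* Clean+invalidate first: a dirty line written back in the middle
         * of the transfer would corrupt the incoming buffer. */
        dcache_clean_invalidate_by_va(buf, len);
        dma_from_device(buf, len);
        /* Invalidate again in case lines were speculatively refetched. */
        dcache_invalidate_by_va(buf, len);
    }
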
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
new file mode 100644
index 0000000000..1eecc2b0d7
--- /dev/null
+++ b/src/arch/arm64/armv8/cpu.S
@@ -0,0 +1,131 @@
+/*
+ * Based on arch/arm/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <arch/asm.h>
+
+/*
+ * flush_dcache_all()
+ *
+ * Flush the whole D-cache.
+ *
+ * Corrupted registers: x0-x7, x9-x11
+ * From: Linux arch/arm64/mm/cache.S
+ */
+ENTRY(flush_dcache_all)
+ dsb sy // ensure ordering with previous memory accesses
+ mrs x0, clidr_el1 // read clidr
+ and x3, x0, #0x7000000 // extract loc from clidr
+ lsr x3, x3, #23 // left align loc bit field
+ cbz x3, finished // if loc is 0, then no need to clean
+ mov x10, #0 // start clean at cache level 0
+loop1:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask of the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt skip // skip if no cache, or just i-cache
+ mrs x9, daif // make CSSELR and CCSIDR access atomic
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new csselr and ccsidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ msr daif, x9
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ mov x4, #0x3ff
+ and x4, x4, x1, lsr #3 // maximum way number (ways - 1)
+ clz w5, w4 // 32-bit clz: DC CISW way field is left-aligned at bit 31
+ mov x7, #0x7fff
+ and x7, x7, x1, lsr #13 // maximum set index (sets - 1)
+loop2:
+ mov x9, x4 // create working copy of max way size
+loop3:
+ lsl x6, x9, x5
+ orr x11, x10, x6 // factor way and cache number into x11
+ lsl x6, x7, x2
+ orr x11, x11, x6 // factor index number into x11
+ dc cisw, x11 // clean & invalidate by set/way
+ subs x9, x9, #1 // decrement the way
+ b.ge loop3
+ subs x7, x7, #1 // decrement the index
+ b.ge loop2
+skip:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop1
+finished:
+ mov x10, #0 // switch back to cache level 0
+ msr csselr_el1, x10 // select current cache level in csselr
+ dsb sy
+ isb
+ ret
+ENDPROC(flush_dcache_all)
+
+/*
+ * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
+ * known state regarding caches/SCTLR. Completely cleans and invalidates
+ * icache/dcache, disables MMU and dcache (if active), and enables unaligned
+ * accesses, icache and branch prediction (if inactive). Clobbers x4 and x5.
+ */
+ENTRY(arm_init_caches)
+ /* x4: SCTLR, return address: x8 (both stay valid for the whole function) */
+ mov x8, x30
+ /* XXX: Assume that we always start running at EL3 */
+ mrs x4, sctlr_el3
+
+ /* FIXME: How to enable branch prediction on ARMv8? */
+
+ /* Flush and invalidate dcache */
+ bl flush_dcache_all
+
+ /* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
+ and x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
+ /* Activate ICache (12) already for speed */
+ orr x4, x4, #(1 << 12)
+ msr sctlr_el3, x4
+
+ /* Invalidate icache and TLB for good measure */
+ ic iallu
+ tlbi alle3
+ dsb sy
+ isb
+
+ ret x8
+ENDPROC(arm_init_caches)
+
+/* Based on u-boot transition.S */
+ENTRY(switch_el3_to_el2)
+ mov x0, #0x5b1 /* Non-secure EL0/EL1 | HVC | 64bit EL2 */
+ msr scr_el3, x0
+ msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
+ mov x0, #0x33ff
+ msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
+
+ /* Return to the EL2_SP2 mode from EL3 */
+ mov x0, sp
+ msr sp_el2, x0 /* Migrate SP */
+ mrs x0, vbar_el3
+ msr vbar_el2, x0 /* Migrate VBAR */
+ mrs x0, sctlr_el3
+ msr sctlr_el2, x0 /* Migrate SCTLR */
+ mov x0, #0x3c9
+ msr spsr_el3, x0 /* EL2_SP2 | D | A | I | F */
+ msr elr_el3, x30
+ eret
+ENDPROC(switch_el3_to_el2)
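
The set/way walk in flush_dcache_all is dense; the same CLIDR/CCSIDR
arithmetic reads more naturally in C. A sketch, assuming interrupts stay
masked around the CSSELR/CCSIDR pair (the helper names are invented here):

    #include <stdint.h>

    static inline uint64_t read_clidr_el1(void)
    {
        uint64_t v;
        asm volatile ("mrs %0, clidr_el1" : "=r" (v));
        return v;
    }

    static inline uint64_t ccsidr_for_level(unsigned int level)
    {
        uint64_t v;
        /* CSSELR bit 0 selects I vs. D/unified; the level goes in [3:1]. */
        asm volatile ("msr csselr_el1, %0\n\tisb"
                      :: "r" ((uint64_t)level << 1));
        asm volatile ("mrs %0, ccsidr_el1" : "=r" (v));
        return v;
    }

    static void flush_dcache_all_c(void)
    {
        uint64_t clidr = read_clidr_el1();
        unsigned int loc = (clidr >> 24) & 0x7;  /* Level of Coherence */

        for (unsigned int level = 0; level < loc; level++) {
            /* Ctype: 0 = no cache, 1 = I-only, >= 2 has a data side. */
            if (((clidr >> (level * 3)) & 0x7) < 2)
                continue;

            uint64_t ccsidr = ccsidr_for_level(level);
            unsigned int line_shift = (ccsidr & 0x7) + 4;
            unsigned int max_way = (ccsidr >> 3) & 0x3ff;
            unsigned int max_set = (ccsidr >> 13) & 0x7fff;
            /* DC CISW wants the way left-aligned in bits [31:32-A]. */
            unsigned int way_shift = max_way ? __builtin_clz(max_way) : 0;

            for (unsigned int way = 0; way <= max_way; way++)
                for (unsigned int set = 0; set <= max_set; set++) {
                    uint64_t sw = ((uint64_t)way << way_shift) |
                                  ((uint64_t)set << line_shift) |
                                  (level << 1);
                    asm volatile ("dc cisw, %0" :: "r" (sw));
                }
        }
        asm volatile ("dsb sy\n\tisb");
    }
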
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
new file mode 100644
index 0000000000..31e31311a7
--- /dev/null
+++ b/src/arch/arm64/armv8/exception.c
@@ -0,0 +1,129 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <types.h>
+#include <arch/cache.h>
+#include <arch/exception.h>
+#include <console/console.h>
+
+void exception_sync_el0(uint64_t *regs, uint64_t esr);
+void exception_irq_el0(uint64_t *regs, uint64_t esr);
+void exception_fiq_el0(uint64_t *regs, uint64_t esr);
+void exception_serror_el0(uint64_t *regs, uint64_t esr);
+void exception_sync(uint64_t *regs, uint64_t esr);
+void exception_irq(uint64_t *regs, uint64_t esr);
+void exception_fiq(uint64_t *regs, uint64_t esr);
+void exception_serror(uint64_t *regs, uint64_t esr);
+
+static void print_regs(uint64_t *regs)
+{
+ int i;
+
+ /* ELR contains the restart PC at target exception level */
+ printk(BIOS_ERR, "ELR = 0x%016llx ", regs[0]);
+ printk(BIOS_ERR, "X00 = 0x%016llx\n", regs[1]);
+
+ for (i = 2; i < 31; i+=2) {
+ printk(BIOS_ERR, "X%02d = 0x%016llx ", i - 1, regs[i]);
+ printk(BIOS_ERR, "X%02d = 0x%016llx\n", i, regs[i + 1]);
+ }
+}
+
+void exception_sync_el0(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _sync_el0 (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_irq_el0(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _irq_el0 (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_fiq_el0(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _fiq_el0 (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_serror_el0(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _serror_el0 (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_sync(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _sync (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_irq(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _irq (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_fiq(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _fiq (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_serror(uint64_t *regs, uint64_t esr)
+{
+ printk(BIOS_ERR, "exception _serror (ESR = 0x%08llx)\n", esr);
+ print_regs(regs);
+ die("exception");
+}
+
+void exception_init(void)
+{
+	extern uint32_t exception_table[];
+
+	/*
+	 * The ARM32 version cleared SCTLR.TE here (handle exceptions in ARM
+	 * mode) and SCTLR.V (use VBAR as the vector base), and temporarily
+	 * enforced strict alignment checking. None of that carries over to
+	 * AArch64, where pointing VBAR at the vector table is all the setup
+	 * that is required.
+	 */
+	set_vbar((uintptr_t)exception_table);
+
+	printk(BIOS_DEBUG, "Exception handlers installed.\n");
+}
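
print_regs() above walks the frame that exception_prologue (next file)
pushes. The indexing is easier to see as a struct; this merely restates that
layout (the struct name is invented, nothing in the patch uses it):

    #include <stdint.h>

    /* Frame at the regs pointer, lowest address first: regs[0] is ELR and
     * regs[n] is X(n-1) for n = 1..31. */
    struct exception_frame {
        uint64_t elr;   /* restart PC at the target exception level */
        uint64_t x[31]; /* x0 .. x30 in ascending order */
    };

Handlers receive a pointer to this frame in x0 and the ESR value in x1.
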
diff --git a/src/arch/arm64/armv8/exception_asm.S b/src/arch/arm64/armv8/exception_asm.S
new file mode 100644
index 0000000000..b1f1a94174
--- /dev/null
+++ b/src/arch/arm64/armv8/exception_asm.S
@@ -0,0 +1,103 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/asm.h>
+
+ .text
+
+ .align 11
+ .global exception_table
+exception_table:
+ /* Each entry has 0x80 bytes (.align 7). Save x29/x30 up front: the
+ * bl below would clobber x30 before exception_prologue could save
+ * the interrupted context's value. */
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_sync_el0
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_irq_el0
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_fiq_el0
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_serror_el0
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_sync
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_irq
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_fiq
+ .align 7
+ stp x29, x30, [sp, #-16]!
+ bl exception_prologue
+ bl exception_serror
+
+/*
+ * Save the remaining state (x0-x28 plus ELR) to the stack and set up the
+ * handler arguments x0 (register file) and x1 (ESR). x29/x30 were already
+ * pushed by the vector entry itself, before bl clobbered x30.
+ */
+ENTRY(exception_prologue)
+ stp x27, x28, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+ stp x17, x18, [sp, #-16]!
+ stp x15, x16, [sp, #-16]!
+ stp x13, x14, [sp, #-16]!
+ stp x11, x12, [sp, #-16]!
+ stp x9, x10, [sp, #-16]!
+ stp x7, x8, [sp, #-16]!
+ stp x5, x6, [sp, #-16]!
+ stp x3, x4, [sp, #-16]!
+ stp x1, x2, [sp, #-16]!
+
+ /* FIXME: Don't assume always running in EL2 */
+ mrs x1, elr_el2
+ stp x1, x0, [sp, #-16]!
+
+ mrs x1, esr_el2
+ mov x0, sp
+
+ ret
+ENDPROC(exception_prologue)
+
+ .global exception_stack_end
+exception_stack_end:
+ .quad 0
+
+exception_handler:
+ .word 0
+
+ .global set_vbar
+set_vbar:
+ msr vbar_el2, x0 /* FIXME: like above, don't assume EL2 */
+ ret
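
The .align 11 above puts exception_table on the 2KB boundary VBAR requires,
and .align 7 spaces the entries 0x80 bytes apart. One quick way to check the
wiring is to force a synchronous exception after exception_init(); BRK is
architectural, but the self-test function itself is only a sketch:

    #include <arch/exception.h>

    static void exception_selftest(void)
    {
        exception_init();
        /* BRK raises a synchronous exception: with the handlers above this
         * prints the register file via exception_sync() and dies. */
        asm volatile ("brk #0");
    }
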