Diffstat (limited to 'src/arch')
 src/arch/arm64/Makefile.inc              |   5
 src/arch/arm64/include/arch/transition.h | 188
 src/arch/arm64/transition.c              | 134
 src/arch/arm64/transition_asm.S          | 169
 4 files changed, 496 insertions(+), 0 deletions(-)
diff --git a/src/arch/arm64/Makefile.inc b/src/arch/arm64/Makefile.inc
index b50499e93d..12f936e8c8 100644
--- a/src/arch/arm64/Makefile.inc
+++ b/src/arch/arm64/Makefile.inc
@@ -61,6 +61,8 @@ bootblock-y += c_entry.c
bootblock-y += stage_entry.S
bootblock-y += stages.c
bootblock-y += eabi_compat.c
+bootblock-y += transition.c transition_asm.S
+
bootblock-y += ../../lib/memset.c
bootblock-y += ../../lib/memcpy.c
bootblock-y += ../../lib/memmove.c
@@ -88,6 +90,7 @@ romstage-y += ../../lib/memset.c
romstage-y += ../../lib/memcpy.c
romstage-y += ../../lib/memmove.c
romstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
+romstage-y += transition.c transition_asm.S
rmodules_arm64-y += ../../lib/memset.c
rmodules_arm64-y += ../../lib/memcpy.c
@@ -120,6 +123,8 @@ ramstage-y += ../../lib/memset.c
ramstage-y += ../../lib/memcpy.c
ramstage-y += ../../lib/memmove.c
ramstage-y += stage_entry.S
+ramstage-y += transition.c transition_asm.S
+
rmodules_arm64-y += ../../lib/memset.c
rmodules_arm64-y += ../../lib/memcpy.c
rmodules_arm64-y += ../../lib/memmove.c
diff --git a/src/arch/arm64/include/arch/transition.h b/src/arch/arm64/include/arch/transition.h
new file mode 100644
index 0000000000..e8ded5f2aa
--- /dev/null
+++ b/src/arch/arm64/include/arch/transition.h
@@ -0,0 +1,188 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ARCH_ARM64_TRANSITION_H__
+#define __ARCH_ARM64_TRANSITION_H__
+
+/* ======================== Transition Library =================================
+ * The transition library provides two main functionalities:
+ * 1) It allows any program X to be executed at EL Y using state Z. It provides
+ * struct exc_state, which holds the state of the EL at which we want to
+ * execute X. Before performing an eret to the entry point of program X, it
+ * initializes the required registers using this exc_state structure. Here,
+ * X0 = the argument passed to program X. Important: we do not initialize
+ * SP_EL0 for program X; the program has to handle that on its own. This is
+ * because while performing an eret to X, we could make SP_EL0 point to the
+ * regs structure, which then follows the common exception exit path.
+ * 2) It serves as a common mechanism for handling exception entry and exit at
+ * any given EL. On entry to an exception, SP_ELx is selected by default. The
+ * exception entry routine stores all xregs and jumps to exc_entry, which
+ * saves ELR, SPSR, EL, mode and other information about the state from which
+ * the exception was generated. On exit, xregs are restored by unwinding SP_ELx.
+ * =============================================================================
+ */
+
+/* The stack is pushed and popped in 16-byte (register pair) quantities */
+#define STACK_POP_BYTES 16
+#define STACK_PUSH_BYTES -16
+
+#ifndef __ASSEMBLY__
+
+#include <stdint.h>
+#include <arch/lib_helpers.h>
+
+#define XI_INDEX(i) X##i##_INDEX = i
+
+enum {
+ XI_INDEX(0),
+ XI_INDEX(1),
+ XI_INDEX(2),
+ XI_INDEX(3),
+ XI_INDEX(4),
+ XI_INDEX(5),
+ XI_INDEX(6),
+ XI_INDEX(7),
+ XI_INDEX(8),
+ XI_INDEX(9),
+ XI_INDEX(10),
+ XI_INDEX(11),
+ XI_INDEX(12),
+ XI_INDEX(13),
+ XI_INDEX(14),
+ XI_INDEX(15),
+ XI_INDEX(16),
+ XI_INDEX(17),
+ XI_INDEX(18),
+ XI_INDEX(19),
+ XI_INDEX(20),
+ XI_INDEX(21),
+ XI_INDEX(22),
+ XI_INDEX(23),
+ XI_INDEX(24),
+ XI_INDEX(25),
+ XI_INDEX(26),
+ XI_INDEX(27),
+ XI_INDEX(28),
+ XI_INDEX(29),
+ XI_INDEX(30),
+ XMAX_INDEX,
+};
+
+/*
+ * Important: Any changes made to the two structures below must be reflected
+ * in the exc_prologue and exc_exit routines in transition_asm.S.
+ */
+struct regs {
+ uint64_t sp;
+ uint64_t x[31];
+};
+
+struct elx_state {
+ uint64_t spsr;
+ uint64_t sp_el0;
+ uint64_t sp_elx;
+ uint64_t elr;
+};
+
+struct exc_state {
+ struct elx_state elx;
+ struct regs regs;
+};
+
+/*
+ * get_eret_el returns the value of the exception state to which we will be
+ * returning. This value is saved in SPSR before performing an eret.
+ *
+ * The exception mode is defined by the M[3:0] bits in SPSR:
+ * M[3:2] = EL, M[1] = unused, M[0] = t/h mode for the stack
+ *
+ * 0b0000 EL0t
+ * 0b0100 EL1t
+ * 0b0101 EL1h
+ * 0b1000 EL2t
+ * 0b1001 EL2h
+ * 0b1100 EL3t
+ * 0b1101 EL3h
+ */
+
+static inline uint8_t get_eret_el(uint8_t el, uint8_t l_or_h)
+{
+ uint8_t el_mode = el << CURRENT_EL_SHIFT;
+
+ el_mode |= l_or_h;
+
+ return el_mode;
+}
+
+static inline uint8_t get_el_from_spsr(uint64_t spsr)
+{
+ return ((spsr >> CURRENT_EL_SHIFT) & CURRENT_EL_MASK);
+}
+
+static inline uint8_t get_mode_from_spsr(uint64_t spsr)
+{
+ return (spsr & SPSR_L_H_MASK);
+}
+
+/* Transitions supported are:
+ * 1. ELx --> ELx - 1
+ * 2. Transitions to the AArch64 state only
+ *
+ * For any other transition to be supported, the relevant changes need to be
+ * made to the HCR/SCR registers.
+ */
+
+/*
+ * A user of the transition library can call transition_with_entry, passing the
+ * entry point and its argument, which this function places into ELR and X0.
+ * It then calls transition.
+ */
+void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state);
+/*
+ * The transition function sets up all the registers according to struct
+ * elx_state before jumping to trans_switch.
+ */
+void transition(struct exc_state *exc_state);
+
+/*
+ * exc_exit is called when returning from an exception. It expects a pointer to
+ * the regs structure on the stack so that it can unwind the used stack.
+ */
+void exc_exit(struct regs *regs);
+/*
+ * trans_switch is called by the non-exception path, i.e. the transition C
+ * code, when making a transition to a lower EL. It selects L mode so that
+ * SP_EL0 is used during the unwinding in exc_exit.
+ */
+void trans_switch(struct regs *regs);
+/* exc_set_vbar sets up the vbar for exception vectors. */
+void exc_set_vbar(void);
+
+/* exc_dispatch is the user-defined exception handler. */
+void exc_dispatch(struct exc_state *exc_state, uint64_t id);
+/*
+ * exc_entry is the C-based component of the exception entry, run before we
+ * jump to the user-defined handler. It initializes all the regs in elx_state
+ * and also sets the sp value in the regs structure.
+ */
+void exc_entry(struct exc_state *exc_state, uint64_t id);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ARCH_ARM64_TRANSITION_H__ */
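
As an illustration of use case 1) above, a minimal caller sketch (enter_el2,
el2_entry and el2_stack are hypothetical names; EL2 and SPSR_USE_H are assumed
to come from arch/lib_helpers.h, and memset from string.h):

	static struct exc_state el2_state;

	static void enter_el2(void *el2_entry, void *arg, uint64_t el2_stack)
	{
		memset(&el2_state, 0, sizeof(el2_state));
		/* Run the target at AArch64 EL2 on SP_EL2 (the "h" stack). */
		el2_state.elx.spsr = get_eret_el(EL2, SPSR_USE_H);
		el2_state.elx.sp_elx = el2_stack;
		/* transition_with_entry() fills elr and x0 from entry/arg. */
		transition_with_entry(el2_entry, arg, &el2_state);
	}
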
diff --git a/src/arch/arm64/transition.c b/src/arch/arm64/transition.c
new file mode 100644
index 0000000000..523960e741
--- /dev/null
+++ b/src/arch/arm64/transition.c
@@ -0,0 +1,134 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/lib_helpers.h>
+#include <arch/transition.h>
+#include <console/console.h>
+
+/* Mask out debug exceptions, serror, irq and fiq */
+#define SPSR_MASK (SPSR_FIQ_MASK | SPSR_IRQ_MASK | SPSR_SERROR_MASK | \
+ SPSR_DEBUG_MASK)
+/* Little-endian, no forced XN, instruction cache disabled,
+ * stack alignment check disabled, data and unified caches
+ * disabled, alignment check disabled, MMU disabled
+ */
+#define SCTLR_MASK (SCTLR_MMU_DISABLE | SCTLR_ACE_DISABLE | \
+ SCTLR_CACHE_DISABLE | SCTLR_SAE_DISABLE | SCTLR_RES1 | \
+ SCTLR_ICE_DISABLE | SCTLR_WXN_DISABLE | SCTLR_LITTLE_END)
+
+void __attribute__((weak)) exc_dispatch(struct exc_state *exc_state, uint64_t id)
+{
+ /* Default weak implementation does nothing. */
+}
+
+void exc_entry(struct exc_state *exc_state, uint64_t id)
+{
+ struct elx_state *elx = &exc_state->elx;
+ struct regs *regs = &exc_state->regs;
+ uint8_t elx_mode, elx_el;
+
+ elx->spsr = raw_read_spsr_current();
+ elx_mode = get_mode_from_spsr(elx->spsr);
+ elx_el = get_el_from_spsr(elx->spsr);
+
+ if (elx_mode == SPSR_USE_H) {
+ if (elx_el == get_current_el())
+ regs->sp = (uint64_t)&exc_state[1];
+ else
+ regs->sp = raw_read_sp_elx(elx_el);
+ } else {
+ regs->sp = raw_read_sp_el0();
+ }
+
+ elx->elr = raw_read_elr_current();
+
+ exc_dispatch(exc_state, id);
+}
+
+void transition_with_entry(void *entry, void *arg, struct exc_state *exc_state)
+{
+ /* Argument to entry point goes into X0 */
+ exc_state->regs.x[X0_INDEX] = (uint64_t)arg;
+ /* Entry point goes into ELR */
+ exc_state->elx.elr = (uint64_t)entry;
+
+ transition(exc_state);
+}
+
+void transition(struct exc_state *exc_state)
+{
+ uint32_t scr_mask;
+ uint64_t hcr_mask;
+ uint64_t sctlr;
+ uint32_t current_el = get_current_el();
+
+ struct elx_state *elx = &exc_state->elx;
+ struct regs *regs = &exc_state->regs;
+
+ uint8_t elx_el = get_el_from_spsr(elx->spsr);
+
+ /*
+ * Policies enforced:
+ * 1. We support only ELx --> (ELx - 1) transitions
+ * 2. We support transitions to the AArch64 state only
+ *
+ * If either of the above conditions does not hold, we need a proper
+ * way to update SCR/HCR before removing the checks below.
+ */
+ if ((current_el - elx_el) != 1)
+ die("ARM64 Error: transition not supported\n");
+
+ if (elx->spsr & SPSR_ERET_32)
+ die("ARM64 Error: eret to AArch32 not supported\n");
+ else {
+ scr_mask = SCR_LOWER_AARCH64;
+ hcr_mask = HCR_LOWER_AARCH64;
+ }
+
+ /* SPSR: Mask out debug exceptions, serror, irq, fiq */
+ elx->spsr |= SPSR_MASK;
+ raw_write_spsr_current(elx->spsr);
+
+ /* SCR: Write to SCR if current EL is EL3 */
+ if (current_el == EL3) {
+ uint32_t scr = raw_read_scr_el3();
+ scr |= scr_mask;
+ raw_write_scr_el3(scr);
+ }
+ /* HCR: Write to HCR if current EL is EL2 */
+ else if (current_el == EL2) {
+ uint64_t hcr = raw_read_hcr_el2();
+ hcr |= hcr_mask;
+ raw_write_hcr_el2(hcr);
+ }
+
+ /* ELR: Write entry point of program */
+ raw_write_elr_current(elx->elr);
+
+ /* SCTLR: Initialize EL with selected properties */
+ sctlr = raw_read_sctlr(elx_el);
+ sctlr &= SCTLR_MASK;
+ raw_write_sctlr(sctlr, elx_el);
+
+ /* SP_ELx: Initialize stack pointer */
+ raw_write_sp_elx(elx->sp_elx, elx_el);
+
+ /* Eret to the entry point */
+ trans_switch(regs);
+}
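
To make the checks in transition() concrete, a worked example of the SPSR mode
encoding (assuming CURRENT_EL_SHIFT == 2 and CURRENT_EL_MASK == 0x3, which is
what the M[3:2] layout documented in transition.h implies):

	/* eret from EL3 into AArch64 EL2h: */
	spsr = get_eret_el(EL2, SPSR_USE_H); /* (2 << 2) | 1 = 0b1001 */
	el   = get_el_from_spsr(spsr);       /* (0b1001 >> 2) & 0x3 = 2 */
	/*
	 * current_el (3) - elx_el (2) == 1, so the transition is accepted;
	 * EL3 --> EL1, or an SPSR with SPSR_ERET_32 set, would die().
	 */
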
diff --git a/src/arch/arm64/transition_asm.S b/src/arch/arm64/transition_asm.S
new file mode 100644
index 0000000000..9f48549028
--- /dev/null
+++ b/src/arch/arm64/transition_asm.S
@@ -0,0 +1,169 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * transition_asm.S: This file handles the entry and exit from an exception
+ *
+ * Flow: exception --> exc_vectors --> exc_entry --> exc_dispatch -->
+ * exc_exit
+ * Transition Flow: transition --> trans_switch --> exc_exit
+ *
+ * |---| Exception Entry |---|
+ *
+ * On exception entry, all the xregs are saved on SP_ELx, since SP_ELx is
+ * selected on entry. Some dummy pushes are performed to create space for the
+ * elx_state structure. A pointer to this saved set of regs and a unique id
+ * (identifying the exception) are then passed to exc_entry.
+ *
+ * |---| Exception Transition Dispatch |---|
+ *
+ * This is the C component of exception entry. It does the work of initializing
+ * the exc_state registers. Finally, it calls the exception dispatch
+ * implemented by the user. This is a point of no return.
+ *
+ * |---| Exception Dispatch |---|
+ *
+ * The user of this functionality is expected to implement exc_dispatch, which
+ * acts as the entry point for exception handling. Once exception handling is
+ * complete, the user needs to call exc_exit with a pointer to struct regs.
+ *
+ * |---| Exception Exit |---|
+ *
+ * Once exc_dispatch is done handling the exception, based on the id passed to
+ * it, it needs to call exc_exit with a pointer to struct regs. This is done to
+ * unwind the exception stack by popping off all the xregs.
+ *
+ * |---| Exception Transition Exit |---|
+ *
+ * This routine makes SP_EL0 point to the regs structure passed in and then
+ * continues to the exception exit routine described above. This is why the
+ * transition library does not handle initialization of SP_EL0 for the program
+ * to be executed.
+ */
+
+#define __ASSEMBLY__
+#include <arch/asm.h>
+#include <arch/lib_helpers.h>
+#include <arch/transition.h>
+
+.macro eentry lbl id
+ .align 7
+\lbl:
+ stp x29, x30, [sp, #STACK_PUSH_BYTES]!
+ bl exc_prologue
+ mov x1, \id
+ mov x0, sp
+ b exc_entry
+.endm
+
+/*
+ * exc_vectors: Entry point for an exception
+ */
+ENTRY_WITH_ALIGN(exc_vectors, 11)
+
+eentry sync_curr_sp0,#0
+eentry irq_curr_sp0,#1
+eentry fiq_curr_sp0,#2
+eentry serror_curr_sp0,#3
+eentry sync_curr_spx,#4
+eentry irq_curr_spx,#5
+eentry fiq_curr_spx,#6
+eentry serror_curr_spx,#7
+eentry sync_lower_64,#8
+eentry irq_lower_64,#9
+eentry fiq_lower_64,#10
+eentry serror_lower_64,#11
+eentry sync_lower_32,#12
+eentry irq_lower_32,#13
+eentry fiq_lower_32,#14
+eentry serror_lower_32,#15
+
+ENDPROC(exc_vectors)
+
+ENTRY(exc_prologue)
+ stp x27, x28, [sp, #STACK_PUSH_BYTES]!
+ stp x25, x26, [sp, #STACK_PUSH_BYTES]!
+ stp x23, x24, [sp, #STACK_PUSH_BYTES]!
+ stp x21, x22, [sp, #STACK_PUSH_BYTES]!
+ stp x19, x20, [sp, #STACK_PUSH_BYTES]!
+ stp x17, x18, [sp, #STACK_PUSH_BYTES]!
+ stp x15, x16, [sp, #STACK_PUSH_BYTES]!
+ stp x13, x14, [sp, #STACK_PUSH_BYTES]!
+ stp x11, x12, [sp, #STACK_PUSH_BYTES]!
+ stp x9, x10, [sp, #STACK_PUSH_BYTES]!
+ stp x7, x8, [sp, #STACK_PUSH_BYTES]!
+ stp x5, x6, [sp, #STACK_PUSH_BYTES]!
+ stp x3, x4, [sp, #STACK_PUSH_BYTES]!
+ stp x1, x2, [sp, #STACK_PUSH_BYTES]!
+ /* xzr pushed as a placeholder for sp */
+ stp xzr, x0, [sp, #STACK_PUSH_BYTES]!
+ /*
+ * xzr pairs pushed as placeholders for:
+ * 1. sp_elx and elr
+ */
+ stp xzr, xzr, [sp, #STACK_PUSH_BYTES]!
+ /* 2. spsr and sp_el0 */
+ stp xzr, xzr, [sp, #STACK_PUSH_BYTES]!
+ ret
+ENDPROC(exc_prologue)
+
+/*
+ * trans_switch: Set SPSel to use SP_EL0
+ * x0 = regs structure
+ */
+ENTRY(trans_switch)
+ msr SPSel, #SPSR_USE_L
+ b exc_exit
+ENDPROC(trans_switch)
+
+/*
+ * exc_exit: Return from exception by restoring saved state of xregs
+ * x0 = regs structure
+ */
+ENTRY(exc_exit)
+ /* Unwind the stack by making sp point to regs structure */
+ mov sp, x0
+ /* Load registers x0-x30 */
+ ldp xzr, x0, [sp], #STACK_POP_BYTES
+ ldp x1, x2, [sp], #STACK_POP_BYTES
+ ldp x3, x4, [sp], #STACK_POP_BYTES
+ ldp x5, x6, [sp], #STACK_POP_BYTES
+ ldp x7, x8, [sp], #STACK_POP_BYTES
+ ldp x9, x10, [sp], #STACK_POP_BYTES
+ ldp x11, x12, [sp], #STACK_POP_BYTES
+ ldp x13, x14, [sp], #STACK_POP_BYTES
+ ldp x15, x16, [sp], #STACK_POP_BYTES
+ ldp x17, x18, [sp], #STACK_POP_BYTES
+ ldp x19, x20, [sp], #STACK_POP_BYTES
+ ldp x21, x22, [sp], #STACK_POP_BYTES
+ ldp x23, x24, [sp], #STACK_POP_BYTES
+ ldp x25, x26, [sp], #STACK_POP_BYTES
+ ldp x27, x28, [sp], #STACK_POP_BYTES
+ ldp x29, x30, [sp], #STACK_POP_BYTES
+ eret
+ENDPROC(exc_exit)
+
+/* exc_set_vbar: Initialize the exception entry address in vbar */
+ENTRY(exc_set_vbar)
+ adr x0, exc_vectors
+ write_current vbar, x0, x1
+ dsb sy
+ isb
+ ret
+ENDPROC(exc_set_vbar)
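
Finally, a sketch of a user-defined handler overriding the weak exc_dispatch
from transition.c (handle_sync_from_lower_el is a hypothetical helper; id 8
corresponds to the sync_lower_64 vector in the eentry list above):

	void exc_dispatch(struct exc_state *state, uint64_t id)
	{
		if (id == 8)	/* sync exception from a lower, AArch64 EL */
			handle_sync_from_lower_el(state);

		/* Unwind the saved xregs off the exception stack and eret. */
		exc_exit(&state->regs);
	}
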