Diffstat (limited to 'src/arch/arm64/armv8')
 src/arch/arm64/armv8/bootblock.S | 19
 src/arch/arm64/armv8/cpu.S       | 15
 src/arch/arm64/armv8/exception.c |  1
 3 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/src/arch/arm64/armv8/bootblock.S b/src/arch/arm64/armv8/bootblock.S
index 4a9fea9af6..e5758bca0d 100644
--- a/src/arch/arm64/armv8/bootblock.S
+++ b/src/arch/arm64/armv8/bootblock.S
@@ -16,21 +16,14 @@
#include <arch/asm.h>
+/* NOTE: When making changes to general ARM64 initialization, keep in mind that
+ * there are other CPU entry points, using BOOTBLOCK_CUSTOM or entering the CPU
+ * in a later stage (like Tegra). Changes should generally be put into
+ * arm64_init_cpu so they can be shared between those instances. */
+
ENTRY(_start)
- /* Initialize PSTATE, SCTLR and caches to clean state. */
+ /* Initialize PSTATE, SCTLR and caches to clean state, set up stack. */
bl arm64_init_cpu
- /* Initialize stack with sentinel value to later check overflow. */
- ldr x0, =_stack
- ldr x1, =_estack
- ldr x2, =0xdeadbeefdeadbeef
-stack_init_loop:
- stp x2, x2, [x0], #16
- cmp x0, x1
- bne stack_init_loop
-
- /* Leave a line of beef dead for easier visibility in stack dumps. */
- sub sp, x0, #16
-
bl main
ENDPROC(_start)
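
The hunk above removes the sentinel-fill loop from _start; the cpu.S hunk below re-adds it inside arm64_init_cpu. As a reading aid only (not part of the commit), the removed assembly corresponds roughly to the following C, where _stack/_estack are the linker symbols already referenced above and the returned value is what the assembly writes into SP:

#include <stdint.h>

#define STACK_SENTINEL 0xdeadbeefdeadbeefULL

extern uint64_t _stack[];   /* lowest address of the stack region */
extern uint64_t _estack[];  /* one past the highest stack address */

static uintptr_t seed_stack(void)
{
	uint64_t *p = _stack;

	/* stp x2, x2, [x0], #16: store two sentinel words per iteration */
	while (p < _estack) {
		*p++ = STACK_SENTINEL;
		*p++ = STACK_SENTINEL;
	}

	/* sub sp, x0, #16: keep one sentinel pair visible in stack dumps */
	return (uintptr_t)p - 16;
}
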
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
index 4713ca59f9..711c338685 100644
--- a/src/arch/arm64/armv8/cpu.S
+++ b/src/arch/arm64/armv8/cpu.S
@@ -21,7 +21,8 @@
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
* known state regarding caches/SCTLR/PSTATE. Completely cleans and invalidates
* icache/dcache, disables MMU and dcache (if active), and enables unaligned
- * accesses, icache and branch prediction (if inactive). Clobbers R22 and R23.
+ * accesses, icache and branch prediction (if inactive). Seeds the stack and
+ * initializes SP_EL0. Clobbers R22 and R23.
*/
ENTRY(arm64_init_cpu)
/* Initialize PSTATE (unmask all exceptions, select SP_EL0). */
@@ -60,5 +61,17 @@ ENTRY(arm64_init_cpu)
dsb sy
isb
+ /* Initialize stack with sentinel value to later check overflow. */
+ ldr x2, =0xdeadbeefdeadbeef
+ ldr x0, =_stack
+ ldr x1, =_estack
+1:
+ stp x2, x2, [x0], #16
+ cmp x0, x1
+ bne 1b
+
+ /* Leave a line of beef dead for easier visibility in stack dumps. */
+ sub sp, x0, #16
+
ret x23
ENDPROC(arm64_init_cpu)
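
The added comment says the sentinel exists "to later check overflow". A hypothetical check (not part of this change, and not necessarily how coreboot implements it) could scan upward from _stack and report how many sentinel bytes remain untouched:

#include <stddef.h>
#include <stdint.h>

#define STACK_SENTINEL 0xdeadbeefdeadbeefULL

extern uint64_t _stack[];
extern uint64_t _estack[];

/* Returns the number of never-touched bytes at the bottom of the stack;
 * 0 means the stack reached its limit at some point (likely overflow). */
static size_t stack_headroom(void)
{
	const uint64_t *p = _stack;

	/* The stack grows down, so unused sentinels accumulate at the bottom. */
	while (p < _estack && *p == STACK_SENTINEL)
		p++;

	return (size_t)((const uint8_t *)p - (const uint8_t *)_stack);
}

Seeding the stack from arm64_init_cpu means every entry path that funnels through the shared init gets this protection, which is the point of the NOTE added to bootblock.S.
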
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
index 35e3f7fac6..b872a55a8a 100644
--- a/src/arch/arm64/armv8/exception.c
+++ b/src/arch/arm64/armv8/exception.c
@@ -166,7 +166,6 @@ void exc_dispatch(struct exc_state *state, uint64_t idx)
exc_exit(&state->regs);
}
-
static int test_exception_handler(struct exc_state *state, uint64_t vector_id)
{
/* Update instruction pointer to next instruction. */