Diffstat (limited to 'src/arch/arm64')
-rw-r--r--  src/arch/arm64/Kconfig                     5
-rw-r--r--  src/arch/arm64/Makefile.inc                1
-rw-r--r--  src/arch/arm64/armv8/secmon_loader.c      69
-rw-r--r--  src/arch/arm64/boot.c                      4
-rw-r--r--  src/arch/arm64/include/arch/spintable.h   50
-rw-r--r--  src/arch/arm64/spintable.c               100
-rw-r--r--  src/arch/arm64/spintable_asm.S            38
7 files changed, 251 insertions, 16 deletions
diff --git a/src/arch/arm64/Kconfig b/src/arch/arm64/Kconfig
index a070b90816..2465bb1f66 100644
--- a/src/arch/arm64/Kconfig
+++ b/src/arch/arm64/Kconfig
@@ -20,4 +20,9 @@ config ARCH_USE_SECURE_MONITOR
default n
select RELOCATABLE_MODULES
+config ARCH_SPINTABLE
+ bool
+ default n
+ depends on ARCH_RAMSTAGE_ARM64
+
source src/arch/arm64/armv8/Kconfig
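ARCH_SPINTABLE is a hidden bool, so a platform opts in by selecting it from its own Kconfig rather than setting it in a defconfig. A minimal sketch of such a select (the SOC_VENDOR_EXAMPLE symbol is hypothetical; the second select satisfies the depends on ARCH_RAMSTAGE_ARM64 above):

config SOC_VENDOR_EXAMPLE
	bool
	select ARCH_RAMSTAGE_ARM64
	select ARCH_SPINTABLE
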
diff --git a/src/arch/arm64/Makefile.inc b/src/arch/arm64/Makefile.inc
index 1dfaa9607b..c2caf64973 100644
--- a/src/arch/arm64/Makefile.inc
+++ b/src/arch/arm64/Makefile.inc
@@ -123,6 +123,7 @@ ramstage-y += ../../lib/memset.c
ramstage-y += ../../lib/memcpy.c
ramstage-y += ../../lib/memmove.c
ramstage-y += stage_entry.S
+ramstage-$(CONFIG_ARCH_SPINTABLE) += spintable.c spintable_asm.S
ramstage-y += transition.c transition_asm.S
rmodules_arm64-y += ../../lib/memset.c
diff --git a/src/arch/arm64/armv8/secmon_loader.c b/src/arch/arm64/armv8/secmon_loader.c
index 066f1c18a8..4d83764234 100644
--- a/src/arch/arm64/armv8/secmon_loader.c
+++ b/src/arch/arm64/armv8/secmon_loader.c
@@ -24,6 +24,7 @@
#include <arch/lib_helpers.h>
#include <arch/secmon.h>
+#include <arch/spintable.h>
#include <console/console.h>
#include <rmodule.h>
#include <string.h>
@@ -75,10 +76,43 @@ static secmon_entry_t secmon_load_rmodule(void)
return rmodule_entry(&secmon_mod);
}
-void secmon_run(void (*entry)(void *), void *cb_tables)
+struct secmon_runit {
+ secmon_entry_t entry;
+ struct secmon_params bsp_params;
+ struct secmon_params secondary_params;
+};
+
+static void secmon_start(void *arg)
{
- struct secmon_params params;
uint32_t scr;
+ struct secmon_params *p = NULL;
+ struct secmon_runit *r = arg;
+
+ if (cpu_is_bsp())
+ p = &r->bsp_params;
+ else if (r->secondary_params.entry != NULL)
+ p = &r->secondary_params;
+
+ printk(BIOS_DEBUG, "CPU%x entering secure monitor.\n", cpu_info()->id);
+
+ /* We want to enforce the following policies:
+ * NS bit is set for lower EL
+ */
+ scr = raw_read_scr_el3();
+ scr |= SCR_NS;
+ raw_write_scr_el3(scr);
+
+ r->entry(p);
+}
+
+void secmon_run(void (*entry)(void *), void *cb_tables)
+{
+ const struct spintable_attributes *spin_attrs;
+ static struct secmon_runit runit;
+ struct cpu_action action = {
+ .run = secmon_start,
+ .arg = &runit,
+ };
printk(BIOS_SPEW, "payload jump @ %p\n", entry);
@@ -87,25 +121,28 @@ void secmon_run(void (*entry)(void *), void *cb_tables)
return;
}
- secmon_entry_t doit = secmon_load_rmodule();
+ runit.entry = secmon_load_rmodule();
- if (doit == NULL)
+ if (runit.entry == NULL)
die("ARM64 Error: secmon load error");
printk(BIOS_DEBUG, "ARM64: Loaded the el3 monitor...jumping to %p\n",
- doit);
+ runit.entry);
- params.entry = entry;
- params.arg = cb_tables;
- params.elx_el = EL2;
- params.elx_mode = SPSR_USE_L;
+ runit.bsp_params.entry = entry;
+ runit.bsp_params.arg = cb_tables;
+ runit.bsp_params.elx_el = EL2;
+ runit.bsp_params.elx_mode = SPSR_USE_L;
+ runit.secondary_params.elx_el = EL2;
+ runit.secondary_params.elx_mode = SPSR_USE_L;
- /* We want to enforce the following policies:
- * NS bit is set for lower EL
- */
- scr = raw_read_scr_el3();
- scr |= SCR_NS;
- raw_write_scr_el3(scr);
+ spin_attrs = spintable_get_attributes();
+
+ if (spin_attrs != NULL) {
+ runit.secondary_params.entry = spin_attrs->entry;
+ runit.secondary_params.arg = spin_attrs->addr;
+ }
- doit(&params);
+ arch_run_on_all_cpus_but_self_async(&action);
+ secmon_start(&runit);
}
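The two parameter blocks filled in above are consumed by the EL3 monitor, which uses them to decide which exception level to drop to and where: the BSP is sent to the payload, while the secondaries are sent to the spin table. The struct itself lives in arch/secmon.h and is not part of this diff; the following is only a sketch inferred from the assignments in secmon_run(), with the field types assumed:

struct secmon_params {
	void (*entry)(void *);	/* where to drop after EL3 setup: payload (BSP) or spin table (secondaries) */
	void *arg;		/* argument handed to entry: coreboot tables or the spin-table mailbox */
	int elx_el;		/* exception level to return to; EL2 in this diff */
	int elx_mode;		/* SPSR stack-pointer mode; SPSR_USE_L here */
};
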
diff --git a/src/arch/arm64/boot.c b/src/arch/arm64/boot.c
index 6307e6005b..01630f376b 100644
--- a/src/arch/arm64/boot.c
+++ b/src/arch/arm64/boot.c
@@ -21,6 +21,7 @@
#include <arch/lib_helpers.h>
#include <arch/secmon.h>
#include <arch/stages.h>
+#include <arch/spintable.h>
#include <arch/transition.h>
#include <cbmem.h>
#include <console/console.h>
@@ -38,6 +39,9 @@ void arch_payload_run(const struct payload *payload)
secmon_run(payload_entry, cb_tables);
+ /* Start the other CPUs spinning. */
+ spintable_start();
+
/* If current EL is not EL3, jump to payload at same EL. */
if (current_el != EL3) {
cache_sync_instructions();
diff --git a/src/arch/arm64/include/arch/spintable.h b/src/arch/arm64/include/arch/spintable.h
new file mode 100644
index 0000000000..b583ddbeea
--- /dev/null
+++ b/src/arch/arm64/include/arch/spintable.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ARCH_SPINTABLE_H__
+#define __ARCH_SPINTABLE_H__
+
+struct spintable_attributes {
+ void (*entry)(void *);
+ void *addr;
+};
+
+#if IS_ENABLED(CONFIG_ARCH_SPINTABLE)
+
+/* Initialize spintable with provided monitor address. */
+void spintable_init(void *monitor_address);
+
+/* Start spinning on the non-boot CPUs. */
+void spintable_start(void);
+
+/* Return NULL on failure, otherwise the spintable info. */
+const struct spintable_attributes *spintable_get_attributes(void);
+
+#else /* !IS_ENABLED(CONFIG_ARCH_SPINTABLE) */
+
+static inline void spintable_init(void *monitor_address) {}
+static inline void spintable_start(void) {}
+static inline const struct spintable_attributes *spintable_get_attributes(void)
+{
+ return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_ARCH_SPINTABLE) */
+
+#endif /* __ARCH_SPINTABLE_H__ */
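Taken together, the header exposes three hooks: the platform stages the spin table with spintable_init() before the payload is booted, secmon_run()/spintable_start() pick up the staged attributes via spintable_get_attributes(), and spintable_start() parks the non-boot CPUs. A minimal usage sketch, assuming a hypothetical soc_cpu_release_addr() helper that returns the 8-byte mailbox the payload or OS will later write:

#include <arch/spintable.h>

static void soc_finalize(void)
{
	/* Must be an address the payload/kernel knows about, e.g. the
	 * cpu-release-addr it advertises for the spin-table enable method. */
	void *mailbox = soc_cpu_release_addr();	/* hypothetical helper */

	spintable_init(mailbox);
}
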
diff --git a/src/arch/arm64/spintable.c b/src/arch/arm64/spintable.c
new file mode 100644
index 0000000000..7a1ab7f5b1
--- /dev/null
+++ b/src/arch/arm64/spintable.c
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include <arch/cache.h>
+#include <arch/spintable.h>
+#include <arch/transition.h>
+#include <console/console.h>
+#include <cpu/cpu.h>
+#include <cbmem.h>
+#include <string.h>
+
+static struct spintable_attributes spin_attrs;
+
+void spintable_init(void *monitor_address)
+{
+ extern void __wait_for_spin_table_request(void);
+ const size_t code_size = 4096;
+
+ if (monitor_address == NULL) {
+ printk(BIOS_ERR, "spintable: NULL address to monitor.\n");
+ return;
+ }
+
+ spin_attrs.entry = cbmem_add(CBMEM_ID_SPINTABLE, code_size);
+
+ if (spin_attrs.entry == NULL)
+ return;
+
+ spin_attrs.addr = monitor_address;
+
+ printk(BIOS_INFO, "spintable @ %p will monitor %p\n",
+ spin_attrs.entry, spin_attrs.addr);
+
+ /* Ensure the memory location is zeroed out. */
+ *(uint64_t *)monitor_address = 0;
+
+ memcpy(spin_attrs.entry, __wait_for_spin_table_request, code_size);
+
+ dcache_clean_invalidate_by_mva(monitor_address, sizeof(uint64_t));
+ dcache_clean_invalidate_by_mva(spin_attrs.entry, code_size);
+}
+
+static void spintable_enter(void *unused)
+{
+ struct exc_state state;
+ const struct spintable_attributes *attrs;
+ int current_el;
+
+ attrs = spintable_get_attributes();
+
+ current_el = get_current_el();
+
+ if (current_el != EL3)
+ attrs->entry(attrs->addr);
+
+ memset(&state, 0, sizeof(state));
+ state.elx.spsr = get_eret_el(EL2, SPSR_USE_L);
+
+ transition_with_entry(attrs->entry, attrs->addr, &state);
+}
+
+const struct spintable_attributes *spintable_get_attributes(void)
+{
+ if (spin_attrs.entry == NULL) {
+ printk(BIOS_ERR, "spintable: monitor code not present.\n");
+ return NULL;
+ }
+
+ return &spin_attrs;
+}
+
+void spintable_start(void)
+{
+ struct cpu_action action = {
+ .run = spintable_enter,
+ };
+
+ if (spintable_get_attributes() == NULL)
+ return;
+
+ printk(BIOS_INFO, "All non-boot CPUs to enter spintable.\n");
+
+ arch_run_on_all_cpus_but_self_async(&action);
+}
diff --git a/src/arch/arm64/spintable_asm.S b/src/arch/arm64/spintable_asm.S
new file mode 100644
index 0000000000..3066b7e679
--- /dev/null
+++ b/src/arch/arm64/spintable_asm.S
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/asm.h>
+
+ENTRY(__wait_for_spin_table_request)
+ /* Entry here is in EL2 with the magic address in x0. */
+ mov x28, x0
+1:
+ ldr x27, [x28]
+ cmp x27, xzr
+ b.ne 2f
+ wfe
+ b 1b
+2:
+ /* Entry into the kernel. */
+ mov x0, xzr
+ mov x1, xzr
+ mov x2, xzr
+ mov x3, xzr
+ br x27
+ENDPROC(__wait_for_spin_table_request)
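The loop above is the waiting half of the spin-table handshake: the parked CPU polls the mailbox whose address arrived in x0, sleeps in wfe between polls, and branches to whatever non-zero 64-bit value eventually appears there, with x0-x3 cleared. The releasing half is the payload's or OS's job and is not part of this change; a sketch of it, assuming mailbox is the address that was passed to spintable_init():

#include <arch/cache.h>
#include <stdint.h>

static void release_secondary(uint64_t *mailbox, uint64_t entry_point)
{
	/* Publish the entry point; any non-zero value ends the wfe loop above. */
	*mailbox = entry_point;
	dcache_clean_invalidate_by_mva(mailbox, sizeof(*mailbox));

	/* Order the store, then wake CPUs waiting in wfe. */
	asm volatile("dsb sy" ::: "memory");
	asm volatile("sev");
}
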