author	Patrick Rudolph <patrick.rudolph@9elements.com>	2019-12-01 07:23:59 +0100
committer	Arthur Heymans <arthur@aheymans.xyz>	2020-12-01 14:53:44 +0000
commit	a1695504799f733c2fb1f030d1313535bbb5b065 (patch)
tree	b5daefcdbb854357d9aef7b350d5caf4ffd97591 /src/cpu
parent	45dc92a8c2e697411cb91bf255506575dd48afba (diff)
cpu/x86/sipi: Add x86_64 support
Enter long mode on secondary APs.

Tested on Lenovo T410 with additional x86_64 patches.
Tested on HP Z220 with additional x86_64 patches.
Still boots on x86_32.

Change-Id: I53eae082123d1a12cfa97ead1d87d84db4a334c0
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/45187
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
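For context, "entering long mode" on each AP means running the standard protected-mode to long-mode switch that entry64.inc performs: enable PAE, load a page table, set EFER.LME, enable paging, then far-jump into a 64-bit code segment. A minimal sketch of that generic sequence follows; the symbols pml4_table, CODE_SEG64 and longmode_entry are illustrative, not names from this commit.

	/* Sketch of the generic long-mode switch (illustrative labels only). */
	mov	%cr4, %eax
	orl	$0x20, %eax			/* set CR4.PAE */
	mov	%eax, %cr4

	movl	$pml4_table, %eax		/* top-level page table (hypothetical symbol) */
	movl	%eax, %cr3

	movl	$0xc0000080, %ecx		/* IA32_EFER MSR */
	rdmsr
	orl	$(1 << 8), %eax			/* set EFER.LME */
	wrmsr

	mov	%cr0, %eax
	orl	$0x80000000, %eax		/* set CR0.PG; long mode becomes active */
	mov	%eax, %cr0

	ljmp	$CODE_SEG64, $longmode_entry	/* reload %cs with a 64-bit code segment */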
Diffstat (limited to 'src/cpu')
-rw-r--r--	src/cpu/x86/64bit/entry64.inc	10
-rw-r--r--	src/cpu/x86/sipi_vector.S	15
2 files changed, 25 insertions, 0 deletions
diff --git a/src/cpu/x86/64bit/entry64.inc b/src/cpu/x86/64bit/entry64.inc
index 65c0fdc929..70255173f1 100644
--- a/src/cpu/x86/64bit/entry64.inc
+++ b/src/cpu/x86/64bit/entry64.inc
@@ -16,7 +16,12 @@
 #endif
 
 #include <cpu/x86/msr.h>
+#if defined(__RAMSTAGE__)
+#include <arch/ram_segs.h>
+#else
 #include <arch/rom_segs.h>
+#endif
+
 
 setup_longmode:
 	/* Get page table address */
@@ -42,7 +47,12 @@ setup_longmode:
 	movl	%eax, %cr0
 
 	/* use long jump to switch to 64-bit code segment */
+#if defined(__RAMSTAGE__)
+	ljmp	$RAM_CODE_SEG64, $__longmode_start
+#else
 	ljmp	$ROM_CODE_SEG64, $__longmode_start
+
+#endif
 
 .code64
 __longmode_start:
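RAM_CODE_SEG64 and ROM_CODE_SEG64 (from <arch/ram_segs.h> and <arch/rom_segs.h>) are simply selector offsets into the ramstage and romstage GDTs; the far jump works in either case because the selected descriptor has the L (long mode) bit set. For reference, such a descriptor generally looks like the following sketch; the label and the exact base/limit values are illustrative, not coreboot's actual GDT contents.

	/* Illustrative 64-bit code segment descriptor; base and limit are
	 * ignored in long mode, the important bits are type=code, P=1, L=1. */
gdt64_code_example:
	.word	0xffff		/* limit 15:0 (ignored in long mode) */
	.word	0x0000		/* base 15:0 */
	.byte	0x00		/* base 23:16 */
	.byte	0x9b		/* P=1, DPL=0, S=1, type=execute/read, accessed */
	.byte	0xaf		/* G=1, D=0, L=1, limit 19:16 */
	.byte	0x00		/* base 31:24 */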
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index 054f30d2c4..61d9e34466 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -5,6 +5,8 @@
 #include <cpu/x86/msr.h>
 #include <arch/ram_segs.h>
 
+#define __RAMSTAGE__
+
 /* The SIPI vector is responsible for initializing the APs in the system. It
  * loads microcode, sets up MSRs, and enables caching before calling into
  * C code. */
@@ -192,11 +194,24 @@ load_msr:
 	mov	%eax, %cr4
 #endif
 
+#ifdef __x86_64__
+	/* entry64.inc preserves ebx. */
+#include <cpu/x86/64bit/entry64.inc>
+
+	mov	%rsi, %rdi	/* cpu_num */
+
+	movl	c_handler, %eax
+	call	*%rax
+#else
 	/* c_handler(cpu_num), preserve proper stack alignment */
 	sub	$12, %esp
 	push	%esi	/* cpu_num */
+
 	mov	c_handler, %eax
 	call	*%eax
+#endif
+
+
 halt_jump:
 	hlt
 	jmp	halt_jump
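Both branches above reach the same c_handler(cpu_num) call, but through different calling conventions, which is why the argument handling differs: the x86_32 path is cdecl and passes cpu_num on the stack, while the x86_64 path follows the SysV AMD64 ABI and passes it in %rdi. A short annotated recap of the same instructions, assuming the stack was 16-byte aligned before this point (per the "preserve proper stack alignment" comment):

	/* 64-bit branch, annotated: */
	mov	%rsi, %rdi		/* SysV ABI: first argument (cpu_num) in %rdi */
	movl	c_handler, %eax		/* 32-bit load zero-extends into %rax */
	call	*%rax			/* indirect call into the C handler */

	/* 32-bit branch, annotated: */
	sub	$12, %esp		/* 12 bytes of padding ... */
	push	%esi			/* ... plus the 4-byte argument keep %esp 16-byte aligned */
	mov	c_handler, %eax
	call	*%eax			/* cdecl call: c_handler(cpu_num) */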