author    Raul E Rangel <rrangel@chromium.org>    2021-10-08 13:10:38 -0600
committer Felix Held <felix-coreboot@felixheld.de>    2021-10-21 17:37:10 +0000
commit    3671597b9428e6be2a72197c19b449c463a2f962 (patch)
tree      8e015db273f071b5157922696f2275e98b873ea1 /src/cpu/x86
parent    99c84787b87e0e30e57b152b41ca5998072feb90 (diff)
cpu/x86: Remove cpu parameter to ap_init
We now pre-populate cpu_info before jumping to the C handler. We no
longer need this parameter. I moved the stack alignment closer to the
actual invocation of the C handler so it's easier to reason about.

BUG=b:194391185, b:179699789
TEST=Boot guybrush to OS and verify all CPUs still function

Signed-off-by: Raul E Rangel <rrangel@chromium.org>
Change-Id: I8997683b6613b7031784cabf7039a400f0efdea1
Reviewed-on: https://review.coreboot.org/c/coreboot/+/58147
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
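To make the shape of the change concrete, here is a minimal C sketch. It is
illustrative only, not the coreboot source: the struct layout, the index
field, and the before/after function names are assumptions; only
asmlinkage ap_init(void) and the cpu_info() call appear in the diff below.

	/* Sketch only: assumed struct layout for illustration. */
	struct cpu_info {
		unsigned int index;	/* assumed field: which AP this is */
	};

	struct cpu_info *cpu_info(void);	/* returns the pre-populated per-CPU record */

	/* Before: the SIPI trampoline passed the AP number in a register. */
	static void ap_init_before(unsigned int cpu);

	/* After: no parameter; the AP recovers its own identity. */
	static void ap_init_after(void)
	{
		struct cpu_info *info = cpu_info();
		/* info->index serves where the old 'cpu' argument used to. */
		(void)info;
	}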
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/mp_init.c      |  2 +-
-rw-r--r--  src/cpu/x86/sipi_vector.S  | 13 ++-----------
2 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 5d0cb194e9..89030f4575 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -180,7 +180,7 @@ static void park_this_cpu(void *unused)
/* By the time APs call ap_init() caching has been setup, and microcode has
* been loaded. */
-static void asmlinkage ap_init(unsigned int cpu)
+static void asmlinkage ap_init(void)
{
struct cpu_info *info = cpu_info();
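For context on why the parameter became redundant: the SIPI code below
installs a per-CPU %gs segment (add per_cpu_segment_selector / mov %eax,
%gs), and cpu_info() reads the pre-populated record through it. A hedged
sketch of such an accessor, assuming the cpu_info pointer sits at offset 0
of the segment (the offset and exact asm are assumptions, not the coreboot
implementation):

	static inline struct cpu_info *cpu_info(void)
	{
		struct cpu_info *ci;
		/* %gs points at the per-CPU area; slot 0 is assumed to
		 * hold the cpu_info pointer. */
		__asm__("mov %%gs:0, %0" : "=r"(ci));
		return ci;
	}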
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index 55256de920..b8cac96341 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -118,11 +118,6 @@ _start:
add per_cpu_segment_selector, %eax
mov %eax, %gs
- andl $0xfffffff0, %esp /* ensure stack alignment */
-
- /* Save CPU number. */
- mov %ecx, %esi
-
/*
* The following code only needs to run on Intel platforms and thus the caller
* doesn't provide a microcode_ptr if not on Intel.
@@ -235,19 +230,15 @@ load_msr:
mov %eax, %cr4
#endif
+ andl $0xfffffff0, %esp /* ensure stack alignment */
+
#if ENV_X86_64
/* entry64.inc preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>
- mov %rsi, %rdi /* cpu_num */
-
movabs c_handler, %eax
call *%rax
#else
- /* c_handler(cpu_num), preserve proper stack alignment */
- sub $12, %esp
- push %esi /* cpu_num */
-
mov c_handler, %eax
call *%eax
#endif
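A note on the alignment move: in the old 32-bit path, sub $12, %esp plus
the 4-byte push %esi adjusted the stack by 16 bytes in total, preserving
16-byte alignment across the argument push. With no argument to pass, a
single mask immediately before the call suffices, which is why
andl $0xfffffff0, %esp now sits next to the call. A small, self-contained
C illustration of what that mask computes (illustrative only):

	#include <assert.h>
	#include <stdint.h>

	/* Align a 32-bit stack pointer down to a 16-byte boundary, as
	 * andl $0xfffffff0, %esp does in the diff above. */
	static uint32_t align_down_16(uint32_t sp)
	{
		return sp & 0xfffffff0u;
	}

	int main(void)
	{
		assert(align_down_16(0x0001234c) == 0x00012340);
		assert(align_down_16(0x00012340) == 0x00012340);	/* already aligned */
		return 0;
	}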