Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/cpu_info.S.inc  72
-rw-r--r--  src/cpu/x86/mp_init.c       10
-rw-r--r--  src/cpu/x86/sipi_vector.S   24
3 files changed, 13 insertions(+), 93 deletions(-)
diff --git a/src/cpu/x86/cpu_info.S.inc b/src/cpu/x86/cpu_info.S.inc
deleted file mode 100644
index 6dca920ba0..0000000000
--- a/src/cpu/x86/cpu_info.S.inc
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-/*
- * Pushes a 32-bit register onto the stack.
- *
- * There are two possible code sections where this code can be included:
- * .code32 and .code64
- *
- * Doing a `push %eax` while in a .code64 section will result in a compiler
- * error. This macro manually pushes the 32-bit register onto the stack so we
- * can share the code between 32 and 64 bit builds.
- */
-.macro pushr reg:req
-#if ENV_X86_64
- movl $0, -4(%esp)
- movl \reg, -8(%esp)
- sub $8, %esp
-#else
- push \reg
-#endif
-.endm
-
-/* Push struct cpu_info */
-.macro push_cpu_info index=$0
- pushr \index /* index (size_t) */
- pushr $0 /* *cpu */
-.endm
-
-/* Push struct per_cpu_segment_data */
-.macro push_per_cpu_segment_data cpu_info_pointer=%esp
- pushr \cpu_info_pointer /* *cpu_info */
-.endm
-
-/*
- * Sets the base address in the segment descriptor array.
- *
- * A segment descriptor has the following structure:
- * struct {
- * uint16_t segment_limit_0_15;
- * uint16_t base_address_0_15;
- * uint8_t base_address_16_23;
- * uint8_t attrs[2];
- * uint8_t base_address_24_31;
- * };
- *
- * @desc_array: Address of the descriptor table
- * @base: Address to set in the descriptor
- * @desc_index: Index of the descriptor in the table. Defaults to 0. Must be a
- * register if specified.
- *
- * Clobbers %eax, %ebx.
- */
-.macro set_segment_descriptor_base desc_array:req, base:req, desc_index
- mov \base, %eax
-
- mov \desc_array, %ebx
-
-.ifb \desc_index
- movw %ax, 2(%ebx)
- shr $16, %eax
- movb %al, 4(%ebx)
- shr $8, %eax
- movb %al, 7(%ebx)
-.else
- movw %ax, 2(%ebx, \desc_index, 8)
- shr $16, %eax
- movb %al, 4(%ebx, \desc_index, 8)
- shr $8, %eax
- movb %al, 7(%ebx, \desc_index, 8)
-.endif
-
-.endm
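The removed set_segment_descriptor_base macro splits a 32-bit base address across the three base fields of a descriptor. In C terms, using the struct layout from the comment above, it does roughly the following (set_descriptor_base is an illustrative name, not coreboot API):

#include <stddef.h>
#include <stdint.h>

/* Layout documented in the removed cpu_info.S.inc comment. */
struct segment_descriptor {
	uint16_t segment_limit_0_15;
	uint16_t base_address_0_15;
	uint8_t  base_address_16_23;
	uint8_t  attrs[2];
	uint8_t  base_address_24_31;
} __attribute__((packed));	/* 8 bytes, matching the scale-8 addressing in the macro */

/* Illustrative C equivalent of set_segment_descriptor_base. */
static void set_descriptor_base(struct segment_descriptor *desc_array,
				uint32_t base, size_t desc_index)
{
	struct segment_descriptor *desc = &desc_array[desc_index];

	desc->base_address_0_15  = base & 0xffff;	/* movw %ax, 2(%ebx,...) */
	desc->base_address_16_23 = (base >> 16) & 0xff;	/* movb %al, 4(%ebx,...) */
	desc->base_address_24_31 = (base >> 24) & 0xff;	/* movb %al, 7(%ebx,...) */
}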
diff --git a/src/cpu/x86/mp_init.c b/src/cpu/x86/mp_init.c
index 0e4a571e96..758550c43d 100644
--- a/src/cpu/x86/mp_init.c
+++ b/src/cpu/x86/mp_init.c
@@ -176,20 +176,19 @@ static struct bus *g_cpu_bus;
/* By the time APs call ap_init() caching has been setup, and microcode has
* been loaded. */
-static void asmlinkage ap_init(void)
+static asmlinkage void ap_init(unsigned int index)
{
- struct cpu_info *info = cpu_info();
-
/* Ensure the local APIC is enabled */
enable_lapic();
setup_lapic_interrupts();
struct device *dev = g_cpu_bus->children;
- for (unsigned int i = info->index; i > 0; i--)
+ for (unsigned int i = index; i > 0; i--)
dev = dev->sibling;
- info->cpu = dev;
+ set_cpu_info(index, dev);
+ struct cpu_info *info = cpu_info();
cpu_add_map_entry(info->index);
/* Fix up APIC id with reality. */
@@ -540,6 +539,7 @@ static enum cb_err init_bsp(struct bus *cpu_bus)
}
/* Find the device structure for the boot CPU. */
+ set_cpu_info(0, bsp);
info = cpu_info();
info->cpu = bsp;
info->cpu->name = processor_name;
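The set_cpu_info()/cpu_info() helpers that replace the %gs-based lookup are not part of this diff. A minimal sketch of the store side, assuming a simple static array keyed by CPU number and shown as a fragment in the context of mp_init.c (the actual coreboot implementation may differ), looks like:

/* A minimal sketch only: store each CPU's device and index by CPU number. */
static struct cpu_info cpu_infos[CONFIG_MAX_CPUS];

void set_cpu_info(unsigned int index, struct device *cpu)
{
	cpu_infos[index].index = index;
	cpu_infos[index].cpu = cpu;
}

cpu_info() then has to map the currently executing CPU back to its slot (for example via the local APIC ID) instead of reading a per-CPU %gs segment, which is what allows the cpu_info.S.inc machinery above to be removed.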
diff --git a/src/cpu/x86/sipi_vector.S b/src/cpu/x86/sipi_vector.S
index b8cac96341..39973dbc8b 100644
--- a/src/cpu/x86/sipi_vector.S
+++ b/src/cpu/x86/sipi_vector.S
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#include <cpu/x86/cpu_info.S.inc>
#include <cpu/x86/cr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>
@@ -104,19 +103,8 @@ _start:
subl %eax, %edx
mov %edx, %esp
- push_cpu_info index=%ecx
- push_per_cpu_segment_data
-
- /*
- * Update the AP's per_cpu_segment_descriptor to point to the
- * per_cpu_segment_data that was allocated on the stack.
- */
- set_segment_descriptor_base per_cpu_segment_descriptors, %esp, %ecx
-
- mov %ecx, %eax
- shl $3, %eax /* The index is << 3 in the segment selector */
- add per_cpu_segment_selector, %eax
- mov %eax, %gs
+ /* Save CPU number for calling the AP entry */
+ push %ecx
/*
* The following code only needs to run on Intel platforms and thus the caller
@@ -230,15 +218,19 @@ load_msr:
mov %eax, %cr4
#endif
+ pop %edi /* Retrieve cpu index */
andl $0xfffffff0, %esp /* ensure stack alignment */
#if ENV_X86_64
- /* entry64.inc preserves ebx. */
+ /* entry64.inc preserves ebx, esi, edi, ebp */
#include <cpu/x86/64bit/entry64.inc>
-
movabs c_handler, %eax
call *%rax
#else
+ push $0
+ push $0
+ push $0
+ push %edi
mov c_handler, %eax
call *%eax
#endif
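With the per-CPU %gs segment gone, the AP's identity is handed to C code as an explicit argument: the index saved from %ecx is popped into %edi, the first integer argument register in the SysV AMD64 ABI, for the 64-bit call, and pushed as the first stack argument for the 32-bit call (the three pushes of $0 are padding that keeps the stack 16-byte aligned across the call). The function reached through c_handler is therefore expected to take the shape of the new ap_init() above, roughly (ap_entry is an illustrative name):

/* Prototype assumed by the SIPI vector for the function stored in
 * c_handler; asmlinkage and the parameter match ap_init() in mp_init.c. */
asmlinkage void ap_entry(unsigned int cpu_index);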