author    Arthur Heymans <arthur@aheymans.xyz>  2022-11-01 23:45:59 +0100
committer Arthur Heymans <arthur@aheymans.xyz>  2022-11-07 13:59:35 +0000
commit    66b2888b77da6721955a918c8cd5399abe786a6a
tree      1dded45a911f18ffd86a80b56add28526209f66f
parent    e2d291b5ae4aa49d5b1613e06b86bf2fc8efe4c5
cpu/x86: Drop LEGACY_SMP_INIT
This codepath is deprecated after the 4.18 release.

Change-Id: I7e90f457f3979781d06323ef1350d5fb05a6be43
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/69121
Reviewed-by: Elyes Haouas <ehaouas@noos.fr>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
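Note: the replacement for this codepath is the common PARALLEL_MP flow, in
which a platform fills in a struct mp_ops and calls mp_init_with_smm(). A
minimal sketch of that flow follows; the callback names are placeholders and
the exact struct fields should be checked against <cpu/x86/mp.h>:

	/* Sketch: bringing up APs on the PARALLEL_MP path. */
	#include <console/console.h>
	#include <cpu/x86/mp.h>

	static const struct mp_ops mp_ops = {
		.get_cpu_count = get_cpu_count,			/* platform callback */
		.get_smm_info = get_smm_info,			/* TSEG base/size, save state size */
		.relocation_handler = relocation_handler,	/* staggers SMBASE per CPU */
	};

	void mp_init_cpus(struct bus *cpu_bus)
	{
		if (mp_init_with_smm(cpu_bus, &mp_ops) != CB_SUCCESS)
			printk(BIOS_ERR, "MP initialization failure.\n");
	}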
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/Kconfig                 |  12
-rw-r--r--  src/cpu/x86/lapic/Makefile.inc      |   2
-rw-r--r--  src/cpu/x86/lapic/lapic_cpu_init.c  | 420
-rw-r--r--  src/cpu/x86/lapic/secondary.S       |  86
-rw-r--r--  src/cpu/x86/smm/Makefile.inc        |  14
-rw-r--r--  src/cpu/x86/smm/smihandler.c        | 207
-rw-r--r--  src/cpu/x86/smm/smm.ld              |  65
-rw-r--r--  src/cpu/x86/smm/smmhandler.S        | 258
-rw-r--r--  src/cpu/x86/smm/smmrelocate.S       | 163
9 files changed, 1 insertion(+), 1226 deletions(-)
diff --git a/src/cpu/x86/Kconfig b/src/cpu/x86/Kconfig
index 3a0c29bdbe..b136e9ee5b 100644
--- a/src/cpu/x86/Kconfig
+++ b/src/cpu/x86/Kconfig
@@ -2,7 +2,6 @@ if ARCH_X86
config PARALLEL_MP
def_bool y
- depends on !LEGACY_SMP_INIT
select CPU_INFO_V2
help
This option uses common MP infrastructure for bringing up APs
@@ -27,9 +26,6 @@ config X86_SMM_SKIP_RELOCATION_HANDLER
with a stub at 0x30000. This is useful on platforms that have
an alternative way to set SMBASE.
-config LEGACY_SMP_INIT
- bool
-
config DEFAULT_X2APIC
def_bool n
help
@@ -154,13 +150,7 @@ config SMM_TSEG
default y
depends on !(NO_SMM || SMM_ASEG)
-config SMM_LEGACY_ASEG
- bool
- default y if HAVE_SMI_HANDLER && SMM_ASEG && LEGACY_SMP_INIT
- help
- SMM support without PARALLEL_MP, to be deprecated.
-
-if HAVE_SMI_HANDLER && !SMM_LEGACY_ASEG
+if HAVE_SMI_HANDLER
config SMM_MODULE_STACK_SIZE
hex
diff --git a/src/cpu/x86/lapic/Makefile.inc b/src/cpu/x86/lapic/Makefile.inc
index 91a41f76a5..22bd987f03 100644
--- a/src/cpu/x86/lapic/Makefile.inc
+++ b/src/cpu/x86/lapic/Makefile.inc
@@ -1,6 +1,4 @@
ramstage-$(CONFIG_AP_IN_SIPI_WAIT) += lapic_cpu_stop.c
-ramstage-$(CONFIG_LEGACY_SMP_INIT) += lapic_cpu_init.c
-ramstage-$(CONFIG_LEGACY_SMP_INIT) += secondary.S
bootblock-$(CONFIG_UDELAY_LAPIC) += apic_timer.c
romstage-$(CONFIG_UDELAY_LAPIC) += apic_timer.c
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
deleted file mode 100644
index c52a898a2e..0000000000
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <cpu/x86/cr.h>
-#include <cpu/x86/gdt.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/smi_deprecated.h>
-#include <acpi/acpi.h>
-#include <delay.h>
-#include <lib.h>
-#include <string.h>
-#include <symbols.h>
-#include <console/console.h>
-#include <device/device.h>
-#include <device/path.h>
-#include <smp/atomic.h>
-#include <smp/spinlock.h>
-#include <cpu/cpu.h>
-#include <cpu/intel/speedstep.h>
-#include <smp/node.h>
-#include <stdlib.h>
-#include <thread.h>
-
-/* This is a lot more paranoid now, since Linux can NOT handle
- * being told there is a CPU when none exists. So any errors
- * will return 0, meaning no CPU.
- *
- * We actually handle that case by noting which CPUs start up
- * and not telling anyone about the ones that don't.
- */
-
-/* The Start-Up IPI (SIPI) vector must be 4 KiB aligned and below 1 MiB. */
-#define AP_SIPI_VECTOR 0x1000
-
-static char *lowmem_backup;
-static char *lowmem_backup_ptr;
-static int lowmem_backup_size;
-
-static inline void setup_secondary_gdt(void)
-{
- u16 *gdt_limit;
-#if ENV_X86_64
- u64 *gdt_base;
-#else
- u32 *gdt_base;
-#endif
-
- gdt_limit = (void *)&_secondary_gdt_addr;
- gdt_base = (void *)&gdt_limit[1];
-
- *gdt_limit = (uintptr_t)&gdt_end - (uintptr_t)&gdt - 1;
- *gdt_base = (uintptr_t)&gdt;
-}
-
-static void copy_secondary_start_to_lowest_1M(void)
-{
- unsigned long code_size;
-
- /* Fill in secondary_start's local gdt. */
- setup_secondary_gdt();
-
- code_size = (unsigned long)_secondary_start_end
- - (unsigned long)_secondary_start;
-
- if (acpi_is_wakeup_s3()) {
- /* need to save it for RAM resume */
- lowmem_backup_size = code_size;
- lowmem_backup = malloc(code_size);
- lowmem_backup_ptr = (char *)AP_SIPI_VECTOR;
-
- if (lowmem_backup == NULL)
- die("Out of backup memory\n");
-
- memcpy(lowmem_backup, lowmem_backup_ptr, lowmem_backup_size);
- }
-
- /* Copy _secondary_start to RAM below 1 MiB. */
- memcpy((unsigned char *)AP_SIPI_VECTOR,
- (unsigned char *)_secondary_start, code_size);
-
- printk(BIOS_DEBUG, "start_eip=0x%08lx, code_size=0x%08lx\n",
- (unsigned long int)AP_SIPI_VECTOR, code_size);
-}
-
-static void recover_lowest_1M(void)
-{
- if (acpi_is_wakeup_s3())
- memcpy(lowmem_backup_ptr, lowmem_backup, lowmem_backup_size);
-}
-
-static uint32_t wait_for_ipi_completion(const int timeout_ms)
-{
- int loops = timeout_ms * 10;
- uint32_t send_status;
-
- /* wait for the ipi send to finish */
- printk(BIOS_SPEW, "Waiting for send to finish...\n");
- do {
- printk(BIOS_SPEW, "+");
- udelay(100);
- send_status = lapic_busy();
- } while (send_status && (--loops > 0));
-
- return send_status;
-}
-
-static int lapic_start_cpu(unsigned long apicid)
-{
- const int timeout_100ms = 100;
- uint32_t send_status, accept_status;
- int j, maxlvt;
-
- /*
- * Starting actual IPI sequence...
- */
-
- printk(BIOS_SPEW, "Asserting INIT.\n");
-
- /*
- * Turn INIT on target chip
- */
- lapic_send_ipi(LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT, apicid);
-
- send_status = wait_for_ipi_completion(timeout_100ms);
- if (send_status) {
- printk(BIOS_ERR, "CPU %ld: First APIC write timed out. "
- "Disabling\n", apicid);
- // too bad.
- printk(BIOS_ERR, "ESR is 0x%x\n", lapic_read(LAPIC_ESR));
- if (lapic_read(LAPIC_ESR)) {
- printk(BIOS_ERR, "Try to reset ESR\n");
- lapic_write(LAPIC_ESR, 0);
- printk(BIOS_ERR, "ESR is 0x%x\n",
- lapic_read(LAPIC_ESR));
- }
- return 0;
- }
- mdelay(10);
-
- printk(BIOS_SPEW, "Deasserting INIT.\n");
-
- lapic_send_ipi(LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT, apicid);
-
- send_status = wait_for_ipi_completion(timeout_100ms);
- if (send_status) {
- printk(BIOS_ERR, "CPU %ld: Second APIC write timed out. "
- "Disabling\n", apicid);
- // too bad.
- return 0;
- }
-
- /*
- * Run STARTUP IPI loop.
- */
- printk(BIOS_SPEW, "#startup loops: %d.\n", CONFIG_NUM_IPI_STARTS);
-
- maxlvt = 4;
-
- for (j = 1; j <= CONFIG_NUM_IPI_STARTS; j++) {
- printk(BIOS_SPEW, "Sending STARTUP #%d to %lu.\n", j, apicid);
- lapic_read(LAPIC_SPIV);
- lapic_write(LAPIC_ESR, 0);
- lapic_read(LAPIC_ESR);
- printk(BIOS_SPEW, "After apic_write.\n");
-
- /*
- * STARTUP IPI
- */
-
- lapic_send_ipi(LAPIC_DM_STARTUP | (AP_SIPI_VECTOR >> 12), apicid);
-
- /*
- * Give the other CPU some time to accept the IPI.
- */
- udelay(300);
-
- printk(BIOS_SPEW, "Startup point 1.\n");
-
- send_status = wait_for_ipi_completion(timeout_100ms);
-
- /*
- * Give the other CPU some time to accept the IPI.
- */
- udelay(200);
- /*
- * Due to the Pentium erratum 3AP.
- */
- if (maxlvt > 3) {
- lapic_read(LAPIC_SPIV);
- lapic_write(LAPIC_ESR, 0);
- }
- accept_status = (lapic_read(LAPIC_ESR) & 0xEF);
- if (send_status || accept_status)
- break;
- }
- printk(BIOS_SPEW, "After Startup.\n");
- if (send_status)
- printk(BIOS_WARNING, "APIC never delivered???\n");
- if (accept_status)
- printk(BIOS_WARNING, "APIC delivery error (%x).\n",
- accept_status);
- if (send_status || accept_status)
- return 0;
- return 1;
-}
-
-/* Number of cpus that are currently running in coreboot */
-static atomic_t active_cpus = ATOMIC_INIT(1);
-
-/* start_cpu_lock covers last_cpu_index and secondary_stack.
- * Starting only one CPU at a time lets us remove the logic
- * for selecting the stack from assembly language.
- *
- * In addition, communicating through variables with the CPU
- * being started allows us to verify that it has started before
- * start_cpu returns.
- */
-
-DECLARE_SPIN_LOCK(start_cpu_lock);
-static unsigned int last_cpu_index = 0;
-static void *stacks[CONFIG_MAX_CPUS];
-volatile unsigned long secondary_stack;
-volatile unsigned int secondary_cpu_index;
-
-static int start_cpu(struct device *cpu)
-{
- struct cpu_info *info;
- uintptr_t stack_top;
- uintptr_t stack_base;
- unsigned long apicid;
- unsigned int index;
- unsigned long count;
- int result;
-
- spin_lock(&start_cpu_lock);
-
- /* Get the CPU's apicid */
- apicid = cpu->path.apic.apic_id;
-
- /* Get an index for the new processor */
- index = ++last_cpu_index;
-
- /* Find boundaries of the new processor's stack */
- stack_top = ALIGN_DOWN((uintptr_t)_estack, CONFIG_STACK_SIZE);
- stack_top -= (CONFIG_STACK_SIZE*index);
- stack_base = stack_top - CONFIG_STACK_SIZE;
- stack_top -= sizeof(struct cpu_info);
- printk(BIOS_SPEW, "CPU%d: stack_base %p, stack_top %p\n", index,
- (void *)stack_base, (void *)stack_top);
- stacks[index] = (void *)stack_base;
-
- /* Record the index and which CPU structure we are using */
- info = (struct cpu_info *)stack_top;
- info->index = index;
- info->cpu = cpu;
- cpu_add_map_entry(info->index);
-
- /* Advertise the new stack and index to start_cpu */
- secondary_stack = stack_top;
- secondary_cpu_index = index;
-
- /* Until the CPU starts up report the CPU is not enabled */
- cpu->enabled = 0;
- cpu->initialized = 0;
-
- /* Start the CPU */
- result = lapic_start_cpu(apicid);
-
- if (result) {
- result = 0;
- /* Wait 1s or until the new CPU calls in */
- for (count = 0; count < 100000; count++) {
- if (secondary_stack == 0) {
- result = 1;
- break;
- }
- udelay(10);
- }
- }
- secondary_stack = 0;
- spin_unlock(&start_cpu_lock);
- return result;
-}
-
-/* C entry point of secondary cpus */
-asmlinkage void secondary_cpu_init(unsigned int index)
-{
- atomic_inc(&active_cpus);
-
- spin_lock(&start_cpu_lock);
-
-#ifdef __SSE3__
- /*
- * CR4 appears to be cleared when an AP starts via lapic_start_cpu().
- * Turn on CR4.OSFXSR and CR4.OSXMMEXCPT when SSE is enabled.
- */
- CRx_TYPE cr4_val;
- cr4_val = read_cr4();
- cr4_val |= (CR4_OSFXSR | CR4_OSXMMEXCPT);
- write_cr4(cr4_val);
-#endif
-
- /* Ensure the local APIC is enabled */
- enable_lapic();
- setup_lapic_interrupts();
-
- cpu_initialize(index);
-
- spin_unlock(&start_cpu_lock);
-
- atomic_dec(&active_cpus);
-
- stop_this_cpu();
-}
-
-static void start_other_cpus(struct bus *cpu_bus, struct device *bsp_cpu)
-{
- struct device *cpu;
- /* Loop through the cpus once getting them started */
-
- for (cpu = cpu_bus->children; cpu; cpu = cpu->sibling) {
- if (cpu->path.type != DEVICE_PATH_APIC)
- continue;
-
- if (!cpu->enabled)
- continue;
-
- if (cpu->initialized)
- continue;
-
- if (!start_cpu(cpu))
- /* Record the error in cpu? */
- printk(BIOS_ERR, "CPU 0x%02x would not start!\n",
- cpu->path.apic.apic_id);
-
- udelay(10);
- }
-}
-
-static void wait_other_cpus_stop(struct bus *cpu_bus)
-{
- struct device *cpu;
- int old_active_count, active_count;
- long loopcount = 0;
- int i;
-
- /* Now loop until the other cpus have finished initializing */
- old_active_count = 1;
- active_count = atomic_read(&active_cpus);
- while (active_count > 1) {
- if (active_count != old_active_count) {
- printk(BIOS_INFO, "Waiting for %d CPUS to stop\n",
- active_count - 1);
- old_active_count = active_count;
- }
- udelay(10);
- active_count = atomic_read(&active_cpus);
- loopcount++;
- }
- for (cpu = cpu_bus->children; cpu; cpu = cpu->sibling) {
- if (cpu->path.type != DEVICE_PATH_APIC)
- continue;
- if (cpu->path.apic.apic_id == SPEEDSTEP_APIC_MAGIC)
- continue;
- if (!cpu->initialized)
- printk(BIOS_ERR, "CPU 0x%02x did not initialize!\n",
- cpu->path.apic.apic_id);
- }
- printk(BIOS_DEBUG, "All AP CPUs stopped (%ld loops)\n", loopcount);
- checkstack(_estack, 0);
- for (i = 1; i < CONFIG_MAX_CPUS && i <= last_cpu_index; i++)
- checkstack((void *)stacks[i] + CONFIG_STACK_SIZE, i);
-}
-
-void initialize_cpus(struct bus *cpu_bus)
-{
- struct device_path cpu_path;
- struct cpu_info *info;
-
- /* Find the info struct for this CPU */
- info = cpu_info();
-
- /* Ensure the local APIC is enabled */
- if (is_smp_boot()) {
- enable_lapic();
- setup_lapic_interrupts();
- } else {
- disable_lapic();
- }
-
- /* Get the device path of the boot CPU */
- cpu_path.type = DEVICE_PATH_APIC;
- cpu_path.apic.apic_id = lapicid();
-
- /* Find the device structure for the boot CPU */
- info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
- cpu_add_map_entry(info->index);
-
- // Why here? In case we can someday start core 1 in amd_sibling_init.
- if (is_smp_boot())
- copy_secondary_start_to_lowest_1M();
-
- if (CONFIG(SMM_LEGACY_ASEG))
- smm_init();
-
- /* Initialize the bootstrap processor */
- cpu_initialize(0);
-
- if (is_smp_boot())
- start_other_cpus(cpu_bus, info->cpu);
-
- /* Now wait for the rest of the CPUs to stop. */
- if (is_smp_boot())
- wait_other_cpus_stop(cpu_bus);
-
- if (CONFIG(SMM_LEGACY_ASEG))
- smm_init_completion();
-
- if (is_smp_boot())
- recover_lowest_1M();
-}
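Note on the SIPI vector encoding used above: the STARTUP IPI carries only an
8-bit vector, which the AP interprets as a physical page number, so the AP
begins execution in real mode at vector * 4096. That is why AP_SIPI_VECTOR
must be 4 KiB aligned and below 1 MiB, and why lapic_start_cpu() sends
AP_SIPI_VECTOR >> 12. A sketch of the mapping (illustrative only):

	#include <stdint.h>

	#define AP_SIPI_VECTOR 0x1000	/* 4 KiB aligned, below 1 MiB */

	static inline uint8_t sipi_vector(uintptr_t entry)
	{
		/* The AP starts at CS = vector << 8, IP = 0, i.e. at
		 * physical address vector * 4096. */
		return (uint8_t)(entry >> 12);
	}
	/* sipi_vector(AP_SIPI_VECTOR) == 0x01 -> AP entry at 0x0100:0x0000 */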
diff --git a/src/cpu/x86/lapic/secondary.S b/src/cpu/x86/lapic/secondary.S
deleted file mode 100644
index d2d43a2b5b..0000000000
--- a/src/cpu/x86/lapic/secondary.S
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <cpu/x86/lapic_def.h>
-#include <arch/ram_segs.h>
-
- .text
- .globl _secondary_start, _secondary_start_end, _secondary_gdt_addr
- .balign 4096
-_secondary_start:
- .code16
- cli
- xorl %eax, %eax
- movl %eax, %cr3 /* Invalidate TLB */
-
- /* On hyper-threaded CPUs, invalidating the cache here is
- * very, very bad. Don't.
- */
-
- /* setup the data segment */
- movw %cs, %ax
- movw %ax, %ds
-
- lgdtl gdtaddr - _secondary_start
-
- movl %cr0, %eax
- andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
- orl $0x60000001, %eax /* CD, NW, PE = 1 */
- movl %eax, %cr0
-
- ljmpl $RAM_CODE_SEG, $__ap_protected_start
-
- /* This will get filled in by C code. */
-_secondary_gdt_addr:
-gdtaddr:
- .word 0 /* the table limit */
-#if ENV_X86_64
- .quad 0
-#else
- .long 0 /* we know the offset */
-#endif
-
-_secondary_start_end:
-
-ap_protected_start:
- .code32
- lgdt gdtaddr
- ljmpl $RAM_CODE_SEG, $__ap_protected_start
-
-__ap_protected_start:
-
- movw $RAM_DATA_SEG, %ax
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %ss
- xor %ax, %ax /* zero out the gs and fs segment index */
- movw %ax, %fs
- movw %ax, %gs /* Will be used for cpu_info */
-
- /* Load the Interrupt descriptor table */
- lidt idtarg
-
-#if ENV_X86_64
- /* entry64.inc preserves ebx. */
- #include <cpu/x86/64bit/entry64.inc>
- movabs secondary_stack, %rax
- mov %rax, %rsp
- andl $0xfffffff0, %esp
- movabs secondary_cpu_index, %rax
- mov %rax, %rdi
-#else
- /* Set the stack pointer, and flag that we are done */
- xorl %eax, %eax
- movl secondary_stack, %esp
-
- andl $0xfffffff0, %esp
- sub $12, %esp /* maintain 16-byte alignment for the call below */
- movl secondary_cpu_index, %edi
- pushl %edi
- movl %eax, secondary_stack
-#endif
-
- call secondary_cpu_init
-1: hlt
- jmp 1b
-
-.code32
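Note on the CR0 masks in _secondary_start (the same values appear again in
smmhandler.S below): spelled out with the usual CR0 bit names, the two
constants decode as follows. A sketch; the macro names from coreboot's
<cpu/x86/cr.h> are assumed:

	/* andl $0x7FFAFFD1: clear PG, AM, WP, NE, TS, EM, MP */
	#define CR0_CLEAR ~(CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP)
	/* orl $0x60000001: set CD, NW, PE */
	#define CR0_SET (CR0_CD | CR0_NW | CR0_PE)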
diff --git a/src/cpu/x86/smm/Makefile.inc b/src/cpu/x86/smm/Makefile.inc
index 5f695909a8..da93827274 100644
--- a/src/cpu/x86/smm/Makefile.inc
+++ b/src/cpu/x86/smm/Makefile.inc
@@ -76,17 +76,3 @@ $(obj)/smm/smm: $(obj)/smm/smm.elf.rmod
$(OBJCOPY_smm) -O binary $< $@
endif
-
-ifeq ($(CONFIG_SMM_LEGACY_ASEG),y)
-
-smm-y += smm.ld
-
-$(obj)/smm/smm: $(obj)/smm/smm.o $(call src-to-obj,smm,$(src)/cpu/x86/smm/smm.ld)
- $(LD_smm) $(LDFLAGS_smm) -o $(obj)/smm/smm.elf -T $(call src-to-obj,smm,$(src)/cpu/x86/smm/smm.ld) $(obj)/smm/smm.o
- $(NM_smm) -n $(obj)/smm/smm.elf | sort > $(obj)/smm/smm.map
- $(OBJCOPY_smm) -O binary $(obj)/smm/smm.elf $@
-
-smm-y += smmhandler.S
-smm-y += smihandler.c
-
-endif # CONFIG_SMM_LEGACY_ASEG
diff --git a/src/cpu/x86/smm/smihandler.c b/src/cpu/x86/smm/smihandler.c
deleted file mode 100644
index 7946e90aea..0000000000
--- a/src/cpu/x86/smm/smihandler.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <arch/io.h>
-#include <console/console.h>
-#include <commonlib/region.h>
-#include <cpu/x86/smm.h>
-#include <cpu/x86/smi_deprecated.h>
-#include <cpu/amd/amd64_save_state.h>
-#include <cpu/intel/em64t100_save_state.h>
-#include <cpu/intel/em64t101_save_state.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/lapic_def.h>
-#include <cpu/x86/legacy_save_state.h>
-
-#if CONFIG(SPI_FLASH_SMM)
-#include <spi-generic.h>
-#endif
-
-static int do_driver_init = 1;
-
-typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
-
-/* SMI multiprocessing semaphore */
-static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status
- = SMI_UNLOCKED;
-
-static int smi_obtain_lock(void)
-{
- u8 ret = SMI_LOCKED;
-
- asm volatile (
- "movb %2, %%al\n"
- "xchgb %%al, %1\n"
- "movb %%al, %0\n"
- : "=g" (ret), "=m" (smi_handler_status)
- : "g" (SMI_LOCKED)
- : "eax"
- );
-
- return (ret == SMI_UNLOCKED);
-}
-
-static void smi_release_lock(void)
-{
- asm volatile (
- "movb %1, %%al\n"
- "xchgb %%al, %0\n"
- : "=m" (smi_handler_status)
- : "g" (SMI_UNLOCKED)
- : "eax"
- );
-}
-
-void io_trap_handler(int smif)
-{
- /* If a handler function handled a given IO trap, it
- * shall return a non-zero value
- */
- printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
-
- if (southbridge_io_trap_handler(smif))
- return;
-
- if (mainboard_io_trap_handler(smif))
- return;
-
- printk(BIOS_DEBUG, "Unknown function\n");
-}
-
-/**
- * @brief Set the EOS bit
- */
-static void smi_set_eos(void)
-{
- southbridge_smi_set_eos();
-}
-
-static u32 pci_orig;
-
-/**
- * @brief Backup PCI address to make sure we do not mess up the OS
- */
-static void smi_backup_pci_address(void)
-{
- pci_orig = inl(0xcf8);
-}
-
-/**
- * @brief Restore PCI address previously backed up
- */
-static void smi_restore_pci_address(void)
-{
- outl(pci_orig, 0xcf8);
-}
-
-static inline void *smm_save_state(uintptr_t base, int arch_offset, int node)
-{
- base += SMM_SAVE_STATE_BEGIN(arch_offset) - (node * 0x400);
- return (void *)base;
-}
-
-/* This returns the SMM revision from the save state of CPU0,
-   which is assumed to be the same for all CPUs. See the memory
-   map in smmhandler.S. */
-uint32_t smm_revision(void)
-{
- return *(uint32_t *)(SMM_BASE + SMM_ENTRY_OFFSET * 2 - SMM_REVISION_OFFSET_FROM_TOP);
-}
-
-void *smm_get_save_state(int cpu)
-{
- switch (smm_revision()) {
- case 0x00030002:
- case 0x00030007:
- return smm_save_state(SMM_BASE, SMM_LEGACY_ARCH_OFFSET, cpu);
- case 0x00030100:
- return smm_save_state(SMM_BASE, SMM_EM64T100_ARCH_OFFSET, cpu);
- case 0x00030101: /* SandyBridge, IvyBridge, and Haswell */
- return smm_save_state(SMM_BASE, SMM_EM64T101_ARCH_OFFSET, cpu);
- case 0x00020064:
- case 0x00030064:
- return smm_save_state(SMM_BASE, SMM_AMD64_ARCH_OFFSET, cpu);
- }
-
- return NULL;
-}
-
-bool smm_region_overlaps_handler(const struct region *r)
-{
- const struct region r_smm = {SMM_BASE, SMM_DEFAULT_SIZE};
-
- return region_overlap(&r_smm, r);
-}
-
-/**
- * @brief Interrupt handler for SMI#
- */
-
-void smi_handler(void)
-{
- unsigned int node;
-
- /* Are we ok to execute the handler? */
- if (!smi_obtain_lock()) {
- /* For security reasons we don't release the other CPUs
- * until the CPU with the lock is actually done
- */
- while (smi_handler_status == SMI_LOCKED) {
- asm volatile (
- ".byte 0xf3, 0x90\n" /* hint a CPU we are in
- * spinlock (PAUSE
- * instruction, REP NOP)
- */
- );
- }
- return;
- }
-
- smi_backup_pci_address();
-
- node = lapicid();
-
- console_init();
-
- printk(BIOS_SPEW, "\nSMI# #%d\n", node);
-
- /* Use smm_get_save_state() to see if the smm revision is supported */
- if (smm_get_save_state(node) == NULL) {
- printk(BIOS_WARNING, "smm_revision: 0x%08x\n", smm_revision());
- printk(BIOS_WARNING, "SMI# not supported on your CPU\n");
- /* Don't release the lock, so that no further SMIs happen;
- * we can't handle them anyway.
- */
- return;
- }
-
- /* Allow drivers to initialize variables in SMM context. */
- if (do_driver_init) {
-#if CONFIG(SPI_FLASH_SMM)
- spi_init();
-#endif
- do_driver_init = 0;
- }
-
- /* Call chipset specific SMI handlers. */
- southbridge_smi_handler();
-
- smi_restore_pci_address();
-
- smi_release_lock();
-
- /* De-assert SMI# signal to allow another SMI */
- smi_set_eos();
-}
-
-/* Provide a default implementation for all weak handlers so that relocation
- * entries in the modules make sense. Without default implementations the
- * weak relocations w/o a symbol have a 0 address which is where the modules
- * are linked at. */
-int __weak mainboard_io_trap_handler(int smif) { return 0; }
-void __weak southbridge_smi_handler(void) {}
-void __weak mainboard_smi_gpi(u32 gpi_sts) {}
-int __weak mainboard_smi_apmc(u8 data) { return 0; }
-void __weak mainboard_smi_sleep(u8 slp_typ) {}
-void __weak mainboard_smi_finalize(void) {}
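Note on the locking above: xchg with a memory operand is implicitly locked on
x86, so smi_obtain_lock()/smi_release_lock() implement a test-and-set
spinlock. A rough C11 equivalent (a sketch, not the coreboot API):

	#include <stdatomic.h>
	#include <stdbool.h>

	enum { SMI_LOCKED, SMI_UNLOCKED };

	static atomic_int smi_handler_status = SMI_UNLOCKED;

	static bool smi_obtain_lock(void)
	{
		/* Swap in LOCKED; we own the lock iff it was UNLOCKED before. */
		return atomic_exchange(&smi_handler_status, SMI_LOCKED) == SMI_UNLOCKED;
	}

	static void smi_release_lock(void)
	{
		atomic_store(&smi_handler_status, SMI_UNLOCKED);
	}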
diff --git a/src/cpu/x86/smm/smm.ld b/src/cpu/x86/smm/smm.ld
deleted file mode 100644
index e232028e4b..0000000000
--- a/src/cpu/x86/smm/smm.ld
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/* Maximum number of CPUs/cores */
-CPUS = 4;
-
-_ = ASSERT(CPUS >= CONFIG_MAX_CPUS, "The ASEG SMM code only supports up to 4 CPUS");
-
-ENTRY(smm_handler_start);
-
-SECTIONS
-{
- /* This is the actual SMM handler.
- *
- * We just put code, rodata, data and bss all in a row.
- */
- . = 0xa0000;
- .handler (.): {
- _program = .;
- /* Assembler stub */
- *(.handler)
-
- /* C code of the SMM handler */
- *(.text);
- *(.text.*);
-
- /* C read-only data of the SMM handler */
- . = ALIGN(16);
- *(.rodata)
- *(.rodata.*)
-
- /* C read-write data of the SMM handler */
- . = ALIGN(4);
- *(.data)
- *(.data.*)
-
- /* C uninitialized data of the SMM handler */
- . = ALIGN(4);
- *(.bss)
- *(.bss.*)
- *(.sbss)
- *(.sbss.*)
-
- /* What is this? (Something we don't need with -fno-common.) */
- *(COMMON)
- . = ALIGN(4);
- _eprogram = .;
- }
-
- /* The ASEG is used, interleaved, to stuff the SMM handlers
- * for all CPU cores in there. The jump table redirects the
- * execution to the actual SMM handler.
- */
- . = 0xa8000 - (( CPUS - 1) * 0x400);
- .jumptable : {
- KEEP(*(.jumptable));
- }
-
- /DISCARD/ : {
- *(.comment)
- *(.note)
- *(.note.*)
- *(.eh_frame)
- *(.debug_*)
- }
-}
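Note on the jumptable placement: ". = 0xa8000 - ((CPUS - 1) * 0x400)" makes
the last 1 KiB-aligned entry end exactly at 0xa8000, so each core's SMM entry
point sits 0x400 bytes below the previous one, matching the SMBASE staggering
done in smmrelocate.S. Worked out for CPUS = 4:

	entry(core n) = 0xa8000 - n * 0x400
	core 0: 0xa8000, core 1: 0xa7c00, core 2: 0xa7800, core 3: 0xa7400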
diff --git a/src/cpu/x86/smm/smmhandler.S b/src/cpu/x86/smm/smmhandler.S
deleted file mode 100644
index 9e7108ccf5..0000000000
--- a/src/cpu/x86/smm/smmhandler.S
+++ /dev/null
@@ -1,258 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-/* NOTE: This handler assumes the SMM window goes from 0xa0000
- * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
- * the SMM window is 128 KiB, covering 0xa0000 to 0xbffff.
- * So there is a lot of potential for growth in here. Let's stick
- * to 64 KiB if we can, though.
- */
-
-#include <cpu/x86/lapic_def.h>
-#include <cpu/x86/msr.h>
-
-/*
- * +--------------------------------+ 0xaffff
- * | Save State Map Node 0 |
- * | Save State Map Node 1 |
- * | Save State Map Node 2 |
- * | Save State Map Node 3 |
- * | ... |
- * +--------------------------------+ 0xaf000
- * | |
- * | |
- * | |
- * +--------------------------------+ 0xa8400
- * | SMM Entry Node 0 (+ stack) |
- * +--------------------------------+ 0xa8000
- * | SMM Entry Node 1 (+ stack) |
- * | SMM Entry Node 2 (+ stack) |
- * | SMM Entry Node 3 (+ stack) |
- * | ... |
- * +--------------------------------+ 0xa7400
- * | |
- * | SMM Handler |
- * | |
- * +--------------------------------+ 0xa0000
- *
- */
-
-/* SMM_HANDLER_OFFSET is the 16-bit offset within the ASEG
- * at which smm_handler_start lives. At the moment the handler
- * lives right at 0xa0000, so the offset is 0.
- */
-
-#define SMM_HANDLER_OFFSET 0x0000
-
-#if ENV_X86_64
-.bss
-ia32efer_backup_eax:
-.long 0
-ia32efer_backup_edx:
-.long 0
-#endif
-
-/* Initially, SMM is some sort of real mode. Let gcc know
- * how to treat the SMM handler stub.
- */
-
-.section ".handler", "a", @progbits
-
-.code16
-
-/**
- * SMM code to enable protected mode and jump to the
- * C-written function void smi_handler(void)
- *
- * All the bad magic is not all that bad after all.
- */
-#define SMM_START 0xa0000
-#define SMM_END 0xb0000
-#if SMM_END <= SMM_START
-#error invalid SMM configuration
-#endif
-.global smm_handler_start
-smm_handler_start:
-#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
- /* Check if the LAPIC register block overlaps with SMM.
- * This block needs to work without data accesses because they
- * may be routed into the LAPIC register block.
- * Code accesses, on the other hand, are never routed to LAPIC,
- * which is what makes this work in the first place.
- */
- mov $LAPIC_BASE_MSR, %ecx
- rdmsr
- and $(~0xfff), %eax
- sub $(SMM_START), %eax
- cmp $(SMM_END - SMM_START), %eax
- ja untampered_lapic
-1:
- /* emit "Crash" on serial */
- mov $(CONFIG_TTYS0_BASE), %dx
- mov $'C', %al
- out %al, (%dx)
- mov $'r', %al
- out %al, (%dx)
- mov $'a', %al
- out %al, (%dx)
- mov $'s', %al
- out %al, (%dx)
- mov $'h', %al
- out %al, (%dx)
- /* now crash for real */
- ud2
-untampered_lapic:
-#endif
- movw $(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
- lgdtl %cs:(%bx)
-
- movl %cr0, %eax
- andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
- orl $0x60000001, %eax /* CD, NW, PE = 1 */
- movl %eax, %cr0
-
- /* Enable protected mode */
- ljmpl $0x08, $1f
-
-.code32
-1:
- /* flush the cache after disabling it */
- wbinvd
-
- /* Use flat data segment */
- movw $0x10, %ax
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %ss
- xor %ax, %ax /* zero out the gs and fs segment index */
- movw %ax, %fs
- movw %ax, %gs /* Will be used for cpu_info */
-
- /* FIXME: Incompatible with X2APIC_SUPPORT. */
- /* Get this CPU's LAPIC ID */
- movl $(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
- movl (%esi), %ecx
- shr $24, %ecx
-
- /* This is an ugly hack, and we should find a way to read the CPU index
- * without relying on the LAPIC ID.
- */
-
- /* calculate stack offset by multiplying the APIC ID
- * by 1024 (0x400), and save that offset in ebp.
- */
- shl $10, %ecx
- movl %ecx, %ebp
-
- /* We put the stack for each core right above
- * its SMM entry point. Core 0 starts at 0xa8000,
- * we spare 0x10 bytes for the jump to be sure.
- */
- movl $0xa8010, %eax
- subl %ecx, %eax /* subtract offset, see above */
- movl %eax, %ebx /* Save bottom of stack in ebx */
-
-#define SMM_STACK_SIZE (0x400 - 0x10)
- /* clear stack */
- cld
- movl %eax, %edi
- movl $(SMM_STACK_SIZE >> 2), %ecx
- xorl %eax, %eax
- rep stosl
-
- /* set new stack */
- addl $SMM_STACK_SIZE, %ebx
- movl %ebx, %esp
-
-#if ENV_X86_64
- /* Backup IA32_EFER. Preserves ebx. */
- movl $(IA32_EFER), %ecx
- rdmsr
- movl %eax, ia32efer_backup_eax
- movl %edx, ia32efer_backup_edx
-
- /* Enable long mode. Preserves ebx. */
-#include <cpu/x86/64bit/entry64.inc>
-
-#endif
- /* Call C handler */
- call smi_handler
-
-#if ENV_X86_64
- /*
- * The only reason to go back to protected mode is that RSM doesn't restore
- * MSR registers and MSR IA32_EFER was modified by entering long mode.
- * Drop to protected mode to safely operate on the IA32_EFER MSR.
- */
-
- /* Disable long mode. */
- #include <cpu/x86/64bit/exit32.inc>
-
- /* Restore IA32_EFER as RSM doesn't restore MSRs. */
- movl $(IA32_EFER), %ecx
- movl ia32efer_backup_eax, %eax
- movl ia32efer_backup_edx, %edx
- wrmsr
-#endif
-
- /* To return, just do rsm. It will "clean up" protected mode */
- rsm
-
-.code16
-
-.align 4, 0xff
-
-smm_gdtptr16:
- .word smm_gdt_end - smm_gdt - 1
- .long smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET
-
-.code32
-
-smm_gdt:
- /* The first GDT entry can not be used. Keep it zero */
- .long 0x00000000, 0x00000000
-
- /* gdt selector 0x08, flat code segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */
-
- /* gdt selector 0x10, flat data segment */
- .word 0xffff, 0x0000
- .byte 0x00, 0x93, 0xcf, 0x00
-
- /* gdt selector 0x18, flat code segment (64-bit) */
- .word 0xffff, 0x0000
- .byte 0x00, 0x9b, 0xaf, 0x00
-smm_gdt_end:
-
-
-.section ".jumptable", "a", @progbits
-
-/* This is the SMM jump table. All cores use the same SMM handler
- * for simplicity. But SMM Entry needs to be different due to the
- * save state area. The jump table makes sure all CPUs jump into the
- * real handler on SMM entry.
- */
-
-/* This code currently supports up to 4 CPU cores. If more than 4 CPU
- * cores are to be used, the table below has to be updated, as well as
- * smm.ld.
- */
-
-/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
- * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
- * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
- */
-
-.code16
-jumptable:
- /* core 3 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 2 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 1 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
- /* core 0 */
- ljmp $0xa000, $SMM_HANDLER_OFFSET
-.align 1024, 0x00
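Note on the stack carving in smm_handler_start, per core: offset =
lapic_id * 0x400, stack bottom = 0xa8010 - offset, stack top = bottom +
(0x400 - 0x10). The same arithmetic in C (illustrative only):

	#define SMM_STACK_SIZE (0x400 - 0x10)

	static inline unsigned long smm_stack_top(unsigned int lapic_id)
	{
		unsigned long bottom = 0xa8010 - lapic_id * 0x400;
		/* core 0: 0xa8400, core 1: 0xa8000, core 2: 0xa7c00, ... */
		return bottom + SMM_STACK_SIZE;
	}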
diff --git a/src/cpu/x86/smm/smmrelocate.S b/src/cpu/x86/smm/smmrelocate.S
deleted file mode 100644
index b89cf3d0b8..0000000000
--- a/src/cpu/x86/smm/smmrelocate.S
+++ /dev/null
@@ -1,163 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-// FIXME: Is this piece of code southbridge specific, or
-// can it be cleaned up so this include is not required?
-// It's needed right now because we get our DEFAULT_PMBASE from
-// here.
-#if CONFIG(SOUTHBRIDGE_INTEL_I82801IX)
-#include <southbridge/intel/i82801ix/i82801ix.h>
-#else
-#error "Southbridge needs SMM handler support."
-#endif
-
-// ADDR32() macro
-#include <arch/registers.h>
-
-#if !CONFIG(SMM_ASEG)
-#error "Only use this file with ASEG."
-#endif /* CONFIG_SMM_ASEG */
-
-#define LAPIC_ID 0xfee00020
-
-.global smm_relocation_start
-.global smm_relocation_end
-
-/* Initially, SMM is some sort of real mode. */
-.code16
-
-/**
- * When starting up, x86 CPUs have their SMBASE set to 0x30000. However,
- * this is not a good place for the SMM handler to live, so it needs to
- * be relocated.
- * Traditionally, SMM handlers lived in the A segment (0xa0000).
- * With growing SMM handlers, more CPU cores, etc., CPU vendors started
- * allowing the handler to be relocated to the end of physical memory,
- * which they refer to as TSEG.
- * This trampoline code relocates SMBASE to base address - (lapicid * 0x400).
- *
- * Why 0x400? It is a safe value to cover the save state area per CPU. On
- * current AMD CPUs this area is _documented_ to be 0x200 bytes. On Intel
- * Core 2 CPUs the _documented_ part of the save state area is 48 bytes
- * bigger, effectively sizing our data structures at 0x300 bytes.
- *
- * Example (with SMM handler living at 0xa0000):
- *
- * LAPICID SMBASE SMM Entry SAVE STATE
- * 0 0xa0000 0xa8000 0xafd00
- * 1 0x9fc00 0xa7c00 0xaf900
- * 2 0x9f800 0xa7800 0xaf500
- * 3 0x9f400 0xa7400 0xaf100
- * 4 0x9f000 0xa7000 0xaed00
- * 5 0x9ec00 0xa6c00 0xae900
- * 6 0x9e800 0xa6800 0xae500
- * 7 0x9e400 0xa6400 0xae100
- * 8 0x9e000 0xa6000 0xadd00
- * 9 0x9dc00 0xa5c00 0xad900
- * 10 0x9d800 0xa5800 0xad500
- * 11 0x9d400 0xa5400 0xad100
- * 12 0x9d000 0xa5000 0xacd00
- * 13 0x9cc00 0xa4c00 0xac900
- * 14 0x9c800 0xa4800 0xac500
- * 15 0x9c400 0xa4400 0xac100
- * . . . .
- * . . . .
- * . . . .
- * 31 0x98400 0xa0400 0xa8100
- *
- * With 32 cores, the SMM handler would need to fit between
- * 0xa0000-0xa0400 and the stub plus stack would need to go
- * at 0xa8000-0xa8100 (example for core 0). That is not enough.
- *
- * This means we're basically limited to 16 CPU cores before
- * we need to move the SMM handler to TSEG.
- *
- * Note: Some versions of the Pentium M need their SMBASE aligned to 32k.
- * On those, the above only works for up to 2 cores. But for now we only
- * care for Core (2) Duo/Solo.
- *
- */
-
-smm_relocation_start:
- /* Check revision to see if AMD64 style SMM_BASE
- * Intel Core Solo/Duo: 0x30007
- * Intel Core2 Solo/Duo: 0x30100
- * Intel SandyBridge: 0x30101
- * AMD64: 0x3XX64
- * This check does not make much sense, unless someone ports
- * SMI handling to AMD64 CPUs.
- */
-
- mov $0x38000 + 0x7efc, %ebx
- ADDR32(mov) (%ebx), %al
- cmp $0x64, %al
- je 1f
-
- mov $0x38000 + 0x7ef8, %ebx
- jmp smm_relocate
-1:
- mov $0x38000 + 0x7f00, %ebx
-
-smm_relocate:
- /* Get this CPU's LAPIC ID */
- movl $LAPIC_ID, %esi
- ADDR32(movl) (%esi), %ecx
- shr $24, %ecx
-
- /* calculate offset by multiplying the
- * APIC ID by 1024 (0x400)
- */
- movl %ecx, %edx
- shl $10, %edx
-
- movl $0xa0000, %eax
- subl %edx, %eax /* subtract offset, see above */
-
- ADDR32(movl) %eax, (%ebx)
-
- /* The next section of code is potentially southbridge specific */
-
- /* Clear SMI status */
- movw $(DEFAULT_PMBASE + 0x34), %dx
- inw %dx, %ax
- outw %ax, %dx
-
- /* Clear PM1 status */
- movw $(DEFAULT_PMBASE + 0x00), %dx
- inw %dx, %ax
- outw %ax, %dx
-
- /* Set EOS bit so other SMIs can occur */
- movw $(DEFAULT_PMBASE + 0x30), %dx
- inl %dx, %eax
- orl $(1 << 1), %eax
- outl %eax, %dx
-
- /* End of southbridge specific section. */
-
-#if CONFIG(DEBUG_SMM_RELOCATION)
- /* print [SMM-x] so we can determine if CPUx went to SMM */
- movw $CONFIG_TTYS0_BASE, %dx
- mov $'[', %al
- outb %al, %dx
- mov $'S', %al
- outb %al, %dx
- mov $'M', %al
- outb %al, %dx
- outb %al, %dx
- movb $'-', %al
- outb %al, %dx
- /* calculate ascii of CPU number. More than 9 cores? -> FIXME */
- movb %cl, %al
- addb $'0', %al
- outb %al, %dx
- mov $']', %al
- outb %al, %dx
- mov $'\r', %al
- outb %al, %dx
- mov $'\n', %al
- outb %al, %dx
-#endif
-
- /* That's it. return */
- rsm
-smm_relocation_end:
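Note: to make the table above concrete, the relocation writes

	new SMBASE = 0xa0000 - lapic_id * 0x400

into the save state (offset 0x7ef8 for the Intel revisions checked above,
0x7f00 for AMD64-style maps), and the derived columns follow from
SMM entry = SMBASE + 0x8000 and save state = SMBASE + 0xfd00. A sketch:

	#include <stdint.h>

	static inline uint32_t new_smbase(uint32_t lapic_id)
	{
		return 0xa0000 - lapic_id * 0x400;
	}
	/* e.g. lapic_id 3: SMBASE 0x9f400, SMM entry 0xa7400,
	 * save state 0xaf100 -- matching the table above. */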